File: rapidsai_public_repos/cudf/cpp/include/cudf/detail/gather.cuh
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/detail/copy.hpp>
#include <cudf/detail/indexalator.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/utilities/assert.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/detail/valid_if.cuh>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <cudf/lists/detail/gather.cuh>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/strings/detail/gather.cuh>
#include <cudf/structs/structs_column_view.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <algorithm>
#include <thrust/functional.h>
#include <thrust/gather.h>
#include <thrust/host_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/logical.h>
namespace cudf {
namespace detail {
/**
* @brief Function object to check if an index is within the bounds [begin, end).
*/
template <typename map_type>
struct bounds_checker {
size_type begin;
size_type end;
__device__ bounds_checker(size_type begin_, size_type end_) : begin{begin_}, end{end_} {}
__device__ bool operator()(map_type const index) { return ((index >= begin) && (index < end)); }
};
/**
* @brief The operation to perform when a gather map index is out of bounds
*/
enum class gather_bitmask_op {
DONT_CHECK, ///< Don't check for out of bounds indices
PASSTHROUGH, ///< Preserve mask at rows with out of bounds indices
NULLIFY, ///< Nullify rows with out of bounds indices
};
template <gather_bitmask_op Op, typename MapIterator>
struct gather_bitmask_functor {
table_device_view input;
bitmask_type** masks;
MapIterator gather_map;
__device__ bool operator()(size_type mask_idx, size_type bit_idx)
{
auto row_idx = gather_map[bit_idx];
auto col = input.column(mask_idx);
if (Op != gather_bitmask_op::DONT_CHECK) {
bool out_of_range = is_signed_iterator<MapIterator>() ? (row_idx < 0 || row_idx >= col.size())
: row_idx >= col.size();
if (out_of_range) {
if (Op == gather_bitmask_op::PASSTHROUGH) {
return bit_is_set(masks[mask_idx], bit_idx);
} else if (Op == gather_bitmask_op::NULLIFY) {
return false;
}
}
}
return col.is_valid(row_idx);
}
};
/**
* @brief Function for calling gather using iterators.
*
* Used by column_gatherer_impl definitions below.
*
* @tparam InputIterator Type for gather source data
* @tparam OutputIterator Type for gather results
* @tparam MapIterator Iterator type for the gather map
*
* @param source_itr Source data up to `source_size`
* @param source_size Maximum index value for source data
* @param target_itr Output iterator for gather result
* @param gather_map_begin Start of the gather map
* @param gather_map_end End of the gather map
* @param nullify_out_of_bounds True if map values are checked against `source_size`
* @param stream CUDA stream used for kernel launches.
*/
template <typename InputItr, typename OutputItr, typename MapIterator>
void gather_helper(InputItr source_itr,
size_type source_size,
OutputItr target_itr,
MapIterator gather_map_begin,
MapIterator gather_map_end,
bool nullify_out_of_bounds,
rmm::cuda_stream_view stream)
{
using map_type = typename std::iterator_traits<MapIterator>::value_type;
if (nullify_out_of_bounds) {
thrust::gather_if(rmm::exec_policy_nosync(stream),
gather_map_begin,
gather_map_end,
gather_map_begin,
source_itr,
target_itr,
bounds_checker<map_type>{0, source_size});
} else {
thrust::gather(
rmm::exec_policy_nosync(stream), gather_map_begin, gather_map_end, source_itr, target_itr);
}
}
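// Example: a minimal usage sketch of gather_helper, assuming `d_src` is an
// rmm::device_uvector<int32_t> of source values, `d_map` is an
// rmm::device_uvector<cudf::size_type> of gather indices, and `stream` is an
// rmm::cuda_stream_view supplied by the caller:
//
//   rmm::device_uvector<int32_t> d_out(d_map.size(), stream);
//   cudf::detail::gather_helper(d_src.begin(),
//                               static_cast<cudf::size_type>(d_src.size()),
//                               d_out.begin(),
//                               d_map.begin(),
//                               d_map.end(),
//                               false,  // no bounds checking of map values
//                               stream);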
// Error case when no other overload or specialization is available
template <typename Element, typename Enable = void>
struct column_gatherer_impl {
template <typename... Args>
std::unique_ptr<column> operator()(Args&&...)
{
CUDF_FAIL("Unsupported type in gather.");
}
};
/**
* @brief Function object for gathering a type-erased
* column. To be used with the cudf::type_dispatcher.
*/
struct column_gatherer {
/**
* @brief Type-dispatched function to gather from one column to another based
* on a `gather_map`.
*
* @tparam Element Dispatched type for the column being gathered
* @tparam MapIterator Iterator type for the gather map
* @param source_column View into the column to gather from
* @param gather_map_begin Beginning of iterator range of integral values representing the gather
* map
* @param gather_map_end End of iterator range of integral values representing the gather map
* @param nullify_out_of_bounds Nullify values in `gather_map` that are out of bounds
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory
*/
template <typename Element, typename MapIterator>
std::unique_ptr<column> operator()(column_view const& source_column,
MapIterator gather_map_begin,
MapIterator gather_map_end,
bool nullify_out_of_bounds,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
column_gatherer_impl<Element> gatherer{};
return gatherer(
source_column, gather_map_begin, gather_map_end, nullify_out_of_bounds, stream, mr);
}
};
/**
* @brief Function object for gathering a type-erased column.
*
* To be used with column_gatherer to provide specialization to handle
* fixed-width, string and other types.
*
* @tparam Element Dispatched type for the column being gathered
* @tparam MapIterator Iterator type for the gather map
*/
template <typename Element>
struct column_gatherer_impl<Element, std::enable_if_t<is_rep_layout_compatible<Element>()>> {
/**
* @brief Type-dispatched function to gather from one column to another based
* on a `gather_map`.
*
* This handles fixed width type column_views only.
*
* @param source_column View into the column to gather from
* @param gather_map_begin Beginning of iterator range of integral values representing the gather
* map
* @param gather_map_end End of iterator range of integral values representing the gather map
* @param nullify_out_of_bounds Nullify values in `gather_map` that are out of bounds
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory
*/
template <typename MapIterator>
std::unique_ptr<column> operator()(column_view const& source_column,
MapIterator gather_map_begin,
MapIterator gather_map_end,
bool nullify_out_of_bounds,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const num_rows = cudf::distance(gather_map_begin, gather_map_end);
auto const policy = cudf::mask_allocation_policy::NEVER;
auto destination_column =
cudf::detail::allocate_like(source_column, num_rows, policy, stream, mr);
gather_helper(source_column.data<Element>(),
source_column.size(),
destination_column->mutable_view().template begin<Element>(),
gather_map_begin,
gather_map_end,
nullify_out_of_bounds,
stream);
return destination_column;
}
};
/**
* @brief Function object for gathering a type-erased
* column. To be used with column_gatherer to provide specialization for
* string_view.
*
* @tparam MapIterator Iterator type for the gather map
*/
template <>
struct column_gatherer_impl<string_view> {
/**
* @brief Type-dispatched function to gather from one column to another based
* on a `gather_map`. This handles string_view type column_views only.
*
* @param source_column View into the column to gather from
* @param gather_map_begin Beginning of iterator range of integral values representing the gather
* map
* @param gather_map_end End of iterator range of integral values representing the gather map
* @param nullify_out_of_bounds Nullify values in `gather_map` that are out of bounds
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory
*/
template <typename MapItType>
std::unique_ptr<column> operator()(column_view const& source_column,
MapItType gather_map_begin,
MapItType gather_map_end,
bool nullify_out_of_bounds,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (nullify_out_of_bounds) {
return cudf::strings::detail::gather<true>(
strings_column_view(source_column), gather_map_begin, gather_map_end, stream, mr);
} else {
return cudf::strings::detail::gather<false>(
strings_column_view(source_column), gather_map_begin, gather_map_end, stream, mr);
}
}
};
/**
* @brief Column gather specialization for list_view column type.
*
* @tparam MapItRoot Iterator type to access the incoming root column.
*
* This functor is invoked only on the root column of a hierarchy of list
* columns. Recursion is handled internally.
*/
template <>
struct column_gatherer_impl<list_view> {
/**
* @brief Gather a list column from a hierarchy of list columns.
*
* This function is similar to gather_list_nested() but the difference is
* significant. This particular level takes a templated gather map iterator of
* any type. As we start recursing, we need to be able to generate new gather
* maps for each level. To do this requires manifesting a buffer of intermediate
* data. If we were to do that at level N and then wrap it in an anonymous iterator
* to be passed to level N+1, these buffers of data would remain resident for the
* entirety of the recursion. But if level N+1 could create its own iterator
* internally from a buffer passed to it by level N, it could then -delete- that
* buffer of data after using it, keeping the amount of extra memory needed
* to a minimum. See the comment on "memory optimization" inside lists::detail::gather_list_nested.
*
* The tree of calls can be visualized like this:
*
* @code{.pseudo}
* R : this operator
* N : lists::detail::gather_list_nested
* L : lists::detail::gather_list_leaf
*
* R
* / \
* L N
* \
* N
* \
* ...
* \
* L
* @endcode
*
* This is the start of the recursion - we will only ever get in here once.
* We will only ever travel down the left branch or the right branch, and we
* will always end up in a final call to gather_list_leaf.
*
* @param column View into the column to gather from
* @param gather_map_begin iterator representing the start of the range to gather from
* @param gather_map_end iterator representing the end of the range to gather from
* @param nullify_out_of_bounds Nullify values in the gather map that are out of bounds
* @param stream CUDA stream on which to execute kernels
* @param mr Memory resource to use for all allocations
*
* @returns column with elements gathered based on the gather map
*
*/
template <typename MapItRoot>
std::unique_ptr<column> operator()(column_view const& column,
MapItRoot gather_map_begin,
MapItRoot gather_map_end,
bool nullify_out_of_bounds,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
lists_column_view list(column);
auto gather_map_size = std::distance(gather_map_begin, gather_map_end);
// if the gather map is empty, return an empty column
if (gather_map_size == 0) { return empty_like(column); }
// generate gather_data for the next level (N+1)
lists::detail::gather_data gd = nullify_out_of_bounds
? lists::detail::make_gather_data<true>(
column, gather_map_begin, gather_map_size, stream, mr)
: lists::detail::make_gather_data<false>(
column, gather_map_begin, gather_map_size, stream, mr);
// the nesting case.
if (list.child().type() == cudf::data_type{type_id::LIST}) {
// gather children
auto child = lists::detail::gather_list_nested(list.get_sliced_child(stream), gd, stream, mr);
// return the final column
return make_lists_column(gather_map_size,
std::move(gd.offsets),
std::move(child),
0,
rmm::device_buffer{0, stream, mr});
}
// it's a leaf. do a regular gather
auto child = lists::detail::gather_list_leaf(list.get_sliced_child(stream), gd, stream, mr);
// assemble final column
return make_lists_column(gather_map_size,
std::move(gd.offsets),
std::move(child),
0,
rmm::device_buffer{0, stream, mr});
}
};
/**
* @brief Column gather specialization for dictionary column type.
*/
template <>
struct column_gatherer_impl<dictionary32> {
/**
* @brief Type-dispatched function to gather from one column to another based
* on a `gather_map`.
*
* @param source_column View into the column to gather from
* @param gather_map_begin Beginning of iterator range of integral values representing the gather
* map
* @param gather_map_end End of iterator range of integral values representing the gather map
* @param nullify_out_of_bounds Nullify values in `gather_map` that are out of bounds
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New dictionary column with gathered rows.
*/
template <typename MapItType>
std::unique_ptr<column> operator()(column_view const& source_column,
MapItType gather_map_begin,
MapItType gather_map_end,
bool nullify_out_of_bounds,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
dictionary_column_view dictionary(source_column);
auto output_count = std::distance(gather_map_begin, gather_map_end);
if (output_count == 0) return make_empty_column(type_id::DICTIONARY32);
// The gather could cause some keys to be abandoned -- no indices point to them.
// In this case, we could do further work to remove the abandoned keys and
// reshuffle the indices values.
// We decided we will copy the keys for gather since the keys column should
// be relatively smallish.
// Also, there are scenarios where the keys are common with other dictionaries
// and the original intention was to share the keys here.
auto keys_copy = std::make_unique<column>(dictionary.keys(), stream, mr);
// Perform gather on just the indices
column_view indices = dictionary.get_indices_annotated();
auto new_indices = cudf::detail::allocate_like(
indices, output_count, cudf::mask_allocation_policy::NEVER, stream, mr);
gather_helper(
cudf::detail::indexalator_factory::make_input_iterator(indices),
indices.size(),
cudf::detail::indexalator_factory::make_output_iterator(new_indices->mutable_view()),
gather_map_begin,
gather_map_end,
nullify_out_of_bounds,
stream);
// dissect the column's contents
auto indices_type = new_indices->type();
auto null_count = new_indices->null_count(); // get this before calling release()
auto contents = new_indices->release(); // new_indices will now be empty
// build the output indices column from the contents' data component
auto indices_column = std::make_unique<column>(indices_type,
static_cast<size_type>(output_count),
std::move(*(contents.data.release())),
rmm::device_buffer{0, stream, mr},
0); // set null count to 0
// finally, build the dictionary with the null_mask component and the keys and indices
return make_dictionary_column(std::move(keys_copy),
std::move(indices_column),
std::move(*(contents.null_mask.release())),
null_count);
}
};
template <>
struct column_gatherer_impl<struct_view> {
template <typename MapItRoot>
std::unique_ptr<column> operator()(column_view const& column,
MapItRoot gather_map_begin,
MapItRoot gather_map_end,
bool nullify_out_of_bounds,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const gather_map_size = std::distance(gather_map_begin, gather_map_end);
if (gather_map_size == 0) { return empty_like(column); }
// Gathering needs to operate on the sliced children since they need to take into account the
// offset of the parent structs column.
std::vector<cudf::column_view> sliced_children;
std::transform(thrust::make_counting_iterator(0),
thrust::make_counting_iterator(column.num_children()),
std::back_inserter(sliced_children),
[&stream, structs_view = structs_column_view{column}](auto const idx) {
return structs_view.get_sliced_child(idx, stream);
});
std::vector<std::unique_ptr<cudf::column>> output_struct_members;
std::transform(sliced_children.begin(),
sliced_children.end(),
std::back_inserter(output_struct_members),
[&](auto const& col) {
return cudf::type_dispatcher<dispatch_storage_type>(col.type(),
column_gatherer{},
col,
gather_map_begin,
gather_map_end,
nullify_out_of_bounds,
stream,
mr);
});
auto const nullable =
nullify_out_of_bounds || std::any_of(sliced_children.begin(),
sliced_children.end(),
[](auto const& col) { return col.nullable(); });
if (nullable) {
gather_bitmask(
// Table view of struct column.
cudf::table_view{
std::vector<cudf::column_view>{sliced_children.begin(), sliced_children.end()}},
gather_map_begin,
output_struct_members,
nullify_out_of_bounds ? gather_bitmask_op::NULLIFY : gather_bitmask_op::DONT_CHECK,
stream,
mr);
}
return cudf::make_structs_column(
gather_map_size,
std::move(output_struct_members),
0,
rmm::device_buffer{0, stream, mr}, // Null mask will be fixed up in cudf::gather().
stream,
mr);
}
};
/**
* @brief Function object for applying a transformation on the gathermap
* that converts negative indices to positive indices
*
* A negative index `i` is transformed to `i + size`, where `size` is
* the number of elements in the column being gathered from.
* Allowable values for the index `i` are in the range `[-size, size)`.
* Thus, when gathering from a column of size `10`, the index `-1`
* is transformed to `9` (i.e., the last element), `-2` is transformed
* to `8` (the second-to-last element) and so on.
* Positive indices are unchanged by this transformation.
*/
template <typename map_type>
struct index_converter : public thrust::unary_function<map_type, map_type> {
index_converter(size_type n_rows) : n_rows(n_rows) {}
__device__ map_type operator()(map_type in) const { return ((in % n_rows) + n_rows) % n_rows; }
size_type n_rows;
};
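// Example: a sketch of using index_converter to normalize negative gather-map values
// before gathering, assuming `gather_map` is a column_view of size_type indices in
// [-n_rows, n_rows) and `source_table`, `stream`, and `mr` are provided by the caller:
//
//   auto const n_rows = source_table.num_rows();
//   auto map_begin    = thrust::make_transform_iterator(
//     gather_map.begin<cudf::size_type>(), index_converter<cudf::size_type>{n_rows});
//   auto map_end      = map_begin + gather_map.size();
//   auto result       = cudf::detail::gather(
//     source_table, map_begin, map_end, cudf::out_of_bounds_policy::DONT_CHECK, stream, mr);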
template <gather_bitmask_op Op, typename GatherMap>
void gather_bitmask(table_device_view input,
GatherMap gather_map_begin,
bitmask_type** masks,
size_type mask_count,
size_type mask_size,
size_type* valid_counts,
rmm::cuda_stream_view stream)
{
if (mask_size == 0) { return; }
constexpr size_type block_size = 256;
using Selector = gather_bitmask_functor<Op, decltype(gather_map_begin)>;
auto selector = Selector{input, masks, gather_map_begin};
auto counting_it = thrust::make_counting_iterator(0);
auto kernel =
valid_if_n_kernel<decltype(counting_it), decltype(counting_it), Selector, block_size>;
cudf::detail::grid_1d grid{mask_size, block_size, 1};
kernel<<<grid.num_blocks, block_size, 0, stream.value()>>>(
counting_it, counting_it, selector, masks, mask_count, mask_size, valid_counts);
}
template <typename MapIterator>
void gather_bitmask(table_view const& source,
MapIterator gather_map,
std::vector<std::unique_ptr<column>>& target,
gather_bitmask_op op,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (target.empty()) { return; }
// Validate that all target columns have the same size
auto const target_rows = target.front()->size();
CUDF_EXPECTS(std::all_of(target.begin(),
target.end(),
[target_rows](auto const& col) { return target_rows == col->size(); }),
"Column size mismatch");
// Create null mask if source is nullable but target is not
for (size_t i = 0; i < target.size(); ++i) {
if ((source.column(i).nullable() or op == gather_bitmask_op::NULLIFY) and
not target[i]->nullable()) {
auto const state =
op == gather_bitmask_op::PASSTHROUGH ? mask_state::ALL_VALID : mask_state::UNINITIALIZED;
auto mask = detail::create_null_mask(target[i]->size(), state, stream, mr);
target[i]->set_null_mask(std::move(mask), 0);
}
}
// Make device array of target bitmask pointers
std::vector<bitmask_type*> target_masks(target.size());
std::transform(target.begin(), target.end(), target_masks.begin(), [](auto const& col) {
return col->mutable_view().null_mask();
});
auto d_target_masks =
make_device_uvector_async(target_masks, stream, rmm::mr::get_current_device_resource());
auto const device_source = table_device_view::create(source, stream);
auto d_valid_counts = make_zeroed_device_uvector_async<size_type>(
target.size(), stream, rmm::mr::get_current_device_resource());
// Dispatch operation enum to get implementation
auto const impl = [op]() {
switch (op) {
case gather_bitmask_op::DONT_CHECK:
return gather_bitmask<gather_bitmask_op::DONT_CHECK, MapIterator>;
case gather_bitmask_op::PASSTHROUGH:
return gather_bitmask<gather_bitmask_op::PASSTHROUGH, MapIterator>;
case gather_bitmask_op::NULLIFY:
return gather_bitmask<gather_bitmask_op::NULLIFY, MapIterator>;
default: CUDF_FAIL("Invalid gather_bitmask_op");
}
}();
impl(*device_source,
gather_map,
d_target_masks.data(),
target.size(),
target_rows,
d_valid_counts.data(),
stream);
// Copy the valid counts into each column
auto const valid_counts = make_std_vector_sync(d_valid_counts, stream);
for (size_t i = 0; i < target.size(); ++i) {
if (target[i]->nullable()) {
auto const null_count = target_rows - valid_counts[i];
target[i]->set_null_count(null_count);
}
}
}
/**
* @brief Gathers the specified rows of a set of columns according to a gather map.
*
* Gathers the rows of the source columns according to `gather_map` such that row "i"
* in the resulting table's columns will contain row "gather_map[i]" from the source columns.
* The number of rows in the result table will be equal to the number of elements in
* `gather_map`.
*
* A negative value `i` in the `gather_map` is interpreted as `i+n`, where
* `n` is the number of rows in the `source_table`.
*
* @tparam MapIterator Iterator type for the gather map
* @param[in] source_table View into the table containing the input columns whose rows will be
* gathered
* @param[in] gather_map_begin Beginning of iterator range of integer indices that map the rows in
* the source columns to rows in the destination columns
* @param[in] gather_map_end End of iterator range of integer indices that map the rows in the
* source columns to rows in the destination columns
* @param[in] bounds_policy Policy to apply to account for possible out-of-bound indices.
* `DONT_CHECK` skips all bounds checking for gather map values. `NULLIFY` coerces rows that
* correspond to out-of-bound indices in the gather map to be null elements. Callers should
* use `DONT_CHECK` when they are certain that the gather_map contains only valid indices for
* better performance. With `DONT_CHECK`, out-of-bound indices in the gather map result in
* undefined behavior. Defaults to `DONT_CHECK`.
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @param[in] mr Device memory resource used to allocate the returned table's device memory
* @return cudf::table Result of the gather
*/
template <typename MapIterator>
std::unique_ptr<table> gather(table_view const& source_table,
MapIterator gather_map_begin,
MapIterator gather_map_end,
out_of_bounds_policy bounds_policy,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
std::vector<std::unique_ptr<column>> destination_columns;
// TODO: Could be beneficial to use streams internally here
for (auto const& source_column : source_table) {
// The data gather for n columns will be put on the first n streams
destination_columns.push_back(
cudf::type_dispatcher<dispatch_storage_type>(source_column.type(),
column_gatherer{},
source_column,
gather_map_begin,
gather_map_end,
bounds_policy == out_of_bounds_policy::NULLIFY,
stream,
mr));
}
auto needs_new_bitmask = bounds_policy == out_of_bounds_policy::NULLIFY ||
cudf::has_nested_nullable_columns(source_table);
if (needs_new_bitmask) {
needs_new_bitmask = needs_new_bitmask || cudf::has_nested_nulls(source_table);
if (needs_new_bitmask) {
auto const op = bounds_policy == out_of_bounds_policy::NULLIFY
? gather_bitmask_op::NULLIFY
: gather_bitmask_op::DONT_CHECK;
gather_bitmask(source_table, gather_map_begin, destination_columns, op, stream, mr);
} else {
for (size_type i = 0; i < source_table.num_columns(); ++i) {
set_all_valid_null_masks(source_table.column(i), *destination_columns[i], stream, mr);
}
}
}
return std::make_unique<table>(std::move(destination_columns));
}
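// Example: a minimal sketch of calling this gather with a type-erased gather-map column,
// assuming `source_table` is a table_view, `gather_map` is a column_view of any index type
// with no nulls, and `stream`/`mr` are provided by the caller:
//
//   auto map_begin = cudf::detail::indexalator_factory::make_input_iterator(gather_map);
//   auto map_end   = map_begin + gather_map.size();
//   auto result    = cudf::detail::gather(
//     source_table, map_begin, map_end, cudf::out_of_bounds_policy::NULLIFY, stream, mr);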
} // namespace detail
} // namespace cudf
File: rapidsai_public_repos/cudf/cpp/include/cudf/detail/offsets_iterator_factory.cuh
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_view.hpp>
#include <cudf/detail/offsets_iterator.cuh>
namespace cudf {
namespace detail {
/**
* @brief Use this class to create an offsetalator instance.
*/
struct offsetalator_factory {
/**
* @brief Create an input offsetalator instance from an offsets column
*/
static input_offsetalator make_input_iterator(column_view const& offsets)
{
return input_offsetalator(offsets.head(), offsets.type());
}
/**
* @brief Create an output offsetalator instance from an offsets column
*/
static output_offsetalator make_output_iterator(mutable_column_view const& offsets)
{
return output_offsetalator(offsets.head(), offsets.type());
}
};
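// Example: a sketch of computing per-row sizes from an offsets column regardless of
// whether it stores INT32 or INT64 offsets, assuming `offsets` is the offsets column_view,
// `d_sizes` is an rmm::device_uvector<int64_t> of length `num_rows`, and `stream` is
// provided by the caller (device lambda requires --extended-lambda):
//
//   auto const itr = cudf::detail::offsetalator_factory::make_input_iterator(offsets);
//   thrust::transform(rmm::exec_policy(stream),
//                     thrust::counting_iterator<cudf::size_type>(0),
//                     thrust::counting_iterator<cudf::size_type>(num_rows),
//                     d_sizes.begin(),
//                     [itr] __device__(cudf::size_type i) { return itr[i + 1] - itr[i]; });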
} // namespace detail
} // namespace cudf
File: rapidsai_public_repos/cudf/cpp/include/cudf/detail/label_bins.hpp
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/labeling/label_bins.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
namespace cudf {
namespace detail {
/**
* @addtogroup label_bins
* @{
* @file
* @brief Internal APIs for labeling values by bin.
*/
/**
* @copydoc cudf::label_bins(column_view const& input, column_view const& left_edges, inclusive
* left_inclusive, column_view const& right_edges, inclusive right_inclusive, rmm::cuda_stream_view,
* rmm::mr::device_memory_resource* mr)
*
* @param stream Stream view on which to allocate resources and queue execution.
*/
std::unique_ptr<column> label_bins(column_view const& input,
column_view const& left_edges,
inclusive left_inclusive,
column_view const& right_edges,
inclusive right_inclusive,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
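// Example: a minimal sketch of calling detail::label_bins, assuming `input`, `left_edges`,
// and `right_edges` are column_views and `stream`/`mr` are provided by the caller; bins are
// taken as closed on the left and open on the right:
//
//   auto labels = cudf::detail::label_bins(input,
//                                          left_edges,
//                                          cudf::inclusive::YES,
//                                          right_edges,
//                                          cudf::inclusive::NO,
//                                          stream,
//                                          mr);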
/** @} */ // end of group
} // namespace detail
} // namespace cudf
File: rapidsai_public_repos/cudf/cpp/include/cudf/detail/quantiles.hpp
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/quantiles.hpp>
#include <cudf/tdigest/tdigest_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace detail {
/**
* @copydoc cudf::quantile()
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> quantile(column_view const& input,
std::vector<double> const& q,
interpolation interp,
column_view const& ordered_indices,
bool exact,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::quantiles()
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<table> quantiles(table_view const& input,
std::vector<double> const& q,
interpolation interp,
cudf::sorted is_input_sorted,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::percentile_approx(tdigest_column_view const&, column_view const&,
* rmm::mr::device_memory_resource*)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> percentile_approx(tdigest::tdigest_column_view const& input,
column_view const& percentiles,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
File: rapidsai_public_repos/cudf/cpp/include/cudf/detail/sequence.hpp
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/detail/sequence.hpp>
#include <cudf/filling.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace detail {
/**
* @copydoc cudf::sequence(size_type size, scalar const& init, scalar const& step,
* rmm::mr::device_memory_resource* mr =
*rmm::mr::get_current_device_resource())
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> sequence(size_type size,
scalar const& init,
scalar const& step,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
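// Example: a minimal sketch of calling the init/step overload declared above, assuming
// `stream` and `mr` are provided by the caller; produces the column [0, 2, 4, ..., 18]:
//
//   cudf::numeric_scalar<int32_t> init(0, true, stream);
//   cudf::numeric_scalar<int32_t> step(2, true, stream);
//   auto col = cudf::detail::sequence(10, init, step, stream, mr);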
/**
* @copydoc cudf::sequence(size_type size, scalar const& init,
rmm::mr::device_memory_resource* mr =
rmm::mr::get_current_device_resource())
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> sequence(size_type size,
scalar const& init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::calendrical_month_sequence(size_type size,
* scalar const& init,
* size_type months,
* rmm::mr::device_memory_resource* mr)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<cudf::column> calendrical_month_sequence(size_type size,
scalar const& init,
size_type months,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
File: rapidsai_public_repos/cudf/cpp/include/cudf/detail/indexalator.cuh
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/detail/normalizing_iterator.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/scalar/scalar.hpp>
#include <cudf/utilities/traits.hpp>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/optional.h>
#include <thrust/pair.h>
namespace cudf {
namespace detail {
/**
* @brief The index normalizing input iterator.
*
* This is an iterator that can be used for index types (integers) without
* requiring a type-specific instance. It can be used for any iterator
* interface for reading an array of integer values of type
* int8, int16, int32, int64, uint8, uint16, uint32, or uint64.
* Reading specific elements always returns a `size_type` integer.
*
* Use the indexalator_factory to create an appropriate input iterator
* from a column_view.
*
* Example input iterator usage.
* @code
* auto begin = indexalator_factory::create_input_iterator(gather_map);
* auto end = begin + gather_map.size();
* auto result = detail::gather( source, begin, end, IGNORE, stream, mr );
* @endcode
*
* @code
* auto begin = indexalator_factory::create_input_iterator(indices);
* auto end = begin + indices.size();
* auto result = thrust::find(thrust::device, begin, end, size_type{12} );
* @endcode
*/
struct input_indexalator : base_normalator<input_indexalator, cudf::size_type> {
friend struct base_normalator<input_indexalator, cudf::size_type>; // for CRTP
using reference = cudf::size_type const; // this keeps STL and thrust happy
input_indexalator() = default;
input_indexalator(input_indexalator const&) = default;
input_indexalator(input_indexalator&&) = default;
input_indexalator& operator=(input_indexalator const&) = default;
input_indexalator& operator=(input_indexalator&&) = default;
/**
* @brief Indirection operator returns the value at the current iterator position
*/
__device__ inline cudf::size_type operator*() const { return operator[](0); }
/**
* @brief Dispatch functor for resolving an Integer value from any integer type
*/
struct normalize_type {
template <typename T, CUDF_ENABLE_IF(cudf::is_index_type<T>())>
__device__ cudf::size_type operator()(void const* tp)
{
return static_cast<cudf::size_type>(*static_cast<T const*>(tp));
}
template <typename T, CUDF_ENABLE_IF(not cudf::is_index_type<T>())>
__device__ cudf::size_type operator()(void const*)
{
CUDF_UNREACHABLE("only integral types are supported");
}
};
/**
* @brief Array subscript operator returns a value at the input
* `idx` position as an `Integer` value.
*/
__device__ inline cudf::size_type operator[](size_type idx) const
{
void const* tp = p_ + (idx * this->width_);
return type_dispatcher(this->dtype_, normalize_type{}, tp);
}
/**
* @brief Create an input index normalizing iterator
*
* Use the indexalator_factory to create an iterator instance.
*
* @param data Pointer to an integer array in device memory.
* @param dtype Type of data in data
* @param offset Applied to the data pointer per size of the type
*/
CUDF_HOST_DEVICE input_indexalator(void const* data, data_type dtype, cudf::size_type offset = 0)
: base_normalator<input_indexalator, cudf::size_type>(dtype), p_{static_cast<char const*>(data)}
{
p_ += offset * this->width_;
}
protected:
char const* p_; /// pointer to the integer data in device memory
};
/**
* @brief The index normalizing output iterator
*
* This is an iterator that can be used for index types (integers) without
* requiring a type-specific instance. It can be used for any iterator
* interface for writing an array of integer values of type
* int8, int16, int32, int64, uint8, uint16, uint32, or uint64.
* Setting specific elements always accepts `size_type` integer values.
*
* Use the indexalator_factory to create an appropriate output iterator
* from a mutable_column_view.
*
* Example output iterator usage.
* @code
* auto result_itr = indexalator_factory::create_output_iterator(indices->mutable_view());
* thrust::lower_bound(rmm::exec_policy(stream),
* input->begin<Element>(),
* input->end<Element>(),
* values->begin<Element>(),
* values->end<Element>(),
* result_itr,
* thrust::less<Element>());
* @endcode
*/
struct output_indexalator : base_normalator<output_indexalator, cudf::size_type> {
friend struct base_normalator<output_indexalator, cudf::size_type>; // for CRTP
using reference = output_indexalator const&; // required for output iterators
output_indexalator() = default;
output_indexalator(output_indexalator const&) = default;
output_indexalator(output_indexalator&&) = default;
output_indexalator& operator=(output_indexalator const&) = default;
output_indexalator& operator=(output_indexalator&&) = default;
/**
* @brief Indirection operator returns this iterator instance in order
* to capture the `operator=(Integer)` calls.
*/
__device__ inline reference operator*() const { return *this; }
/**
* @brief Array subscript operator returns an iterator instance at the specified `idx` position.
*
* This allows capturing the subsequent `operator=(Integer)` call in this class.
*/
__device__ inline output_indexalator const operator[](size_type idx) const
{
output_indexalator tmp{*this};
tmp.p_ += (idx * this->width_);
return tmp;
}
/**
* @brief Dispatch functor for setting the index value from a size_type value.
*/
struct normalize_type {
template <typename T, CUDF_ENABLE_IF(cudf::is_index_type<T>())>
__device__ void operator()(void* tp, cudf::size_type const value)
{
(*static_cast<T*>(tp)) = static_cast<T>(value);
}
template <typename T, CUDF_ENABLE_IF(not cudf::is_index_type<T>())>
__device__ void operator()(void*, cudf::size_type const)
{
CUDF_UNREACHABLE("only index types are supported");
}
};
/**
* @brief Assign an Integer value to the current iterator position
*/
__device__ inline reference operator=(cudf::size_type const value) const
{
void* tp = p_;
type_dispatcher(this->dtype_, normalize_type{}, tp, value);
return *this;
}
/**
* @brief Create an output normalizing iterator
*
* @param data Pointer to an integer array in device memory.
* @param dtype Type of data in data
*/
CUDF_HOST_DEVICE output_indexalator(void* data, data_type dtype)
: base_normalator<output_indexalator, cudf::size_type>(dtype), p_{static_cast<char*>(data)}
{
}
protected:
char* p_; /// pointer to the integer data in device memory
};
/**
* @brief Use this class to create an indexalator instance.
*/
struct indexalator_factory {
/**
* @brief A type_dispatcher functor to create an input iterator from an indices column.
*/
struct input_indexalator_fn {
template <typename IndexType, CUDF_ENABLE_IF(is_index_type<IndexType>())>
input_indexalator operator()(column_view const& indices)
{
return input_indexalator(indices.data<IndexType>(), indices.type());
}
template <typename IndexType, typename... Args, CUDF_ENABLE_IF(not is_index_type<IndexType>())>
input_indexalator operator()(Args&&... args)
{
CUDF_FAIL("indices must be an index type");
}
};
/**
* @brief Use this class to create an indexalator to a scalar index.
*/
struct input_indexalator_scalar_fn {
template <typename IndexType, CUDF_ENABLE_IF(is_index_type<IndexType>())>
input_indexalator operator()(scalar const& index)
{
// note: using static_cast<scalar_type_t<IndexType> const&>(index) creates a copy
auto const scalar_impl = static_cast<scalar_type_t<IndexType> const*>(&index);
return input_indexalator(scalar_impl->data(), index.type());
}
template <typename IndexType, typename... Args, CUDF_ENABLE_IF(not is_index_type<IndexType>())>
input_indexalator operator()(Args&&... args)
{
CUDF_FAIL("scalar must be an index type");
}
};
/**
* @brief A type_dispatcher functor to create an output iterator from an indices column.
*/
struct output_indexalator_fn {
template <typename IndexType, CUDF_ENABLE_IF(is_index_type<IndexType>())>
output_indexalator operator()(mutable_column_view const& indices)
{
return output_indexalator(indices.data<IndexType>(), indices.type());
}
template <typename IndexType, typename... Args, CUDF_ENABLE_IF(not is_index_type<IndexType>())>
output_indexalator operator()(Args&&... args)
{
CUDF_FAIL("indices must be an index type");
}
};
/**
* @brief Create an input indexalator instance from an indices column.
*/
static input_indexalator make_input_iterator(column_view const& indices)
{
return type_dispatcher(indices.type(), input_indexalator_fn{}, indices);
}
/**
* @brief Create an input indexalator instance from an index scalar.
*/
static input_indexalator make_input_iterator(cudf::scalar const& index)
{
return type_dispatcher(index.type(), input_indexalator_scalar_fn{}, index);
}
/**
* @brief Create an output indexalator instance from an indices column.
*/
static output_indexalator make_output_iterator(mutable_column_view const& indices)
{
return type_dispatcher(indices.type(), output_indexalator_fn{}, indices);
}
/**
* @brief An index accessor that returns a validity flag along with the index value.
*
* This is suitable as a `pair_iterator` for calling functions like `copy_if_else`.
*/
struct nullable_index_accessor {
input_indexalator iter;
bitmask_type const* null_mask{};
size_type const offset{};
bool const has_nulls{};
/**
* @brief Create an accessor from a column_view.
*/
nullable_index_accessor(column_view const& col, bool has_nulls = false)
: null_mask{col.null_mask()}, offset{col.offset()}, has_nulls{has_nulls}
{
if (has_nulls) { CUDF_EXPECTS(col.nullable(), "Unexpected non-nullable column."); }
iter = make_input_iterator(col);
}
__device__ thrust::pair<size_type, bool> operator()(size_type i) const
{
return {iter[i], (has_nulls ? bit_is_set(null_mask, i + offset) : true)};
}
};
/**
* @brief An index accessor that returns a validity flag along with the index value.
*
* This is suitable as a `pair_iterator`.
*/
struct scalar_nullable_index_accessor {
input_indexalator iter;
bool const is_null;
/**
* @brief Create an accessor from a scalar.
*/
scalar_nullable_index_accessor(scalar const& input) : is_null{!input.is_valid()}
{
iter = indexalator_factory::make_input_iterator(input);
}
__device__ thrust::pair<size_type, bool> operator()(size_type) const
{
return {*iter, is_null};
}
};
/**
* @brief Create an index iterator with a nullable index accessor.
*/
static auto make_input_pair_iterator(column_view const& col)
{
return make_counting_transform_iterator(0, nullable_index_accessor{col, col.has_nulls()});
}
/**
* @brief Create an index iterator with a nullable index accessor for a scalar.
*/
static auto make_input_pair_iterator(scalar const& input)
{
return thrust::make_transform_iterator(thrust::make_constant_iterator<size_type>(0),
scalar_nullable_index_accessor{input});
}
/**
* @brief An index accessor that returns an index value if corresponding validity flag is true.
*
* This is suitable as an `optional_iterator`.
*/
struct optional_index_accessor {
input_indexalator iter;
bitmask_type const* null_mask{};
size_type const offset{};
bool const has_nulls{};
/**
* @brief Create an accessor from a column_view.
*/
optional_index_accessor(column_view const& col, bool has_nulls = false)
: null_mask{col.null_mask()}, offset{col.offset()}, has_nulls{has_nulls}
{
if (has_nulls) { CUDF_EXPECTS(col.nullable(), "Unexpected non-nullable column."); }
iter = make_input_iterator(col);
}
__device__ thrust::optional<size_type> operator()(size_type i) const
{
return has_nulls && !bit_is_set(null_mask, i + offset) ? thrust::nullopt
: thrust::make_optional(iter[i]);
}
};
/**
* @brief An index accessor that returns an index value if the scalar's validity flag is true.
*
* This is suitable as an `optional_iterator`.
*/
struct scalar_optional_index_accessor {
input_indexalator iter;
bool const is_null;
/**
* @brief Create an accessor from a scalar.
*/
scalar_optional_index_accessor(scalar const& input) : is_null{!input.is_valid()}
{
iter = indexalator_factory::make_input_iterator(input);
}
__device__ thrust::optional<size_type> operator()(size_type) const
{
return is_null ? thrust::nullopt : thrust::make_optional(*iter);
}
};
/**
* @brief Create an index iterator with an optional index accessor.
*/
static auto make_input_optional_iterator(column_view const& col)
{
return make_counting_transform_iterator(0, optional_index_accessor{col, col.has_nulls()});
}
/**
* @brief Create an index iterator with an optional index accessor for a scalar.
*/
static auto make_input_optional_iterator(scalar const& input)
{
return thrust::make_transform_iterator(thrust::make_constant_iterator<size_type>(0),
scalar_optional_index_accessor{input});
}
};
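// Example: a sketch of normalizing an indices column of any integer type into a
// size_type buffer, assuming `indices` is a column_view of an index type with no nulls
// and `stream` is provided by the caller:
//
//   auto in_itr = cudf::detail::indexalator_factory::make_input_iterator(indices);
//   rmm::device_uvector<cudf::size_type> d_indices(indices.size(), stream);
//   thrust::copy(rmm::exec_policy(stream), in_itr, in_itr + indices.size(), d_indices.begin());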
} // namespace detail
} // namespace cudf
File: rapidsai_public_repos/cudf/cpp/include/cudf/detail/offsets_iterator.cuh
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/detail/normalizing_iterator.cuh>
#include <cudf/types.hpp>
namespace cudf {
namespace detail {
/**
* @brief The offsets normalizing input iterator
*
* This is an iterator that can be used for offsets where the underlying
* type may be int32_t or int64_t.
*
* Use the offsetalator_factory to create an appropriate input iterator
* from an offsets column_view.
*/
struct input_offsetalator : base_normalator<input_offsetalator, int64_t> {
friend struct base_normalator<input_offsetalator, int64_t>; // for CRTP
using reference = int64_t const; // this keeps STL and thrust happy
input_offsetalator() = default;
input_offsetalator(input_offsetalator const&) = default;
input_offsetalator(input_offsetalator&&) = default;
input_offsetalator& operator=(input_offsetalator const&) = default;
input_offsetalator& operator=(input_offsetalator&&) = default;
/**
* @brief Indirection operator returns the value at the current iterator position
*/
__device__ inline int64_t operator*() const { return operator[](0); }
/**
* @brief Array subscript operator returns a value at the input
* `idx` position as an int64_t value.
*/
__device__ inline int64_t operator[](size_type idx) const
{
void const* tp = p_ + (idx * this->width_);
return this->width_ == sizeof(int32_t) ? static_cast<int64_t>(*static_cast<int32_t const*>(tp))
: *static_cast<int64_t const*>(tp);
}
/**
* @brief Create an input index normalizing iterator.
*
* Use the indexalator_factory to create an iterator instance.
*
* @param data Pointer to an integer array in device memory.
* @param dtype Type of data in data
*/
CUDF_HOST_DEVICE input_offsetalator(void const* data, data_type dtype)
: base_normalator<input_offsetalator, int64_t>(
dtype, dtype.id() == type_id::INT32 ? sizeof(int32_t) : sizeof(int64_t)),
p_{static_cast<char const*>(data)}
{
#ifndef __CUDA_ARCH__
CUDF_EXPECTS(dtype.id() == type_id::INT32 || dtype.id() == type_id::INT64,
"Unexpected offsets type");
#else
cudf_assert((dtype.id() == type_id::INT32 || dtype.id() == type_id::INT64) &&
"Unexpected offsets type");
#endif
}
protected:
char const* p_; /// pointer to the integer data in device memory
};
/**
* @brief The offsets normalizing output iterator
*
* This is an iterator that can be used for storing offsets values
* where the underlying type may be either int32_t or int64_t.
*
* Use the offsetalator_factory to create an appropriate output iterator
* from a mutable_column_view.
*
*/
struct output_offsetalator : base_normalator<output_offsetalator, int64_t> {
friend struct base_normalator<output_offsetalator, int64_t>; // for CRTP
using reference = output_offsetalator const&; // required for output iterators
output_offsetalator() = default;
output_offsetalator(output_offsetalator const&) = default;
output_offsetalator(output_offsetalator&&) = default;
output_offsetalator& operator=(output_offsetalator const&) = default;
output_offsetalator& operator=(output_offsetalator&&) = default;
/**
* @brief Indirection operator returns this iterator instance in order
* to capture the `operator=(int64)` calls.
*/
__device__ inline output_offsetalator const& operator*() const { return *this; }
/**
* @brief Array subscript operator returns an iterator instance at the specified `idx` position.
*
* This allows capturing the subsequent `operator=(int64)` call in this class.
*/
__device__ inline output_offsetalator const operator[](size_type idx) const
{
output_offsetalator tmp{*this};
tmp.p_ += (idx * this->width_);
return tmp;
}
/**
* @brief Assign an offset value to the current iterator position
*/
__device__ inline output_offsetalator const& operator=(int64_t const value) const
{
void* tp = p_;
if (this->width_ == sizeof(int32_t)) {
(*static_cast<int32_t*>(tp)) = static_cast<int32_t>(value);
} else {
(*static_cast<int64_t*>(tp)) = value;
}
return *this;
}
/**
* @brief Create an output offsets iterator
*
* @param data Pointer to an integer array in device memory.
* @param dtype Type of data in data
*/
CUDF_HOST_DEVICE output_offsetalator(void* data, data_type dtype)
: base_normalator<output_offsetalator, int64_t>(
dtype, dtype.id() == type_id::INT32 ? sizeof(int32_t) : sizeof(int64_t)),
p_{static_cast<char*>(data)}
{
#ifndef __CUDA_ARCH__
CUDF_EXPECTS(dtype.id() == type_id::INT32 || dtype.id() == type_id::INT64,
"Unexpected offsets type");
#else
cudf_assert((dtype.id() == type_id::INT32 || dtype.id() == type_id::INT64) &&
"Unexpected offsets type");
#endif
}
protected:
char* p_; /// pointer to the integer data in device memory
};
} // namespace detail
} // namespace cudf
File: rapidsai_public_repos/cudf/cpp/include/cudf/detail/iterator.cuh
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @brief provides column input iterator with nulls replaced with a specified value
* @file iterator.cuh
*
* The column input iterator is designed to be used as an input
* iterator for thrust and cub.
*
* Usage:
* auto iter = make_null_replacement_iterator(column, null_value);
*
* The column input iterator returns only a scalar value of data at [id] or
* the null_replacement value passed while creating the iterator.
* For non-null column, use
* auto iter = column.begin<Element>();
*
*/
#pragma once
#include <cudf/column/column_device_view.cuh>
#include <cudf/scalar/scalar.hpp>
#include <cudf/scalar/scalar_device_view.cuh>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/optional.h>
#include <thrust/pair.h>
#include <utility>
namespace cudf {
namespace detail {
/**
* @brief Convenience wrapper for creating a `thrust::transform_iterator` over a
* `thrust::counting_iterator`.
*
* Example:
* @code{.cpp}
* // Returns square of the value of the counting iterator
* auto iter = make_counting_transform_iterator(0, [](auto i){ return (i * i);});
* iter[0] == 0
* iter[1] == 1
* iter[2] == 4
* ...
* iter[n] == n * n
* @endcode
*
* @param start The starting value of the counting iterator
* @param f The unary function to apply to the counting iterator.
* @return A transform iterator that applies `f` to a counting iterator
*/
template <typename UnaryFunction>
CUDF_HOST_DEVICE inline auto make_counting_transform_iterator(cudf::size_type start,
UnaryFunction f)
{
return thrust::make_transform_iterator(thrust::make_counting_iterator(start), f);
}
/**
* @brief Value accessor of column that may have a null bitmask.
*
* This unary functor returns scalar value at `id`.
* The `operator()(cudf::size_type id)` computes the `element` and valid flag at `id`.
*
* The return value for element `i` will return `column[i]`
* if it is valid, or `null_replacement` if it is null.
*
* @tparam Element The type of elements in the column
*/
template <typename Element>
struct null_replaced_value_accessor {
column_device_view const col; ///< column view of column in device
Element const null_replacement{}; ///< value returned when element is null
bool const has_nulls; ///< true if col has null elements
/**
* @brief Creates an accessor for a null-replacement iterator.
*
* @throws cudf::logic_error if `col` type does not match Element type.
* @throws cudf::logic_error if `has_nulls` is true but `col` does not have a validity mask.
*
* @param[in] col column device view of cudf column
* @param[in] null_replacement The value to return for null elements
* @param[in] has_nulls Must be set to true if `col` has nulls.
*/
null_replaced_value_accessor(column_device_view const& col,
Element null_val,
bool has_nulls = true)
: col{col}, null_replacement{null_val}, has_nulls{has_nulls}
{
CUDF_EXPECTS(type_id_matches_device_storage_type<Element>(col.type().id()),
"the data type mismatch");
if (has_nulls) CUDF_EXPECTS(col.nullable(), "column with nulls must have a validity bitmask");
}
__device__ inline Element const operator()(cudf::size_type i) const
{
return has_nulls && col.is_null_nocheck(i) ? null_replacement : col.element<Element>(i);
}
};
/**
* @brief validity accessor of column with null bitmask
* A unary functor that returns validity at index `i`.
*
* @tparam safe If false, the accessor will throw a logic_error if the column is not nullable. If
* true, the accessor checks for nullability and if col is not nullable, returns true.
*/
template <bool safe = false>
struct validity_accessor {
column_device_view const col;
/**
* @brief constructor
*
* @throws cudf::logic_error if not safe and `col` does not have a validity bitmask
*
* @param[in] _col column device view of cudf column
*/
CUDF_HOST_DEVICE validity_accessor(column_device_view const& _col) : col{_col}
{
if constexpr (not safe) {
// verify col is nullable, otherwise, is_valid_nocheck() will crash
#if defined(__CUDA_ARCH__)
cudf_assert(_col.nullable() && "Unexpected non-nullable column.");
#else
CUDF_EXPECTS(_col.nullable(), "Unexpected non-nullable column.");
#endif
}
}
__device__ inline bool operator()(cudf::size_type i) const
{
if constexpr (safe) {
return col.is_valid(i);
} else {
return col.is_valid_nocheck(i);
}
}
};
/**
* @brief Constructs an iterator over a column's values that replaces null
* elements with a specified value.
*
* Dereferencing the returned iterator for element `i` will return `column[i]`
* if it is valid, or `null_replacement` if it is null.
* This iterator can be used with both nullable and non-nullable columns.
*
* @throws cudf::logic_error if `has_nulls` is true but the column is not nullable.
* @throws cudf::logic_error if column datatype and Element type mismatch.
*
* @tparam Element The type of elements in the column
* @param column The column to iterate
* @param null_replacement The value to return for null elements
* @param has_nulls Must be set to true if `column` has nulls.
* @return Iterator that returns valid column elements, or a null
* replacement value for null elements.
*/
template <typename Element>
auto make_null_replacement_iterator(column_device_view const& column,
Element const null_replacement = Element{0},
bool has_nulls = true)
{
return make_counting_transform_iterator(
0, null_replaced_value_accessor<Element>{column, null_replacement, has_nulls});
}
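// Illustrative usage sketch (not part of this header): summing an INT32 column while
// treating nulls as zero. `col_view`, `stream`, and `mr` are hypothetical names assumed
// to exist in the caller's scope, not symbols defined here.
//
//   auto d_col = cudf::column_device_view::create(col_view, stream);
//   auto it    = cudf::detail::make_null_replacement_iterator<int32_t>(
//     *d_col, int32_t{0}, col_view.has_nulls());
//   auto sum = thrust::reduce(rmm::exec_policy(stream), it, it + col_view.size(), int32_t{0});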
/**
* @brief Constructs an optional iterator over a column's values and its validity.
*
* Dereferencing the returned iterator returns a `thrust::optional<Element>`.
*
* The element of this iterator contextually converts to bool. The conversion returns true
* if the object contains a value and false if it does not contain a value.
*
* Calling this function with `nullate::DYNAMIC` defers the assumption
* of nullability to runtime with the caller indicating if the column has nulls.
* This is useful when an algorithm is going to execute on multiple iterators and all
* the combinations of iterator types are not required at compile time.
*
* @code{.cpp}
* template<typename T>
* void some_function(cudf::column_view<T> const& col_view){
* auto d_col = cudf::column_device_view::create(col_view);
* // Create a `DYNAMIC` optional iterator
* auto optional_iterator =
* cudf::detail::make_optional_iterator<T>(
* d_col, cudf::nullate::DYNAMIC{col_view.has_nulls()});
* }
* @endcode
*
* Calling this function with `nullate::YES` means that the column supports
* nulls and the optional returned might not contain a value.
* Calling this function with `nullate::NO` means that the column has no
* null values and the optional returned will always contain a value.
*
* @code{.cpp}
* template<typename T, bool has_nulls>
* void some_function(cudf::column_view<T> const& col_view){
* auto d_col = cudf::column_device_view::create(col_view);
* if constexpr(has_nulls) {
* auto optional_iterator =
* cudf::detail::make_optional_iterator<T>(d_col, cudf::nullate::YES{});
* //use optional_iterator
* } else {
* auto optional_iterator =
* cudf::detail::make_optional_iterator<T>(d_col, cudf::nullate::NO{});
* //use optional_iterator
* }
* }
* @endcode
*
* @throws cudf::logic_error if the column is not nullable and `has_nulls` is true.
* @throws cudf::logic_error if column datatype and Element type mismatch.
*
* @tparam Element The type of elements in the column.
* @tparam Nullate A cudf::nullate type describing how to check for nulls.
*
* @param column The column to iterate
* @param has_nulls Indicates whether `column` is checked for nulls.
* @return Iterator that returns valid column elements and the validity of the
* element in a `thrust::optional`
*/
template <typename Element, typename Nullate>
auto make_optional_iterator(column_device_view const& column, Nullate has_nulls)
{
return column.optional_begin<Element, Nullate>(has_nulls);
}
/**
* @brief Constructs a pair iterator over a column's values and its validity.
*
* Dereferencing the returned iterator returns a `thrust::pair<Element, bool>`.
*
* If an element at position `i` is valid (or `has_nulls == false`), then for `p = *(iter + i)`,
* `p.first` contains the value of the element at `i` and `p.second == true`.
*
 * Else, if the element at `i` is null, then the value of `p.first` is undefined and
 * `p.second == false`. In other words, the iterator yields `pair(column[i], validity)`,
 * where `validity` is always `true` if `has_nulls=false`, and is the validity of the
 * element at `i` if `has_nulls=true` and the column is nullable.
*
 * @throws cudf::logic_error if `has_nulls == true` and the column is not nullable.
* @throws cudf::logic_error if column datatype and Element type mismatch.
*
* @tparam Element The type of elements in the column
 * @tparam has_nulls boolean indicating whether to treat the column as nullable
* @param column The column to iterate
* @return auto Iterator that returns valid column elements, and validity of the
* element in a pair
*/
template <typename Element, bool has_nulls = false>
auto make_pair_iterator(column_device_view const& column)
{
return column.pair_begin<Element, has_nulls>();
}
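// Illustrative usage sketch (not part of this header): counting valid rows via the
// validity flag of each pair. Assumes `d_col` is a column_device_view of a nullable
// INT32 column and `stream` is a valid rmm::cuda_stream_view (hypothetical names).
//
//   auto it = cudf::detail::make_pair_iterator<int32_t, true>(d_col);
//   auto valid_count = thrust::count_if(
//     rmm::exec_policy(stream), it, it + d_col.size(),
//     [] __device__(thrust::pair<int32_t, bool> const& p) { return p.second; });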
/**
* @brief Constructs a pair rep iterator over a column's representative values and its validity.
*
* Dereferencing the returned iterator returns a `thrust::pair<rep_type, bool>`,
* where `rep_type` is `device_storage_type<T>`, the type used to store
* the value on the device.
*
* If an element at position `i` is valid (or `has_nulls == false`), then for `p = *(iter + i)`,
* `p.first` contains the value of the element at `i` and `p.second == true`.
*
 * Else, if the element at `i` is null, then the value of `p.first` is undefined and
 * `p.second == false`. In other words, the iterator yields the representative value of
 * `column[i]` paired with a `validity` flag that is always `true` if `has_nulls=false`,
 * and is the validity of the element at `i` if `has_nulls=true` and the column is nullable.
*
 * @throws cudf::logic_error if `has_nulls == true` and the column is not nullable.
* @throws cudf::logic_error if column datatype and Element type mismatch.
*
* @tparam Element The type of elements in the column
 * @tparam has_nulls boolean indicating whether to treat the column as nullable
* @param column The column to iterate
* @return auto Iterator that returns valid column elements, and validity of the
* element in a pair
*/
template <typename Element, bool has_nulls = false>
auto make_pair_rep_iterator(column_device_view const& column)
{
return column.pair_rep_begin<Element, has_nulls>();
}
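// Illustrative usage sketch (not part of this header): iterating a DECIMAL32 column as
// its underlying int32_t representation. Assumes `d_col` is a column_device_view of a
// nullable decimal32 column (hypothetical name).
//
//   auto it = cudf::detail::make_pair_rep_iterator<numeric::decimal32, true>(d_col);
//   // *(it + i) is a thrust::pair<int32_t, bool>: the scaled-integer rep and validity of row i.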
/**
* @brief Constructs an iterator over a column's validities.
*
 * Dereferencing the returned iterator for element `i` will return the validity
 * of `column[i]`.
 * If `safe` is false, the column must be nullable.
 * When `safe` is true and the column is not nullable, the validity is always true.
*
* @throws cudf::logic_error if the column is not nullable and safe = false
*
* @tparam safe If false, the accessor will throw a logic_error if the column is not nullable. If
* true, the accessor checks for nullability and if col is not nullable, returns true.
* @param column The column to iterate
* @return auto Iterator that returns validities of column elements.
*/
template <bool safe = false>
CUDF_HOST_DEVICE auto inline make_validity_iterator(column_device_view const& column)
{
return make_counting_transform_iterator(cudf::size_type{0}, validity_accessor<safe>{column});
}
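// Illustrative usage sketch (not part of this header): counting valid rows of a column
// that may or may not be nullable, using the `safe` accessor. Assumes `d_col` is a
// column_device_view and `stream` is a valid rmm::cuda_stream_view (hypothetical names).
//
//   auto it = cudf::detail::make_validity_iterator<true>(d_col);
//   auto valid_count =
//     thrust::count(rmm::exec_policy(stream), it, it + d_col.size(), true);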
/**
* @brief Constructs a constant device iterator over a scalar's validity.
*
* Dereferencing the returned iterator returns a `bool`.
*
* For `p = *(iter + i)`, `p` is the validity of the scalar.
*
* @tparam bool unused. This template parameter exists to enforce the same
* template interface as @ref make_validity_iterator(column_device_view const&).
* @param scalar_value The scalar to iterate
* @return auto Iterator that returns scalar validity
*/
template <bool safe = false>
auto inline make_validity_iterator(scalar const& scalar_value)
{
return thrust::make_constant_iterator(scalar_value.is_valid());
}
/**
 * @brief Value accessor for a scalar with valid data.
 *
 * The unary functor returns the scalar's value as type `Element`.
*
* @throws `cudf::logic_error` if scalar datatype and Element type mismatch.
*
* @tparam Element The type of return type of functor
*/
template <typename Element>
struct scalar_value_accessor {
using ScalarType = scalar_type_t<Element>;
using ScalarDeviceType = scalar_device_type_t<Element>;
ScalarDeviceType const dscalar; ///< scalar device view
scalar_value_accessor(scalar const& scalar_value)
: dscalar(get_scalar_device_view(static_cast<ScalarType&>(const_cast<scalar&>(scalar_value))))
{
CUDF_EXPECTS(type_id_matches_device_storage_type<Element>(scalar_value.type().id()),
"the data type mismatch");
}
__device__ inline Element const operator()(size_type) const { return dscalar.value(); }
};
/**
* @brief Constructs a constant device iterator over a scalar's value.
*
* Dereferencing the returned iterator returns a `Element`.
*
* For `p = *(iter + i)`, `p` is the value stored in the scalar.
*
* The behavior is undefined if the scalar is destroyed before iterator dereferencing.
*
* @throws cudf::logic_error if scalar datatype and Element type mismatch.
* @throws cudf::logic_error if scalar is null.
* @throws cudf::logic_error if the returned iterator is dereferenced in host
*
* @tparam Element The type of element in the scalar
* @param scalar_value The scalar to iterate
* @return auto Iterator that returns scalar value
*/
template <typename Element>
auto inline make_scalar_iterator(scalar const& scalar_value)
{
CUDF_EXPECTS(data_type(type_to_id<Element>()) == scalar_value.type(), "the data type mismatch");
CUDF_EXPECTS(scalar_value.is_valid(), "the scalar value must be valid");
return thrust::make_transform_iterator(thrust::make_constant_iterator<size_type>(0),
scalar_value_accessor<Element>{scalar_value});
}
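// Illustrative usage sketch (not part of this header): adding a valid scalar to every
// element of a non-nullable INT32 column. `d_col`, `s`, `d_out`, and `stream` are
// hypothetical names assumed to exist in the caller's scope.
//
//   auto col_it    = d_col.begin<int32_t>();
//   auto scalar_it = cudf::detail::make_scalar_iterator<int32_t>(s);
//   thrust::transform(rmm::exec_policy(stream),
//                     col_it, col_it + d_col.size(), scalar_it, d_out,
//                     thrust::plus<int32_t>{});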
/**
* @brief Optional accessor for a scalar
*
* The `scalar_optional_accessor` always returns a `thrust::optional` of the scalar.
* The validity of the optional is determined by the `Nullate` parameter which may
* be one of the following:
*
* - `nullate::YES` means that the scalar may be valid or invalid and the optional returned
* will contain a value only if the scalar is valid.
*
 * - `nullate::NO` means the caller attests that the scalar will always be valid,
 *   no checks will occur and the returned `thrust::optional` will always contain
 *   the scalar's value.
*
* - `nullate::DYNAMIC` defers the assumption of nullability to runtime and the caller
* specifies if the scalar may be valid or invalid.
* For `DYNAMIC{true}` the return value will be a `thrust::optional{scalar}` when the
* scalar is valid and a `thrust::optional{}` when the scalar is invalid.
* For `DYNAMIC{false}` the return value will always be a `thrust::optional{scalar}`.
*
* @throws `cudf::logic_error` if scalar datatype and Element type mismatch.
*
* @tparam Element The type of return type of functor
* @tparam Nullate A cudf::nullate type describing how to check for nulls.
*/
template <typename Element, typename Nullate>
struct scalar_optional_accessor : public scalar_value_accessor<Element> {
using super_t = scalar_value_accessor<Element>;
using value_type = thrust::optional<Element>;
scalar_optional_accessor(scalar const& scalar_value, Nullate with_nulls)
: scalar_value_accessor<Element>(scalar_value), has_nulls{with_nulls}
{
}
__device__ inline value_type const operator()(size_type) const
{
if (has_nulls && !super_t::dscalar.is_valid()) { return value_type{thrust::nullopt}; }
if constexpr (cudf::is_fixed_point<Element>()) {
using namespace numeric;
using rep = typename Element::rep;
auto const value = super_t::dscalar.rep();
auto const scale = scale_type{super_t::dscalar.type().scale()};
return Element{scaled_integer<rep>{value, scale}};
} else {
return Element{super_t::dscalar.value()};
}
}
Nullate has_nulls{};
};
/**
 * @brief Pair accessor for a scalar.
 *
 * The unary functor returns a pair of the scalar's value (as `Element`) and its `bool` validity.
*
* @throws `cudf::logic_error` if scalar datatype and Element type mismatch.
*
* @tparam Element The type of return type of functor
*/
template <typename Element>
struct scalar_pair_accessor : public scalar_value_accessor<Element> {
using super_t = scalar_value_accessor<Element>;
using value_type = thrust::pair<Element, bool>;
scalar_pair_accessor(scalar const& scalar_value) : scalar_value_accessor<Element>(scalar_value) {}
__device__ inline value_type const operator()(size_type) const
{
return {Element(super_t::dscalar.value()), super_t::dscalar.is_valid()};
}
};
/**
* @brief Utility to discard template type arguments.
*
* Substitute for std::void_t.
*
* @tparam T Ignored template parameter
*/
template <typename... T>
using void_t = void;
/**
* @brief Compile-time reflection to check if `Element` type has a `rep()` member.
*/
template <typename Element, typename = void>
struct has_rep_member : std::false_type {};
template <typename Element>
struct has_rep_member<Element, void_t<decltype(std::declval<Element>().rep())>> : std::true_type {};
/**
* @brief Pair accessor for scalar's representation value and validity.
*
* @tparam Element The type of element in the scalar.
*/
template <typename Element>
struct scalar_representation_pair_accessor : public scalar_value_accessor<Element> {
using base = scalar_value_accessor<Element>;
using rep_type = device_storage_type_t<Element>;
using value_type = thrust::pair<rep_type, bool>;
scalar_representation_pair_accessor(scalar const& scalar_value) : base(scalar_value) {}
__device__ inline value_type const operator()(size_type) const
{
return {get_rep(base::dscalar), base::dscalar.is_valid()};
}
private:
template <typename DeviceScalar,
std::enable_if_t<!has_rep_member<DeviceScalar>::value, void>* = nullptr>
__device__ inline rep_type get_rep(DeviceScalar const& dscalar) const
{
return dscalar.value();
}
template <typename DeviceScalar,
std::enable_if_t<has_rep_member<DeviceScalar>::value, void>* = nullptr>
__device__ inline rep_type get_rep(DeviceScalar const& dscalar) const
{
return dscalar.rep();
}
};
/**
* @brief Constructs an optional iterator over a scalar's values and its validity.
*
* Dereferencing the returned iterator returns a `thrust::optional<Element>`.
*
* The element of this iterator contextually converts to bool. The conversion returns true
* if the object contains a value and false if it does not contain a value.
*
* The iterator behavior is undefined if the scalar is destroyed before iterator dereferencing.
*
* Calling this function with `nullate::DYNAMIC` defers the assumption
* of nullability to runtime with the caller indicating if the scalar is valid.
*
* @code{.cpp}
* template<typename T>
* void some_function(cudf::column_view<T> const& col_view,
* scalar const& scalar_value,
* bool col_has_nulls){
* auto d_col = cudf::column_device_view::create(col_view);
* auto column_iterator = cudf::detail::make_optional_iterator<T>(
* d_col, cudf::nullate::DYNAMIC{col_has_nulls});
* auto scalar_iterator = cudf::detail::make_optional_iterator<T>(
* scalar_value, cudf::nullate::DYNAMIC{scalar_value.is_valid()});
* //use iterators
* }
* @endcode
*
 * Calling this function with `nullate::YES` means that the scalar may be invalid
 * and the optional returned might not contain a value.
* Calling this function with `nullate::NO` means that the scalar is valid
* and the optional returned will always contain a value.
*
* @code{.cpp}
* template<typename T, bool any_nulls>
* void some_function(cudf::column_view<T> const& col_view, scalar const& scalar_value){
* auto d_col = cudf::column_device_view::create(col_view);
* if constexpr(any_nulls) {
* auto column_iterator =
* cudf::detail::make_optional_iterator<T>(d_col, cudf::nullate::YES{});
* auto scalar_iterator =
* cudf::detail::make_optional_iterator<T>(scalar_value, cudf::nullate::YES{});
* //use iterators
* } else {
* auto column_iterator =
* cudf::detail::make_optional_iterator<T>(d_col, cudf::nullate::NO{});
* auto scalar_iterator =
* cudf::detail::make_optional_iterator<T>(scalar_value, cudf::nullate::NO{});
* //use iterators
* }
* }
* @endcode
*
* @throws cudf::logic_error if scalar datatype and Element type mismatch.
*
* @tparam Element The type of elements in the scalar
* @tparam Nullate A cudf::nullate type describing how to check for nulls.
*
* @param scalar_value The scalar to be returned by the iterator.
* @param has_nulls Indicates if the scalar value may be invalid.
* @return Iterator that returns scalar and the validity of the scalar in a thrust::optional
*/
template <typename Element, typename Nullate>
auto inline make_optional_iterator(scalar const& scalar_value, Nullate has_nulls)
{
CUDF_EXPECTS(type_id_matches_device_storage_type<Element>(scalar_value.type().id()),
"the data type mismatch");
return thrust::make_transform_iterator(
thrust::make_constant_iterator<size_type>(0),
scalar_optional_accessor<Element, Nullate>{scalar_value, has_nulls});
}
/**
* @brief Constructs a constant device pair iterator over a scalar's value and its validity.
*
* Dereferencing the returned iterator returns a `thrust::pair<Element, bool>`.
*
* If scalar is valid, then for `p = *(iter + i)`, `p.first` contains
* the value of the scalar and `p.second == true`.
*
* Else, if the scalar is null, then the value of `p.first` is undefined and `p.second == false`.
*
* The behavior is undefined if the scalar is destroyed before iterator dereferencing.
*
* @throws cudf::logic_error if scalar datatype and Element type mismatch.
* @throws cudf::logic_error if the returned iterator is dereferenced in host
*
* @tparam Element The type of elements in the scalar
 * @tparam bool unused. This template parameter exists to enforce the same
* template interface as @ref make_pair_iterator(column_device_view const&).
* @param scalar_value The scalar to iterate
* @return auto Iterator that returns scalar, and validity of the scalar in a pair
*/
template <typename Element, bool = false>
auto inline make_pair_iterator(scalar const& scalar_value)
{
CUDF_EXPECTS(type_id_matches_device_storage_type<Element>(scalar_value.type().id()),
"the data type mismatch");
return thrust::make_transform_iterator(thrust::make_constant_iterator<size_type>(0),
scalar_pair_accessor<Element>{scalar_value});
}
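// Illustrative usage sketch (not part of this header): a scalar pair iterator lets an
// algorithm written against pair iterators consume a scalar as if it were a column.
// Assumes `s` is a cudf::scalar of type INT32 (hypothetical name).
//
//   auto scalar_pairs = cudf::detail::make_pair_iterator<int32_t>(s);
//   // *(scalar_pairs + i) is {value, is_valid} for every i.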
/**
* @brief Constructs a constant device pair iterator over a scalar's representative value
* and its validity.
*
* Dereferencing the returned iterator returns a `thrust::pair<Element::rep, bool>`.
* E.g. For a valid `decimal32` row, a `thrust::pair<int32_t, bool>` is returned,
* with the value set to the `int32_t` representative value of the decimal,
* and validity `true`, indicating that the row is valid.
*
* If scalar is valid, then for `p = *(iter + i)`, `p.first` contains
* the representative value of the scalar and `p.second == true`.
*
* Else, if the scalar is null, then the value of `p.first` is undefined and `p.second == false`.
*
* The behavior is undefined if the scalar is destroyed before iterator dereferencing.
*
* @throws cudf::logic_error if scalar datatype and Element type mismatch.
* @throws cudf::logic_error if the returned iterator is dereferenced in host
*
* @tparam Element The type of elements in the scalar
 * @tparam bool unused. This template parameter exists to enforce the same
* template interface as @ref make_pair_iterator(column_device_view const&).
* @param scalar_value The scalar to iterate
* @return auto Iterator that returns scalar's representative value,
* and validity of the scalar in a pair
*/
template <typename Element, bool = false>
auto make_pair_rep_iterator(scalar const& scalar_value)
{
CUDF_EXPECTS(type_id_matches_device_storage_type<Element>(scalar_value.type().id()),
"the data type mismatch");
return make_counting_transform_iterator(
0, scalar_representation_pair_accessor<Element>{scalar_value});
}
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/unary.hpp
|
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_factories.hpp>
#include <cudf/unary.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/transform.h>
namespace cudf {
namespace detail {
/**
 * @brief Creates a column of `type_id::BOOL8` elements by applying a predicate to every element
 * in [`begin`, `end`). `true` indicates the value satisfies the predicate and `false` indicates
 * it does not.
*
* @tparam InputIterator Iterator type for `begin` and `end`
 * @tparam Predicate A predicate type which will be evaluated on each element
* @param begin Beginning of the sequence of elements
 * @param end End of the sequence of elements
 * @param size Number of elements in the sequence, used to size the output column
 * @param p Predicate to be applied to each element in `[begin,end)`
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory
*
 * @returns A column of type `type_id::BOOL8`, with `true` representing the predicate is satisfied.
*/
template <typename InputIterator, typename Predicate>
std::unique_ptr<column> true_if(InputIterator begin,
InputIterator end,
size_type size,
Predicate p,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto output =
make_numeric_column(data_type(type_id::BOOL8), size, mask_state::UNALLOCATED, stream, mr);
auto output_mutable_view = output->mutable_view();
auto output_data = output_mutable_view.data<bool>();
thrust::transform(rmm::exec_policy(stream), begin, end, output_data, p);
return output;
}
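// Illustrative usage sketch (not part of this header): flagging positive values of an
// INT32 column. Assumes `d_col` is a column_device_view of a non-nullable INT32 column
// and `stream`/`mr` are a valid stream and memory resource (hypothetical names).
//
//   auto begin  = d_col.begin<int32_t>();
//   auto result = cudf::detail::true_if(
//     begin, begin + d_col.size(), d_col.size(),
//     [] __device__(int32_t v) { return v > 0; }, stream, mr);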
/**
* @copydoc cudf::unary_operation
*/
std::unique_ptr<cudf::column> unary_operation(cudf::column_view const& input,
cudf::unary_operator op,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::is_valid
*/
std::unique_ptr<cudf::column> is_valid(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::cast
*/
std::unique_ptr<column> cast(column_view const& input,
data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::is_nan
*/
std::unique_ptr<column> is_nan(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::is_not_nan
*/
std::unique_ptr<column> is_not_nan(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/fill.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/filling.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <memory>
namespace cudf {
namespace detail {
/**
* @copydoc cudf::fill_in_place
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
void fill_in_place(mutable_column_view& destination,
size_type begin,
size_type end,
scalar const& value,
rmm::cuda_stream_view stream);
/**
* @copydoc cudf::fill
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> fill(column_view const& input,
size_type begin,
size_type end,
scalar const& value,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/gather.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <memory>
namespace cudf {
namespace detail {
enum class negative_index_policy : bool { ALLOWED, NOT_ALLOWED };
/**
* @brief Gathers the specified rows of a set of columns according to a gather map.
*
* Gathers the rows of the source columns according to `gather_map` such that row "i"
* in the resulting table's columns will contain row "gather_map[i]" from the source columns.
* The number of rows in the result table will be equal to the number of elements in
* `gather_map`.
*
* A negative value `i` in the `gather_map` is interpreted as `i+n`, where
* `n` is the number of rows in the `source_table`.
*
* @throws cudf::logic_error if `check_bounds == true` and an index exists in
* `gather_map` outside the range `[-n, n)`, where `n` is the number of rows in
* the source table. If `check_bounds == false`, the behavior is undefined.
*
* @param[in] source_table The input columns whose rows will be gathered
* @param[in] gather_map View into a non-nullable column of integral indices that maps the
* rows in the source columns to rows in the destination columns.
* @param[in] bounds_policy How to treat out-of-bounds indices. `NULLIFY` coerces rows that
* correspond to out-of-bounds indices in the gather map to be null elements. For better
* performance, use `DONT_CHECK` when the `gather_map` is known to contain only valid
* indices. If `policy` is set to `DONT_CHECK` and there are out-of-bounds indices in `gather_map`,
* the behavior is undefined.
* @param[in] negative_index_policy Interpret each negative index `i` in the
* `gather_map` as the positive index `i+num_source_rows`.
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @param[in] mr Device memory resource used to allocate the returned table's device memory
* @return Result of the gather
*/
std::unique_ptr<table> gather(table_view const& source_table,
column_view const& gather_map,
out_of_bounds_policy bounds_policy,
negative_index_policy neg_indices,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
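// Illustrative usage sketch (not part of this header): gathering rows {2, 0, 1} of a
// table. Assumes `input` is a cudf::table_view, `map` is an INT32 column_view holding
// {2, 0, 1}, and `stream`/`mr` are valid (hypothetical names).
//
//   auto result = cudf::detail::gather(input, map,
//                                      cudf::out_of_bounds_policy::DONT_CHECK,
//                                      cudf::detail::negative_index_policy::NOT_ALLOWED,
//                                      stream, mr);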
/**
 * @copydoc cudf::detail::gather(table_view const&,column_view const&,
 * cudf::out_of_bounds_policy,cudf::detail::negative_index_policy,rmm::cuda_stream_view,
 * rmm::mr::device_memory_resource*)
*
* @throws cudf::logic_error if `gather_map` span size is larger than max of `size_type`.
*/
std::unique_ptr<table> gather(table_view const& source_table,
device_span<size_type const> const gather_map,
out_of_bounds_policy bounds_policy,
negative_index_policy neg_indices,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/concatenate_masks.hpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
namespace cudf {
//! Inner interfaces and implementations
namespace detail {
/**
 * @brief Concatenates the null mask bits of all the column device views in the
 * `d_views` array to the destination bitmask.
*
* @param d_views Column device views whose null masks will be concatenated
* @param d_offsets Prefix sum of sizes of elements of `d_views`
* @param dest_mask The output buffer to copy null masks into
 * @param output_size The total number of null mask bits that are being copied
* @param stream CUDA stream used for device memory operations and kernel launches.
* @return The number of nulls
*/
size_type concatenate_masks(device_span<column_device_view const> d_views,
device_span<size_t const> d_offsets,
bitmask_type* dest_mask,
size_type output_size,
rmm::cuda_stream_view stream);
/**
* @brief Concatenates `views[i]`'s bitmask from the bits
* `[views[i].offset(), views[i].offset() + views[i].size())` for all elements
* views[i] in views into a destination bitmask pointer.
*
* @param views Column views whose bitmasks will be concatenated
* @param dest_mask The output buffer to copy null masks into
* @param stream CUDA stream used for device memory operations and kernel launches.
* @return The number of nulls
*/
size_type concatenate_masks(host_span<column_view const> views,
bitmask_type* dest_mask,
rmm::cuda_stream_view stream);
/**
* @copydoc cudf::concatenate_masks(host_span<column_view const>, rmm::mr::device_memory_resource*)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
rmm::device_buffer concatenate_masks(host_span<column_view const> views,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/null_mask.cuh
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/detail/valid_if.cuh>
#include <cudf/null_mask.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/exec_policy.hpp>
#include <cub/block/block_reduce.cuh>
#include <cub/device/device_segmented_reduce.cuh>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
#include <algorithm>
#include <iterator>
#include <optional>
#include <vector>
namespace cudf {
namespace detail {
/**
* @brief Computes the merger of an array of bitmasks using a binary operator
*
* @tparam block_size Number of threads in each thread block
* @tparam Binop Type of binary operator
*
* @param op The binary operator used to combine the bitmasks
* @param destination The bitmask to write result into
* @param source Array of source mask pointers. All masks must be of same size
* @param source_begin_bits Array of offsets into corresponding @p source masks.
* Must be same size as source array
* @param source_size_bits Number of bits in each mask in @p source
* @param count_ptr Pointer to counter of set bits
*/
template <int block_size, typename Binop>
__global__ void offset_bitmask_binop(Binop op,
device_span<bitmask_type> destination,
device_span<bitmask_type const* const> source,
device_span<size_type const> source_begin_bits,
size_type source_size_bits,
size_type* count_ptr)
{
auto const tid = threadIdx.x + blockIdx.x * blockDim.x;
auto const last_bit_index = source_size_bits - 1;
auto const last_word_index = cudf::word_index(last_bit_index);
size_type thread_count = 0;
for (size_type destination_word_index = tid; destination_word_index < destination.size();
destination_word_index += blockDim.x * gridDim.x) {
bitmask_type destination_word =
detail::get_mask_offset_word(source[0],
destination_word_index,
source_begin_bits[0],
source_begin_bits[0] + source_size_bits);
for (size_type i = 1; i < source.size(); i++) {
destination_word = op(destination_word,
detail::get_mask_offset_word(source[i],
destination_word_index,
source_begin_bits[i],
source_begin_bits[i] + source_size_bits));
}
if (destination_word_index == last_word_index) {
// mask out any bits not part of this word
auto const num_bits_in_last_word = intra_word_index(last_bit_index);
if (num_bits_in_last_word <
static_cast<size_type>(detail::size_in_bits<bitmask_type>() - 1)) {
destination_word &= set_least_significant_bits(num_bits_in_last_word + 1);
}
}
destination[destination_word_index] = destination_word;
thread_count += __popc(destination_word);
}
using BlockReduce = cub::BlockReduce<size_type, block_size>;
__shared__ typename BlockReduce::TempStorage temp_storage;
size_type block_count = BlockReduce(temp_storage).Sum(thread_count);
if (threadIdx.x == 0) { atomicAdd(count_ptr, block_count); }
}
/**
* @copydoc bitmask_binop(Binop op, host_span<bitmask_type const* const>, host_span<size_type>
* const, size_type, rmm::mr::device_memory_resource *)
*
* @param stream CUDA stream used for device memory operations and kernel launches
*/
template <typename Binop>
std::pair<rmm::device_buffer, size_type> bitmask_binop(Binop op,
host_span<bitmask_type const* const> masks,
host_span<size_type const> masks_begin_bits,
size_type mask_size_bits,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto dest_mask = rmm::device_buffer{bitmask_allocation_size_bytes(mask_size_bits), stream, mr};
auto null_count =
mask_size_bits -
inplace_bitmask_binop(op,
device_span<bitmask_type>(static_cast<bitmask_type*>(dest_mask.data()),
num_bitmask_words(mask_size_bits)),
masks,
masks_begin_bits,
mask_size_bits,
stream);
return std::pair(std::move(dest_mask), null_count);
}
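// Illustrative usage sketch (not part of this header): AND-merging two bitmasks of
// `n` bits each, both starting at bit offset 0. `m0`, `m1`, `n`, `stream`, and `mr`
// are hypothetical names assumed to exist in the caller's scope.
//
//   std::vector<bitmask_type const*> masks{m0, m1};
//   std::vector<size_type> offsets{0, 0};
//   auto [merged_mask, null_count] = cudf::detail::bitmask_binop(
//     thrust::bit_and<bitmask_type>{}, masks, offsets, n, stream, mr);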
/**
* @brief Performs a merge of the specified bitmasks using the binary operator
* provided, writes in place to destination and returns count of set bits
*
* @param[in] op The binary operator used to combine the bitmasks
* @param[out] dest_mask Destination to which the merged result is written
* @param[in] masks The list of data pointers of the bitmasks to be merged
* @param[in] masks_begin_bits The bit offsets from which each mask is to be merged
 * @param[in] mask_size_bits The number of bits to be combined in each mask
* @param[in] stream CUDA stream used for device memory operations and kernel launches
* @return size_type Count of set bits
*/
template <typename Binop>
size_type inplace_bitmask_binop(Binop op,
device_span<bitmask_type> dest_mask,
host_span<bitmask_type const* const> masks,
host_span<size_type const> masks_begin_bits,
size_type mask_size_bits,
rmm::cuda_stream_view stream)
{
CUDF_EXPECTS(
std::all_of(masks_begin_bits.begin(), masks_begin_bits.end(), [](auto b) { return b >= 0; }),
"Invalid range.");
CUDF_EXPECTS(mask_size_bits > 0, "Invalid bit range.");
CUDF_EXPECTS(std::all_of(masks.begin(), masks.end(), [](auto p) { return p != nullptr; }),
"Mask pointer cannot be null");
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource();
rmm::device_scalar<size_type> d_counter{0, stream, mr};
rmm::device_uvector<bitmask_type const*> d_masks(masks.size(), stream, mr);
rmm::device_uvector<size_type> d_begin_bits(masks_begin_bits.size(), stream, mr);
CUDF_CUDA_TRY(cudaMemcpyAsync(
d_masks.data(), masks.data(), masks.size_bytes(), cudaMemcpyDefault, stream.value()));
CUDF_CUDA_TRY(cudaMemcpyAsync(d_begin_bits.data(),
masks_begin_bits.data(),
masks_begin_bits.size_bytes(),
cudaMemcpyDefault,
stream.value()));
auto constexpr block_size = 256;
cudf::detail::grid_1d config(dest_mask.size(), block_size);
offset_bitmask_binop<block_size>
<<<config.num_blocks, config.num_threads_per_block, 0, stream.value()>>>(
op, dest_mask, d_masks, d_begin_bits, mask_size_bits, d_counter.data());
CUDF_CHECK_CUDA(stream.value());
return d_counter.value(stream);
}
/**
* @brief Enum indicating whether to count unset (0) bits or set (1) bits.
*/
enum class count_bits_policy : bool {
  UNSET_BITS,  ///< Count unset (0) bits
  SET_BITS     ///< Count set (1) bits
};
/**
* For each range `[first_bit_indices[i], last_bit_indices[i])`
* (where 0 <= i < `num_ranges`), count the number of bits set outside the range
* in the boundary words (i.e. words that include either the first or last bit)
* and subtract the count from the range's null count.
*
* Expects `0 <= first_bit_indices[i] <= last_bit_indices[i]`.
*
* @param[in] bitmask The bitmask whose non-zero bits outside the range in the
* boundary words will be counted.
* @param[in] num_ranges The number of ranges.
* @param[in] first_bit_indices Random-access input iterator to the sequence of indices (inclusive)
* of the first bit in each range.
* @param[in] last_bit_indices Random-access input iterator to the sequence of indices (exclusive)
* of the last bit in each range.
* @param[in,out] null_counts Random-access input/output iterator where the number of non-zero bits
* in each range is updated.
*/
template <typename OffsetIterator, typename OutputIterator>
__global__ void subtract_set_bits_range_boundaries_kernel(bitmask_type const* bitmask,
size_type num_ranges,
OffsetIterator first_bit_indices,
OffsetIterator last_bit_indices,
OutputIterator null_counts)
{
constexpr size_type const word_size_in_bits{detail::size_in_bits<bitmask_type>()};
size_type const tid = threadIdx.x + blockIdx.x * blockDim.x;
size_type range_id = tid;
while (range_id < num_ranges) {
size_type const first_bit_index = *(first_bit_indices + range_id);
size_type const last_bit_index = *(last_bit_indices + range_id);
size_type delta = 0;
// Compute delta due to the preceding bits in the first word in the range.
size_type const first_num_slack_bits = intra_word_index(first_bit_index);
if (first_num_slack_bits > 0) {
bitmask_type const word = bitmask[word_index(first_bit_index)];
bitmask_type const slack_mask = set_least_significant_bits(first_num_slack_bits);
delta -= __popc(word & slack_mask);
}
// Compute delta due to the following bits in the last word in the range.
size_type const last_num_slack_bits = (last_bit_index % word_size_in_bits) == 0
? 0
: word_size_in_bits - intra_word_index(last_bit_index);
if (last_num_slack_bits > 0) {
bitmask_type const word = bitmask[word_index(last_bit_index)];
bitmask_type const slack_mask = set_most_significant_bits(last_num_slack_bits);
delta -= __popc(word & slack_mask);
}
// Update the null count with the computed delta.
size_type updated_null_count = *(null_counts + range_id) + delta;
*(null_counts + range_id) = updated_null_count;
range_id += blockDim.x * gridDim.x;
}
}
/**
* @brief Functor that converts bit segment indices to word segment indices.
*
* Converts [first_bit_index, last_bit_index) to [first_word_index,
 * last_word_index). The flag `inclusive` indicates whether a bit index marks the inclusive
 * start of a segment or its exclusive end. For an exclusive end index, the word index is
 * incremented unless the bit is the first bit of a word, so that the resulting word range
 * still covers the partial last word of the segment.
*/
struct bit_to_word_index {
bit_to_word_index(bool inclusive) : inclusive(inclusive) {}
__device__ inline size_type operator()(size_type const& bit_index) const
{
return word_index(bit_index) + ((inclusive || intra_word_index(bit_index) == 0) ? 0 : 1);
}
bool const inclusive;
};
struct popc {
__device__ inline size_type operator()(bitmask_type word) const { return __popc(word); }
};
// Count set/unset bits in a segmented null mask, using offset iterators accessible by the device.
template <typename OffsetIterator>
rmm::device_uvector<size_type> segmented_count_bits(bitmask_type const* bitmask,
OffsetIterator first_bit_indices_begin,
OffsetIterator first_bit_indices_end,
OffsetIterator last_bit_indices_begin,
count_bits_policy count_bits,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const num_ranges =
static_cast<size_type>(std::distance(first_bit_indices_begin, first_bit_indices_end));
rmm::device_uvector<size_type> d_bit_counts(num_ranges, stream);
auto num_set_bits_in_word = thrust::make_transform_iterator(bitmask, popc{});
auto first_word_indices =
thrust::make_transform_iterator(first_bit_indices_begin, bit_to_word_index{true});
auto last_word_indices =
thrust::make_transform_iterator(last_bit_indices_begin, bit_to_word_index{false});
// Allocate temporary memory.
size_t temp_storage_bytes{0};
CUDF_CUDA_TRY(cub::DeviceSegmentedReduce::Sum(nullptr,
temp_storage_bytes,
num_set_bits_in_word,
d_bit_counts.begin(),
num_ranges,
first_word_indices,
last_word_indices,
stream.value()));
rmm::device_buffer d_temp_storage(temp_storage_bytes, stream);
// Perform segmented reduction.
CUDF_CUDA_TRY(cub::DeviceSegmentedReduce::Sum(d_temp_storage.data(),
temp_storage_bytes,
num_set_bits_in_word,
d_bit_counts.begin(),
num_ranges,
first_word_indices,
last_word_indices,
stream.value()));
// Adjust counts in segment boundaries (if segments are not word-aligned).
constexpr size_type block_size{256};
cudf::detail::grid_1d grid(num_ranges, block_size);
subtract_set_bits_range_boundaries_kernel<<<grid.num_blocks,
grid.num_threads_per_block,
0,
stream.value()>>>(
bitmask, num_ranges, first_bit_indices_begin, last_bit_indices_begin, d_bit_counts.begin());
if (count_bits == count_bits_policy::UNSET_BITS) {
// Convert from set bits counts to unset bits by subtracting the number of
// set bits from the length of the segment.
auto segments_begin =
thrust::make_zip_iterator(first_bit_indices_begin, last_bit_indices_begin);
auto segment_length_iterator =
thrust::transform_iterator(segments_begin, [] __device__(auto const& segment) {
auto const begin = thrust::get<0>(segment);
auto const end = thrust::get<1>(segment);
return end - begin;
});
thrust::transform(rmm::exec_policy(stream),
segment_length_iterator,
segment_length_iterator + num_ranges,
d_bit_counts.data(),
d_bit_counts.data(),
[] __device__(auto segment_size, auto segment_bit_count) {
return segment_size - segment_bit_count;
});
}
CUDF_CHECK_CUDA(stream.value());
return d_bit_counts;
}
/**
* @brief Given two iterators, validate that the iterators represent valid ranges of
* indices and return the number of ranges.
*
* @throws cudf::logic_error if `std::distance(indices_begin, indices_end) % 2 != 0`
* @throws cudf::logic_error if `indices_begin[2*i] < 0 or indices_begin[2*i] >
* indices_begin[(2*i)+1]`
*
* @param indices_begin An iterator representing the beginning of the ranges of indices
* @param indices_end An iterator representing the end of the ranges of indices
*
* @return The number of segments specified by the input iterators.
*/
template <typename IndexIterator>
size_type validate_segmented_indices(IndexIterator indices_begin, IndexIterator indices_end)
{
auto const num_indices = static_cast<size_type>(std::distance(indices_begin, indices_end));
CUDF_EXPECTS(num_indices % 2 == 0, "Array of indices needs to have an even number of elements.");
size_type const num_segments = num_indices / 2;
for (size_type i = 0; i < num_segments; i++) {
auto begin = indices_begin[2 * i];
auto end = indices_begin[2 * i + 1];
CUDF_EXPECTS(begin >= 0, "Starting index cannot be negative.");
CUDF_EXPECTS(end >= begin, "End index cannot be smaller than the starting index.");
}
return num_segments;
}
struct index_alternator {
__device__ inline size_type operator()(size_type const& i) const
{
return *(d_indices + 2 * i + (is_end ? 1 : 0));
}
bool const is_end = false;
size_type const* d_indices;
};
/**
* @brief Given a bitmask, counts the number of set (1) or unset (0) bits in every range
* `[indices_begin[2*i], indices_begin[(2*i)+1])` (where 0 <= i < std::distance(indices_begin,
* indices_end) / 2).
*
 * A null `bitmask` is not allowed here; callers that need to handle a null bitmask
 * (such as `segmented_valid_count` and `segmented_null_count`) do so before calling.
*
* @throws cudf::logic_error if `bitmask == nullptr`.
* @throws cudf::logic_error if `std::distance(indices_begin, indices_end) % 2 != 0`.
* @throws cudf::logic_error if `indices_begin[2*i] < 0 or indices_begin[2*i] >
* indices_begin[(2*i)+1]`.
*
* @param bitmask Bitmask residing in device memory whose bits will be counted.
* @param indices_begin An iterator representing the beginning of the range of indices specifying
* ranges to count the number of set/unset bits within.
* @param indices_end An iterator representing the end of the range of indices specifying ranges to
* count the number of set/unset bits within.
* @param count_bits If SET_BITS, count set (1) bits. If UNSET_BITS, count unset (0) bits.
* @param stream CUDA stream used for device memory operations and kernel launches.
*
* @return A vector storing the number of non-zero bits in the specified ranges
*/
template <typename IndexIterator>
std::vector<size_type> segmented_count_bits(bitmask_type const* bitmask,
IndexIterator indices_begin,
IndexIterator indices_end,
count_bits_policy count_bits,
rmm::cuda_stream_view stream)
{
CUDF_EXPECTS(bitmask != nullptr, "Invalid bitmask.");
auto const num_segments = validate_segmented_indices(indices_begin, indices_end);
// Return an empty vector if there are zero segments.
if (num_segments == 0) { return std::vector<size_type>{}; }
// Construct a contiguous host buffer of indices and copy to device.
auto const h_indices = std::vector<size_type>(indices_begin, indices_end);
auto const d_indices =
make_device_uvector_async(h_indices, stream, rmm::mr::get_current_device_resource());
// Compute the bit counts over each segment.
auto first_bit_indices_begin = thrust::make_transform_iterator(
thrust::make_counting_iterator(0), index_alternator{false, d_indices.data()});
auto const first_bit_indices_end = first_bit_indices_begin + num_segments;
auto last_bit_indices_begin = thrust::make_transform_iterator(
thrust::make_counting_iterator(0), index_alternator{true, d_indices.data()});
rmm::device_uvector<size_type> d_bit_counts =
cudf::detail::segmented_count_bits(bitmask,
first_bit_indices_begin,
first_bit_indices_end,
last_bit_indices_begin,
count_bits,
stream,
rmm::mr::get_current_device_resource());
// Copy the results back to the host.
return make_std_vector_sync(d_bit_counts, stream);
}
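// Illustrative usage sketch (not part of this header): counting set bits in the two
// segments [0, 10) and [10, 64) of a bitmask. Assumes `mask` is a valid device pointer
// to bitmask words covering at least 64 bits and `stream` is valid (hypothetical names).
//
//   std::vector<size_type> indices{0, 10, 10, 64};
//   auto counts = cudf::detail::segmented_count_bits(
//     mask, indices.cbegin(), indices.cend(),
//     cudf::detail::count_bits_policy::SET_BITS, stream);
//   // counts[0] and counts[1] hold the per-segment set-bit counts.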
// Count non-zero bits in the specified ranges.
template <typename IndexIterator>
std::vector<size_type> segmented_count_set_bits(bitmask_type const* bitmask,
IndexIterator indices_begin,
IndexIterator indices_end,
rmm::cuda_stream_view stream)
{
return detail::segmented_count_bits(
bitmask, indices_begin, indices_end, count_bits_policy::SET_BITS, stream);
}
// Count zero bits in the specified ranges.
template <typename IndexIterator>
std::vector<size_type> segmented_count_unset_bits(bitmask_type const* bitmask,
IndexIterator indices_begin,
IndexIterator indices_end,
rmm::cuda_stream_view stream)
{
return detail::segmented_count_bits(
bitmask, indices_begin, indices_end, count_bits_policy::UNSET_BITS, stream);
}
// Count valid elements in the specified ranges of a validity bitmask.
template <typename IndexIterator>
std::vector<size_type> segmented_valid_count(bitmask_type const* bitmask,
IndexIterator indices_begin,
IndexIterator indices_end,
rmm::cuda_stream_view stream)
{
if (bitmask == nullptr) {
// Return a vector of segment lengths.
auto const num_segments = validate_segmented_indices(indices_begin, indices_end);
auto ret = std::vector<size_type>(num_segments, 0);
for (size_type i = 0; i < num_segments; i++) {
ret[i] = indices_begin[2 * i + 1] - indices_begin[2 * i];
}
return ret;
}
return detail::segmented_count_set_bits(bitmask, indices_begin, indices_end, stream);
}
// Count null elements in the specified ranges of a validity bitmask.
template <typename IndexIterator>
std::vector<size_type> segmented_null_count(bitmask_type const* bitmask,
IndexIterator indices_begin,
IndexIterator indices_end,
rmm::cuda_stream_view stream)
{
if (bitmask == nullptr) {
// Return a vector of zeros.
auto const num_segments = validate_segmented_indices(indices_begin, indices_end);
return std::vector<size_type>(num_segments, 0);
}
return detail::segmented_count_unset_bits(bitmask, indices_begin, indices_end, stream);
}
/**
* @brief Create an output null mask whose validity is determined by the
* validity of any/all elements of segments of an input null mask.
*
* @tparam OffsetIterator Random-access input iterator type.
* @param bitmask Null mask residing in device memory whose segments will be reduced into a new
* mask.
* @param first_bit_indices_begin Random-access input iterator to the beginning of a sequence of
* indices of the first bit in each segment (inclusive).
* @param first_bit_indices_end Random-access input iterator to the end of a sequence of indices of
* the first bit in each segment (inclusive).
* @param last_bit_indices_begin Random-access input iterator to the beginning of a sequence of
* indices of the last bit in each segment (exclusive).
* @param null_handling If `null_policy::INCLUDE`, all elements in a segment must be valid for the
* reduced value to be valid. If `null_policy::EXCLUDE`, the reduction is valid if any element in
* the segment is valid.
 * @param valid_initial_value Indicates whether a valid initial value was provided to the
 * reduction. `true` indicates a valid initial value, `false` indicates a null initial value,
 * and `std::nullopt` indicates no initial value was provided.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned buffer's device memory.
* @return A pair containing the reduced null mask and number of nulls.
*/
template <typename OffsetIterator>
std::pair<rmm::device_buffer, size_type> segmented_null_mask_reduction(
bitmask_type const* bitmask,
OffsetIterator first_bit_indices_begin,
OffsetIterator first_bit_indices_end,
OffsetIterator last_bit_indices_begin,
null_policy null_handling,
std::optional<bool> valid_initial_value,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const segments_begin =
thrust::make_zip_iterator(first_bit_indices_begin, last_bit_indices_begin);
auto const segment_length_iterator =
thrust::make_transform_iterator(segments_begin, [] __device__(auto const& segment) {
auto const begin = thrust::get<0>(segment);
auto const end = thrust::get<1>(segment);
return end - begin;
});
auto const num_segments =
static_cast<size_type>(std::distance(first_bit_indices_begin, first_bit_indices_end));
if (bitmask == nullptr) {
return cudf::detail::valid_if(
segment_length_iterator,
segment_length_iterator + num_segments,
[valid_initial_value] __device__(auto const& length) {
return valid_initial_value.value_or(length > 0);
},
stream,
mr);
}
auto const segment_valid_counts =
cudf::detail::segmented_count_bits(bitmask,
first_bit_indices_begin,
first_bit_indices_end,
last_bit_indices_begin,
cudf::detail::count_bits_policy::SET_BITS,
stream,
rmm::mr::get_current_device_resource());
auto const length_and_valid_count =
thrust::make_zip_iterator(segment_length_iterator, segment_valid_counts.begin());
return cudf::detail::valid_if(
length_and_valid_count,
length_and_valid_count + num_segments,
[null_handling, valid_initial_value] __device__(auto const& length_and_valid_count) {
auto const length = thrust::get<0>(length_and_valid_count);
auto const valid_count = thrust::get<1>(length_and_valid_count);
return (null_handling == null_policy::EXCLUDE)
? (valid_initial_value.value_or(false) || valid_count > 0)
: (valid_initial_value.value_or(length > 0) && valid_count == length);
},
stream,
mr);
}
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/null_mask.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <vector>
namespace cudf {
namespace detail {
/**
* @copydoc cudf::create_null_mask(size_type, mask_state, rmm::mr::device_memory_resource*)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
rmm::device_buffer create_null_mask(size_type size,
mask_state state,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::set_null_mask(bitmask_type*, size_type, size_type, bool)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
void set_null_mask(bitmask_type* bitmask,
size_type begin_bit,
size_type end_bit,
bool valid,
rmm::cuda_stream_view stream);
/**
* @brief Given a bitmask, counts the number of set (1) bits in the range
* `[start, stop)`.
*
* @throws cudf::logic_error if `bitmask == nullptr`
* @throws cudf::logic_error if `start > stop`
* @throws cudf::logic_error if `start < 0`
*
* @param bitmask Bitmask residing in device memory whose bits will be counted.
* @param start Index of the first bit to count (inclusive).
* @param stop Index of the last bit to count (exclusive).
* @param stream CUDA stream used for device memory operations and kernel launches.
* @return The number of non-zero bits in the specified range.
*/
cudf::size_type count_set_bits(bitmask_type const* bitmask,
size_type start,
size_type stop,
rmm::cuda_stream_view stream);
/**
* @brief Given a bitmask, counts the number of unset (0) bits in the range
* `[start, stop)`.
*
* @throws cudf::logic_error if `bitmask == nullptr`
* @throws cudf::logic_error if `start > stop`
* @throws cudf::logic_error if `start < 0`
*
* @param bitmask Bitmask residing in device memory whose bits will be counted.
* @param start Index of the first bit to count (inclusive).
* @param stop Index of the last bit to count (exclusive).
* @param stream CUDA stream used for device memory operations and kernel launches.
* @return The number of zero bits in the specified range.
*/
cudf::size_type count_unset_bits(bitmask_type const* bitmask,
size_type start,
size_type stop,
rmm::cuda_stream_view stream);
/**
* @brief Given a bitmask, counts the number of set (1) bits in every range
* `[indices[2*i], indices[(2*i)+1])` (where 0 <= i < indices.size() / 2).
*
* @throws cudf::logic_error if `bitmask == nullptr`
* @throws cudf::logic_error if `indices.size() % 2 != 0`
* @throws cudf::logic_error if `indices[2*i] < 0 or indices[2*i] > indices[(2*i)+1]`
*
* @param[in] bitmask Bitmask residing in device memory whose bits will be counted.
* @param[in] indices A host_span of indices specifying ranges to count the number of set bits.
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @return A vector storing the number of non-zero bits in the specified ranges.
*/
std::vector<size_type> segmented_count_set_bits(bitmask_type const* bitmask,
host_span<size_type const> indices,
rmm::cuda_stream_view stream);
/**
* @brief Given a bitmask, counts the number of unset (0) bits in every range
* `[indices[2*i], indices[(2*i)+1])` (where 0 <= i < indices.size() / 2).
*
* @throws cudf::logic_error if `bitmask == nullptr`
* @throws cudf::logic_error if `indices.size() % 2 != 0`
* @throws cudf::logic_error if `indices[2*i] < 0 or indices[2*i] > indices[(2*i)+1]`
*
* @param[in] bitmask Bitmask residing in device memory whose bits will be counted.
* @param[in] indices A host_span of indices specifying ranges to count the number of unset bits.
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @return A vector storing the number of zero bits in the specified ranges.
*/
std::vector<size_type> segmented_count_unset_bits(bitmask_type const* bitmask,
host_span<size_type const> indices,
rmm::cuda_stream_view stream);
/**
* @brief Given a validity bitmask, counts the number of valid elements (set bits)
* in the range `[start, stop)`.
*
* If `bitmask == nullptr`, all elements are assumed to be valid and the
* function returns `stop-start`.
*
* @throws cudf::logic_error if `start > stop`
* @throws cudf::logic_error if `start < 0`
*
* @param[in] bitmask Validity bitmask residing in device memory.
* @param[in] start Index of the first bit to count (inclusive).
* @param[in] stop Index of the last bit to count (exclusive).
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @return The number of valid elements in the specified range.
*/
cudf::size_type valid_count(bitmask_type const* bitmask,
size_type start,
size_type stop,
rmm::cuda_stream_view stream);
/**
* @copydoc null_count(bitmask_type const* bitmask, size_type start, size_type stop)
*
 * @param stream CUDA stream used for device memory operations and kernel launches.
*/
cudf::size_type null_count(bitmask_type const* bitmask,
size_type start,
size_type stop,
rmm::cuda_stream_view stream);
/**
* @brief Given a validity bitmask, counts the number of valid elements (set
* bits) in every range `[indices[2*i], indices[(2*i)+1])` (where 0 <= i <
* indices.size() / 2).
*
 * If `bitmask == nullptr`, all elements are assumed to be valid and the returned
 * vector contains the length of each specified range.
*
* @throws cudf::logic_error if `indices.size() % 2 != 0`.
* @throws cudf::logic_error if `indices[2*i] < 0 or indices[2*i] > indices[(2*i)+1]`.
*
* @param[in] bitmask Validity bitmask residing in device memory.
* @param[in] indices A host_span of indices specifying ranges to count the number of valid
* elements.
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @return A vector storing the number of valid elements in each specified range.
*/
std::vector<size_type> segmented_valid_count(bitmask_type const* bitmask,
host_span<size_type const> indices,
rmm::cuda_stream_view stream);
/**
* @brief Given a validity bitmask, counts the number of null elements (unset
* bits) in every range `[indices[2*i], indices[(2*i)+1])` (where 0 <= i <
* indices.size() / 2).
*
 * If `bitmask == nullptr`, all elements are assumed to be valid and the returned
 * vector contains a zero for each specified range.
*
* @throws cudf::logic_error if `indices.size() % 2 != 0`
* @throws cudf::logic_error if `indices[2*i] < 0 or indices[2*i] > indices[(2*i)+1]`
*
* @param[in] bitmask Validity bitmask residing in device memory.
* @param[in] indices A host_span of indices specifying ranges to count the number of null elements.
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @return A vector storing the number of null elements in each specified range.
*/
std::vector<size_type> segmented_null_count(bitmask_type const* bitmask,
host_span<size_type const> indices,
rmm::cuda_stream_view stream);
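// Hedged sketch (not part of the original header): illustrates the documented
// `bitmask == nullptr` fallbacks above, where every element is treated as valid.
// The helper name and ranges are illustrative only.
inline void null_bitmask_fallbacks(rmm::cuda_stream_view stream)
{
  // With no bitmask, valid_count simply returns stop - start (here, 15).
  auto const all_valid = valid_count(nullptr, 5, 20, stream);
  // Likewise, each segmented valid count falls back to its range length (10 and 15).
  std::vector<size_type> indices{0, 10, 10, 25};
  auto const per_range = segmented_valid_count(
    nullptr, host_span<size_type const>{indices.data(), indices.size()}, stream);
  (void)all_valid;
  (void)per_range;
}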
/**
 * @copydoc cudf::copy_bitmask(bitmask_type const*, size_type, size_type,
 * rmm::mr::device_memory_resource*)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
rmm::device_buffer copy_bitmask(bitmask_type const* mask,
size_type begin_bit,
size_type end_bit,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::copy_bitmask(column_view const& view, rmm::mr::device_memory_resource*)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
rmm::device_buffer copy_bitmask(column_view const& view,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
 * @copydoc bitmask_and(host_span<bitmask_type const* const>, host_span<size_type const>,
 * size_type, rmm::mr::device_memory_resource*)
*
* @param stream CUDA stream used for device memory operations and kernel launches
*/
std::pair<rmm::device_buffer, size_type> bitmask_and(host_span<bitmask_type const* const> masks,
host_span<size_type const> masks_begin_bits,
size_type mask_size_bits,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::bitmask_and
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::pair<rmm::device_buffer, size_type> bitmask_and(table_view const& view,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::bitmask_or
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::pair<rmm::device_buffer, size_type> bitmask_or(table_view const& view,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @brief Performs a bitwise AND of the specified bitmasks,
* and writes in place to destination
*
* @param dest_mask Destination to which the AND result is written
* @param masks The list of data pointers of the bitmasks to be ANDed
* @param masks_begin_bits The bit offsets from which each mask is to be ANDed
* @param mask_size_bits The number of bits to be ANDed in each mask
* @param stream CUDA stream used for device memory operations and kernel launches
* @return Count of set bits
*/
cudf::size_type inplace_bitmask_and(device_span<bitmask_type> dest_mask,
host_span<bitmask_type const* const> masks,
host_span<size_type const> masks_begin_bits,
size_type mask_size_bits,
rmm::cuda_stream_view stream);
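// Hedged sketch (not part of the original header): ANDs two equally sized masks
// into a caller-provided destination using the declaration above and returns the
// number of set bits in the result. The helper name is illustrative only.
inline size_type and_two_masks(device_span<bitmask_type> dest,
                               bitmask_type const* lhs,
                               bitmask_type const* rhs,
                               size_type num_bits,
                               rmm::cuda_stream_view stream)
{
  std::vector<bitmask_type const*> masks{lhs, rhs};
  std::vector<size_type> begin_bits{0, 0};  // AND both masks starting at bit 0
  return inplace_bitmask_and(dest,
                             host_span<bitmask_type const* const>{masks.data(), masks.size()},
                             host_span<size_type const>{begin_bits.data(), begin_bits.size()},
                             num_bits,
                             stream);
}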
/**
* @brief Recursively set valid null masks for all children.
*
 * This function sets an all-valid null mask on the output column (and, recursively, on its
 * children) wherever the corresponding input column satisfies `nullable() == true`.
*
* @param input input column to check for nullability
* @param output output column to mirror nullability of input
* @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the null masks of the output column
*/
void set_all_valid_null_masks(column_view const& input,
column& output,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/copy_range.cuh
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_scalar.hpp>
#include <cub/cub.cuh>
#include <cuda_runtime.h>
#include <memory>
namespace {
template <cudf::size_type block_size,
typename SourceValueIterator,
typename SourceValidityIterator,
typename T,
bool has_validity>
__global__ void copy_range_kernel(SourceValueIterator source_value_begin,
SourceValidityIterator source_validity_begin,
cudf::mutable_column_device_view target,
cudf::size_type target_begin,
cudf::size_type target_end,
cudf::size_type* __restrict__ const null_count)
{
using cudf::detail::warp_size;
static_assert(block_size <= 1024, "copy_range_kernel assumes block_size is not larger than 1024");
static_assert(warp_size == cudf::detail::size_in_bits<cudf::bitmask_type>(),
"copy_range_kernel assumes bitmask element size in bits == warp size");
constexpr cudf::size_type leader_lane{0};
int const lane_id = threadIdx.x % warp_size;
cudf::size_type const tid = threadIdx.x + blockIdx.x * blockDim.x;
int const warp_id = tid / warp_size;
cudf::size_type const offset = target.offset();
cudf::size_type const begin_mask_idx = cudf::word_index(offset + target_begin);
cudf::size_type const end_mask_idx = cudf::word_index(offset + target_end);
cudf::size_type mask_idx = begin_mask_idx + warp_id;
cudf::size_type const masks_per_grid = gridDim.x * blockDim.x / warp_size;
cudf::size_type target_offset = begin_mask_idx * warp_size - (offset + target_begin);
cudf::size_type source_idx = tid + target_offset;
cudf::size_type warp_null_change{0};
while (mask_idx <= end_mask_idx) {
cudf::size_type index = mask_idx * warp_size + lane_id - offset;
bool in_range = (index >= target_begin && index < target_end);
// write data
if (in_range) target.element<T>(index) = *(source_value_begin + source_idx);
if (has_validity) { // update bitmask
bool const valid = in_range && *(source_validity_begin + source_idx);
int const active_mask = __ballot_sync(0xFFFF'FFFFu, in_range);
int const valid_mask = __ballot_sync(0xFFFF'FFFFu, valid);
int const warp_mask = active_mask & valid_mask;
cudf::bitmask_type old_mask = target.get_mask_word(mask_idx);
if (lane_id == leader_lane) {
cudf::bitmask_type new_mask = (old_mask & ~active_mask) | warp_mask;
target.set_mask_word(mask_idx, new_mask);
warp_null_change += __popc(active_mask & old_mask) - __popc(active_mask & new_mask);
}
}
source_idx += blockDim.x * gridDim.x;
mask_idx += masks_per_grid;
}
if (has_validity) {
auto block_null_change =
cudf::detail::single_lane_block_sum_reduce<block_size, leader_lane>(warp_null_change);
if (threadIdx.x == 0) { // if the first thread in a block
atomicAdd(null_count, block_null_change);
}
}
}
} // namespace
namespace cudf {
namespace detail {
/**
* @brief Internal API to copy a range of values from source iterators to a
* target column.
*
 * The elements indicated by the indices [@p target_begin, @p target_end) are
 * replaced with the elements retrieved from the source iterators: element
 * `target_begin + idx` is set to *(@p source_value_begin + idx) if
 * *(@p source_validity_begin + idx) is true, and invalidated otherwise
 * (where idx = [0, @p target_end - @p target_begin)).
* @p target is modified in place.
*
* @tparam SourceValueIterator Iterator for retrieving source values
* @tparam SourceValidityIterator Iterator for retrieving source validities
* @param source_value_begin Start of source value iterator
* @param source_validity_begin Start of source validity iterator
* @param target the column to copy into
* @param target_begin The starting index of the target range (inclusive)
 * @param target_end The index one past the last element of the target range
 * (exclusive)
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
template <typename SourceValueIterator, typename SourceValidityIterator>
void copy_range(SourceValueIterator source_value_begin,
SourceValidityIterator source_validity_begin,
mutable_column_view& target,
size_type target_begin,
size_type target_end,
rmm::cuda_stream_view stream)
{
CUDF_EXPECTS((target_begin <= target_end) && (target_begin >= 0) &&
(target_begin < target.size()) && (target_end <= target.size()),
"Range is out of bounds.");
using T = typename std::iterator_traits<SourceValueIterator>::value_type;
// this code assumes that source and target have the same type.
CUDF_EXPECTS(type_id_matches_device_storage_type<T>(target.type().id()), "data type mismatch");
auto warp_aligned_begin_lower_bound = cudf::util::round_down_safe(target_begin, warp_size);
auto warp_aligned_end_upper_bound = cudf::util::round_up_safe(target_end, warp_size);
auto num_items = warp_aligned_end_upper_bound - warp_aligned_begin_lower_bound;
constexpr size_type block_size{256};
auto grid = cudf::detail::grid_1d{num_items, block_size, 1};
if (target.nullable()) {
rmm::device_scalar<size_type> null_count(target.null_count(), stream);
auto kernel =
copy_range_kernel<block_size, SourceValueIterator, SourceValidityIterator, T, true>;
kernel<<<grid.num_blocks, block_size, 0, stream.value()>>>(
source_value_begin,
source_validity_begin,
*mutable_column_device_view::create(target, stream),
target_begin,
target_end,
null_count.data());
target.set_null_count(null_count.value(stream));
} else {
auto kernel =
copy_range_kernel<block_size, SourceValueIterator, SourceValidityIterator, T, false>;
kernel<<<grid.num_blocks, block_size, 0, stream.value()>>>(
source_value_begin,
source_validity_begin,
*mutable_column_device_view::create(target, stream),
target_begin,
target_end,
nullptr);
}
CUDF_CHECK_CUDA(stream.value());
}
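// Hedged usage sketch (not part of the original header): fills rows [2, 7) of a
// mutable INT32 column with a constant value, marking all of those rows valid.
// Assumes the target has at least 7 rows, an INT32 type, and that
// <thrust/iterator/constant_iterator.h> is available; names are illustrative only.
inline void fill_with_fives(mutable_column_view& target, rmm::cuda_stream_view stream)
{
  auto values   = thrust::make_constant_iterator<int32_t>(5);  // value type must match target
  auto validity = thrust::make_constant_iterator(true);
  copy_range(values, validity, target, 2, 7, stream);
}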
/**
* @copydoc cudf::copy_range_in_place
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
void copy_range_in_place(column_view const& source,
mutable_column_view& target,
size_type source_begin,
size_type source_end,
size_type target_begin,
rmm::cuda_stream_view stream);
/**
* @copydoc cudf::copy_range
* @param stream CUDA stream used for device memory operations and kernel launches.
* @return std::unique_ptr<column> The result target column
*/
std::unique_ptr<column> copy_range(column_view const& source,
column_view const& target,
size_type source_begin,
size_type source_end,
size_type target_begin,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/search.hpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/table/table.hpp>
#include <cudf/types.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
namespace cudf::detail {
/**
* @copydoc cudf::lower_bound
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> lower_bound(table_view const& haystack,
table_view const& needles,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::upper_bound
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> upper_bound(table_view const& haystack,
table_view const& needles,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::contains(column_view const&, scalar const&, rmm::mr::device_memory_resource*)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
bool contains(column_view const& haystack, scalar const& needle, rmm::cuda_stream_view stream);
/**
* @copydoc cudf::contains(column_view const&, column_view const&, rmm::mr::device_memory_resource*)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> contains(column_view const& haystack,
column_view const& needles,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @brief Check if rows in the given `needles` table exist in the `haystack` table.
*
* Given two tables, each row in the `needles` table is checked to see if there is any matching row
* (i.e., compared equal to it) in the `haystack` table. The boolean search results are written into
* the corresponding rows of the output array.
*
* @code{.pseudo}
* Example:
*
* haystack = { { 5, 4, 1, 2, 3 } }
* needles = { { 0, 1, 2 } }
* output = { false, true, true }
* @endcode
*
* @throws cudf::logic_error If column types of haystack and needles don't match
*
* @param haystack The table containing the search space
* @param needles A table of rows whose existence to check in the search space
* @param compare_nulls Control whether nulls should be compared as equal or not
 * @param compare_nans Control whether floating-point NaN values should be compared as equal or not
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned vector
* @return A vector of bools indicating if each row in `needles` has matching rows in `haystack`
*/
rmm::device_uvector<bool> contains(table_view const& haystack,
table_view const& needles,
null_equality compare_nulls,
nan_equality compare_nans,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
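// Hedged usage sketch (not part of the original header): mirrors the pseudo
// example above by checking which rows of `needles` occur in `haystack`, with
// nulls and NaNs compared as equal. The helper name is illustrative only.
inline rmm::device_uvector<bool> rows_exist(table_view const& haystack,
                                            table_view const& needles,
                                            rmm::cuda_stream_view stream,
                                            rmm::mr::device_memory_resource* mr)
{
  return contains(haystack, needles, null_equality::EQUAL, nan_equality::ALL_EQUAL, stream, mr);
}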
} // namespace cudf::detail
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/stream_compaction.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_view.hpp>
#include <cudf/stream_compaction.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
namespace cudf {
namespace detail {
/**
* @copydoc cudf::drop_nulls(table_view const&, std::vector<size_type> const&,
* cudf::size_type, rmm::mr::device_memory_resource*)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<table> drop_nulls(table_view const& input,
std::vector<size_type> const& keys,
cudf::size_type keep_threshold,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::drop_nans(table_view const&, std::vector<size_type> const&,
* cudf::size_type, rmm::mr::device_memory_resource*)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<table> drop_nans(table_view const& input,
std::vector<size_type> const& keys,
cudf::size_type keep_threshold,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::apply_boolean_mask
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<table> apply_boolean_mask(table_view const& input,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::unique
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<table> unique(table_view const& input,
std::vector<size_type> const& keys,
duplicate_keep_option keep,
null_equality nulls_equal,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::distinct
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<table> distinct(table_view const& input,
std::vector<size_type> const& keys,
duplicate_keep_option keep,
null_equality nulls_equal,
nan_equality nans_equal,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::stable_distinct
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<table> stable_distinct(table_view const& input,
std::vector<size_type> const& keys,
duplicate_keep_option keep,
null_equality nulls_equal,
nan_equality nans_equal,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @brief Create a column of indices of all distinct rows in the input table.
*
* Given an `input` table_view, an output vector of all row indices of the distinct rows is
* generated. If there are duplicate rows, which index is kept depends on the `keep` parameter.
*
* @param input The input table
* @param keep Get index of any, first, last, or none of the found duplicates
* @param nulls_equal Flag to specify whether null elements should be considered as equal
* @param nans_equal Flag to specify whether NaN elements should be considered as equal
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned vector
* @return A device_uvector containing the result indices
*/
rmm::device_uvector<size_type> get_distinct_indices(table_view const& input,
duplicate_keep_option keep,
null_equality nulls_equal,
nan_equality nans_equal,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
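// Hedged usage sketch (not part of the original header): collects the indices of
// the distinct rows of a table; which duplicate survives is governed by `keep`.
// The resulting indices can then drive a gather to materialize the distinct rows.
// The helper name is illustrative only.
inline rmm::device_uvector<size_type> distinct_row_indices(table_view const& input,
                                                           rmm::cuda_stream_view stream,
                                                           rmm::mr::device_memory_resource* mr)
{
  return get_distinct_indices(input,
                              duplicate_keep_option::KEEP_ANY,
                              null_equality::EQUAL,
                              nan_equality::ALL_EQUAL,
                              stream,
                              mr);
}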
/**
* @copydoc cudf::unique_count(column_view const&, null_policy, nan_policy)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
cudf::size_type unique_count(column_view const& input,
null_policy null_handling,
nan_policy nan_handling,
rmm::cuda_stream_view stream);
/**
* @copydoc cudf::unique_count(table_view const&, null_equality)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
cudf::size_type unique_count(table_view const& input,
null_equality nulls_equal,
rmm::cuda_stream_view stream);
/**
* @copydoc cudf::distinct_count(column_view const&, null_policy, nan_policy)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
cudf::size_type distinct_count(column_view const& input,
null_policy null_handling,
nan_policy nan_handling,
rmm::cuda_stream_view stream);
/**
* @copydoc cudf::distinct_count(table_view const&, null_equality)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
cudf::size_type distinct_count(table_view const& input,
null_equality nulls_equal,
rmm::cuda_stream_view stream);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/reshape.hpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <memory>
namespace cudf {
namespace detail {
/**
* @copydoc cudf::tile
*
* @param stream CUDA stream used for device memory operations and kernel launches
*/
std::unique_ptr<table> tile(table_view const& input,
size_type count,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::interleave_columns
*
* @param stream CUDA stream used for device memory operations and kernel launches
*/
std::unique_ptr<column> interleave_columns(table_view const& input,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/join.hpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/hashing.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/polymorphic_allocator.hpp>
#include <cuco/static_multimap.cuh>
#include <cstddef>
#include <memory>
#include <optional>
// Forward declaration
template <typename T>
class default_allocator;
namespace cudf::experimental::row::equality {
class preprocessed_table;
}
namespace cudf {
namespace detail {
constexpr int DEFAULT_JOIN_CG_SIZE = 2;
enum class join_kind { INNER_JOIN, LEFT_JOIN, FULL_JOIN, LEFT_SEMI_JOIN, LEFT_ANTI_JOIN };
/**
* @brief Hash join that builds hash table in creation and probes results in subsequent `*_join`
* member functions.
*
* User-defined hash function can be passed via the template parameter `Hasher`
*
* @tparam Hasher Unary callable type
*/
template <typename Hasher>
struct hash_join {
public:
using map_type =
cuco::static_multimap<hash_value_type,
cudf::size_type,
cuda::thread_scope_device,
rmm::mr::stream_allocator_adaptor<default_allocator<char>>,
cuco::double_hashing<DEFAULT_JOIN_CG_SIZE, Hasher, Hasher>>;
hash_join() = delete;
~hash_join() = default;
hash_join(hash_join const&) = delete;
hash_join(hash_join&&) = delete;
hash_join& operator=(hash_join const&) = delete;
hash_join& operator=(hash_join&&) = delete;
private:
bool const _is_empty; ///< true if `_hash_table` is empty
bool const _has_nulls; ///< true if nulls are present in either build table or any probe table
cudf::null_equality const _nulls_equal; ///< whether to consider nulls as equal
cudf::table_view _build; ///< input table to build the hash map
std::shared_ptr<cudf::experimental::row::equality::preprocessed_table>
    _preprocessed_build;  ///< input table preprocessed for row operators
map_type _hash_table; ///< hash table built on `_build`
public:
/**
* @brief Constructor that internally builds the hash table based on the given `build` table.
*
* @throw cudf::logic_error if the number of columns in `build` table is 0.
*
* @param build The build table, from which the hash table is built.
   * @param has_nulls Flag to indicate whether there are any nulls in the `build` table or in
   * any `probe` table that will be used later for join.
* @param compare_nulls Controls whether null join-key values should match or not.
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
hash_join(cudf::table_view const& build,
bool has_nulls,
cudf::null_equality compare_nulls,
rmm::cuda_stream_view stream);
/**
* @copydoc cudf::hash_join::inner_join
*/
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
inner_join(cudf::table_view const& probe,
std::optional<std::size_t> output_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const;
/**
* @copydoc cudf::hash_join::left_join
*/
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
left_join(cudf::table_view const& probe,
std::optional<std::size_t> output_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const;
/**
* @copydoc cudf::hash_join::full_join
*/
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
full_join(cudf::table_view const& probe,
std::optional<std::size_t> output_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const;
/**
* @copydoc cudf::hash_join::inner_join_size
*/
[[nodiscard]] std::size_t inner_join_size(cudf::table_view const& probe,
rmm::cuda_stream_view stream) const;
/**
* @copydoc cudf::hash_join::left_join_size
*/
[[nodiscard]] std::size_t left_join_size(cudf::table_view const& probe,
rmm::cuda_stream_view stream) const;
/**
* @copydoc cudf::hash_join::full_join_size
*/
std::size_t full_join_size(cudf::table_view const& probe,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const;
private:
/**
* @brief Probes the `_hash_table` built from `_build` for tuples in `probe_table`,
* and returns the output indices of `build_table` and `probe_table` as a combined table,
* i.e. if full join is specified as the join type then left join is called. Behavior
* is undefined if the provided `output_size` is smaller than the actual output size.
*
* @throw cudf::logic_error if build table is empty and `join == INNER_JOIN`.
*
* @param probe_table Table of probe side columns to join.
* @param join The type of join to be performed.
* @param output_size Optional value which allows users to specify the exact output size.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned vectors.
*
* @return Join output indices vector pair.
*/
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
probe_join_indices(cudf::table_view const& probe_table,
join_kind join,
std::optional<std::size_t> output_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const;
/**
* @copydoc cudf::detail::hash_join::probe_join_indices
*
* @throw cudf::logic_error if probe table is empty.
* @throw cudf::logic_error if the number of columns in build table and probe table do not match.
* @throw cudf::logic_error if the column data types in build table and probe table do not match.
*/
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
compute_hash_join(cudf::table_view const& probe,
join_kind join,
std::optional<std::size_t> output_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const;
};
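// Hedged usage sketch (not part of the original header): builds the hash table
// once and probes it with two tables, the build-once / probe-many pattern this
// class is designed for. `Hasher` and the helper name are illustrative only.
template <typename Hasher>
auto probe_twice(cudf::table_view const& build,
                 cudf::table_view const& probe_a,
                 cudf::table_view const& probe_b,
                 rmm::cuda_stream_view stream,
                 rmm::mr::device_memory_resource* mr)
{
  hash_join<Hasher> joiner(build, /*has_nulls=*/true, null_equality::EQUAL, stream);
  auto a_idx = joiner.inner_join(probe_a, std::nullopt, stream, mr);  // pair of row-index vectors
  auto b_idx = joiner.inner_join(probe_b, std::nullopt, stream, mr);  // reuses the same hash table
  return std::pair{std::move(a_idx), std::move(b_idx)};
}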
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/valid_if.cuh
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_scalar.hpp>
#include <thrust/distance.h>
namespace cudf {
namespace detail {
/**
* @brief Generate a bitmask where every bit is set for which a predicate is
* `true` over the elements in `[begin, begin + size)`.
*
* Bit `i` in the output mask will be set if `p(*(begin+i)) == true`.
*
* @tparam block_size The number of threads in the block
* @param[out] output The output bitmask
* @param[in] begin The beginning of the sequence of elements
* @param[in] size The number of elements
* @param[in] p The predicate to apply to each element
* @param[out] valid_count The count of set bits in the output bitmask
*/
template <size_type block_size, typename InputIterator, typename Predicate>
__global__ void valid_if_kernel(
bitmask_type* output, InputIterator begin, size_type size, Predicate p, size_type* valid_count)
{
constexpr size_type leader_lane{0};
auto const lane_id{threadIdx.x % warp_size};
auto i = cudf::detail::grid_1d::global_thread_id();
auto const stride = cudf::detail::grid_1d::grid_stride();
size_type warp_valid_count{0};
auto active_mask = __ballot_sync(0xFFFF'FFFFu, i < size);
while (i < size) {
bitmask_type ballot = __ballot_sync(active_mask, p(*(begin + i)));
if (lane_id == leader_lane) {
output[cudf::word_index(i)] = ballot;
warp_valid_count += __popc(ballot);
}
i += stride;
active_mask = __ballot_sync(active_mask, i < size);
}
size_type block_count = single_lane_block_sum_reduce<block_size, leader_lane>(warp_valid_count);
if (threadIdx.x == 0) { atomicAdd(valid_count, block_count); }
}
/**
* @brief Generate a bitmask where every bit is set for which a predicate is
* `true` over the elements in `[begin,end)`.
*
* Bit `i` in the output mask will be set if `p(*(begin+i)) == true`.
*
* If `distance(begin,end) == 0`, returns an empty `rmm::device_buffer`.
*
* @throws cudf::logic_error if `(begin > end)`
*
* @param begin The beginning of the sequence
* @param end The end of the sequence
* @param p The predicate
* @param stream CUDA stream used for device memory operations and kernel launches.
 * @return A pair containing a `device_buffer` with the new bitmask and its
 * null count
*/
template <typename InputIterator, typename Predicate>
std::pair<rmm::device_buffer, size_type> valid_if(InputIterator begin,
InputIterator end,
Predicate p,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(begin <= end, "Invalid range.");
size_type size = thrust::distance(begin, end);
auto null_mask = detail::create_null_mask(size, mask_state::UNINITIALIZED, stream, mr);
size_type null_count{0};
if (size > 0) {
rmm::device_scalar<size_type> valid_count{0, stream};
constexpr size_type block_size{256};
grid_1d grid{size, block_size};
valid_if_kernel<block_size><<<grid.num_blocks, grid.num_threads_per_block, 0, stream.value()>>>(
static_cast<bitmask_type*>(null_mask.data()), begin, size, p, valid_count.data());
null_count = size - valid_count.value(stream);
}
return std::pair(std::move(null_mask), null_count);
}
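// Hedged usage sketch (not part of the original header): builds a null mask that
// marks every even row as valid using the valid_if overload above. Assumes
// <thrust/iterator/counting_iterator.h> is available; the helper name is
// illustrative only.
inline std::pair<rmm::device_buffer, size_type> even_rows_valid(
  size_type num_rows, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
{
  auto begin = thrust::make_counting_iterator<size_type>(0);
  return valid_if(
    begin, begin + num_rows, [] __device__(size_type i) { return i % 2 == 0; }, stream, mr);
}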
/**
* @brief Populates a set of bitmasks by applying a binary predicate to two
* input ranges.
* Given a set of bitmasks, `masks`, the state of bit `j` in mask `i` is
* determined by `p( *(begin1 + i), *(begin2 + j))`. If the predicate evaluates
* to true, the bit is set to `1`. If false, set to `0`.
*
* Example Arguments:
* begin1: zero-based counting iterator,
* begin2: zero-based counting iterator,
* p: [](size_type col, size_type row){ return col == row; }
* masks: [[b00...], [b00...], [b00...]]
* mask_count: 3
* mask_num_bits: 2
* valid_counts: [0, 0, 0]
*
* Example Results:
* masks: [[b10...], [b01...], [b00...]]
* valid_counts: [1, 1, 0]
*
* @note If any mask in `masks` is `nullptr`, that mask will be ignored.
*
* @param begin1 LHS arguments to binary predicate. ex: column/mask idx
* @param begin2 RHS arguments to binary predicate. ex: row/bit idx
* @param p Predicate: `bit = p(begin1 + mask_idx, begin2 + bit_idx)`
* @param masks Masks for which bits will be obtained and assigned.
* @param mask_count The number of `masks`.
* @param mask_num_bits The number of bits to assign for each mask. If this
* number is smaller than the total number of bits, the
* remaining bits may not be initialized.
* @param valid_counts Used to obtain the total number of valid bits for each
* mask.
*/
template <typename InputIterator1,
typename InputIterator2,
typename BinaryPredicate,
int32_t block_size>
__global__ void valid_if_n_kernel(InputIterator1 begin1,
InputIterator2 begin2,
BinaryPredicate p,
bitmask_type* masks[],
size_type mask_count,
size_type mask_num_bits,
size_type* valid_counts)
{
for (size_type mask_idx = 0; mask_idx < mask_count; mask_idx++) {
auto const mask = masks[mask_idx];
if (mask == nullptr) { continue; }
auto block_offset = blockIdx.x * blockDim.x;
auto warp_valid_count = static_cast<size_type>(0);
while (block_offset < mask_num_bits) {
auto const thread_idx = block_offset + threadIdx.x;
auto const thread_active = thread_idx < mask_num_bits;
auto const arg_1 = *(begin1 + mask_idx);
auto const arg_2 = *(begin2 + thread_idx);
auto const bit_is_valid = thread_active && p(arg_1, arg_2);
auto const warp_validity = __ballot_sync(0xffff'ffffu, bit_is_valid);
      // index of the bitmask word covering this thread's bit (named word_idx to
      // avoid shadowing the outer loop's mask_idx)
      auto const word_idx = word_index(thread_idx);
      if (thread_active && threadIdx.x % warp_size == 0) { mask[word_idx] = warp_validity; }
warp_valid_count += __popc(warp_validity);
block_offset += blockDim.x * gridDim.x;
}
auto block_valid_count = single_lane_block_sum_reduce<block_size, 0>(warp_valid_count);
if (threadIdx.x == 0) { atomicAdd(valid_counts + mask_idx, block_valid_count); }
}
}
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/transform.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/ast/expressions.hpp>
#include <cudf/transform.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace detail {
/**
* @copydoc cudf::transform
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> transform(column_view const& input,
std::string const& unary_udf,
data_type output_type,
bool is_ptx,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::compute_column
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> compute_column(table_view const& table,
ast::expression const& expr,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::nans_to_nulls
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::pair<std::unique_ptr<rmm::device_buffer>, size_type> nans_to_nulls(
column_view const& input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::bools_to_mask
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::pair<std::unique_ptr<rmm::device_buffer>, cudf::size_type> bools_to_mask(
column_view const& input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::encode
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::pair<std::unique_ptr<cudf::table>, std::unique_ptr<cudf::column>> encode(
cudf::table_view const& input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::one_hot_encode
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::pair<std::unique_ptr<column>, table_view> one_hot_encode(column_view const& input,
column_view const& categories,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::mask_to_bools
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> mask_to_bools(bitmask_type const* null_mask,
size_type begin_bit,
size_type end_bit,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::row_bit_count
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> row_bit_count(table_view const& t,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/datetime_ops.cuh
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda/std/chrono>
namespace cudf {
namespace datetime {
namespace detail {
using namespace cuda::std::chrono;
template <typename Timestamp>
__device__ Timestamp add_calendrical_months_with_scale_back(Timestamp time_val, months months_val)
{
auto const days_since_epoch = floor<days>(time_val);
auto const date = [&]() {
auto const ymd = year_month_day{days_since_epoch} + months_val;
return ymd.ok() ? ymd : ymd.year() / ymd.month() / last;
}();
auto const time = (time_val - days_since_epoch);
return sys_days{date} + time;
}
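// Hedged illustration (not part of the original header): host-side mirror of the
// same scale-back rule using the cuda::std::chrono calendar types already pulled
// in above. 2021-01-31 plus one month first yields the invalid date 2021-02-31,
// which is clamped to 2021-02-28. The helper name is illustrative only.
inline year_month_day add_months_scale_back_host(year_month_day ymd, months months_val)
{
  auto shifted = ymd + months_val;  // may produce an invalid day-of-month
  if (!shifted.ok()) { shifted = shifted.year() / shifted.month() / last; }  // clamp to month end
  return shifted;
}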
} // namespace detail
} // namespace datetime
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/merge.hpp
|
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <rmm/device_uvector.hpp>
#include <thrust/pair.h>
namespace cudf {
namespace detail {
/**
* @brief Source table identifier to copy data from.
*/
enum class side : bool { LEFT, RIGHT };
/**
* @brief Tagged index type: `thrust::get<0>` indicates left/right side,
* `thrust::get<1>` indicates the row index
*/
using index_type = thrust::pair<side, cudf::size_type>;
/**
* @brief Vector of `index_type` values.
*/
using index_vector = rmm::device_uvector<index_type>;
/**
* @copydoc std::unique_ptr<cudf::table> merge(
* std::vector<table_view> const& tables_to_merge,
* std::vector<cudf::size_type> const& key_cols,
* std::vector<cudf::order> const& column_order,
* std::vector<cudf::null_order> const& null_precedence,
* rmm::mr::device_memory_resource* mr)
*
* @param stream CUDA stream used for device memory operations and kernel launches
*/
std::unique_ptr<cudf::table> merge(std::vector<table_view> const& tables_to_merge,
std::vector<cudf::size_type> const& key_cols,
std::vector<cudf::order> const& column_order,
std::vector<cudf::null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/rolling.hpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/aggregation.hpp>
#include <cudf/rolling.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <memory>
namespace cudf {
namespace detail {
/**
* @copydoc std::unique_ptr<column> rolling_window(
* column_view const& input,
* column_view const& preceding_window,
* column_view const& following_window,
* size_type min_periods,
* rolling_aggregation const& agg,
* rmm::mr::device_memory_resource* mr)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> rolling_window(column_view const& input,
column_view const& preceding_window,
column_view const& following_window,
size_type min_periods,
rolling_aggregation const& agg,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/contiguous_split.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/contiguous_split.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace detail {
/**
* @copydoc cudf::contiguous_split
*
* @param stream CUDA stream used for device memory operations and kernel launches.
**/
std::vector<packed_table> contiguous_split(cudf::table_view const& input,
std::vector<size_type> const& splits,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::pack
*
* @param stream Optional CUDA stream on which to execute kernels
**/
packed_columns pack(cudf::table_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
// opaque implementation of `metadata_builder` since it needs to use
// `serialized_column`, which is only defined in pack.cpp
class metadata_builder_impl;
/**
* @brief Helper class that creates packed column metadata.
*
* This class is an interface to the opaque metadata that is used to
* describe `contiguous_split` and `pack` results.
*/
class metadata_builder {
public:
/**
* @brief Construct a new metadata_builder.
*
* @param num_root_columns is the number of top-level columns
*/
explicit metadata_builder(size_type const num_root_columns);
/**
   * @brief Destructor; defaulted in the implementation file because metadata_builder_impl
   * is an incomplete type at this point.
*/
~metadata_builder();
/**
* @brief Add a column to this metadata builder.
*
   * Callers must call this function for the parent column first, followed by any children,
   * in the order maintained in the column/column_view.
*
* Example: given a table with a nested column "a" with 2 children, and a non-nested column "b":
*
* 1) add_column_info_to_meta(col_a)
* 2) add_column_info_to_meta(col_a_child_1)
* 3) add_column_info_to_meta(col_a_child_2)
* 4) add_column_info_to_meta(col_b)
*
* @param col_type column data type
* @param col_size column row count
* @param col_null_count column null count
* @param data_offset data offset from the column's base ptr,
* or -1 for an empty column
* @param null_mask_offset null mask offset from the column's base ptr,
* or -1 for a column that isn't nullable
* @param num_children number of children columns
*/
void add_column_info_to_meta(data_type const col_type,
size_type const col_size,
size_type const col_null_count,
int64_t const data_offset,
int64_t const null_mask_offset,
size_type const num_children);
/**
* @brief Builds the opaque metadata for all added columns.
*
* @returns A vector containing the serialized column metadata
*/
std::vector<uint8_t> build() const;
/**
* @brief Clear the internal buffer containing all added metadata.
*/
void clear();
private:
std::unique_ptr<metadata_builder_impl> impl;
};
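// Hedged usage sketch (not part of the original header): mirrors the call order
// documented on add_column_info_to_meta for a single non-nested INT32 column with
// no null mask. Offsets and the helper name are illustrative only.
inline std::vector<uint8_t> build_single_column_metadata(size_type num_rows)
{
  metadata_builder builder(1);  // one top-level column
  builder.add_column_info_to_meta(data_type{type_id::INT32},
                                  num_rows,
                                  0,    // null count
                                  0,    // data offset into the contiguous buffer
                                  -1,   // no null mask
                                  0);   // no children
  return builder.build();
}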
/**
* @copydoc pack_metadata
* @param builder The reusable builder object to create packed column metadata.
*/
std::vector<uint8_t> pack_metadata(table_view const& table,
uint8_t const* contiguous_buffer,
size_t buffer_size,
metadata_builder& builder);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/scatter.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_view.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <memory>
namespace cudf {
namespace detail {
/**
* @brief Scatters the rows of the source table into a copy of the target table
* according to a scatter map.
*
* Scatters values from the source table into the target table out-of-place,
* returning a "destination table". The scatter is performed according to a
* scatter map such that row `scatter_map[i]` of the destination table gets row
* `i` of the source table. All other rows of the destination table equal
* corresponding rows of the target table.
*
* The number of columns in source must match the number of columns in target
* and their corresponding datatypes must be the same.
*
* A negative value `i` in the `scatter_map` is interpreted as `i+n`, where `n`
* is the number of rows in the `target` table.
*
* If the same index appears more than once in the scatter map, the result is
* undefined.
* If any values in `scatter_map` are outside of the interval [-n, n) where `n`
* is the number of rows in the `target` table, behavior is undefined.
*
* @param source The input columns containing values to be scattered into the
* target columns
* @param scatter_map A non-nullable column of integral indices that maps the
* rows in the source table to rows in the target table. The size must be equal
* to or less than the number of elements in the source columns.
* @param target The set of columns into which values from the source_table
* are to be scattered
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned table's device memory
* @return Result of scattering values from source to target
*/
std::unique_ptr<table> scatter(table_view const& source,
column_view const& scatter_map,
table_view const& target,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
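// Hedged sketch (not part of the original header): wires up the call above and
// spells out the mapping for one concrete case. With a single INT32 column,
// target = {0, 0, 0, 0, 0}, source = {10, 20, 30} and scatter_map = {2, 0, 4},
// the result is {20, 0, 10, 0, 30}: row i of source lands at row scatter_map[i].
// Column construction is left to the caller; the helper name is illustrative only.
inline std::unique_ptr<table> scatter_example(table_view const& source,
                                              column_view const& scatter_map,
                                              table_view const& target,
                                              rmm::cuda_stream_view stream,
                                              rmm::mr::device_memory_resource* mr)
{
  return scatter(source, scatter_map, target, stream, mr);
}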
/**
* @copydoc cudf::detail::scatter(table_view const&,column_view const&,table_view
* const&,bool,rmm::cuda_stream_view,rmm::mr::device_memory_resource*)
*
* @throws cudf::logic_error if `scatter_map` span size is larger than max of `size_type`.
*/
std::unique_ptr<table> scatter(table_view const& source,
device_span<size_type const> const scatter_map,
table_view const& target,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @brief Scatters a row of scalar values into a copy of the target table
* according to a scatter map.
*
* Scatters values from the source row into the target table out-of-place,
* returning a "destination table". The scatter is performed according to a
* scatter map such that row `scatter_map[i]` of the destination table is
* replaced by the source row. All other rows of the destination table equal
* corresponding rows of the target table.
*
* The number of elements in source must match the number of columns in target
* and their corresponding datatypes must be the same.
*
* If the same index appears more than once in the scatter map, the result is
* undefined.
*
* If any values in `indices` are outside of the interval [-n, n) where `n`
* is the number of rows in the `target` table, behavior is undefined.
*
* @param source The input scalars containing values to be scattered into the
* target columns
* @param indices A non-nullable column of integral indices that indicate
* the rows in the target table to be replaced by source.
* @param target The set of columns into which values from the source_table
* are to be scattered
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned table's device memory
* @return Result of scattering values from source to target
*/
std::unique_ptr<table> scatter(std::vector<std::reference_wrapper<scalar const>> const& source,
column_view const& indices,
table_view const& target,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::boolean_mask_scatter(
table_view const& source, table_view const& target,
* column_view const& boolean_mask,
* rmm::mr::device_memory_resource *mr)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<table> boolean_mask_scatter(table_view const& source,
table_view const& target,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::boolean_mask_scatter(
* std::vector<std::reference_wrapper<scalar>> const& source,
* table_view const& target,
* column_view const& boolean_mask,
* rmm::mr::device_memory_resource *mr)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<table> boolean_mask_scatter(
std::vector<std::reference_wrapper<scalar const>> const& source,
table_view const& target,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/round.hpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/round.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
//! Inner interfaces and implementations
namespace detail {
/**
* @copydoc cudf::round(column_view const&, int32_t, rounding_method,
* rmm::mr::device_memory_resource*)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> round(column_view const& input,
int32_t decimal_places,
rounding_method method,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/concatenate.hpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_view.hpp>
#include <cudf/concatenate.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <vector>
namespace cudf {
//! Inner interfaces and implementations
namespace detail {
/**
* @copydoc cudf::concatenate(host_span<column_view const>,rmm::mr::device_memory_resource*)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> concatenate(host_span<column_view const> columns_to_concat,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::concatenate(host_span<table_view const>,rmm::mr::device_memory_resource*)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<table> concatenate(host_span<table_view const> tables_to_concat,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/sizes_to_offsets_iterator.cuh
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/types.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/distance.h>
#include <thrust/scan.h>
#include <stdexcept>
namespace cudf {
namespace detail {
/**
* @brief Iterator that can be used with a scan algorithm and also return the last element
*
* Use cudf::detail::make_sizes_to_offsets_iterator to create an instance of this class.
*
* @tparam ScanIterator Output iterator type for use in a scan operation
* @tparam LastType Type used for final scan element
*/
template <typename ScanIterator, typename LastType>
struct sizes_to_offsets_iterator {
using difference_type = ptrdiff_t;
using value_type = LastType;
using pointer = LastType*;
using reference = sizes_to_offsets_iterator const&;
using iterator_category = std::random_access_iterator_tag;
using ScanType = typename thrust::iterator_traits<ScanIterator>::value_type;
CUDF_HOST_DEVICE inline sizes_to_offsets_iterator& operator++()
{
++itr_;
return *this;
}
CUDF_HOST_DEVICE inline sizes_to_offsets_iterator operator++(int)
{
sizes_to_offsets_iterator tmp(*this);
operator++();
return tmp;
}
CUDF_HOST_DEVICE inline sizes_to_offsets_iterator& operator--()
{
--itr_;
return *this;
}
CUDF_HOST_DEVICE inline sizes_to_offsets_iterator operator--(int)
{
sizes_to_offsets_iterator tmp(*this);
operator--();
return tmp;
}
CUDF_HOST_DEVICE inline sizes_to_offsets_iterator& operator+=(difference_type offset)
{
itr_ += offset;
return *this;
}
CUDF_HOST_DEVICE inline sizes_to_offsets_iterator operator+(difference_type offset) const
{
sizes_to_offsets_iterator tmp(*this);
tmp.itr_ += offset;
return tmp;
}
CUDF_HOST_DEVICE inline friend sizes_to_offsets_iterator operator+(
difference_type offset, sizes_to_offsets_iterator const& rhs)
{
sizes_to_offsets_iterator tmp{rhs};
tmp.itr_ += offset;
return tmp;
}
CUDF_HOST_DEVICE inline sizes_to_offsets_iterator& operator-=(difference_type offset)
{
itr_ -= offset;
return *this;
}
CUDF_HOST_DEVICE inline sizes_to_offsets_iterator operator-(difference_type offset) const
{
sizes_to_offsets_iterator tmp(*this);
tmp.itr_ -= offset;
return tmp;
}
CUDF_HOST_DEVICE inline friend sizes_to_offsets_iterator operator-(
difference_type offset, sizes_to_offsets_iterator const& rhs)
{
sizes_to_offsets_iterator tmp{rhs};
tmp.itr_ -= offset;
return tmp;
}
CUDF_HOST_DEVICE inline difference_type operator-(sizes_to_offsets_iterator const& rhs) const
{
return itr_ - rhs.itr_;
}
CUDF_HOST_DEVICE inline bool operator==(sizes_to_offsets_iterator const& rhs) const
{
return rhs.itr_ == itr_;
}
CUDF_HOST_DEVICE inline bool operator!=(sizes_to_offsets_iterator const& rhs) const
{
return rhs.itr_ != itr_;
}
CUDF_HOST_DEVICE inline bool operator<(sizes_to_offsets_iterator const& rhs) const
{
return itr_ < rhs.itr_;
}
CUDF_HOST_DEVICE inline bool operator>(sizes_to_offsets_iterator const& rhs) const
{
return itr_ > rhs.itr_;
}
CUDF_HOST_DEVICE inline bool operator<=(sizes_to_offsets_iterator const& rhs) const
{
return itr_ <= rhs.itr_;
}
CUDF_HOST_DEVICE inline bool operator>=(sizes_to_offsets_iterator const& rhs) const
{
return itr_ >= rhs.itr_;
}
CUDF_HOST_DEVICE inline sizes_to_offsets_iterator const& operator*() const { return *this; }
CUDF_HOST_DEVICE inline sizes_to_offsets_iterator const operator[](int idx) const
{
sizes_to_offsets_iterator tmp{*this};
tmp.itr_ += idx;
return tmp;
}
/**
* @brief Called to set the output of the scan operation to the current iterator position
*
* @param value Value to set to the current output
* @return This iterator instance
*/
CUDF_HOST_DEVICE inline sizes_to_offsets_iterator const& operator=(LastType const value) const
{
*itr_ = static_cast<ScanType>(value); // place into the output
if (itr_ == end_) { *last_ = value; } // also save the last value
return *this;
}
sizes_to_offsets_iterator() = default;
sizes_to_offsets_iterator(sizes_to_offsets_iterator const&) = default;
sizes_to_offsets_iterator(sizes_to_offsets_iterator&&) = default;
sizes_to_offsets_iterator& operator=(sizes_to_offsets_iterator const&) = default;
sizes_to_offsets_iterator& operator=(sizes_to_offsets_iterator&&) = default;
protected:
template <typename S, typename R>
friend sizes_to_offsets_iterator<S, R> make_sizes_to_offsets_iterator(S, S, R*);
/**
* @brief Iterator constructor
*
* Use the make_sizes_to_offsets_iterator() to create an instance of this class
*/
sizes_to_offsets_iterator(ScanIterator begin, ScanIterator end, LastType* last)
: itr_{begin}, end_{thrust::prev(end)}, last_{last}
{
}
ScanIterator itr_{};
ScanIterator end_{};
LastType* last_{};
};
/**
* @brief Create an instance of a sizes_to_offsets_iterator
*
* @code{.pseudo}
* auto begin = // begin input iterator
* auto end = // end input iterator
* auto result = rmm::device_uvector(std::distance(begin,end), stream);
* auto last = rmm::device_scalar<int64_t>(0, stream);
* auto itr = make_sizes_to_offsets_iterator(result.begin(),
* result.end(),
* last.data());
* thrust::exclusive_scan(rmm::exec_policy(stream), begin, end, itr, int64_t{0});
* // last contains the value of the final element in the scan result
* @endcode
*
* @tparam ScanIterator Output iterator type for use in a scan operation
* @tparam LastType Type used for holding the final element value
*
* @param begin Output iterator for scan
* @param end End of the output iterator for scan
* @param last Last element in the scan is stored here
* @return Instance of iterator
*/
template <typename ScanIterator, typename LastType>
static sizes_to_offsets_iterator<ScanIterator, LastType> make_sizes_to_offsets_iterator(
ScanIterator begin, ScanIterator end, LastType* last)
{
return sizes_to_offsets_iterator<ScanIterator, LastType>(begin, end, last);
}
/**
* @brief Perform an exclusive-scan and capture the final element value
*
* This performs an exclusive-scan (addition only) on the given input `[begin, end)`.
* The output of the scan is placed in `result` and the value of the last element is returned.
*
* This implementation will return the last element in `int64_t` or `uint64_t` precision
* as appropriate regardless of the input or result types.
* This can be used to check if the scan operation overflowed when the input and result are
* declared as smaller types.
*
* Only integral types for input and result types are supported.
*
* Note that `begin == result` is allowed, but `result` must not otherwise overlap `[begin,end)`;
* any other overlap results in undefined behavior.
*
* @code{.pseudo}
* auto const bytes = cudf::detail::sizes_to_offsets(
* d_offsets, d_offsets + strings_count + 1, d_offsets, stream);
* CUDF_EXPECTS(bytes <= static_cast<int64_t>(std::numeric_limits<size_type>::max()),
* "Size of output exceeds the column size limit", std::overflow_error);
* @endcode
*
* @tparam SizesIterator Iterator type for input of the scan using addition operation
* @tparam OffsetsIterator Iterator type for the output of the scan
*
* @param begin Input iterator for scan
* @param end End of the input iterator
* @param result Output iterator for scan result
* @return The last element of the scan
*/
template <typename SizesIterator, typename OffsetsIterator>
auto sizes_to_offsets(SizesIterator begin,
SizesIterator end,
OffsetsIterator result,
rmm::cuda_stream_view stream)
{
using SizeType = typename thrust::iterator_traits<SizesIterator>::value_type;
static_assert(std::is_integral_v<SizeType>,
"Only numeric types are supported by sizes_to_offsets");
using LastType = std::conditional_t<std::is_signed_v<SizeType>, int64_t, uint64_t>;
auto last_element = rmm::device_scalar<LastType>(0, stream);
auto output_itr =
make_sizes_to_offsets_iterator(result, result + std::distance(begin, end), last_element.data());
// This function uses the type of the initialization parameter as the accumulator type
// when computing the individual scan output elements.
thrust::exclusive_scan(rmm::exec_policy(stream), begin, end, output_itr, LastType{0});
return last_element.value(stream);
}
/**
* @brief Create an offsets column to be a child of a compound column
*
* This function sets the offsets values by executing scan over the sizes in the provided
* Iterator.
*
* The return also includes the total number of elements -- the last element value from the
* scan.
*
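* A minimal usage sketch (illustrative only; `d_sizes` is a hypothetical device iterator of
* per-row sizes and `num_rows` its length):
* @code{.pseudo}
* auto [offsets, total] = cudf::detail::make_offsets_child_column(
*   d_sizes, d_sizes + num_rows, stream, mr);
* // offsets->size() == num_rows + 1 and total equals the sum of all sizes
* @endcode
*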
* @throw std::overflow_error if the total size of the scan (its last element) is greater than the
* maximum value of `size_type`
*
* @tparam InputIterator Used as input to scan to set the offset values
* @param begin The beginning of the input sequence
* @param end The end of the input sequence
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return Offsets column and total elements
*/
template <typename InputIterator>
std::pair<std::unique_ptr<column>, size_type> make_offsets_child_column(
InputIterator begin,
InputIterator end,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto count = static_cast<size_type>(std::distance(begin, end));
auto offsets_column = make_numeric_column(
data_type{type_to_id<size_type>()}, count + 1, mask_state::UNALLOCATED, stream, mr);
auto offsets_view = offsets_column->mutable_view();
auto d_offsets = offsets_view.template data<size_type>();
// The number of offsets is count+1 so to build the offsets from the sizes
// using exclusive-scan technically requires count+1 input values even though
// the final input value is never used.
// The input iterator is wrapped here to allow the last value to be safely read.
auto map_fn = [begin, count] __device__(size_type idx) -> size_type {
return idx < count ? static_cast<size_type>(begin[idx]) : size_type{0};
};
auto input_itr = cudf::detail::make_counting_transform_iterator(0, map_fn);
// Use the sizes-to-offsets iterator to compute the total number of elements
auto const total_elements = sizes_to_offsets(input_itr, input_itr + count + 1, d_offsets, stream);
CUDF_EXPECTS(
total_elements <= static_cast<decltype(total_elements)>(std::numeric_limits<size_type>::max()),
"Size of output exceeds the column size limit",
std::overflow_error);
offsets_column->set_null_count(0);
return std::pair(std::move(offsets_column), static_cast<size_type>(total_elements));
}
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/timezone.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/timezone.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf::detail {
/**
* @copydoc cudf::make_timezone_transition_table(std::optional<std::string_view>, std::string_view,
* rmm::mr::device_memory_resource*)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<table> make_timezone_transition_table(
std::optional<std::string_view> tzif_dir,
std::string_view timezone_name,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
} // namespace cudf::detail
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/copy.hpp
|
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <initializer_list>
namespace cudf {
namespace detail {
/**
* @brief Constructs a zero-copy `column_view`/`mutable_column_view` of the
* elements in the range `[begin,end)` in `input`.
*
* @note It is the caller's responsibility to ensure that the returned view
* does not outlive the viewed device memory.
*
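* A minimal usage sketch (illustrative only; `col` is a hypothetical column_view with at least
* ten rows):
* @code{.pseudo}
* auto const head = cudf::detail::slice(col, 0, 10, stream);  // zero-copy view of rows [0, 10)
* @endcode
*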
* @throws cudf::logic_error if `begin < 0`, `end < begin` or
* `end > input.size()`.
*
* @tparam ColumnView Must be either cudf::column_view or cudf::mutable_column_view
* @param input View of input column to slice
* @param begin Index of the first desired element in the slice (inclusive).
* @param end Index of the last desired element in the slice (exclusive).
* @param stream CUDA stream used for device memory operations and kernel launches
*
* @return ColumnView View of the elements `[begin,end)` from `input`.
*/
template <typename ColumnView>
ColumnView slice(ColumnView const& input,
size_type begin,
size_type end,
rmm::cuda_stream_view stream);
/**
* @copydoc cudf::slice(column_view const&, host_span<size_type const>)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::vector<column_view> slice(column_view const& input,
host_span<size_type const> indices,
rmm::cuda_stream_view stream);
/**
* @copydoc cudf::slice(column_view const&, std::initializer_list<size_type>)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::vector<column_view> slice(column_view const& input,
std::initializer_list<size_type> indices,
rmm::cuda_stream_view stream);
/**
* @copydoc cudf::slice(table_view const&, host_span<size_type const>)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::vector<table_view> slice(table_view const& input,
host_span<size_type const> indices,
rmm::cuda_stream_view stream);
/**
* @copydoc cudf::slice(table_view const&, std::initializer_list<size_type>)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::vector<table_view> slice(table_view const& input,
std::initializer_list<size_type> indices,
rmm::cuda_stream_view stream);
/**
* @copydoc cudf::split(column_view const&, host_span<size_type const>)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::vector<column_view> split(column_view const& input,
host_span<size_type const> splits,
rmm::cuda_stream_view stream);
/**
* @copydoc cudf::split(column_view const&, std::initializer_list<size_type>)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::vector<column_view> split(column_view const& input,
std::initializer_list<size_type> splits,
rmm::cuda_stream_view stream);
/**
* @copydoc cudf::split(table_view const&, host_span<size_type const>)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::vector<table_view> split(table_view const& input,
host_span<size_type const> splits,
rmm::cuda_stream_view stream);
/**
* @copydoc cudf::split(table_view const&, std::initializer_list<size_type>)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::vector<table_view> split(table_view const& input,
std::initializer_list<size_type> splits,
rmm::cuda_stream_view stream);
/**
* @copydoc cudf::shift(column_view const&,size_type,scalar const&,
* rmm::mr::device_memory_resource*)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> shift(column_view const& input,
size_type offset,
scalar const& fill_value,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @brief Performs segmented shifts for specified values.
*
* For each segment, the `i`th element is determined by the `i - offset`th element
* of that segment. If `i - offset` is out of range (`< 0` or `>= segment_size`), the value is
* determined by @p fill_value.
*
* Example:
* @code{.pseudo}
* segmented_values: { 3 1 2 | 3 5 3 | 2 6 }
* segment_offsets: {0 3 6 8}
* offset: 2
* fill_value: @
* result: { @ @ 3 | @ @ 3 | @ @ }
* -------------------------------------------------
* segmented_values: { 3 1 2 | 3 5 3 | 2 6 }
* segment_offsets: {0 3 6 8}
* offset: -1
* fill_value: -1
* result: { 1 2 -1 | 5 3 -1 | 6 -1 }
* @endcode
*
* @param segmented_values Segmented column, specified by @p segment_offsets
* @param segment_offsets Each segment's offset of @p segmented_values. A list of offsets
* with size `num_segments + 1`. The size of each segment is `segment_offsets[i+1] -
* segment_offsets[i]`.
* @param offset The offset by which to shift the input
* @param fill_value Fill value for indeterminable outputs
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned table and columns' device memory
*
* @note If `offset == 0`, a copy of @p segmented_values is returned.
*/
std::unique_ptr<column> segmented_shift(column_view const& segmented_values,
device_span<size_type const> segment_offsets,
size_type offset,
scalar const& fill_value,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::allocate_like(column_view const&, size_type, mask_allocation_policy,
* rmm::mr::device_memory_resource*)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> allocate_like(column_view const& input,
size_type size,
mask_allocation_policy mask_alloc,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::copy_if_else( column_view const&, column_view const&,
* column_view const&, rmm::mr::device_memory_resource*)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> copy_if_else(column_view const& lhs,
column_view const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::copy_if_else( scalar const&, column_view const&,
* column_view const&, rmm::mr::device_memory_resource*)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> copy_if_else(scalar const& lhs,
column_view const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::copy_if_else( column_view const&, scalar const&,
* column_view const&, rmm::mr::device_memory_resource*)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> copy_if_else(column_view const& lhs,
scalar const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::copy_if_else( scalar const&, scalar const&,
* column_view const&, rmm::mr::device_memory_resource*)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> copy_if_else(scalar const& lhs,
scalar const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::sample
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<table> sample(table_view const& input,
size_type const n,
sample_with_replacement replacement,
int64_t const seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::get_element
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<scalar> get_element(column_view const& input,
size_type index,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::has_nonempty_nulls
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
bool has_nonempty_nulls(column_view const& input, rmm::cuda_stream_view stream);
/**
* @copydoc cudf::may_have_nonempty_nulls
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
bool may_have_nonempty_nulls(column_view const& input, rmm::cuda_stream_view stream);
/**
* @copydoc cudf::purge_nonempty_nulls
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> purge_nonempty_nulls(column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/binaryop.hpp
|
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/binaryop.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
//! Inner interfaces and implementations
namespace detail {
/**
* @copydoc cudf::binary_operation(column_view const&, column_view const&,
* std::string const&, data_type, rmm::mr::device_memory_resource *)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> binary_operation(column_view const& lhs,
column_view const& rhs,
std::string const& ptx,
data_type output_type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::binary_operation(scalar const&, column_view const&, binary_operator,
* data_type, rmm::mr::device_memory_resource *)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> binary_operation(scalar const& lhs,
column_view const& rhs,
binary_operator op,
data_type output_type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::binary_operation(column_view const&, scalar const&, binary_operator,
* data_type, rmm::mr::device_memory_resource *)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> binary_operation(column_view const& lhs,
scalar const& rhs,
binary_operator op,
data_type output_type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::binary_operation(column_view const&, column_view const&,
* binary_operator, data_type, rmm::mr::device_memory_resource *)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> binary_operation(column_view const& lhs,
column_view const& rhs,
binary_operator op,
data_type output_type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/calendrical_month_sequence.cuh
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/datetime_ops.cuh>
#include <cudf/scalar/scalar.hpp>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
namespace cudf {
namespace detail {
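/**
 * @brief Functor that generates a sequence of `n` timestamps starting at the scalar `input`,
 * with consecutive elements spaced `months` calendar months apart.
 *
 * Only timestamp types are supported; all other types raise an error.
 */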
struct calendrical_month_sequence_functor {
template <typename T>
std::enable_if_t<cudf::is_timestamp_t<T>::value, std::unique_ptr<cudf::column>> operator()(
size_type n,
scalar const& input,
size_type months,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// Return empty column if n = 0
if (n == 0) return cudf::make_empty_column(input.type());
auto const device_input =
get_scalar_device_view(static_cast<cudf::scalar_type_t<T>&>(const_cast<scalar&>(input)));
auto output_column_type = cudf::data_type{cudf::type_to_id<T>()};
auto output = cudf::make_fixed_width_column(
output_column_type, n, cudf::mask_state::UNALLOCATED, stream, mr);
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(n),
output->mutable_view().begin<T>(),
[initial = device_input, months] __device__(size_type i) {
return datetime::detail::add_calendrical_months_with_scale_back(
initial.value(), cuda::std::chrono::months{i * months});
});
return output;
}
template <typename T, typename... Args>
std::enable_if_t<!cudf::is_timestamp_t<T>::value, std::unique_ptr<cudf::column>> operator()(
Args&&...)
{
CUDF_FAIL("Cannot make a date_range of a non-datetime type");
}
};
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/sorting.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/sorting.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <memory>
#include <vector>
namespace cudf {
namespace detail {
/**
* @copydoc cudf::sorted_order
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> sorted_order(table_view const& input,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::stable_sorted_order
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> stable_sorted_order(table_view const& input,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::sort_by_key
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<table> sort_by_key(table_view const& values,
table_view const& keys,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::rank
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> rank(column_view const& input,
rank_method method,
order column_order,
null_policy null_handling,
null_order null_precedence,
bool percentage,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::stable_sort_by_key
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<table> stable_sort_by_key(table_view const& values,
table_view const& keys,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::segmented_sorted_order
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> segmented_sorted_order(table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::stable_segmented_sorted_order
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> stable_segmented_sorted_order(
table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::segmented_sort_by_key
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<table> segmented_sort_by_key(table_view const& values,
table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::stable_segmented_sort_by_key
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<table> stable_segmented_sort_by_key(table_view const& values,
table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::sort
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<table> sort(table_view const& values,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/scatter.cuh
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_device_view.cuh>
#include <cudf/copying.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/indexalator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/dictionary/detail/update_keys.hpp>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <cudf/lists/detail/scatter.cuh>
#include <cudf/null_mask.hpp>
#include <cudf/strings/detail/scatter.cuh>
#include <cudf/strings/string_view.cuh>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/count.h>
#include <thrust/distance.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/iterator_traits.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/scatter.h>
#include <thrust/sequence.h>
#include <thrust/uninitialized_fill.h>
namespace cudf {
namespace detail {
/**
* @brief Convert a scatter map into a gather map.
*
* The caller is expected to use the output map on a subsequent gather_bitmask()
* function using the PASSTHROUGH op since the resulting map may contain index
* values outside the target's range.
*
* First, the gather-map is initialized with an invalid index.
* The value `numeric_limits::lowest()` is used since it is always outside the range of valid target rows.
* Then, `output[scatter_map[i]] = i` for each `i`.
*
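* For example (illustrative values):
* @code{.pseudo}
* scatter_map = {2, 0}
* gather_rows = 4
* output      = {1, X, 0, X}    // X = numeric_limits<size_type>::lowest()
* @endcode
*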
* @tparam MapIterator Iterator type of the input scatter map.
* @param scatter_map_begin Beginning of scatter map.
* @param scatter_map_end End of the scatter map.
* @param gather_rows Number of rows in the output map.
* @param stream Stream used for CUDA kernel calls.
* @return Output gather map.
*/
template <typename MapIterator>
auto scatter_to_gather(MapIterator scatter_map_begin,
MapIterator scatter_map_end,
size_type gather_rows,
rmm::cuda_stream_view stream)
{
using MapValueType = typename thrust::iterator_traits<MapIterator>::value_type;
// The gather_map is initialized with `numeric_limits::lowest()` value to identify pass-through
// entries when calling the gather_bitmask() which applies a pass-through whenever it finds a
// value outside the range of the target column.
// We'll use the `numeric_limits::lowest()` value for this since it should always be outside the
// valid range.
auto gather_map = rmm::device_uvector<size_type>(gather_rows, stream);
thrust::uninitialized_fill(rmm::exec_policy_nosync(stream),
gather_map.begin(),
gather_map.end(),
std::numeric_limits<size_type>::lowest());
// Convert scatter map to a gather map
thrust::scatter(
rmm::exec_policy_nosync(stream),
thrust::make_counting_iterator<MapValueType>(0),
thrust::make_counting_iterator<MapValueType>(std::distance(scatter_map_begin, scatter_map_end)),
scatter_map_begin,
gather_map.begin());
return gather_map;
}
/**
* @brief Create a complement map of `scatter_to_gather` map
*
* The purpose of this map is to create an identity-mapping for the rows that are not
* touched by the `scatter_map`.
*
* The output of this mapping is first initialized as an identity mapping
* (i.e., `output[i] = i`). Then, for each value `idx` in `scatter_map`, `output[idx]` is set to
* `numeric_limits::lowest()`, an invalid, out-of-bounds index used to identify the pass-through
* entries when calling the `gather_bitmask()` function.
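*
* For example (illustrative values):
* @code{.pseudo}
* scatter_map = {2, 0}
* gather_rows = 4
* output      = {X, 1, X, 3}    // X = numeric_limits<size_type>::lowest()
* @endcode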
*
*/
template <typename MapIterator>
auto scatter_to_gather_complement(MapIterator scatter_map_begin,
MapIterator scatter_map_end,
size_type gather_rows,
rmm::cuda_stream_view stream)
{
auto gather_map = rmm::device_uvector<size_type>(gather_rows, stream);
thrust::sequence(rmm::exec_policy_nosync(stream), gather_map.begin(), gather_map.end(), 0);
auto const out_of_bounds_begin =
thrust::make_constant_iterator(std::numeric_limits<size_type>::lowest());
auto const out_of_bounds_end =
out_of_bounds_begin + thrust::distance(scatter_map_begin, scatter_map_end);
thrust::scatter(rmm::exec_policy_nosync(stream),
out_of_bounds_begin,
out_of_bounds_end,
scatter_map_begin,
gather_map.begin());
return gather_map;
}
template <typename Element, typename Enable = void>
struct column_scatterer_impl {
template <typename... Args>
std::unique_ptr<column> operator()(Args&&...) const
{
CUDF_FAIL("Unsupported type for scatter.");
}
};
template <typename Element>
struct column_scatterer_impl<Element, std::enable_if_t<cudf::is_fixed_width<Element>()>> {
template <typename MapIterator>
std::unique_ptr<column> operator()(column_view const& source,
MapIterator scatter_map_begin,
MapIterator scatter_map_end,
column_view const& target,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
auto result = std::make_unique<column>(target, stream, mr);
auto result_view = result->mutable_view();
// NOTE use source.begin + scatter rows rather than source.end in case the
// scatter map is smaller than the number of source rows
thrust::scatter(rmm::exec_policy_nosync(stream),
source.begin<Element>(),
source.begin<Element>() + cudf::distance(scatter_map_begin, scatter_map_end),
scatter_map_begin,
result_view.begin<Element>());
return result;
}
};
template <>
struct column_scatterer_impl<string_view> {
template <typename MapIterator>
std::unique_ptr<column> operator()(column_view const& source,
MapIterator scatter_map_begin,
MapIterator scatter_map_end,
column_view const& target,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
auto d_column = column_device_view::create(source, stream);
auto const begin = d_column->begin<string_view>();
auto const end = begin + cudf::distance(scatter_map_begin, scatter_map_end);
return strings::detail::scatter(begin, end, scatter_map_begin, target, stream, mr);
}
};
template <>
struct column_scatterer_impl<list_view> {
template <typename MapIterator>
std::unique_ptr<column> operator()(column_view const& source,
MapIterator scatter_map_begin,
MapIterator scatter_map_end,
column_view const& target,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
return cudf::lists::detail::scatter(
source, scatter_map_begin, scatter_map_end, target, stream, mr);
}
};
template <>
struct column_scatterer_impl<dictionary32> {
template <typename MapIterator>
std::unique_ptr<column> operator()(column_view const& source_in,
MapIterator scatter_map_begin,
MapIterator scatter_map_end,
column_view const& target_in,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
if (target_in.is_empty()) // empty begets empty
return make_empty_column(type_id::DICTIONARY32);
if (source_in.is_empty()) // no input, just make a copy
return std::make_unique<column>(target_in, stream, mr);
// check the keys match
dictionary_column_view const source(source_in);
dictionary_column_view const target(target_in);
CUDF_EXPECTS(source.keys().type() == target.keys().type(),
"scatter dictionary keys must be the same type");
// first combine keys so both dictionaries have the same set
auto target_matched = dictionary::detail::add_keys(target, source.keys(), stream, mr);
auto const target_view = dictionary_column_view(target_matched->view());
auto source_matched = dictionary::detail::set_keys(
source, target_view.keys(), stream, rmm::mr::get_current_device_resource());
auto const source_view = dictionary_column_view(source_matched->view());
// now build the new indices by doing a scatter on just the matched indices
auto source_itr = indexalator_factory::make_input_iterator(source_view.indices());
auto new_indices = std::make_unique<column>(target_view.get_indices_annotated(), stream, mr);
auto target_itr = indexalator_factory::make_output_iterator(new_indices->mutable_view());
thrust::scatter(rmm::exec_policy_nosync(stream),
source_itr,
source_itr + std::distance(scatter_map_begin, scatter_map_end),
scatter_map_begin,
target_itr);
// record some data before calling release()
auto const indices_type = new_indices->type();
auto const output_size = new_indices->size();
auto const null_count = new_indices->null_count();
auto contents = new_indices->release();
auto indices_column = std::make_unique<column>(indices_type,
static_cast<size_type>(output_size),
std::move(*(contents.data.release())),
rmm::device_buffer{0, stream, mr},
0);
// take the keys from the matched column allocated using mr
std::unique_ptr<column> keys_column(std::move(target_matched->release().children.back()));
// create column with keys_column and indices_column
return make_dictionary_column(std::move(keys_column),
std::move(indices_column),
std::move(*(contents.null_mask.release())),
null_count);
}
};
struct column_scatterer {
template <typename Element, typename MapIterator>
std::unique_ptr<column> operator()(column_view const& source,
MapIterator scatter_map_begin,
MapIterator scatter_map_end,
column_view const& target,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
column_scatterer_impl<Element> scatterer{};
return scatterer(source, scatter_map_begin, scatter_map_end, target, stream, mr);
}
};
template <>
struct column_scatterer_impl<struct_view> {
template <typename MapItRoot>
std::unique_ptr<column> operator()(column_view const& source,
MapItRoot scatter_map_begin,
MapItRoot scatter_map_end,
column_view const& target,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
CUDF_EXPECTS(source.num_children() == target.num_children(),
"Scatter source and target are not of the same type.");
auto const scatter_map_size = std::distance(scatter_map_begin, scatter_map_end);
if (scatter_map_size == 0) { return std::make_unique<column>(target, stream, mr); }
structs_column_view const structs_src(source);
structs_column_view const structs_target(target);
std::vector<std::unique_ptr<column>> output_struct_members(structs_src.num_children());
std::transform(structs_src.child_begin(),
structs_src.child_end(),
structs_target.child_begin(),
output_struct_members.begin(),
[&scatter_map_begin, &scatter_map_end, stream, mr](auto const& source_col,
auto const& target_col) {
return type_dispatcher<dispatch_storage_type>(source_col.type(),
column_scatterer{},
source_col,
scatter_map_begin,
scatter_map_end,
target_col,
stream,
mr);
});
// We still need to call `gather_bitmask` even when the source's children are not nullable,
// as if the target's children have null_masks, those null_masks need to be updated after
// being scattered onto.
auto const child_nullable = std::any_of(structs_src.child_begin(),
structs_src.child_end(),
[](auto const& col) { return col.nullable(); }) or
std::any_of(structs_target.child_begin(),
structs_target.child_end(),
[](auto const& col) { return col.nullable(); });
if (child_nullable) {
auto const gather_map =
scatter_to_gather(scatter_map_begin, scatter_map_end, target.size(), stream);
gather_bitmask(cudf::table_view{std::vector<cudf::column_view>{structs_src.child_begin(),
structs_src.child_end()}},
gather_map.begin(),
output_struct_members,
gather_bitmask_op::PASSTHROUGH,
stream,
mr);
}
// Need to put the result column in a vector to call `gather_bitmask`.
std::vector<std::unique_ptr<column>> result;
result.emplace_back(cudf::make_structs_column(target.size(),
std::move(output_struct_members),
0,
rmm::device_buffer{0, stream, mr},
stream,
mr));
// Only gather bitmask from the target column for the rows that have not been scattered onto
// The bitmask from the source column will be gathered at the top level `scatter()` call.
if (target.nullable()) {
auto const gather_map =
scatter_to_gather_complement(scatter_map_begin, scatter_map_end, target.size(), stream);
gather_bitmask(table_view{std::vector<cudf::column_view>{target}},
gather_map.begin(),
result,
gather_bitmask_op::PASSTHROUGH,
stream,
mr);
}
return std::move(result.front());
}
};
/**
* @brief Scatters the rows of the source table into a copy of the target table
* according to a scatter map.
*
* Scatters values from the source table into the target table out-of-place,
* returning a "destination table". The scatter is performed according to a
* scatter map such that row `scatter_begin[i]` of the destination table gets row
* `i` of the source table. All other rows of the destination table equal
* corresponding rows of the target table.
*
* The number of columns in source must match the number of columns in target
* and their corresponding datatypes must be the same.
*
* If the same index appears more than once in the scatter map, the result is
* undefined. The scatter map may contain negative values; these are converted to valid indices
* by adding `target.size()`.
*
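* For example (illustrative values, single column):
* @code{.pseudo}
* source      = {10, 20, 30}
* target      = {1, 2, 3, 4, 5}
* scatter_map = {2, 0, -1}        // -1 becomes -1 + 5 = 4
* result      = {20, 2, 10, 4, 30}
* @endcode
*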
* @throws cudf::logic_error if scatter map index is out of bounds
* @throws cudf::logic_error if scatter_map.size() > source.num_rows()
*
* @param[in] source The input columns containing values to be scattered into the
* target columns
* @param[in] scatter_map_begin Beginning of iterator range of integer indices that map rows in the
* source columns to rows in the target columns
* @param[in] scatter_map_end End of iterator range of integer indices that map rows in the
* source columns to rows in the target columns
* @param[in] target The set of columns into which values from the source_table
* are to be scattered
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @param[in] mr Device memory resource used to allocate the returned table's device memory
*
* @return Result of scattering values from source to target
*/
template <typename MapIterator>
std::unique_ptr<table> scatter(table_view const& source,
MapIterator scatter_map_begin,
MapIterator scatter_map_end,
table_view const& target,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
using MapType = typename thrust::iterator_traits<MapIterator>::value_type;
CUDF_EXPECTS(std::distance(scatter_map_begin, scatter_map_end) <= source.num_rows(),
"scatter map size should be <= to number of rows in source");
// Transform negative indices to index + target size.
auto updated_scatter_map_begin =
thrust::make_transform_iterator(scatter_map_begin, index_converter<MapType>{target.num_rows()});
auto updated_scatter_map_end =
thrust::make_transform_iterator(scatter_map_end, index_converter<MapType>{target.num_rows()});
auto result = std::vector<std::unique_ptr<column>>(target.num_columns());
std::transform(source.begin(),
source.end(),
target.begin(),
result.begin(),
[=](auto const& source_col, auto const& target_col) {
return type_dispatcher<dispatch_storage_type>(source_col.type(),
column_scatterer{},
source_col,
updated_scatter_map_begin,
updated_scatter_map_end,
target_col,
stream,
mr);
});
// We still need to call `gather_bitmask` even when the source columns are not nullable,
// as if the target has null_mask, that null_mask needs to be updated after scattering.
auto const nullable =
std::any_of(source.begin(), source.end(), [](auto const& col) { return col.nullable(); }) or
std::any_of(target.begin(), target.end(), [](auto const& col) { return col.nullable(); });
if (nullable) {
auto const gather_map = scatter_to_gather(
updated_scatter_map_begin, updated_scatter_map_end, target.num_rows(), stream);
gather_bitmask(source, gather_map.begin(), result, gather_bitmask_op::PASSTHROUGH, stream, mr);
// For struct columns, we need to superimpose the null_mask of the parent over the null_mask of
// the children.
std::for_each(result.begin(), result.end(), [=](auto& col) {
auto const col_view = col->view();
if (col_view.type().id() == type_id::STRUCT and col_view.nullable()) {
auto const num_rows = col_view.size();
auto const null_count = col_view.null_count();
auto contents = col->release();
// Children null_mask will be superimposed during structs column construction.
col = cudf::make_structs_column(num_rows,
std::move(contents.children),
null_count,
std::move(*contents.null_mask),
stream,
mr);
}
});
}
return std::make_unique<table>(std::move(result));
}
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/get_value.cuh
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_view.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace detail {
/**
* @brief Return a fixed-width value from column.
*
* Retrieves the specified value from device memory. This function
* synchronizes the stream.
*
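* A minimal usage sketch (illustrative only; `col` is a hypothetical INT32 column_view):
* @code{.pseudo}
* auto const first = cudf::detail::get_value<int32_t>(col, 0, stream);  // copies element 0 to host
* @endcode
*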
* @throw cudf::logic_error if `col_view` is not a fixed-width column
* @throw cudf::logic_error if `element_index < 0 or >= col_view.size()`
*
* @tparam T Fixed-width type to return.
* @param col_view The column to retrieve the element from.
* @param element_index The specific element to retrieve
* @param stream The stream to use for copying the value to the host.
* @return Value from the `col_view[element_index]`
*/
template <typename T>
T get_value(column_view const& col_view, size_type element_index, rmm::cuda_stream_view stream)
{
CUDF_EXPECTS(cudf::is_fixed_width(col_view.type()), "get_value supports only fixed-width types");
CUDF_EXPECTS(data_type(type_to_id<T>()) == col_view.type(), "get_value data type mismatch");
CUDF_EXPECTS(element_index >= 0 && element_index < col_view.size(),
"invalid element_index value");
T result;
CUDF_CUDA_TRY(cudaMemcpyAsync(
&result, col_view.data<T>() + element_index, sizeof(T), cudaMemcpyDefault, stream.value()));
stream.synchronize();
return result;
}
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/copy_if_else.cuh
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <rmm/device_scalar.hpp>
#include <thrust/iterator/iterator_traits.h>
#include <thrust/optional.h>
namespace cudf {
namespace detail {
namespace { // anonymous
template <size_type block_size,
typename T,
typename LeftIter,
typename RightIter,
typename Filter,
bool has_nulls>
__launch_bounds__(block_size) __global__
void copy_if_else_kernel(LeftIter lhs,
RightIter rhs,
Filter filter,
mutable_column_device_view out,
size_type* __restrict__ const valid_count)
{
size_type const tid = threadIdx.x + blockIdx.x * block_size;
int const warp_id = tid / warp_size;
size_type const warps_per_grid = gridDim.x * block_size / warp_size;
// begin/end indices for the column data
size_type begin = 0;
size_type end = out.size();
// warp indices. since 1 warp == 32 threads == sizeof(bitmask_type) * 8,
// each warp will process one (32 bit) of the validity mask via
// __ballot_sync()
size_type warp_begin = cudf::word_index(begin);
size_type warp_end = cudf::word_index(end - 1);
// lane id within the current warp
constexpr size_type leader_lane{0};
int const lane_id = threadIdx.x % warp_size;
size_type warp_valid_count{0};
// current warp.
size_type warp_cur = warp_begin + warp_id;
size_type index = tid;
while (warp_cur <= warp_end) {
auto const opt_value =
(index < end) ? (filter(index) ? lhs[index] : rhs[index]) : thrust::nullopt;
if (opt_value) { out.element<T>(index) = static_cast<T>(*opt_value); }
// update validity
if (has_nulls) {
// the final validity mask for this warp
int warp_mask = __ballot_sync(0xFFFF'FFFFu, opt_value.has_value());
// only one guy in the warp needs to update the mask and count
if (lane_id == 0) {
out.set_mask_word(warp_cur, warp_mask);
warp_valid_count += __popc(warp_mask);
}
}
// next grid
warp_cur += warps_per_grid;
index += block_size * gridDim.x;
}
if (has_nulls) {
// sum all null counts across all warps
size_type block_valid_count =
single_lane_block_sum_reduce<block_size, leader_lane>(warp_valid_count);
// block_valid_count will only be valid on thread 0
if (threadIdx.x == 0) {
// using an atomic here because there are multiple blocks doing this work
atomicAdd(valid_count, block_valid_count);
}
}
}
} // anonymous namespace
/**
* @brief Returns a new column, where each element is selected from either of two input ranges based
* on a filter
*
* Given two ranges lhs and rhs, and a unary filter function, this function will allocate and return
* an output column that contains `lhs[i]` if `function(i) == true` or `rhs[i]` if `function(i) ==
* false`. The validity of the elements is propagated to the output.
*
* The range lhs is defined by iterators `[lhs_begin, lhs_end)`. The `size` of output is
* determined by the distance between `lhs_begin` and `lhs_end`.
*
* The range rhs is defined by `[rhs, rhs + size)`.
*
* Example:
* @code{.pseudo}
* lhs = {1, 2, 3, -, 5}
* rhs = {-, 6, 7, 8, 9}
*
* filter = [](i) {
* bool arr[5] = {1, 1, 0, 1, 0}
* return arr[i];
* }
*
* output = {1, 2, 7, -, 9}
* @endcode
*
* @tparam FilterFn A function of type `bool(size_type)`
* @tparam LeftIter An iterator of pair type where `first` is the value and `second` is the
* validity
* @tparam RightIter An iterator of pair type where `first` is the value and `second` is the
* validity
* @param nullable Indicates whether either input range can contain nulls
* @param lhs_begin Begin iterator of lhs range
* @param lhs_end End iterator of lhs range
* @param rhs Begin iterator of rhs range
* @param filter Function of type `FilterFn` which determines for index `i` where to get the
* corresponding output value from
* @param output_type `cudf::data_type` of the returned column
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory
* @return A new column that contains the values from either `lhs` or `rhs` as determined
* by `filter[i]`
*/
template <typename FilterFn, typename LeftIter, typename RightIter>
std::unique_ptr<column> copy_if_else(bool nullable,
LeftIter lhs_begin,
LeftIter lhs_end,
RightIter rhs,
FilterFn filter,
cudf::data_type output_type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// This is the type of the thrust::optional element in the passed iterators
using Element = typename thrust::iterator_traits<LeftIter>::value_type::value_type;
size_type size = std::distance(lhs_begin, lhs_end);
size_type num_els = cudf::util::round_up_safe(size, warp_size);
constexpr int block_size = 256;
cudf::detail::grid_1d grid{num_els, block_size, 1};
std::unique_ptr<column> out = make_fixed_width_column(
output_type, size, nullable ? mask_state::UNINITIALIZED : mask_state::UNALLOCATED, stream, mr);
auto out_v = mutable_column_device_view::create(*out, stream);
// if we have validity in the output
if (nullable) {
rmm::device_scalar<size_type> valid_count{0, stream};
// call the kernel
copy_if_else_kernel<block_size, Element, LeftIter, RightIter, FilterFn, true>
<<<grid.num_blocks, block_size, 0, stream.value()>>>(
lhs_begin, rhs, filter, *out_v, valid_count.data());
out->set_null_count(size - valid_count.value(stream));
} else {
// call the kernel
copy_if_else_kernel<block_size, Element, LeftIter, RightIter, FilterFn, false>
<<<grid.num_blocks, block_size, 0, stream.value()>>>(lhs_begin, rhs, filter, *out_v, nullptr);
}
return out;
}
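// Example (a minimal sketch): the iterator arguments are typically built from column_device_views
// using optional iterators whose value_type is `thrust::optional<T>`. The names `d_lhs`, `d_rhs`,
// `stream`, and `mr` below are assumed to exist in the caller's scope, and
// `cudf::detail::make_optional_iterator` is assumed to come from <cudf/detail/iterator.cuh>.
//
//   auto lhs_begin = cudf::detail::make_optional_iterator<int32_t>(d_lhs, cudf::nullate::YES{});
//   auto rhs_begin = cudf::detail::make_optional_iterator<int32_t>(d_rhs, cudf::nullate::YES{});
//   auto is_even   = [] __device__(cudf::size_type i) { return i % 2 == 0; };
//   auto result    = cudf::detail::copy_if_else(true,
//                                               lhs_begin,
//                                               lhs_begin + d_lhs.size(),
//                                               rhs_begin,
//                                               is_even,
//                                               cudf::data_type{cudf::type_id::INT32},
//                                               stream,
//                                               mr);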
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/is_element_valid.hpp
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace detail {
/**
* @brief Return validity of a row
*
* Retrieves the validity (NULL or non-NULL) of the specified row from device memory.
*
* @note Synchronizes `stream`.
*
* @throw cudf::logic_error if `element_index < 0 or >= col_view.size()`
*
* @param col_view The column to retrieve the validity from.
* @param element_index The index of the row to retrieve.
* @param stream The stream to use for copying the validity to the host.
* @return Host boolean that indicates the validity of the row.
*/
bool is_element_valid_sync(column_view const& col_view,
size_type element_index,
rmm::cuda_stream_view stream);
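// Example (a minimal sketch, assuming an existing column_view `col`):
//
//   bool const valid = cudf::detail::is_element_valid_sync(col, 5, cudf::get_default_stream());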
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/replace.hpp
|
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/replace.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <memory>
namespace cudf {
namespace detail {
/**
* @copydoc cudf::replace_nulls(column_view const&, column_view const&,
* rmm::mr::device_memory_resource*)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> replace_nulls(column_view const& input,
cudf::column_view const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::replace_nulls(column_view const&, scalar const&,
* rmm::mr::device_memory_resource*)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> replace_nulls(column_view const& input,
scalar const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::replace_nulls(column_view const&, replace_policy const&,
* rmm::mr::device_memory_resource*)
*
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> replace_nulls(column_view const& input,
replace_policy const& replace_policy,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::replace_nans(column_view const&, column_view const&,
* rmm::mr::device_memory_resource*)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> replace_nans(column_view const& input,
column_view const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::replace_nans(column_view const&, scalar const&,
* rmm::mr::device_memory_resource*)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> replace_nans(column_view const& input,
scalar const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::find_and_replace_all
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> find_and_replace_all(column_view const& input_col,
column_view const& values_to_replace,
column_view const& replacement_values,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::normalize_nans_and_zeros
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> normalize_nans_and_zeros(column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/copy_if.cuh
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/null_mask.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/copy.h>
#include <thrust/iterator/counting_iterator.h>
#include <cub/cub.cuh>
#include <cuda/atomic>
#include <algorithm>
namespace cudf {
namespace detail {
// Compute the count of elements that pass the mask within each block
template <typename Filter, int block_size>
__global__ void compute_block_counts(cudf::size_type* __restrict__ block_counts,
cudf::size_type size,
cudf::size_type per_thread,
Filter filter)
{
int tid = threadIdx.x + per_thread * block_size * blockIdx.x;
int count = 0;
for (int i = 0; i < per_thread; i++) {
bool mask_true = (tid < size) && filter(tid);
count += __syncthreads_count(mask_true);
tid += block_size;
}
if (threadIdx.x == 0) block_counts[blockIdx.x] = count;
}
// Compute the exclusive prefix sum of each thread's mask value within each block
template <int block_size>
__device__ cudf::size_type block_scan_mask(bool mask_true, cudf::size_type& block_sum)
{
int offset = 0;
using BlockScan = cub::BlockScan<cudf::size_type, block_size>;
__shared__ typename BlockScan::TempStorage temp_storage;
BlockScan(temp_storage).ExclusiveSum(mask_true, offset, block_sum);
return offset;
}
// This kernel scatters data and validity mask of a column based on the
// scan of the boolean mask. The block offsets for the scan are already computed.
// Just compute the scan of the mask in each block and add it to the block's
// output offset. This is the output index of each element. Scattering
// the valid mask is not as easy, because each thread is only responsible for
// one bit. Warp-level processing (ballot) makes this simpler.
// To make scattering efficient, we "coalesce" the block's scattered data and
// valids in shared memory, and then write from shared memory to global memory
// in a contiguous manner.
// The has_validity template parameter specializes this kernel for the
// non-nullable case for performance without writing another kernel.
//
// Note: `filter` is not run on indices at or beyond the input column size
template <typename T, typename Filter, int block_size, bool has_validity>
__launch_bounds__(block_size) __global__
void scatter_kernel(cudf::mutable_column_device_view output_view,
cudf::size_type* output_null_count,
cudf::column_device_view input_view,
cudf::size_type const* __restrict__ block_offsets,
cudf::size_type size,
cudf::size_type per_thread,
Filter filter)
{
T* __restrict__ output_data = output_view.data<T>();
cudf::bitmask_type* __restrict__ output_valid = output_view.null_mask();
static_assert(block_size <= 1024, "Maximum thread block size exceeded");
int tid = threadIdx.x + per_thread * block_size * blockIdx.x;
cudf::size_type block_offset = block_offsets[blockIdx.x];
// one extra warp worth in case the block is not aligned
__shared__ bool temp_valids[has_validity ? block_size + cudf::detail::warp_size : 1];
__shared__ T temp_data[block_size];
cudf::size_type warp_valid_counts{0}; // total valid sum over the `per_thread` loop below
cudf::size_type block_sum = 0; // count passing filter over the `per_thread` loop below
// Note that since the maximum gridDim.x on all supported GPUs is as big as
// cudf::size_type, this loop is sufficient to cover our maximum column size
// regardless of the value of block_size and per_thread.
for (int i = 0; i < per_thread; i++) {
bool mask_true = (tid < size) && filter(tid);
cudf::size_type tmp_block_sum = 0;
// get output location using a scan of the mask result
cudf::size_type const local_index = block_scan_mask<block_size>(mask_true, tmp_block_sum);
block_sum += tmp_block_sum;
if (has_validity) {
temp_valids[threadIdx.x] = false; // init shared memory
if (threadIdx.x < cudf::detail::warp_size) temp_valids[block_size + threadIdx.x] = false;
__syncthreads(); // wait for init
}
if (mask_true) {
temp_data[local_index] = input_view.data<T>()[tid]; // scatter data to shared
// scatter validity mask to shared memory
if (has_validity and input_view.is_valid(tid)) {
// determine aligned offset for this warp's output
cudf::size_type const aligned_offset = block_offset % cudf::detail::warp_size;
temp_valids[local_index + aligned_offset] = true;
}
}
__syncthreads(); // wait for shared data and validity mask to be complete
// Copy output data coalesced from shared to global
if (threadIdx.x < tmp_block_sum)
output_data[block_offset + threadIdx.x] = temp_data[threadIdx.x];
if (has_validity) {
// Since the valid bools are contiguous in shared memory now, we can use
// __popc to combine them into a single mask element.
// Then, most mask elements can be directly copied from shared to global
// memory. Only the first and last 32-bit mask elements of each block must
// use an atomicOr, because these are where other blocks may overlap.
constexpr int num_warps = block_size / cudf::detail::warp_size;
// account for partial blocks with non-warp-aligned offsets
int const last_index = tmp_block_sum + (block_offset % cudf::detail::warp_size) - 1;
int const last_warp = min(num_warps, last_index / cudf::detail::warp_size);
int const wid = threadIdx.x / cudf::detail::warp_size;
int const lane = threadIdx.x % cudf::detail::warp_size;
cudf::size_type tmp_warp_valid_counts{0};
if (tmp_block_sum > 0 && wid <= last_warp) {
int valid_index = (block_offset / cudf::detail::warp_size) + wid;
// compute the valid mask for this warp
uint32_t valid_warp = __ballot_sync(0xffff'ffffu, temp_valids[threadIdx.x]);
// Note the atomicOr's below assume that output_valid has been set to
// all zero before the kernel
if (lane == 0 && valid_warp != 0) {
tmp_warp_valid_counts = __popc(valid_warp);
if (wid > 0 && wid < last_warp)
output_valid[valid_index] = valid_warp;
else {
cuda::atomic_ref<cudf::bitmask_type, cuda::thread_scope_device> ref{
output_valid[valid_index]};
ref.fetch_or(valid_warp, cuda::std::memory_order_relaxed);
}
}
// if the block is full and not aligned then we have one more warp to cover
if ((wid == 0) && (last_warp == num_warps)) {
uint32_t valid_warp = __ballot_sync(0xffff'ffffu, temp_valids[block_size + threadIdx.x]);
if (lane == 0 && valid_warp != 0) {
tmp_warp_valid_counts += __popc(valid_warp);
cuda::atomic_ref<cudf::bitmask_type, cuda::thread_scope_device> ref{
output_valid[valid_index + num_warps]};
ref.fetch_or(valid_warp, cuda::std::memory_order_relaxed);
}
}
}
warp_valid_counts += tmp_warp_valid_counts;
}
block_offset += tmp_block_sum;
tid += block_size;
}
// Compute total null_count for this block and add it to global count
constexpr cudf::size_type leader_lane{0};
cudf::size_type block_valid_count =
cudf::detail::single_lane_block_sum_reduce<block_size, leader_lane>(warp_valid_counts);
if (threadIdx.x == 0) { // one thread computes and adds to null count
cuda::atomic_ref<size_type, cuda::thread_scope_device> ref{*output_null_count};
ref.fetch_add(block_sum - block_valid_count, cuda::std::memory_order_relaxed);
}
}
template <typename T, typename Enable = void>
struct DeviceType {
using type = T;
};
template <typename T>
struct DeviceType<T, std::enable_if_t<cudf::is_timestamp<T>()>> {
using type = typename T::rep;
};
template <typename T>
struct DeviceType<T, std::enable_if_t<cudf::is_fixed_point<T>()>> {
using type = typename cudf::device_storage_type_t<T>;
};
// Dispatch functor which performs the scatter for fixed-width column types and the gather for others
template <typename Filter, int block_size>
struct scatter_gather_functor {
template <typename T, std::enable_if_t<cudf::is_fixed_width<T>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
cudf::size_type const& output_size,
cudf::size_type const* block_offsets,
Filter filter,
cudf::size_type per_thread,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto output_column = cudf::detail::allocate_like(
input, output_size, cudf::mask_allocation_policy::RETAIN, stream, mr);
auto output = output_column->mutable_view();
bool has_valid = input.nullable();
using Type = typename DeviceType<T>::type;
auto scatter = (has_valid) ? scatter_kernel<Type, Filter, block_size, true>
: scatter_kernel<Type, Filter, block_size, false>;
cudf::detail::grid_1d grid{input.size(), block_size, per_thread};
rmm::device_scalar<cudf::size_type> null_count{0, stream};
if (output.nullable()) {
// Have to initialize the output mask to all zeros because we may update
// it with atomicOr().
CUDF_CUDA_TRY(cudaMemsetAsync(static_cast<void*>(output.null_mask()),
0,
cudf::bitmask_allocation_size_bytes(output.size()),
stream.value()));
}
auto output_device_view = cudf::mutable_column_device_view::create(output, stream);
auto input_device_view = cudf::column_device_view::create(input, stream);
scatter<<<grid.num_blocks, block_size, 0, stream.value()>>>(*output_device_view,
null_count.data(),
*input_device_view,
block_offsets,
input.size(),
per_thread,
filter);
if (has_valid) { output_column->set_null_count(null_count.value(stream)); }
return output_column;
}
template <typename T,
std::enable_if_t<!cudf::is_fixed_width<T>() and !cudf::is_fixed_point<T>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
cudf::size_type const& output_size,
cudf::size_type const*,
Filter filter,
cudf::size_type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
rmm::device_uvector<cudf::size_type> indices(output_size, stream);
thrust::copy_if(rmm::exec_policy(stream),
thrust::counting_iterator<cudf::size_type>(0),
thrust::counting_iterator<cudf::size_type>(input.size()),
indices.begin(),
filter);
auto output_table = cudf::detail::gather(cudf::table_view{{input}},
indices,
cudf::out_of_bounds_policy::DONT_CHECK,
cudf::detail::negative_index_policy::NOT_ALLOWED,
stream,
mr);
// There will be only one column
return std::make_unique<cudf::column>(std::move(output_table->get_column(0)));
}
};
/**
* @brief Filters `input` using a Filter function object
*
* @p filter must be a functor or lambda with the following signature:
* __device__ bool operator()(cudf::size_type i);
* It will return true if element i of @p input should be copied,
* false otherwise.
*
* @tparam Filter the filter functor type
* @param[in] input The table_view to filter
 * @param[in] filter A function object that takes an index and returns a bool
 * @param[in] stream CUDA stream used for device memory operations and kernel launches
 * @param[in] mr Device memory resource used to allocate the returned table's device memory
 * @return unique_ptr<table> The table generated from filtered `input`.
*/
template <typename Filter>
std::unique_ptr<table> copy_if(table_view const& input,
Filter filter,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
if (0 == input.num_rows() || 0 == input.num_columns()) { return empty_like(input); }
constexpr int block_size = 256;
cudf::size_type per_thread =
elements_per_thread(compute_block_counts<Filter, block_size>, input.num_rows(), block_size);
cudf::detail::grid_1d grid{input.num_rows(), block_size, per_thread};
// temp storage for block counts and offsets
rmm::device_uvector<cudf::size_type> block_counts(grid.num_blocks, stream);
rmm::device_uvector<cudf::size_type> block_offsets(grid.num_blocks + 1, stream);
// 1. Find the count of elements in each block that "pass" the mask
compute_block_counts<Filter, block_size><<<grid.num_blocks, block_size, 0, stream.value()>>>(
block_counts.begin(), input.num_rows(), per_thread, filter);
// initialize just the first element of block_offsets to 0 since the InclusiveSum below
// starts at the second element.
CUDF_CUDA_TRY(cudaMemsetAsync(block_offsets.begin(), 0, sizeof(cudf::size_type), stream.value()));
// 2. Find the offset for each block's output using a scan of block counts
if (grid.num_blocks > 1) {
// Determine and allocate temporary device storage
size_t temp_storage_bytes = 0;
cub::DeviceScan::InclusiveSum(nullptr,
temp_storage_bytes,
block_counts.begin(),
block_offsets.begin() + 1,
grid.num_blocks,
stream.value());
rmm::device_buffer d_temp_storage(temp_storage_bytes, stream);
    // Run the inclusive prefix sum (written starting at the second element, yielding exclusive block offsets)
cub::DeviceScan::InclusiveSum(d_temp_storage.data(),
temp_storage_bytes,
block_counts.begin(),
block_offsets.begin() + 1,
grid.num_blocks,
stream.value());
}
  // Since an InclusiveSum is used, the last value in block_offsets will be the output size,
  // unless num_blocks == 1, in which case output_size is just block_counts[0]
cudf::size_type output_size{0};
CUDF_CUDA_TRY(cudaMemcpyAsync(
&output_size,
grid.num_blocks > 1 ? block_offsets.begin() + grid.num_blocks : block_counts.begin(),
sizeof(cudf::size_type),
cudaMemcpyDefault,
stream.value()));
stream.synchronize();
if (output_size == input.num_rows()) {
return std::make_unique<table>(input, stream, mr);
} else if (output_size > 0) {
std::vector<std::unique_ptr<column>> out_columns(input.num_columns());
std::transform(input.begin(), input.end(), out_columns.begin(), [&](auto col_view) {
return cudf::type_dispatcher(col_view.type(),
scatter_gather_functor<Filter, block_size>{},
col_view,
output_size,
block_offsets.begin(),
filter,
per_thread,
stream,
mr);
});
return std::make_unique<table>(std::move(out_columns));
} else {
return empty_like(input);
}
}
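// Example (a minimal sketch, assumed names `input` and `stream`): keep only the first 100 rows of
// a table. The functor satisfies the `__device__ bool operator()(cudf::size_type)` requirement
// documented above.
//
//   struct index_below {
//     cudf::size_type threshold;
//     __device__ bool operator()(cudf::size_type i) const { return i < threshold; }
//   };
//
//   auto filtered = cudf::detail::copy_if(
//     input, index_below{100}, stream, rmm::mr::get_current_device_resource());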
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/scan.hpp
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_view.hpp>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace detail {
/**
* @brief Computes the exclusive scan of a column.
*
* The null values are skipped for the operation, and if an input element at `i` is null, then the
* output element at `i` will also be null.
*
* The identity value for the column type as per the aggregation type is used for the value of the
* first element in the output column.
*
* Struct columns are allowed with aggregation types Min and Max.
*
 * @throws cudf::logic_error if column data_type is not an arithmetic type, or is a struct type
 * and the `agg` is not Min or Max.
*
* @param input The input column view for the scan.
* @param agg Aggregation operator applied by the scan
* @param null_handling Exclude null values when computing the result if null_policy::EXCLUDE.
* Include nulls if null_policy::INCLUDE. Any operation with a null results in
* a null.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned scalar's device memory.
* @returns Column with scan results.
*/
std::unique_ptr<column> scan_exclusive(column_view const& input,
scan_aggregation const& agg,
null_policy null_handling,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @brief Computes the inclusive scan of a column.
*
* The null values are skipped for the operation, and if an input element at `i` is null, then the
* output element at `i` will also be null.
*
* String and struct columns are allowed with aggregation types Min and Max.
*
 * @throws cudf::logic_error if column data_type is not an arithmetic, string, or struct type, or
 * is a string/struct type and the `agg` is not Min or Max.
*
* @param input The input column view for the scan.
* @param agg Aggregation operator applied by the scan
* @param null_handling Exclude null values when computing the result if null_policy::EXCLUDE.
* Include nulls if null_policy::INCLUDE. Any operation with a null results in
* a null.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned scalar's device memory.
* @returns Column with scan results.
*/
std::unique_ptr<column> scan_inclusive(column_view const& input,
scan_aggregation const& agg,
null_policy null_handling,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @brief Generate row ranks for a column.
*
* @param order_by Input column to generate ranks for.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory.
* @return rank values.
*/
std::unique_ptr<column> inclusive_rank_scan(column_view const& order_by,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @brief Generate row dense ranks for a column.
*
* @param order_by Input column to generate ranks for.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory.
* @return rank values.
*/
std::unique_ptr<column> inclusive_dense_rank_scan(column_view const& order_by,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
 * @brief Generate row ONE_NORMALIZED percent ranks for a column.
 * Also known as ANSI SQL PERCENT RANK.
 * Calculated by (rank - 1) / (count - 1). For example, ranks {1, 2, 2, 4} over a 4-row column
 * yield {0.0, 1/3, 1/3, 1.0}.
*
* @param order_by Input column to generate ranks for.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory.
* @return rank values.
*/
std::unique_ptr<column> inclusive_one_normalized_percent_rank_scan(
column_view const& order_by, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/normalizing_iterator.cuh
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/utilities/type_dispatcher.hpp>
#include <type_traits>
namespace cudf {
namespace detail {
/**
* @brief The base class for the input or output normalizing iterator
*
* The base class mainly manages updating the `p_` member variable while the
* subclasses handle accessing individual elements in device memory.
*
* @tparam Derived The derived class type for the iterator
* @tparam Integer The type the iterator normalizes to
*/
template <class Derived, typename Integer>
struct alignas(16) base_normalator {
static_assert(cudf::is_index_type<Integer>());
using difference_type = std::ptrdiff_t;
using value_type = Integer;
using pointer = Integer*;
using iterator_category = std::random_access_iterator_tag;
base_normalator() = default;
base_normalator(base_normalator const&) = default;
base_normalator(base_normalator&&) = default;
base_normalator& operator=(base_normalator const&) = default;
base_normalator& operator=(base_normalator&&) = default;
/**
* @brief Prefix increment operator.
*/
CUDF_HOST_DEVICE inline Derived& operator++()
{
Derived& derived = static_cast<Derived&>(*this);
derived.p_ += width_;
return derived;
}
/**
* @brief Postfix increment operator.
*/
CUDF_HOST_DEVICE inline Derived operator++(int)
{
Derived tmp{static_cast<Derived&>(*this)};
operator++();
return tmp;
}
/**
* @brief Prefix decrement operator.
*/
CUDF_HOST_DEVICE inline Derived& operator--()
{
Derived& derived = static_cast<Derived&>(*this);
derived.p_ -= width_;
return derived;
}
/**
* @brief Postfix decrement operator.
*/
CUDF_HOST_DEVICE inline Derived operator--(int)
{
Derived tmp{static_cast<Derived&>(*this)};
operator--();
return tmp;
}
/**
* @brief Compound assignment by sum operator.
*/
CUDF_HOST_DEVICE inline Derived& operator+=(difference_type offset)
{
Derived& derived = static_cast<Derived&>(*this);
derived.p_ += offset * width_;
return derived;
}
/**
* @brief Increment by offset operator.
*/
CUDF_HOST_DEVICE inline Derived operator+(difference_type offset) const
{
auto tmp = Derived{static_cast<Derived const&>(*this)};
tmp.p_ += (offset * width_);
return tmp;
}
/**
   * @brief Addition operator with the offset on the left-hand side.
*/
CUDF_HOST_DEVICE inline friend Derived operator+(difference_type offset, Derived const& rhs)
{
Derived tmp{rhs};
tmp.p_ += (offset * rhs.width_);
return tmp;
}
/**
* @brief Compound assignment by difference operator.
*/
CUDF_HOST_DEVICE inline Derived& operator-=(difference_type offset)
{
Derived& derived = static_cast<Derived&>(*this);
derived.p_ -= offset * width_;
return derived;
}
/**
* @brief Decrement by offset operator.
*/
CUDF_HOST_DEVICE inline Derived operator-(difference_type offset) const
{
auto tmp = Derived{static_cast<Derived const&>(*this)};
tmp.p_ -= (offset * width_);
return tmp;
}
/**
   * @brief Subtraction operator with the offset on the left-hand side.
*/
CUDF_HOST_DEVICE inline friend Derived operator-(difference_type offset, Derived const& rhs)
{
Derived tmp{rhs};
tmp.p_ -= (offset * rhs.width_);
return tmp;
}
/**
* @brief Compute offset from iterator difference operator.
*/
CUDF_HOST_DEVICE inline difference_type operator-(Derived const& rhs) const
{
return (static_cast<Derived const&>(*this).p_ - rhs.p_) / width_;
}
/**
* @brief Equals to operator.
*/
CUDF_HOST_DEVICE inline bool operator==(Derived const& rhs) const
{
return rhs.p_ == static_cast<Derived const&>(*this).p_;
}
/**
* @brief Not equals to operator.
*/
CUDF_HOST_DEVICE inline bool operator!=(Derived const& rhs) const
{
return rhs.p_ != static_cast<Derived const&>(*this).p_;
}
/**
* @brief Less than operator.
*/
CUDF_HOST_DEVICE inline bool operator<(Derived const& rhs) const
{
return static_cast<Derived const&>(*this).p_ < rhs.p_;
}
/**
* @brief Greater than operator.
*/
CUDF_HOST_DEVICE inline bool operator>(Derived const& rhs) const
{
return static_cast<Derived const&>(*this).p_ > rhs.p_;
}
/**
* @brief Less than or equals to operator.
*/
CUDF_HOST_DEVICE inline bool operator<=(Derived const& rhs) const
{
return static_cast<Derived const&>(*this).p_ <= rhs.p_;
}
/**
* @brief Greater than or equals to operator.
*/
CUDF_HOST_DEVICE inline bool operator>=(Derived const& rhs) const
{
return static_cast<Derived const&>(*this).p_ >= rhs.p_;
}
private:
struct integer_sizeof_fn {
template <typename T, CUDF_ENABLE_IF(not cudf::is_fixed_width<T>())>
CUDF_HOST_DEVICE constexpr std::size_t operator()() const
{
#ifndef __CUDA_ARCH__
CUDF_FAIL("only integral types are supported");
#else
CUDF_UNREACHABLE("only integral types are supported");
#endif
}
template <typename T, CUDF_ENABLE_IF(cudf::is_fixed_width<T>())>
CUDF_HOST_DEVICE constexpr std::size_t operator()() const noexcept
{
return sizeof(T);
}
};
protected:
/**
* @brief Constructor assigns width and type member variables for base class.
*/
explicit CUDF_HOST_DEVICE base_normalator(data_type dtype) : dtype_(dtype)
{
width_ = static_cast<int32_t>(type_dispatcher(dtype, integer_sizeof_fn{}));
}
/**
* @brief Constructor assigns width and type member variables for base class.
*/
explicit CUDF_HOST_DEVICE base_normalator(data_type dtype, int32_t width)
: width_(width), dtype_(dtype)
{
}
int32_t width_; /// integer type width = 1,2,4, or 8
data_type dtype_; /// for type-dispatcher calls
};
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/timezone.cuh
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/table/table_device_view.cuh>
#include <cudf/timezone.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/span.hpp>
#include <thrust/binary_search.h>
#include <thrust/execution_policy.h>
namespace cudf::detail {
/**
* @brief Returns the UT offset for a given date and given timezone table.
*
 * @param tz_table Timezone table with transition times (column 0) and UT offsets (column 1); the
 * trailing `solar_cycle_entry_count` entries of each column are used for all times beyond the
 * range covered by the TZif file
* @param ts ORC timestamp
*
* @return offset from UT, in seconds
*/
inline __device__ duration_s get_ut_offset(table_device_view tz_table, timestamp_s ts)
{
if (tz_table.num_rows() == 0) { return duration_s{0}; }
cudf::device_span<timestamp_s const> transition_times(tz_table.column(0).head<timestamp_s>(),
static_cast<size_t>(tz_table.num_rows()));
auto const ts_ttime_it = [&]() {
auto last_less_equal = [](auto begin, auto end, auto value) {
auto const first_larger = thrust::upper_bound(thrust::seq, begin, end, value);
// Return start of the range if all elements are larger than the value
if (first_larger == begin) return begin;
// Element before the first larger element is the last one less or equal
return first_larger - 1;
};
auto const file_entry_end =
transition_times.begin() + (transition_times.size() - solar_cycle_entry_count);
if (ts <= *(file_entry_end - 1)) {
// Search the file entries if the timestamp is in range
return last_less_equal(transition_times.begin(), file_entry_end, ts);
} else {
auto project_to_cycle = [](timestamp_s ts) {
// Years divisible by four are leap years
// Exceptions are years divisible by 100, but not divisible by 400
static constexpr int32_t num_leap_years_in_cycle =
solar_cycle_years / 4 - (solar_cycle_years / 100 - solar_cycle_years / 400);
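        // e.g., assuming a 400-year solar cycle: 400 / 4 - (400 / 100 - 400 / 400) = 97 leap
        // years, so cycle_s below spans 365 * 400 + 97 = 146097 days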
static constexpr duration_s cycle_s = cuda::std::chrono::duration_cast<duration_s>(
duration_D{365 * solar_cycle_years + num_leap_years_in_cycle});
return timestamp_s{(ts.time_since_epoch() + cycle_s) % cycle_s};
};
// Search the 400-year cycle if outside of the file entries range
return last_less_equal(file_entry_end, transition_times.end(), project_to_cycle(ts));
}
}();
return tz_table.column(1).element<duration_s>(ts_ttime_it - transition_times.begin());
}
} // namespace cudf::detail
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/datetime.hpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <memory>
namespace cudf {
namespace datetime {
namespace detail {
/**
* @copydoc cudf::extract_year(cudf::column_view const&, rmm::mr::device_memory_resource *)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<cudf::column> extract_year(cudf::column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::extract_month(cudf::column_view const&, rmm::mr::device_memory_resource *)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<cudf::column> extract_month(cudf::column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::extract_day(cudf::column_view const&, rmm::mr::device_memory_resource *)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<cudf::column> extract_day(cudf::column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::extract_weekday(cudf::column_view const&, rmm::mr::device_memory_resource *)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<cudf::column> extract_weekday(cudf::column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::extract_hour(cudf::column_view const&, rmm::mr::device_memory_resource *)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<cudf::column> extract_hour(cudf::column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::extract_minute(cudf::column_view const&, rmm::mr::device_memory_resource *)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<cudf::column> extract_minute(cudf::column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::extract_second(cudf::column_view const&, rmm::mr::device_memory_resource *)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<cudf::column> extract_second(cudf::column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::extract_millisecond_fraction(cudf::column_view const&,
* rmm::mr::device_memory_resource *)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<cudf::column> extract_millisecond_fraction(cudf::column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::extract_microsecond_fraction(cudf::column_view const&,
* rmm::mr::device_memory_resource *)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<cudf::column> extract_microsecond_fraction(cudf::column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::extract_nanosecond_fraction(cudf::column_view const&,
* rmm::mr::device_memory_resource *)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<cudf::column> extract_nanosecond_fraction(cudf::column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::last_day_of_month(cudf::column_view const&, rmm::mr::device_memory_resource *)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<cudf::column> last_day_of_month(cudf::column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::day_of_year(cudf::column_view const&, rmm::mr::device_memory_resource *)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<cudf::column> day_of_year(cudf::column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::add_calendrical_months(cudf::column_view const&, cudf::column_view const&,
* rmm::mr::device_memory_resource *)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<cudf::column> add_calendrical_months(cudf::column_view const& timestamps,
cudf::column_view const& months,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::add_calendrical_months(cudf::column_view const&, cudf::scalar const&,
* rmm::mr::device_memory_resource *)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<cudf::column> add_calendrical_months(cudf::column_view const& timestamps,
cudf::scalar const& months,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::is_leap_year(cudf::column_view const&, rmm::mr::device_memory_resource *)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<cudf::column> is_leap_year(cudf::column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
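/**
 * @copydoc cudf::extract_quarter(cudf::column_view const&, rmm::mr::device_memory_resource *)
 *
 * @param stream CUDA stream used for device memory operations and kernel launches.
 */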
std::unique_ptr<cudf::column> extract_quarter(cudf::column_view const& column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace datetime
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/repeat.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <memory>
namespace cudf {
namespace detail {
/**
* @copydoc cudf::repeat(table_view const&, column_view const&, bool,
* rmm::mr::device_memory_resource*)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<table> repeat(table_view const& input_table,
column_view const& count,
bool check_count,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::repeat(table_view const&, size_type,
* rmm::mr::device_memory_resource*)
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<table> repeat(table_view const& input_table,
size_type count,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/interop.hpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
// We disable warning 611 because the `arrow::TableBatchReader` only partially
// overrides the `ReadNext` method of `arrow::RecordBatchReader`,
// triggering warning 611-D from nvcc.
#ifdef __CUDACC__
#pragma nv_diag_suppress 611
#pragma nv_diag_suppress 2810
#endif
#include <arrow/api.h>
#ifdef __CUDACC__
#pragma nv_diag_default 611
#pragma nv_diag_default 2810
#endif
#include <cudf/interop.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <string>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace detail {
/**
* @copydoc cudf::from_dlpack
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<table> from_dlpack(DLManagedTensor const* managed_tensor,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::to_dlpack
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
DLManagedTensor* to_dlpack(table_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
// Create an arrow array for the given type_id from the given buffer arguments
template <typename... Ts>
std::shared_ptr<arrow::Array> to_arrow_array(cudf::type_id id, Ts&&... args)
{
switch (id) {
case type_id::BOOL8: return std::make_shared<arrow::BooleanArray>(std::forward<Ts>(args)...);
case type_id::INT8: return std::make_shared<arrow::Int8Array>(std::forward<Ts>(args)...);
case type_id::INT16: return std::make_shared<arrow::Int16Array>(std::forward<Ts>(args)...);
case type_id::INT32: return std::make_shared<arrow::Int32Array>(std::forward<Ts>(args)...);
case type_id::INT64: return std::make_shared<arrow::Int64Array>(std::forward<Ts>(args)...);
case type_id::UINT8: return std::make_shared<arrow::UInt8Array>(std::forward<Ts>(args)...);
case type_id::UINT16: return std::make_shared<arrow::UInt16Array>(std::forward<Ts>(args)...);
case type_id::UINT32: return std::make_shared<arrow::UInt32Array>(std::forward<Ts>(args)...);
case type_id::UINT64: return std::make_shared<arrow::UInt64Array>(std::forward<Ts>(args)...);
case type_id::FLOAT32: return std::make_shared<arrow::FloatArray>(std::forward<Ts>(args)...);
case type_id::FLOAT64: return std::make_shared<arrow::DoubleArray>(std::forward<Ts>(args)...);
case type_id::TIMESTAMP_DAYS:
return std::make_shared<arrow::Date32Array>(std::make_shared<arrow::Date32Type>(),
std::forward<Ts>(args)...);
case type_id::TIMESTAMP_SECONDS:
return std::make_shared<arrow::TimestampArray>(arrow::timestamp(arrow::TimeUnit::SECOND),
std::forward<Ts>(args)...);
case type_id::TIMESTAMP_MILLISECONDS:
return std::make_shared<arrow::TimestampArray>(arrow::timestamp(arrow::TimeUnit::MILLI),
std::forward<Ts>(args)...);
case type_id::TIMESTAMP_MICROSECONDS:
return std::make_shared<arrow::TimestampArray>(arrow::timestamp(arrow::TimeUnit::MICRO),
std::forward<Ts>(args)...);
case type_id::TIMESTAMP_NANOSECONDS:
return std::make_shared<arrow::TimestampArray>(arrow::timestamp(arrow::TimeUnit::NANO),
std::forward<Ts>(args)...);
case type_id::DURATION_SECONDS:
return std::make_shared<arrow::DurationArray>(arrow::duration(arrow::TimeUnit::SECOND),
std::forward<Ts>(args)...);
case type_id::DURATION_MILLISECONDS:
return std::make_shared<arrow::DurationArray>(arrow::duration(arrow::TimeUnit::MILLI),
std::forward<Ts>(args)...);
case type_id::DURATION_MICROSECONDS:
return std::make_shared<arrow::DurationArray>(arrow::duration(arrow::TimeUnit::MICRO),
std::forward<Ts>(args)...);
case type_id::DURATION_NANOSECONDS:
return std::make_shared<arrow::DurationArray>(arrow::duration(arrow::TimeUnit::NANO),
std::forward<Ts>(args)...);
default: CUDF_FAIL("Unsupported type_id conversion to arrow");
}
}
/**
* @brief Invokes an `operator()` template with the type instantiation based on
* the specified `arrow::DataType`'s `id()`.
*
* This function is analogous to libcudf's type_dispatcher, but instead applies
* to Arrow functions. Its primary use case is to leverage Arrow's
* metaprogramming facilities like arrow::TypeTraits that require translating
* the runtime dtype information into compile-time types.
*/
template <typename Functor, typename... Ts>
constexpr decltype(auto) arrow_type_dispatcher(arrow::DataType const& dtype,
Functor f,
Ts&&... args)
{
switch (dtype.id()) {
case arrow::Type::INT8:
return f.template operator()<arrow::Int8Type>(std::forward<Ts>(args)...);
case arrow::Type::INT16:
return f.template operator()<arrow::Int16Type>(std::forward<Ts>(args)...);
case arrow::Type::INT32:
return f.template operator()<arrow::Int32Type>(std::forward<Ts>(args)...);
case arrow::Type::INT64:
return f.template operator()<arrow::Int64Type>(std::forward<Ts>(args)...);
case arrow::Type::UINT8:
return f.template operator()<arrow::UInt8Type>(std::forward<Ts>(args)...);
case arrow::Type::UINT16:
return f.template operator()<arrow::UInt16Type>(std::forward<Ts>(args)...);
case arrow::Type::UINT32:
return f.template operator()<arrow::UInt32Type>(std::forward<Ts>(args)...);
case arrow::Type::UINT64:
return f.template operator()<arrow::UInt64Type>(std::forward<Ts>(args)...);
case arrow::Type::FLOAT:
return f.template operator()<arrow::FloatType>(std::forward<Ts>(args)...);
case arrow::Type::DOUBLE:
return f.template operator()<arrow::DoubleType>(std::forward<Ts>(args)...);
case arrow::Type::BOOL:
return f.template operator()<arrow::BooleanType>(std::forward<Ts>(args)...);
case arrow::Type::TIMESTAMP:
return f.template operator()<arrow::TimestampType>(std::forward<Ts>(args)...);
case arrow::Type::DURATION:
return f.template operator()<arrow::DurationType>(std::forward<Ts>(args)...);
case arrow::Type::STRING:
return f.template operator()<arrow::StringType>(std::forward<Ts>(args)...);
case arrow::Type::LIST:
return f.template operator()<arrow::ListType>(std::forward<Ts>(args)...);
case arrow::Type::DECIMAL128:
return f.template operator()<arrow::Decimal128Type>(std::forward<Ts>(args)...);
case arrow::Type::STRUCT:
return f.template operator()<arrow::StructType>(std::forward<Ts>(args)...);
default: {
CUDF_FAIL("Invalid type.");
}
}
}
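// Example (a minimal sketch; `byte_width_fn` is a hypothetical functor, not part of this header):
// dispatch to a functor that reports the fixed-width byte size of an Arrow type. It assumes
// Arrow's `is_number_type` trait and `TypeTraits<T>::CType` are available for numeric types;
// non-numeric types fall back to 0.
//
//   struct byte_width_fn {
//     template <typename ArrowType>
//     std::size_t operator()() const
//     {
//       if constexpr (arrow::is_number_type<ArrowType>::value) {
//         return sizeof(typename arrow::TypeTraits<ArrowType>::CType);
//       }
//       return 0;
//     }
//   };
//
//   auto const width = arrow_type_dispatcher(*arrow::int32(), byte_width_fn{});  // 4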
// Converting arrow type to cudf type
data_type arrow_to_cudf_type(arrow::DataType const& arrow_type);
/**
* @copydoc cudf::to_arrow(table_view input, std::vector<column_metadata> const& metadata,
* rmm::cuda_stream_view stream, arrow::MemoryPool* ar_mr)
*/
std::shared_ptr<arrow::Table> to_arrow(table_view input,
std::vector<column_metadata> const& metadata,
rmm::cuda_stream_view stream,
arrow::MemoryPool* ar_mr);
/**
* @copydoc cudf::to_arrow(cudf::scalar const& input, column_metadata const& metadata,
* rmm::cuda_stream_view stream, arrow::MemoryPool* ar_mr)
*/
std::shared_ptr<arrow::Scalar> to_arrow(cudf::scalar const& input,
column_metadata const& metadata,
rmm::cuda_stream_view stream,
arrow::MemoryPool* ar_mr);
/**
* @copydoc cudf::from_arrow(arrow::Table const& input_table, rmm::cuda_stream_view stream,
* rmm::mr::device_memory_resource* mr)
*/
std::unique_ptr<table> from_arrow(arrow::Table const& input_table,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @copydoc cudf::from_arrow(arrow::Scalar const& input, rmm::cuda_stream_view stream,
* rmm::mr::device_memory_resource* mr)
*/
std::unique_ptr<cudf::scalar> from_arrow(arrow::Scalar const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @brief Return a maximum precision for a given type.
*
* @tparam T the type to get the maximum precision for
*/
template <typename T>
constexpr std::size_t max_precision()
{
auto constexpr num_bits = sizeof(T) * 8;
return std::floor(num_bits * std::log(2) / std::log(10));
}
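// Worked example of the formula above: a 32-bit type gives floor(32 * log10(2)) = floor(9.63) = 9
// digits, and a 64-bit type gives floor(64 * log10(2)) = floor(19.27) = 19 digits.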
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/hash_reduce_by_row.cuh
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/hashing/detail/hash_allocator.cuh>
#include <cudf/hashing/detail/helper_functions.cuh>
#include <cudf/table/experimental/row_operators.cuh>
#include <cudf/types.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <rmm/mr/device/polymorphic_allocator.hpp>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/uninitialized_fill.h>
#include <cuco/static_map.cuh>
namespace cudf::detail {
using hash_table_allocator_type = rmm::mr::stream_allocator_adaptor<default_allocator<char>>;
using hash_map_type =
cuco::static_map<size_type, size_type, cuda::thread_scope_device, hash_table_allocator_type>;
/**
 * @brief The base struct for customized reduction functors that perform reduce-by-key, where the
 * keys are rows that compare equal.
*
* TODO: We need to switch to use `static_reduction_map` when it is ready
* (https://github.com/NVIDIA/cuCollections/pull/98).
*/
template <typename MapView, typename KeyHasher, typename KeyEqual, typename OutputType>
struct reduce_by_row_fn_base {
protected:
MapView const d_map;
KeyHasher const d_hasher;
KeyEqual const d_equal;
OutputType* const d_output;
reduce_by_row_fn_base(MapView const& d_map,
KeyHasher const& d_hasher,
KeyEqual const& d_equal,
OutputType* const d_output)
: d_map{d_map}, d_hasher{d_hasher}, d_equal{d_equal}, d_output{d_output}
{
}
/**
* @brief Return a pointer to the output array at the given index.
*
* @param idx The access index
* @return A pointer to the given index in the output array
*/
__device__ OutputType* get_output_ptr(size_type const idx) const
{
auto const iter = d_map.find(idx, d_hasher, d_equal);
if (iter != d_map.end()) {
// Only one (undetermined) index value of the duplicate rows could be inserted into the map.
      // As such, looking up any index of the duplicate rows always returns the same value.
auto const inserted_idx = iter->second.load(cuda::std::memory_order_relaxed);
// All duplicate rows will have concurrent access to this same output slot.
return &d_output[inserted_idx];
} else {
// All input `idx` values have been inserted into the map before.
// Thus, searching for an `idx` key resulting in the `end()` iterator only happens if
// `d_equal(idx, idx) == false`.
// Such situations are due to comparing nulls or NaNs which are considered as always unequal.
// In those cases, all rows containing nulls or NaNs are distinct. Just return their direct
// output slot.
return &d_output[idx];
}
}
};
/**
 * @brief Perform a reduction on groups of rows that compare equal.
 *
 * This is essentially a reduce-by-key operation, where the keys are non-contiguous rows that
 * compare equal. A hash table is used to find groups of equal rows.
*
* At the beginning of the operation, the entire output array is filled with a value given by
* the `init` parameter. Then, the reduction result for each row group is written into the output
* array at the index of an unspecified row in the group.
*
* @tparam ReduceFuncBuilder The builder class that must have a `build()` method returning a
* reduction functor derived from `reduce_by_row_fn_base`
* @tparam OutputType Type of the reduction results
* @param map The auxiliary map to perform reduction
 * @param preprocessed_input The preprocessed table of the input rows, used for computing row
 * hashing and row comparisons
* @param num_rows The number of all input rows
 * @param has_nulls Indicates whether the input rows have any nulls at any nested levels
* @param has_nested_columns Indicates whether the input table has any nested columns
* @param nulls_equal Flag to specify whether null elements should be considered as equal
* @param nans_equal Flag to specify whether NaN values in floating point column should be
* considered equal.
* @param init The initial value for reduction of each row group
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned vector
* @return A device_uvector containing the reduction results
*/
template <typename ReduceFuncBuilder, typename OutputType>
rmm::device_uvector<OutputType> hash_reduce_by_row(
hash_map_type const& map,
std::shared_ptr<cudf::experimental::row::equality::preprocessed_table> const preprocessed_input,
size_type num_rows,
cudf::nullate::DYNAMIC has_nulls,
bool has_nested_columns,
null_equality nulls_equal,
nan_equality nans_equal,
ReduceFuncBuilder func_builder,
OutputType init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const map_dview = map.get_device_view();
auto const row_hasher = cudf::experimental::row::hash::row_hasher(preprocessed_input);
auto const key_hasher = row_hasher.device_hasher(has_nulls);
auto const row_comp = cudf::experimental::row::equality::self_comparator(preprocessed_input);
auto reduction_results = rmm::device_uvector<OutputType>(num_rows, stream, mr);
thrust::uninitialized_fill(
rmm::exec_policy(stream), reduction_results.begin(), reduction_results.end(), init);
auto const reduce_by_row = [&](auto const value_comp) {
if (has_nested_columns) {
auto const key_equal = row_comp.equal_to<true>(has_nulls, nulls_equal, value_comp);
thrust::for_each(
rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(num_rows),
func_builder.build(map_dview, key_hasher, key_equal, reduction_results.begin()));
} else {
auto const key_equal = row_comp.equal_to<false>(has_nulls, nulls_equal, value_comp);
thrust::for_each(
rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(num_rows),
func_builder.build(map_dview, key_hasher, key_equal, reduction_results.begin()));
}
};
if (nans_equal == nan_equality::ALL_EQUAL) {
using nan_equal_comparator =
cudf::experimental::row::equality::nan_equal_physical_equality_comparator;
reduce_by_row(nan_equal_comparator{});
} else {
using nan_unequal_comparator = cudf::experimental::row::equality::physical_equality_comparator;
reduce_by_row(nan_unequal_comparator{});
}
return reduction_results;
}
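// Example (a minimal sketch; `count_fn` and `count_fn_builder` are hypothetical types, not part
// of this header): a builder whose functor counts how many rows fall into each group of equal
// rows, accumulating into the output slot returned by `get_output_ptr`. It assumes <cuda/atomic>
// is available in the including translation unit.
//
//   template <typename MapView, typename KeyHasher, typename KeyEqual>
//   struct count_fn : reduce_by_row_fn_base<MapView, KeyHasher, KeyEqual, size_type> {
//     count_fn(MapView const& d_map,
//              KeyHasher const& d_hasher,
//              KeyEqual const& d_equal,
//              size_type* const d_output)
//       : reduce_by_row_fn_base<MapView, KeyHasher, KeyEqual, size_type>{
//           d_map, d_hasher, d_equal, d_output}
//     {
//     }
//
//     __device__ void operator()(size_type const idx) const
//     {
//       cuda::atomic_ref<size_type, cuda::thread_scope_device> ref{*this->get_output_ptr(idx)};
//       ref.fetch_add(size_type{1}, cuda::std::memory_order_relaxed);
//     }
//   };
//
//   struct count_fn_builder {
//     template <typename MapView, typename KeyHasher, typename KeyEqual>
//     auto build(MapView const& map, KeyHasher const& hasher, KeyEqual const& equal, size_type* out)
//     {
//       return count_fn<MapView, KeyHasher, KeyEqual>{map, hasher, equal, out};
//     }
//   };
//
//   // Each group's size is then written at one representative row per group:
//   // auto counts = hash_reduce_by_row(map, preprocessed_input, num_rows, has_nulls,
//   //                                  has_nested_columns, null_equality::EQUAL,
//   //                                  nan_equality::ALL_EQUAL, count_fn_builder{},
//   //                                  size_type{0}, stream, mr);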
} // namespace cudf::detail
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/labeling/label_segments.cuh
|
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/types.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/distance.h>
#include <thrust/for_each.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/iterator_traits.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/scatter.h>
#include <thrust/uninitialized_fill.h>
namespace cudf::detail {
/**
* @brief Fill label values for segments defined by a given offsets array.
*
 * Given a pair of iterators accessing an offset array, generate label values for the segments
 * defined by the offset values. The output will be an array containing consecutive groups of
* identical labels, the number of elements in each group `i` is defined by
* `offsets[i+1] - offsets[i]`.
*
* The labels always start from `0` regardless of the offset values.
* In case there are empty segments, their corresponding label values will be skipped in the output.
*
 * Note that the caller is responsible for making sure the output range has the correct size,
 * which is the total of all segment sizes (i.e., `size = *(offsets_end - 1) - *offsets_begin`). Otherwise, the
* result is undefined.
*
* @code{.pseudo}
* Examples:
*
* offsets = [ 0, 4, 6, 6, 6, 10 ]
* output = [ 0, 0, 0, 0, 1, 1, 4, 4, 4, 4 ]
*
* offsets = [ 5, 10, 12 ]
* output = [ 0, 0, 0, 0, 0, 1, 1 ]
* @endcode
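 *
 * A minimal usage sketch (illustrative; it assumes `offsets` is a
 * `rmm::device_uvector<cudf::size_type>` already holding `[0, 4, 6]` on the device):
 *
 * @code{.cpp}
 * auto labels = rmm::device_uvector<cudf::size_type>(6, stream);  // total segment size is 6
 * cudf::detail::label_segments(
 *   offsets.begin(), offsets.end(), labels.begin(), labels.end(), stream);
 * // labels now holds [0, 0, 0, 0, 1, 1]
 * @endcode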
*
* @param offsets_begin The beginning of the offsets that define segments.
* @param offsets_end The end of the offsets that define segments.
* @param label_begin The beginning of the output label range.
* @param label_end The end of the output label range.
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
template <typename InputIterator, typename OutputIterator>
void label_segments(InputIterator offsets_begin,
InputIterator offsets_end,
OutputIterator label_begin,
OutputIterator label_end,
rmm::cuda_stream_view stream)
{
auto const num_labels = thrust::distance(label_begin, label_end);
// If the output array is empty, that means we have all empty segments.
// In such cases, we must terminate immediately. Otherwise, the `for_each` loop below may try to
// access memory of the output array, resulting in "illegal memory access" error.
if (num_labels == 0) { return; }
// When the output array is not empty, always fill it with `0` value first.
using OutputType = typename thrust::iterator_value<OutputIterator>::type;
thrust::uninitialized_fill(rmm::exec_policy(stream), label_begin, label_end, OutputType{0});
  // If the offsets array has no more than 2 offset values, there will be at most 1 segment.
// In such cases, the output will just be an array of all `0` values (which we already filled).
// We should terminate from here, otherwise the `inclusive_scan` call below still does its entire
// computation. That is unnecessary and may be expensive if we have the input offsets defining a
// very large segment.
if (thrust::distance(offsets_begin, offsets_end) <= 2) { return; }
thrust::for_each(
rmm::exec_policy(stream),
offsets_begin + 1, // exclude the first offset value
offsets_end - 1, // exclude the last offset value
[num_labels = static_cast<typename thrust::iterator_value<InputIterator>::type>(num_labels),
offsets = offsets_begin,
output = label_begin] __device__(auto const idx) {
// Zero-normalized offsets.
auto const dst_idx = idx - (*offsets);
// Scatter value `1` to the index at (idx - offsets[0]).
      // Note that we need to check for out-of-bounds access, since the offset values may be invalid due to
// empty segments at the end.
// In case we have repeated offsets (i.e., we have empty segments), this `atomicAdd` call will
// make sure the label values corresponding to these empty segments will be skipped in the
// output.
if (dst_idx < num_labels) { atomicAdd(&output[dst_idx], OutputType{1}); }
});
thrust::inclusive_scan(rmm::exec_policy(stream), label_begin, label_end, label_begin);
}
/**
* @brief Generate segment offsets from groups of identical label values.
*
 * Given a pair of iterators accessing an array containing groups of identical label values,
 * generate offsets for the segments defined by these labels.
*
* Empty segments are also taken into account. If the input label values are discontinuous, the
* segments corresponding to the missing labels will be inferred as empty segments and their offsets
* will also be generated.
*
 * Note that the caller is responsible for making sure the output range for offsets has the
 * correct size, which is the maximum label value plus two (i.e., `size = *(labels_end - 1) + 2`).
* Otherwise, the result is undefined.
*
* @code{.pseudo}
* Examples:
*
* labels = [ 0, 0, 0, 0, 1, 1, 4, 4, 4, 4 ]
* output = [ 0, 4, 6, 6, 6, 10 ]
*
* labels = [ 0, 0, 0, 0, 0, 1, 1 ]
* output = [ 0, 5, 7 ]
* @endcode
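 *
 * A minimal usage sketch (illustrative; it assumes `labels` is a
 * `rmm::device_uvector<cudf::size_type>` already holding `[0, 0, 0, 0, 1, 1]` on the device):
 *
 * @code{.cpp}
 * // The maximum label value is 1, so the offsets array needs 1 + 2 = 3 entries.
 * auto offsets = rmm::device_uvector<cudf::size_type>(3, stream);
 * cudf::detail::labels_to_offsets(
 *   labels.begin(), labels.end(), offsets.begin(), offsets.end(), stream);
 * // offsets now holds [0, 4, 6]
 * @endcode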
*
* @param labels_begin The beginning of the labels that define segments.
* @param labels_end The end of the labels that define segments.
* @param offsets_begin The beginning of the output offset range.
* @param offsets_end The end of the output offset range.
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
template <typename InputIterator, typename OutputIterator>
void labels_to_offsets(InputIterator labels_begin,
InputIterator labels_end,
OutputIterator offsets_begin,
OutputIterator offsets_end,
rmm::cuda_stream_view stream)
{
// Always fill the entire output array with `0` value regardless of the input.
using OutputType = typename thrust::iterator_value<OutputIterator>::type;
thrust::uninitialized_fill(rmm::exec_policy(stream), offsets_begin, offsets_end, OutputType{0});
// If there is not any label value, we will have zero segment or all empty segments. We should
// terminate from here because:
// - If we have zero segment, the output array is empty thus `num_segments` computed below is
// wrong and may cascade to undefined behavior if we continue.
// - If we have all empty segments, the output offset values will be all `0`, which we already
// filled above. If we continue, the `exclusive_scan` call below still does its entire
// computation. That is unnecessary and may be expensive if we have the input labels defining
// a very large number of segments.
if (thrust::distance(labels_begin, labels_end) == 0) { return; }
auto const num_segments = thrust::distance(offsets_begin, offsets_end) - 1;
//================================================================================
// Let's consider an example: Given input labels = [ 0, 0, 0, 0, 1, 1, 4, 4, 4, 4 ].
// This stores the unique label values.
// Given the example above, we will have this array containing [0, 1, 4].
auto list_indices = rmm::device_uvector<OutputType>(num_segments, stream);
// Stores the non-zero segment sizes.
// Given the example above, we will have this array containing [4, 2, 4].
auto list_sizes = rmm::device_uvector<OutputType>(num_segments, stream);
  // Count the number of labels in each segment.
auto const end = thrust::reduce_by_key(rmm::exec_policy(stream),
labels_begin, // keys
labels_end, // keys
thrust::make_constant_iterator<OutputType>(1),
list_indices.begin(), // output unique label values
list_sizes.begin()); // count for each label
auto const num_non_empty_segments = thrust::distance(list_indices.begin(), end.first);
// Scatter segment sizes into the end position of their corresponding segment indices.
// Given the example above, we scatter [4, 2, 4] by the scatter map [0, 1, 4], resulting
// output = [4, 2, 0, 0, 4, 0].
thrust::scatter(rmm::exec_policy(stream),
list_sizes.begin(),
list_sizes.begin() + num_non_empty_segments,
list_indices.begin(),
offsets_begin);
// Generate offsets from sizes.
// Given the example above, the final output is [0, 4, 6, 6, 6, 10].
thrust::exclusive_scan(rmm::exec_policy(stream), offsets_begin, offsets_end, offsets_begin);
}
} // namespace cudf::detail
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/replace/nulls.cuh
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/types.hpp>
#include <thrust/functional.h>
#include <thrust/tuple.h>
namespace cudf {
namespace detail {
using idx_valid_pair_t = thrust::tuple<cudf::size_type, bool>;
/**
* @brief Functor used by `replace_nulls(replace_policy)` to determine the index to gather from in
* the result column.
*
 * Binary functor passed to `inclusive_scan` or `inclusive_scan_by_key`. Each argument is a tuple
 * of a row's index and validity. Returns a tuple of the current index and a discarded boolean if
 * the current row is valid; otherwise, a tuple of the nearest non-null row's index and a discarded
 * boolean.
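 *
 * A sketch of the intended use (illustrative only; `valid_it`, `gather_map`, and `num_rows` are
 * hypothetical names standing in for the caller's data, not part of this header):
 *
 * @code{.cpp}
 * // Forward-fill: each invalid row inherits the index of the closest preceding valid row.
 * auto in_begin  = thrust::make_zip_iterator(
 *   thrust::make_tuple(thrust::make_counting_iterator<cudf::size_type>(0), valid_it));
 * auto out_begin = thrust::make_zip_iterator(
 *   thrust::make_tuple(gather_map, thrust::make_discard_iterator()));
 * thrust::inclusive_scan(rmm::exec_policy(stream),
 *                        in_begin,
 *                        in_begin + num_rows,
 *                        out_begin,
 *                        cudf::detail::replace_policy_functor{});
 * @endcode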
*/
struct replace_policy_functor {
__device__ idx_valid_pair_t operator()(idx_valid_pair_t const& lhs, idx_valid_pair_t const& rhs)
{
return thrust::get<1>(rhs) ? rhs : lhs;
}
};
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/nvtx/ranges.hpp
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "nvtx3.hpp"
namespace cudf {
/**
* @brief Tag type for libcudf's NVTX domain.
*/
struct libcudf_domain {
static constexpr char const* name{"libcudf"}; ///< Name of the libcudf domain
};
/**
* @brief Alias for an NVTX range in the libcudf domain.
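 *
 * A minimal usage sketch (illustrative):
 * ```
 * void some_cudf_function(){
 *   // Begins an NVTX range in the libcudf domain named "function scope"
 *   cudf::thread_range r{"function scope"};
 *   // ... work to be profiled ...
 * } // Range ends when `r` is destroyed
 * ```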
*/
using thread_range = ::nvtx3::domain_thread_range<libcudf_domain>;
} // namespace cudf
/**
* @brief Convenience macro for generating an NVTX range in the `libcudf` domain
* from the lifetime of a function.
*
* Uses the name of the immediately enclosing function returned by `__func__` to
* name the range.
*
* Example:
* ```
* void some_function(){
* CUDF_FUNC_RANGE();
* ...
* }
* ```
*/
#define CUDF_FUNC_RANGE() NVTX3_FUNC_RANGE_IN(cudf::libcudf_domain)
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/nvtx/nvtx3.hpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#if defined(NVTX3_MINOR_VERSION) and NVTX3_MINOR_VERSION < 0
#error \
"Trying to #include NVTX version 3 in a source file where an older NVTX version has already been included. If you are not directly using NVTX (the NVIDIA Tools Extension library), you are getting this error because libraries you are using have included different versions of NVTX. Suggested solutions are: (1) reorder #includes so the newest NVTX version is included first, (2) avoid using the conflicting libraries in the same .c/.cpp file, or (3) update the library using the older NVTX version to use the newer version instead."
#endif
/**
* @brief Semantic minor version number.
*
* Major version number is hardcoded into the "nvtx3" namespace/prefix.
*
* If this value is incremented, the above version include guard needs to be
* updated.
*/
#define NVTX3_MINOR_VERSION 0
#include <nvtx3/nvToolsExt.h>
#include <string>
/**
* @file nvtx3.hpp
*
* @brief Provides C++ constructs making the NVTX library safer and easier to
* use with zero overhead.
*/
/**
* \mainpage
* \tableofcontents
*
* \section QUICK_START Quick Start
*
* To add NVTX ranges to your code, use the `nvtx3::thread_range` RAII object. A
* range begins when the object is created, and ends when the object is
* destroyed.
*
* \code{.cpp}
* #include "nvtx3.hpp"
* void some_function(){
* // Begins a NVTX range with the message "some_function"
* // The range ends when some_function() returns and `r` is destroyed
* nvtx3::thread_range r{"some_function"};
*
* for(int i = 0; i < 6; ++i){
* nvtx3::thread_range loop{"loop range"};
* std::this_thread::sleep_for(std::chrono::seconds{1});
* }
* } // Range ends when `r` is destroyed
* \endcode
*
* The example code above generates the following timeline view in Nsight
* Systems:
*
* \image html
* https://raw.githubusercontent.com/jrhemstad/nvtx_wrappers/master/docs/example_range.png
*
* Alternatively, use the \ref MACROS like `NVTX3_FUNC_RANGE()` to add
* ranges to your code that automatically use the name of the enclosing function
* as the range's message.
*
* \code{.cpp}
* #include "nvtx3.hpp"
* void some_function(){
* // Creates a range with a message "some_function" that ends when the
* enclosing
* // function returns
* NVTX3_FUNC_RANGE();
* ...
* }
* \endcode
*
*
* \section Overview
*
* The NVTX library provides a set of functions for users to annotate their code
* to aid in performance profiling and optimization. These annotations provide
* information to tools like Nsight Systems to improve visualization of
* application timelines.
*
* \ref RANGES are one of the most commonly used NVTX constructs for annotating
* a span of time. For example, imagine a user wanted to see every time a
* function, `my_function`, is called and how long it takes to execute. This can
* be accomplished with an NVTX range created on the entry to the function and
* terminated on return from `my_function` using the push/pop C APIs:
*
* ```
* void my_function(...){
* nvtxRangePushA("my_function"); // Begins NVTX range
* // do work
* nvtxRangePop(); // Ends NVTX range
* }
* ```
*
* One of the challenges with using the NVTX C API is that it requires manually
* terminating the end of the range with `nvtxRangePop`. This can be challenging
* if `my_function()` has multiple returns or can throw exceptions as it
* requires calling `nvtxRangePop()` before all possible return points.
*
* NVTX++ solves this inconvenience through the "RAII" technique by providing a
* `nvtx3::thread_range` class that begins a range at construction and ends the
* range on destruction. The above example then becomes:
*
* ```
* void my_function(...){
* nvtx3::thread_range r{"my_function"}; // Begins NVTX range
* // do work
* } // Range ends on exit from `my_function` when `r` is destroyed
* ```
*
* The range object `r` is deterministically destroyed whenever `my_function`
* returns---ending the NVTX range without manual intervention. For more
* information, see \ref RANGES and `nvtx3::domain_thread_range`.
*
 * Another inconvenience of the NVTX C APIs is that several constructs require the
 * user to initialize an object at the beginning of an application
 * and reuse that object throughout the lifetime of the application, for example
 * Domains, Categories, and Registered messages.
*
* Example:
* ```
* nvtxDomainHandle_t D = nvtxDomainCreateA("my domain");
* // Reuse `D` throughout the rest of the application
* ```
*
* This can be problematic if the user application or library does not have an
* explicit initialization function called before all other functions to
* ensure that these long-lived objects are initialized before being used.
*
* NVTX++ makes use of the "construct on first use" technique to alleviate this
* inconvenience. In short, a function local static object is constructed upon
* the first invocation of a function and returns a reference to that object on
* all future invocations. See the documentation for
* `nvtx3::registered_message`, `nvtx3::domain`, `nvtx3::named_category`, and
* https://isocpp.org/wiki/faq/ctors#static-init-order-on-first-use for more
* information.
*
* Using construct on first use, the above example becomes:
* ```
* struct my_domain{ static constexpr char const* name{"my domain"}; };
*
* // The first invocation of `domain::get` for the type `my_domain` will
* // construct a `nvtx3::domain` object and return a reference to it. Future
* // invocations simply return a reference.
* nvtx3::domain const& D = nvtx3::domain::get<my_domain>();
* ```
 * For more information about NVTX and how it can be used, see
 * https://docs.nvidia.com/cuda/profiler-users-guide/index.html#nvtx and
 * https://devblogs.nvidia.com/cuda-pro-tip-generate-custom-application-profile-timelines-nvtx/
*
* \section RANGES Ranges
*
* Ranges are used to describe a span of time during the execution of an
* application. Common examples are using ranges to annotate the time it takes
* to execute a function or an iteration of a loop.
*
* NVTX++ uses RAII to automate the generation of ranges that are tied to the
* lifetime of objects. Similar to `std::lock_guard` in the C++ Standard
* Template Library.
*
* \subsection THREAD_RANGE Thread Range
*
* `nvtx3::domain_thread_range` is a class that begins a range upon construction
* and ends the range at destruction. This is one of the most commonly used
* constructs in NVTX++ and is useful for annotating spans of time on a
* particular thread. These ranges can be nested to arbitrary depths.
*
* `nvtx3::thread_range` is an alias for a `nvtx3::domain_thread_range` in the
* global NVTX domain. For more information about Domains, see \ref DOMAINS.
*
 * Various attributes of a range can be configured by constructing a
* `nvtx3::domain_thread_range` with a `nvtx3::event_attributes` object. For
* more information, see \ref ATTRIBUTES.
*
* Example:
*
* \code{.cpp}
* void some_function(){
* // Creates a range for the duration of `some_function`
* nvtx3::thread_range r{};
*
* while(true){
* // Creates a range for every loop iteration
* // `loop_range` is nested inside `r`
* nvtx3::thread_range loop_range{};
* }
* }
* \endcode
*
* \subsection PROCESS_RANGE Process Range
*
* `nvtx3::domain_process_range` is identical to `nvtx3::domain_thread_range`
* with the exception that a `domain_process_range` can be created and destroyed
* on different threads. This is useful to annotate spans of time that can
* bridge multiple threads.
*
* `nvtx3::domain_thread_range`s should be preferred unless one needs the
* ability to begin and end a range on different threads.
*
* \section MARKS Marks
*
* `nvtx3::mark` allows annotating an instantaneous event in an application's
* timeline. For example, indicating when a mutex is locked or unlocked.
*
* \code{.cpp}
* std::mutex global_lock;
* void lock_mutex(){
* global_lock.lock();
* // Marks an event immediately after the mutex is locked
* nvtx3::mark<my_domain>("lock_mutex");
* }
* \endcode
*
* \section DOMAINS Domains
*
* Similar to C++ namespaces, Domains allow for scoping NVTX events. By default,
* all NVTX events belong to the "global" domain. Libraries and applications
* should scope their events to use a custom domain to differentiate where the
* events originate from.
*
* It is common for a library or application to have only a single domain and
* for the name of that domain to be known at compile time. Therefore, Domains
* in NVTX++ are represented by _tag types_.
*
* For example, to define a custom domain, simply define a new concrete type
* (a `class` or `struct`) with a `static` member called `name` that contains
* the desired name of the domain.
*
* ```
* struct my_domain{ static constexpr char const* name{"my domain"}; };
* ```
*
* For any NVTX++ construct that can be scoped to a domain, the type `my_domain`
* can be passed as an explicit template argument to scope it to the custom
* domain.
*
* The tag type `nvtx3::domain::global` represents the global NVTX domain.
*
* \code{.cpp}
* // By default, `domain_thread_range` belongs to the global domain
* nvtx3::domain_thread_range<> r0{};
*
* // Alias for a `domain_thread_range` in the global domain
* nvtx3::thread_range r1{};
*
* // `r` belongs to the custom domain
* nvtx3::domain_thread_range<my_domain> r{};
* \endcode
*
* When using a custom domain, it is recommended to define type aliases for NVTX
* constructs in the custom domain.
* ```
* using my_thread_range = nvtx3::domain_thread_range<my_domain>;
* using my_registered_message = nvtx3::registered_message<my_domain>;
* using my_named_category = nvtx3::named_category<my_domain>;
* ```
*
* See `nvtx3::domain` for more information.
*
* \section ATTRIBUTES Event Attributes
*
* NVTX events can be customized with various attributes to provide additional
* information (such as a custom message) or to control visualization of the
* event (such as the color used). These attributes can be specified per-event
* via arguments to a `nvtx3::event_attributes` object.
*
* NVTX events can be customized via four "attributes":
* - \ref COLOR : color used to visualize the event in tools.
* - \ref MESSAGES : Custom message string.
* - \ref PAYLOAD : User-defined numerical value.
* - \ref CATEGORY : Intra-domain grouping.
*
* It is possible to construct a `nvtx3::event_attributes` from any number of
* attribute objects (nvtx3::color, nvtx3::message, nvtx3::payload,
* nvtx3::category) in any order. If an attribute is not specified, a tool
* specific default value is used. See `nvtx3::event_attributes` for more
* information.
*
* \code{.cpp}
* // Custom color, message
* event_attributes attr{nvtx3::rgb{127, 255, 0},
* "message"};
*
* // Custom color, message, payload, category
* event_attributes attr{nvtx3::rgb{127, 255, 0},
* nvtx3::payload{42},
* "message",
* nvtx3::category{1}};
*
* // Arguments can be in any order
* event_attributes attr{nvtx3::payload{42},
* nvtx3::category{1},
* "message",
* nvtx3::rgb{127, 255, 0}};
*
* // "First wins" with multiple arguments of the same type
 * event_attributes attr{ nvtx3::payload{42}, nvtx3::payload{7} }; // payload is 42
 * \endcode
*
* \subsection MESSAGES message
*
* A `nvtx3::message` allows associating a custom message string with an NVTX
* event.
*
* Example:
* \code{.cpp}
* // Create an `event_attributes` with the custom message "my message"
* nvtx3::event_attributes attr{nvtx3::message{"my message"}};
*
* // strings and string literals implicitly assumed to be a `nvtx3::message`
* nvtx3::event_attributes attr{"my message"};
* \endcode
*
* \subsubsection REGISTERED_MESSAGE Registered Messages
*
* Associating a `nvtx3::message` with an event requires copying the contents of
* the message every time the message is used, i.e., copying the entire message
* string. This may cause non-trivial overhead in performance sensitive code.
*
* To eliminate this overhead, NVTX allows registering a message string,
* yielding a "handle" that is inexpensive to copy that may be used in place of
* a message string. When visualizing the events, tools such as Nsight Systems
* will take care of mapping the message handle to its string.
*
* A message should be registered once and the handle reused throughout the rest
* of the application. This can be done by either explicitly creating static
* `nvtx3::registered_message` objects, or using the
* `nvtx3::registered_message::get` construct on first use helper (recommended).
*
* Similar to \ref DOMAINS, `nvtx3::registered_message::get` requires defining a
* custom tag type with a static `message` member whose value will be the
* contents of the registered string.
*
* Example:
* \code{.cpp}
* // Explicitly constructed, static `registered_message`
* static registered_message<my_domain> static_message{"my message"};
*
* // Or use construct on first use:
* // Define a tag type with a `message` member string to register
* struct my_message{ static constexpr char const* message{ "my message" }; };
*
* // Uses construct on first use to register the contents of
* // `my_message::message`
* nvtx3::registered_message<my_domain> const& msg =
* nvtx3::registered_message<my_domain>::get<my_message>(); \endcode
*
* \subsection COLOR color
*
* Associating a `nvtx3::color` with an event allows controlling how the event
* is visualized in a tool such as Nsight Systems. This is a convenient way to
* visually differentiate among different events.
*
* \code{.cpp}
* // Define a color via rgb color values
* nvtx3::color c{nvtx3::rgb{127, 255, 0}};
* nvtx3::event_attributes attr{c};
*
* // rgb color values can be passed directly to an `event_attributes`
* nvtx3::event_attributes attr1{nvtx3::rgb{127,255,0}};
* \endcode
*
* \subsection CATEGORY category
*
* A `nvtx3::category` is simply an integer id that allows for fine-grain
* grouping of NVTX events. For example, one might use separate categories for
* IO, memory allocation, compute, etc.
*
* \code{.cpp}
* nvtx3::event_attributes{nvtx3::category{1}};
* \endcode
*
* \subsubsection NAMED_CATEGORIES Named Categories
*
* Associates a `name` string with a category `id` to help differentiate among
* categories.
*
* For any given category id `Id`, a `named_category{Id, "name"}` should only
* be constructed once and reused throughout an application. This can be done by
* either explicitly creating static `nvtx3::named_category` objects, or using
* the `nvtx3::named_category::get` construct on first use helper (recommended).
*
* Similar to \ref DOMAINS, `nvtx3::named_category::get` requires defining a
* custom tag type with static `name` and `id` members.
*
* \code{.cpp}
* // Explicitly constructed, static `named_category`
* static nvtx3::named_category static_category{42, "my category"};
*
* // OR use construct on first use:
* // Define a tag type with `name` and `id` members
* struct my_category{
* static constexpr char const* name{"my category"}; // category name
* static constexpr category::id_type id{42}; // category id
* };
*
* // Use construct on first use to name the category id `42`
* // with name "my category"
* nvtx3::named_category const& my_category =
* named_category<my_domain>::get<my_category>();
*
* // Range `r` associated with category id `42`
* nvtx3::event_attributes attr{my_category};
* \endcode
*
* \subsection PAYLOAD payload
*
* Allows associating a user-defined numerical value with an event.
*
* ```
 * // Constructs a payload from the `int32_t` value 42
 * nvtx3::event_attributes attr{nvtx3::payload{42}};
* ```
*
*
* \section EXAMPLE Example
*
* Putting it all together:
* \code{.cpp}
* // Define a custom domain tag type
* struct my_domain{ static constexpr char const* name{"my domain"}; };
*
* // Define a named category tag type
* struct my_category{
* static constexpr char const* name{"my category"};
* static constexpr uint32_t id{42};
* };
*
* // Define a registered message tag type
* struct my_message{ static constexpr char const* message{"my message"}; };
*
* // For convenience, use aliases for domain scoped objects
* using my_thread_range = nvtx3::domain_thread_range<my_domain>;
* using my_registered_message = nvtx3::registered_message<my_domain>;
* using my_named_category = nvtx3::named_category<my_domain>;
*
* // Default values for all attributes
* nvtx3::event_attributes attr{};
* my_thread_range r0{attr};
*
* // Custom (unregistered) message, and unnamed category
* nvtx3::event_attributes attr1{"message", nvtx3::category{2}};
* my_thread_range r1{attr1};
*
* // Alternatively, pass arguments of `event_attributes` ctor directly to
* // `my_thread_range`
* my_thread_range r2{"message", nvtx3::category{2}};
*
* // construct on first use a registered message
* auto msg = my_registered_message::get<my_message>();
*
* // construct on first use a named category
* auto category = my_named_category::get<my_category>();
*
* // Use registered message and named category
* my_thread_range r3{msg, category, nvtx3::rgb{127, 255, 0},
* nvtx3::payload{42}};
*
* // Any number of arguments in any order
* my_thread_range r{nvtx3::rgb{127, 255,0}, msg};
*
* \endcode
* \section MACROS Convenience Macros
*
* Oftentimes users want to quickly and easily add NVTX ranges to their library
* or application to aid in profiling and optimization.
*
* A convenient way to do this is to use the \ref NVTX3_FUNC_RANGE and
* \ref NVTX3_FUNC_RANGE_IN macros. These macros take care of constructing an
* `nvtx3::domain_thread_range` with the name of the enclosing function as the
* range's message.
*
* \code{.cpp}
* void some_function(){
* // Automatically generates an NVTX range for the duration of the function
* // using "some_function" as the event's message.
* NVTX3_FUNC_RANGE();
* }
* \endcode
*/
/**
* @brief Enables the use of constexpr when support for C++14 relaxed constexpr
* is present.
*
* Initializing a legacy-C (i.e., no constructor) union member requires
* initializing in the constructor body. Non-empty constexpr constructors
* require C++14 relaxed constexpr.
*/
#if __cpp_constexpr >= 201304L
#define NVTX3_RELAXED_CONSTEXPR constexpr
#else
#define NVTX3_RELAXED_CONSTEXPR
#endif
namespace nvtx3 {
namespace detail {
/**
* @brief Verifies if a type `T` contains a member `T::name` of type `const
* char*` or `const wchar_t*`.
*
* @tparam T The type to verify
* @return True if `T` contains a member `T::name` of type `const char*` or
* `const wchar_t*`.
*/
template <typename T>
constexpr auto has_name_member() noexcept -> decltype(T::name, bool())
{
return (std::is_same_v<char const*, typename std::decay<decltype(T::name)>::type> or
std::is_same_v<wchar_t const*, typename std::decay<decltype(T::name)>::type>);
}
} // namespace detail
/**
* @brief `domain`s allow for grouping NVTX events into a single scope to
* differentiate them from events in other `domain`s.
*
* By default, all NVTX constructs are placed in the "global" NVTX domain.
*
* A custom `domain` may be used in order to differentiate a library's or
* application's NVTX events from other events.
*
* `domain`s are expected to be long-lived and unique to a library or
* application. As such, it is assumed a domain's name is known at compile
* time. Therefore, all NVTX constructs that can be associated with a domain
* require the domain to be specified via a *type* `DomainName` passed as an
* explicit template parameter.
*
* The type `domain::global` may be used to indicate that the global NVTX
* domain should be used.
*
* None of the C++ NVTX constructs require the user to manually construct a
* `domain` object. Instead, if a custom domain is desired, the user is
* expected to define a type `DomainName` that contains a member
* `DomainName::name` which resolves to either a `char const*` or `wchar_t
* const*`. The value of `DomainName::name` is used to name and uniquely
* identify the custom domain.
*
* Upon the first use of an NVTX construct associated with the type
* `DomainName`, the "construct on first use" pattern is used to construct a
* function local static `domain` object. All future NVTX constructs
* associated with `DomainType` will use a reference to the previously
* constructed `domain` object. See `domain::get`.
*
* Example:
* ```
* // The type `my_domain` defines a `name` member used to name and identify
* the
* // `domain` object identified by `my_domain`.
* struct my_domain{ static constexpr char const* name{"my_domain"}; };
*
* // The NVTX range `r` will be grouped with all other NVTX constructs
* // associated with `my_domain`.
* nvtx3::domain_thread_range<my_domain> r{};
*
* // An alias can be created for a `domain_thread_range` in the custom domain
* using my_thread_range = nvtx3::domain_thread_range<my_domain>;
* my_thread_range my_range{};
*
* // `domain::global` indicates that the global NVTX domain is used
* nvtx3::domain_thread_range<domain::global> r2{};
*
* // For convenience, `nvtx3::thread_range` is an alias for a range in the
* // global domain
* nvtx3::thread_range r3{};
* ```
*/
class domain {
public:
domain(domain const&) = delete;
domain& operator=(domain const&) = delete;
domain(domain&&) = delete;
domain& operator=(domain&&) = delete;
/**
* @brief Returns reference to an instance of a function local static
* `domain` object.
*
* Uses the "construct on first use" idiom to safely ensure the `domain`
* object is initialized exactly once upon first invocation of
* `domain::get<DomainName>()`. All following invocations will return a
* reference to the previously constructed `domain` object. See
* https://isocpp.org/wiki/faq/ctors#static-init-order-on-first-use
*
* None of the constructs in this header require the user to directly invoke
* `domain::get`. It is automatically invoked when constructing objects like
* a `domain_thread_range` or `category`. Advanced users may wish to use
* `domain::get` for the convenience of the "construct on first use" idiom
* when using domains with their own use of the NVTX C API.
*
* This function is threadsafe as of C++11. If two or more threads call
* `domain::get<DomainName>` concurrently, exactly one of them is guaranteed
* to construct the `domain` object and the other(s) will receive a
* reference to the object after it is fully constructed.
*
   * The domain's name is specified via the type `DomainName` passed as an
* explicit template parameter. `DomainName` is required to contain a
* member `DomainName::name` that resolves to either a `char const*` or
* `wchar_t const*`. The value of `DomainName::name` is used to name and
* uniquely identify the `domain`.
*
* Example:
* ```
* // The type `my_domain` defines a `name` member used to name and identify
* // the `domain` object identified by `my_domain`.
* struct my_domain{ static constexpr char const* name{"my domain"}; };
*
* auto D = domain::get<my_domain>(); // First invocation constructs a
* // `domain` with the name "my domain"
*
* auto D1 = domain::get<my_domain>(); // Simply returns reference to
* // previously constructed `domain`.
* ```
*
* @tparam DomainName Type that contains a `DomainName::name` member used to
* name the `domain` object.
* @return Reference to the `domain` corresponding to the type `DomainName`.
*/
template <typename DomainName>
static domain const& get()
{
static_assert(detail::has_name_member<DomainName>(),
"Type used to identify a domain must contain a name member of"
"type const char* or const wchar_t*");
static domain const d{DomainName::name};
return d;
}
/**
* @brief Conversion operator to `nvtxDomainHandle_t`.
*
* Allows transparently passing a domain object into an API expecting a
* native `nvtxDomainHandle_t` object.
*/
operator nvtxDomainHandle_t() const noexcept { return _domain; }
/**
* @brief Tag type for the "global" NVTX domain.
*
* This type may be passed as a template argument to any function/class
* expecting a type to identify a domain to indicate that the global domain
* should be used.
*
* All NVTX events in the global domain across all libraries and
* applications will be grouped together.
*
*/
struct global {};
private:
/**
* @brief Construct a new domain with the specified `name`.
*
* This constructor is private as it is intended that `domain` objects only
* be created through the `domain::get` function.
*
* @param name A unique name identifying the domain
*/
explicit domain(char const* name) noexcept : _domain{nvtxDomainCreateA(name)} {}
/**
* @brief Construct a new domain with the specified `name`.
*
* This constructor is private as it is intended that `domain` objects only
* be created through the `domain::get` function.
*
* @param name A unique name identifying the domain
*/
explicit domain(wchar_t const* name) noexcept : _domain{nvtxDomainCreateW(name)} {}
/**
* @brief Construct a new domain with the specified `name`.
*
* This constructor is private as it is intended that `domain` objects only
* be created through the `domain::get` function.
*
* @param name A unique name identifying the domain
*/
explicit domain(std::string const& name) noexcept : domain{name.c_str()} {}
/**
* @brief Construct a new domain with the specified `name`.
*
* This constructor is private as it is intended that `domain` objects only
* be created through the `domain::get` function.
*
* @param name A unique name identifying the domain
*/
explicit domain(std::wstring const& name) noexcept : domain{name.c_str()} {}
/**
* @brief Default constructor creates a `domain` representing the
* "global" NVTX domain.
*
* All events not associated with a custom `domain` are grouped in the
* "global" NVTX domain.
*
*/
domain() = default;
/**
* @brief Destroy the domain object, unregistering and freeing all domain
* specific resources.
*/
~domain() noexcept { nvtxDomainDestroy(_domain); }
private:
nvtxDomainHandle_t const _domain{}; ///< The `domain`s NVTX handle
};
/**
* @brief Returns reference to the `domain` object that represents the global
* NVTX domain.
*
 * This specialization for `domain::global` returns a default constructed
* `domain` object for use when the "global" domain is desired.
*
* All NVTX events in the global domain across all libraries and applications
* will be grouped together.
*
* @return Reference to the `domain` corresponding to the global NVTX domain.
*/
template <>
inline domain const& domain::get<domain::global>()
{
static domain const d{};
return d;
}
/**
* @brief Indicates the values of the red, green, blue color channels for
* a rgb color code.
*/
struct rgb {
/// Type used for component values
using component_type = uint8_t;
/**
* @brief Construct a rgb with red, green, and blue channels
* specified by `red_`, `green_`, and `blue_`, respectively.
*
* Valid values are in the range `[0,255]`.
*
* @param red_ Value of the red channel
* @param green_ Value of the green channel
* @param blue_ Value of the blue channel
*/
constexpr rgb(component_type red_, component_type green_, component_type blue_) noexcept
: red{red_}, green{green_}, blue{blue_}
{
}
component_type const red{}; ///< Red channel value
component_type const green{}; ///< Green channel value
component_type const blue{}; ///< Blue channel value
};
/**
* @brief Indicates the value of the alpha, red, green, and blue color
* channels for an argb color code.
*/
struct argb final : rgb {
/**
* @brief Construct an argb with alpha, red, green, and blue channels
* specified by `alpha_`, `red_`, `green_`, and `blue_`, respectively.
*
* Valid values are in the range `[0,255]`.
*
* @param alpha_ Value of the alpha channel (opacity)
* @param red_ Value of the red channel
* @param green_ Value of the green channel
* @param blue_ Value of the blue channel
*
*/
constexpr argb(component_type alpha_,
component_type red_,
component_type green_,
component_type blue_) noexcept
: rgb{red_, green_, blue_}, alpha{alpha_}
{
}
component_type const alpha{}; ///< Alpha channel value
};
/**
* @brief Represents a custom color that can be associated with an NVTX event
 * via its `event_attributes`.
*
* Specifying colors for NVTX events is a convenient way to visually
* differentiate among different events in a visualization tool such as Nsight
* Systems.
*/
class color {
public:
/// Type used for the color's value
using value_type = uint32_t;
/**
* @brief Constructs a `color` using the value provided by `hex_code`.
*
* `hex_code` is expected to be a 4 byte argb hex code.
*
* The most significant byte indicates the value of the alpha channel
* (opacity) (0-255)
*
* The next byte indicates the value of the red channel (0-255)
*
* The next byte indicates the value of the green channel (0-255)
*
* The least significant byte indicates the value of the blue channel
* (0-255)
*
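 * For example (illustrative):
 *
 * \code{.cpp}
 * nvtx3::color c{0xFF7F0000};  // alpha = 0xFF (opaque), red = 0x7F, green = 0x00, blue = 0x00
 * \endcode
 *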
* @param hex_code The hex code used to construct the `color`
*/
constexpr explicit color(value_type hex_code) noexcept : _value{hex_code} {}
/**
* @brief Construct a `color` using the alpha, red, green, blue components
* in `argb`.
*
* @param argb The alpha, red, green, blue components of the desired `color`
*/
constexpr color(argb argb) noexcept
: color{from_bytes_msb_to_lsb(argb.alpha, argb.red, argb.green, argb.blue)}
{
}
/**
* @brief Construct a `color` using the red, green, blue components in
* `rgb`.
*
* Uses maximum value for the alpha channel (opacity) of the `color`.
*
* @param rgb The red, green, blue components of the desired `color`
*/
constexpr color(rgb rgb) noexcept
: color{from_bytes_msb_to_lsb(0xFF, rgb.red, rgb.green, rgb.blue)}
{
}
/**
   * @brief Returns the `color`'s argb hex code
*
*/
constexpr value_type get_value() const noexcept { return _value; }
/**
* @brief Return the NVTX color type of the color.
*
*/
constexpr nvtxColorType_t get_type() const noexcept { return _type; }
color() = delete;
~color() = default;
color(color const&) = default;
color& operator=(color const&) = default;
color(color&&) = default;
color& operator=(color&&) = default;
private:
/**
* @brief Constructs an unsigned, 4B integer from the component bytes in
* most to least significant byte order.
*
*/
constexpr static value_type from_bytes_msb_to_lsb(uint8_t byte3,
uint8_t byte2,
uint8_t byte1,
uint8_t byte0) noexcept
{
return uint32_t{byte3} << 24 | uint32_t{byte2} << 16 | uint32_t{byte1} << 8 | uint32_t{byte0};
}
value_type const _value{}; ///< color's argb color code
nvtxColorType_t const _type{NVTX_COLOR_ARGB}; ///< NVTX color type code
};
/**
* @brief Object for intra-domain grouping of NVTX events.
*
* A `category` is simply an integer id that allows for fine-grain grouping of
* NVTX events. For example, one might use separate categories for IO, memory
* allocation, compute, etc.
*
* Example:
* \code{.cpp}
* nvtx3::category cat1{1};
*
* // Range `r1` belongs to the category identified by the value `1`.
* nvtx3::thread_range r1{cat1};
*
* // Range `r2` belongs to the same category as `r1`
* nvtx3::thread_range r2{nvtx3::category{1}};
* \endcode
*
* To associate a name string with a category id, see `named_category`.
*/
class category {
public:
  /// Type used for a `category`'s integer id.
using id_type = uint32_t;
/**
* @brief Construct a `category` with the specified `id`.
*
* The `category` will be unnamed and identified only by its `id` value.
*
* All `category` objects sharing the same `id` are equivalent.
*
* @param[in] id The `category`'s identifying value
*/
constexpr explicit category(id_type id) noexcept : id_{id} {}
/**
* @brief Returns the id of the category.
*
*/
constexpr id_type get_id() const noexcept { return id_; }
category() = delete;
~category() = default;
category(category const&) = default;
category& operator=(category const&) = default;
category(category&&) = default;
category& operator=(category&&) = default;
private:
id_type const id_{}; ///< category's unique identifier
};
/**
* @brief A `category` with an associated name string.
*
* Associates a `name` string with a category `id` to help differentiate among
* categories.
*
* For any given category id `Id`, a `named_category(Id, "name")` should only
* be constructed once and reused throughout an application. This can be done
* by either explicitly creating static `named_category` objects, or using the
* `named_category::get` construct on first use helper (recommended).
*
* Creating two or more `named_category` objects with the same value for `id`
* in the same domain results in undefined behavior.
*
* Similarly, behavior is undefined when a `named_category` and `category`
* share the same value of `id`.
*
* Example:
* \code{.cpp}
* // Explicitly constructed, static `named_category`
* static nvtx3::named_category static_category{42, "my category"};
*
* // Range `r` associated with category id `42`
* nvtx3::thread_range r{static_category};
*
* // OR use construct on first use:
*
* // Define a type with `name` and `id` members
* struct my_category{
* static constexpr char const* name{"my category"}; // category name
* static constexpr category::id_type id{42}; // category id
* };
*
* // Use construct on first use to name the category id `42`
* // with name "my category"
* auto my_category = named_category<my_domain>::get<my_category>();
*
* // Range `r` associated with category id `42`
* nvtx3::thread_range r{my_category};
* \endcode
*
* `named_category`'s association of a name to a category id is local to the
* domain specified by the type `D`. An id may have a different name in
* another domain.
*
* @tparam D Type containing `name` member used to identify the `domain` to
* which the `named_category` belongs. Else, `domain::global` to indicate
* that the global NVTX domain should be used.
*/
template <typename D = domain::global>
class named_category final : public category {
public:
/**
* @brief Returns a global instance of a `named_category` as a
* function-local static.
*
* Creates a `named_category` with name and id specified by the contents of
* a type `C`. `C::name` determines the name and `C::id` determines the
* category id.
*
* This function is useful for constructing a named `category` exactly once
* and reusing the same instance throughout an application.
*
* Example:
* \code{.cpp}
* // Define a type with `name` and `id` members
* struct my_category{
* static constexpr char const* name{"my category"}; // category name
* static constexpr uint32_t id{42}; // category id
* };
*
* // Use construct on first use to name the category id `42`
* // with name "my category"
* auto cat = named_category<my_domain>::get<my_category>();
*
* // Range `r` associated with category id `42`
* nvtx3::thread_range r{cat};
* \endcode
*
* Uses the "construct on first use" idiom to safely ensure the `category`
* object is initialized exactly once. See
* https://isocpp.org/wiki/faq/ctors#static-init-order-on-first-use
*
* @tparam C Type containing a member `C::name` that resolves to either a
* `char const*` or `wchar_t const*` and `C::id`.
*/
template <typename C>
static named_category<D> const& get() noexcept
{
static_assert(detail::has_name_member<C>(),
"Type used to name a category must contain a name member.");
static named_category<D> const category{C::id, C::name};
return category;
}
/**
* @brief Construct a `category` with the specified `id` and `name`.
*
* The name `name` will be registered with `id`.
*
* Every unique value of `id` should only be named once.
*
* @param[in] id The category id to name
   * @param[in] name The name to associate with `id`
*/
named_category(id_type id, char const* name) noexcept : category{id}
{
nvtxDomainNameCategoryA(domain::get<D>(), get_id(), name);
};
/**
* @brief Construct a `category` with the specified `id` and `name`.
*
* The name `name` will be registered with `id`.
*
* Every unique value of `id` should only be named once.
*
* @param[in] id The category id to name
   * @param[in] name The name to associate with `id`
*/
named_category(id_type id, wchar_t const* name) noexcept : category{id}
{
nvtxDomainNameCategoryW(domain::get<D>(), get_id(), name);
};
};
/**
* @brief A message registered with NVTX.
*
* Normally, associating a `message` with an NVTX event requires copying the
* contents of the message string. This may cause non-trivial overhead in
* highly performance sensitive regions of code.
*
* message registration is an optimization to lower the overhead of
* associating a message with an NVTX event. Registering a message yields a
* handle that is inexpensive to copy that may be used in place of a message
* string.
*
* A particular message should only be registered once and the handle
* reused throughout the rest of the application. This can be done by either
* explicitly creating static `registered_message` objects, or using the
* `registered_message::get` construct on first use helper (recommended).
*
* Example:
* \code{.cpp}
* // Explicitly constructed, static `registered_message`
* static registered_message<my_domain> static_message{"message"};
*
* // "message" is associated with the range `r`
* nvtx3::thread_range r{static_message};
*
* // Or use construct on first use:
*
* // Define a type with a `message` member that defines the contents of the
* // registered message
* struct my_message{ static constexpr char const* message{ "my message" }; };
*
* // Uses construct on first use to register the contents of
* // `my_message::message`
* auto msg = registered_message<my_domain>::get<my_message>();
*
* // "my message" is associated with the range `r`
* nvtx3::thread_range r{msg};
* \endcode
*
* `registered_message`s are local to a particular domain specified via
* the type `D`.
*
* @tparam D Type containing `name` member used to identify the `domain` to
* which the `registered_message` belongs. Else, `domain::global` to indicate
* that the global NVTX domain should be used.
*/
template <typename D = domain::global>
class registered_message {
public:
/**
* @brief Returns a global instance of a `registered_message` as a function
* local static.
*
* Provides a convenient way to register a message with NVTX without having
* to explicitly register the message.
*
* Upon first invocation, constructs a `registered_message` whose contents
* are specified by `message::message`.
*
* All future invocations will return a reference to the object constructed
* in the first invocation.
*
* Example:
* \code{.cpp}
* // Define a type with a `message` member that defines the contents of the
* // registered message
* struct my_message{ static constexpr char const* message{ "my message" };
* };
*
* // Uses construct on first use to register the contents of
* // `my_message::message`
* auto msg = registered_message<my_domain>::get<my_message>();
*
* // "my message" is associated with the range `r`
* nvtx3::thread_range r{msg};
* \endcode
*
* @tparam M Type required to contain a member `M::message` that
* resolves to either a `char const*` or `wchar_t const*` used as the
* registered message's contents.
* @return Reference to a `registered_message` associated with the type `M`.
*/
template <typename M>
static registered_message<D> const& get() noexcept
{
static registered_message<D> const registered_message{M::message};
return registered_message;
}
/**
* @brief Constructs a `registered_message` from the specified `msg` string.
*
* Registers `msg` with NVTX and associates a handle with the registered
* message.
*
* A particular message should only be registered once and the handle
* reused throughout the rest of the application.
*
* @param msg The contents of the message
*/
explicit registered_message(char const* msg) noexcept
: handle_{nvtxDomainRegisterStringA(domain::get<D>(), msg)}
{
}
/**
* @brief Constructs a `registered_message` from the specified `msg` string.
*
* Registers `msg` with NVTX and associates a handle with the registered
* message.
*
* A particular message should only be registered once and the handle
* reused throughout the rest of the application.
*
* @param msg The contents of the message
*/
explicit registered_message(std::string const& msg) noexcept : registered_message{msg.c_str()} {}
/**
* @brief Constructs a `registered_message` from the specified `msg` string.
*
* Registers `msg` with NVTX and associates a handle with the registered
* message.
*
* A particular message should only be registered once and the handle
* reused throughout the rest of the application.
*
* @param msg The contents of the message
*/
explicit registered_message(wchar_t const* msg) noexcept
: handle_{nvtxDomainRegisterStringW(domain::get<D>(), msg)}
{
}
/**
* @brief Constructs a `registered_message` from the specified `msg` string.
*
* Registers `msg` with NVTX and associates a handle with the registered
* message.
*
* A particular message should only be registered once and the handle
* reused throughout the rest of the application.
*
* @param msg The contents of the message
*/
explicit registered_message(std::wstring const& msg) noexcept : registered_message{msg.c_str()} {}
/**
* @brief Returns the registered message's handle
*
*/
nvtxStringHandle_t get_handle() const noexcept { return handle_; }
registered_message() = delete;
~registered_message() = default;
registered_message(registered_message const&) = default;
registered_message& operator=(registered_message const&) = default;
registered_message(registered_message&&) = default;
registered_message& operator=(registered_message&&) = default;
private:
nvtxStringHandle_t const handle_{}; ///< The handle returned from
///< registering the message with NVTX
};
/**
* @brief Allows associating a message string with an NVTX event via
 * its `event_attributes`.
*
* Associating a `message` with an NVTX event through its `event_attributes`
* allows for naming events to easily differentiate them from other events.
*
* Every time an NVTX event is created with an associated `message`, the
* contents of the message string must be copied. This may cause non-trivial
* overhead in highly performance sensitive sections of code. Use of a
* `nvtx3::registered_message` is recommended in these situations.
*
* Example:
* \code{.cpp}
* // Creates an `event_attributes` with message "message 0"
* nvtx3::event_attributes attr0{nvtx3::message{"message 0"}};
*
* // `range0` contains message "message 0"
* nvtx3::thread_range range0{attr0};
*
* // `std::string` and string literals are implicitly assumed to be
* // the contents of an `nvtx3::message`
* // Creates an `event_attributes` with message "message 1"
* nvtx3::event_attributes attr1{"message 1"};
*
* // `range1` contains message "message 1"
* nvtx3::thread_range range1{attr1};
*
* // `range2` contains message "message 2"
* nvtx3::thread_range range2{nvtx3::message{"message 2"}};
*
* // `std::string` and string literals are implicitly assumed to be
* // the contents of an `nvtx3::message`
* // `range3` contains message "message 3"
* nvtx3::thread_range range3{"message 3"};
* \endcode
*/
class message {
public:
using value_type = nvtxMessageValue_t;
/**
* @brief Construct a `message` whose contents are specified by `msg`.
*
* @param msg The contents of the message
*/
NVTX3_RELAXED_CONSTEXPR message(char const* msg) noexcept : type_{NVTX_MESSAGE_TYPE_ASCII}
{
value_.ascii = msg;
}
/**
* @brief Construct a `message` whose contents are specified by `msg`.
*
* @param msg The contents of the message
*/
message(std::string const& msg) noexcept : message{msg.c_str()} {}
/**
* @brief Disallow construction for `std::string` r-value
*
* `message` is a non-owning type and therefore cannot take ownership of an
* r-value. Therefore, constructing from an r-value is disallowed to prevent
* a dangling pointer.
*
*/
message(std::string&&) = delete;
/**
* @brief Construct a `message` whose contents are specified by `msg`.
*
* @param msg The contents of the message
*/
NVTX3_RELAXED_CONSTEXPR message(wchar_t const* msg) noexcept : type_{NVTX_MESSAGE_TYPE_UNICODE}
{
value_.unicode = msg;
}
/**
* @brief Construct a `message` whose contents are specified by `msg`.
*
* @param msg The contents of the message
*/
message(std::wstring const& msg) noexcept : message{msg.c_str()} {}
/**
* @brief Disallow construction for `std::wstring` r-value
*
* `message` is a non-owning type and therefore cannot take ownership of an
* r-value. Therefore, constructing from an r-value is disallowed to prevent
* a dangling pointer.
*
*/
message(std::wstring&&) = delete;
/**
* @brief Construct a `message` from a `registered_message`.
*
* @tparam D Type containing `name` member used to identify the `domain`
* to which the `registered_message` belongs. Else, `domain::global` to
* indicate that the global NVTX domain should be used.
* @param msg The message that has already been registered with NVTX.
*/
template <typename D>
message(registered_message<D> const& msg) noexcept : type_{NVTX_MESSAGE_TYPE_REGISTERED}
{
value_.registered = msg.get_handle();
}
/**
* @brief Return the union holding the value of the message.
*
*/
NVTX3_RELAXED_CONSTEXPR value_type get_value() const noexcept { return value_; }
/**
* @brief Return the type information about the value the union holds.
*
*/
NVTX3_RELAXED_CONSTEXPR nvtxMessageType_t get_type() const noexcept { return type_; }
private:
nvtxMessageType_t const type_{}; ///< message type
nvtxMessageValue_t value_{}; ///< message contents
};
/**
* @brief A numerical value that can be associated with an NVTX event via
* its `event_attributes`.
*
* Example:
* ```
 * // Constructs a payload from the `int32_t` value 42
 * nvtx3::event_attributes attr{nvtx3::payload{42}};
*
* // `range0` will have an int32_t payload of 42
* nvtx3::thread_range range0{attr};
*
* // range1 has double payload of 3.14
* nvtx3::thread_range range1{ nvtx3::payload{3.14} };
* ```
*/
class payload {
public:
using value_type = typename nvtxEventAttributes_v2::payload_t;
/**
* @brief Construct a `payload` from a signed, 8 byte integer.
*
* @param value Value to use as contents of the payload
*/
NVTX3_RELAXED_CONSTEXPR explicit payload(int64_t value) noexcept
: type_{NVTX_PAYLOAD_TYPE_INT64}, value_{}
{
value_.llValue = value;
}
/**
* @brief Construct a `payload` from a signed, 4 byte integer.
*
* @param value Value to use as contents of the payload
*/
NVTX3_RELAXED_CONSTEXPR explicit payload(int32_t value) noexcept
: type_{NVTX_PAYLOAD_TYPE_INT32}, value_{}
{
value_.iValue = value;
}
/**
* @brief Construct a `payload` from an unsigned, 8 byte integer.
*
* @param value Value to use as contents of the payload
*/
NVTX3_RELAXED_CONSTEXPR explicit payload(uint64_t value) noexcept
: type_{NVTX_PAYLOAD_TYPE_UNSIGNED_INT64}, value_{}
{
value_.ullValue = value;
}
/**
* @brief Construct a `payload` from an unsigned, 4 byte integer.
*
* @param value Value to use as contents of the payload
*/
NVTX3_RELAXED_CONSTEXPR explicit payload(uint32_t value) noexcept
: type_{NVTX_PAYLOAD_TYPE_UNSIGNED_INT32}, value_{}
{
value_.uiValue = value;
}
/**
* @brief Construct a `payload` from a single-precision floating point
* value.
*
* @param value Value to use as contents of the payload
*/
NVTX3_RELAXED_CONSTEXPR explicit payload(float value) noexcept
: type_{NVTX_PAYLOAD_TYPE_FLOAT}, value_{}
{
value_.fValue = value;
}
/**
* @brief Construct a `payload` from a double-precision floating point
* value.
*
* @param value Value to use as contents of the payload
*/
NVTX3_RELAXED_CONSTEXPR explicit payload(double value) noexcept
: type_{NVTX_PAYLOAD_TYPE_DOUBLE}, value_{}
{
value_.dValue = value;
}
/**
* @brief Return the union holding the value of the payload
*
*/
NVTX3_RELAXED_CONSTEXPR value_type get_value() const noexcept { return value_; }
/**
* @brief Return the information about the type the union holds.
*
*/
NVTX3_RELAXED_CONSTEXPR nvtxPayloadType_t get_type() const noexcept { return type_; }
private:
nvtxPayloadType_t const type_; ///< Type of the payload value
value_type value_; ///< Union holding the payload value
};
/**
* @brief Describes the attributes of a NVTX event.
*
* NVTX events can be customized via four "attributes":
*
* - color: color used to visualize the event in tools such as Nsight
* Systems. See `color`.
* - message: Custom message string. See `message`.
* - payload: User-defined numerical value. See `payload`.
* - category: Intra-domain grouping. See `category`.
*
* These component attributes are specified via an `event_attributes` object.
* See `nvtx3::color`, `nvtx3::message`, `nvtx3::payload`, and
* `nvtx3::category` for how these individual attributes are constructed.
*
* While it is possible to specify all four attributes, it is common to want
* to only specify a subset of attributes and use default values for the
* others. For convenience, `event_attributes` can be constructed from any
* number of attribute components in any order.
*
* Example:
* \code{.cpp}
* event_attributes attr{}; // No arguments, use defaults for all attributes
*
* event_attributes attr{"message"}; // Custom message, rest defaulted
*
* // Custom color & message
* event_attributes attr{"message", nvtx3::rgb{127, 255, 0}};
*
 * // Custom color & message, can use any order of arguments
* event_attributes attr{nvtx3::rgb{127, 255, 0}, "message"};
*
*
* // Custom color, message, payload, category
* event_attributes attr{nvtx3::rgb{127, 255, 0},
* "message",
* nvtx3::payload{42},
* nvtx3::category{1}};
*
* // Custom color, message, payload, category, can use any order of arguments
* event_attributes attr{nvtx3::payload{42},
* nvtx3::category{1},
* "message",
* nvtx3::rgb{127, 255, 0}};
*
* // Multiple arguments of the same type are allowed, but only the first is
* // used. All others are ignored
 * event_attributes attr{ nvtx3::payload{42}, nvtx3::payload{7} }; // payload is 42
*
* // Range `r` will be customized according the attributes in `attr`
* nvtx3::thread_range r{attr};
*
 * // For convenience, the arguments that can be passed to the `event_attributes`
 * // constructor may be passed to the `domain_thread_range` constructor where
 * // they will be forwarded to the `event_attributes` constructor
* nvtx3::thread_range r{nvtx3::payload{42}, nvtx3::category{1}, "message"};
* \endcode
*/
class event_attributes {
public:
using value_type = nvtxEventAttributes_t;
/**
* @brief Default constructor creates an `event_attributes` with no
* category, color, payload, nor message.
*/
constexpr event_attributes() noexcept
: attributes_{
NVTX_VERSION, // version
sizeof(nvtxEventAttributes_t), // size
0, // category
NVTX_COLOR_UNKNOWN, // color type
0, // color value
NVTX_PAYLOAD_UNKNOWN, // payload type
{}, // payload value (union)
NVTX_MESSAGE_UNKNOWN, // message type
{} // message value (union)
}
{
}
/**
* @brief Variadic constructor where the first argument is a `category`.
*
 * Sets the value of the `event_attributes` category based on `c` and
* forwards the remaining variadic parameter pack to the next constructor.
*
*/
template <typename... Args>
NVTX3_RELAXED_CONSTEXPR explicit event_attributes(category const& c, Args const&... args) noexcept
: event_attributes(args...)
{
attributes_.category = c.get_id();
}
/**
* @brief Variadic constructor where the first argument is a `color`.
*
 * Sets the value of the `event_attributes` color based on `c` and forwards
* the remaining variadic parameter pack to the next constructor.
*
*/
template <typename... Args>
NVTX3_RELAXED_CONSTEXPR explicit event_attributes(color const& c, Args const&... args) noexcept
: event_attributes(args...)
{
attributes_.color = c.get_value();
attributes_.colorType = c.get_type();
}
/**
* @brief Variadic constructor where the first argument is a `payload`.
*
 * Sets the value of the `event_attributes` payload based on `p` and forwards
* the remaining variadic parameter pack to the next constructor.
*
*/
template <typename... Args>
NVTX3_RELAXED_CONSTEXPR explicit event_attributes(payload const& p, Args const&... args) noexcept
: event_attributes(args...)
{
attributes_.payload = p.get_value();
attributes_.payloadType = p.get_type();
}
/**
* @brief Variadic constructor where the first argument is a `message`.
*
 * Sets the value of the `event_attributes` message based on `m` and forwards
* the remaining variadic parameter pack to the next constructor.
*
*/
template <typename... Args>
explicit event_attributes(message const& m, Args const&... args) noexcept
: event_attributes(args...)
{
attributes_.message = m.get_value();
attributes_.messageType = m.get_type();
}
~event_attributes() = default;
event_attributes(event_attributes const&) = default;
event_attributes& operator=(event_attributes const&) = default;
event_attributes(event_attributes&&) = default;
event_attributes& operator=(event_attributes&&) = default;
/**
* @brief Get raw pointer to underlying NVTX attributes object.
*
*/
constexpr value_type const* get() const noexcept { return &attributes_; }
private:
value_type attributes_{}; ///< The NVTX attributes structure
};
/**
* @brief A RAII object for creating a NVTX range local to a thread within a
* domain.
*
* When constructed, begins a nested NVTX range on the calling thread in the
* specified domain. Upon destruction, ends the NVTX range.
*
* Behavior is undefined if a `domain_thread_range` object is
* created/destroyed on different threads.
*
* `domain_thread_range` is neither moveable nor copyable.
*
* `domain_thread_range`s may be nested within other ranges.
*
* The domain of the range is specified by the template type parameter `D`.
* By default, the `domain::global` is used, which scopes the range to the
* global NVTX domain. The convenience alias `thread_range` is provided for
* ranges scoped to the global domain.
*
* A custom domain can be defined by creating a type, `D`, with a static
* member `D::name` whose value is used to name the domain associated with
* `D`. `D::name` must resolve to either `char const*` or `wchar_t const*`
*
* Example:
* ```
* // Define a type `my_domain` with a member `name` used to name the domain
* // associated with the type `my_domain`.
* struct my_domain{
* static constexpr const char * name{"my domain"};
* };
* ```
*
* Usage:
* ```
* nvtx3::domain_thread_range<> r0{"range 0"}; // Range in global domain
*
* nvtx3::thread_range r1{"range 1"}; // Alias for range in global domain
*
 * nvtx3::domain_thread_range<my_domain> r2{"range 2"}; // Range in custom domain
*
* // specify an alias to a range that uses a custom domain
* using my_thread_range = nvtx3::domain_thread_range<my_domain>;
*
* my_thread_range r3{"range 3"}; // Alias for range in custom domain
* ```
*/
template <class D = domain::global>
class domain_thread_range {
public:
/**
* @brief Construct a `domain_thread_range` with the specified
* `event_attributes`
*
* Example:
* ```
* nvtx3::event_attributes attr{"msg", nvtx3::rgb{127,255,0}};
 * // Creates a range with message contents "msg" and green color
 * nvtx3::domain_thread_range<> range{attr};
* ```
*
* @param[in] attr `event_attributes` that describes the desired attributes
* of the range.
*/
explicit domain_thread_range(event_attributes const& attr) noexcept
{
nvtxDomainRangePushEx(domain::get<D>(), attr.get());
}
/**
* @brief Constructs a `domain_thread_range` from the constructor arguments
* of an `event_attributes`.
*
* Forwards the arguments `first, args...` to construct an
* `event_attributes` object. The `event_attributes` object is then
* associated with the `domain_thread_range`.
*
* For more detail, see `event_attributes` documentation.
*
* Example:
* ```
* // Creates a range with message "message" and green color
* nvtx3::domain_thread_range<> r{"message", nvtx3::rgb{127,255,0}};
* ```
*
* @note To prevent making needless copies of `event_attributes` objects,
* this constructor is disabled when the first argument is an
* `event_attributes` object, instead preferring the explicit
* `domain_thread_range(event_attributes const&)` constructor.
*
* @param[in] first First argument to forward to the `event_attributes`
* constructor.
* @param[in] args Variadic parameter pack of additional arguments to
* forward.
*
*/
template <typename First,
typename... Args,
            typename = std::enable_if_t<
              not std::is_same_v<event_attributes, std::decay_t<First>>>>
explicit domain_thread_range(First const& first, Args const&... args) noexcept
: domain_thread_range{event_attributes{first, args...}}
{
}
/**
* @brief Default constructor creates a `domain_thread_range` with no
* message, color, payload, nor category.
*
*/
domain_thread_range() : domain_thread_range{event_attributes{}} {}
domain_thread_range(domain_thread_range const&) = delete;
domain_thread_range& operator=(domain_thread_range const&) = delete;
domain_thread_range(domain_thread_range&&) = delete;
domain_thread_range& operator=(domain_thread_range&&) = delete;
/**
* @brief Destroy the domain_thread_range, ending the NVTX range event.
*/
~domain_thread_range() noexcept { nvtxDomainRangePop(domain::get<D>()); }
};
/**
* @brief Alias for a `domain_thread_range` in the global NVTX domain.
*/
using thread_range = domain_thread_range<>;
/**
* @brief A RAII object for creating a NVTX range within a domain that can be
* created and destroyed on different threads.
*
* When constructed, begins a NVTX range in the specified domain. Upon
* destruction, ends the NVTX range.
*
* Similar to `nvtx3::domain_thread_range`, the only difference being that
* `domain_process_range` can start and end on different threads.
*
* Use of `nvtx3::domain_thread_range` should be preferred unless one needs
* the ability to start and end a range on different threads.
*
* `domain_process_range` is moveable, but not copyable.
*
* @tparam D Type containing `name` member used to identify the `domain`
* to which the `domain_process_range` belongs. Else, `domain::global` to
* indicate that the global NVTX domain should be used.
*/
template <typename D = domain::global>
class domain_process_range {
public:
  /**
   * @brief Construct a new `domain_process_range` with the specified `event_attributes`
   *
   * @param attr `event_attributes` that describes the desired attributes of the range
   */
explicit domain_process_range(event_attributes const& attr) noexcept
: range_id_{nvtxDomainRangeStartEx(domain::get<D>(), attr.get())}
{
}
  /**
   * @brief Construct a new `domain_process_range` from the constructor arguments of an
   * `event_attributes`
   *
   * @param first First argument to forward to the `event_attributes` constructor
   * @param args Variadic parameter pack of additional arguments to forward
   */
template <typename First,
typename... Args,
            typename = std::enable_if_t<
              not std::is_same_v<event_attributes, std::decay_t<First>>>>
explicit domain_process_range(First const& first, Args const&... args) noexcept
: domain_process_range{event_attributes{first, args...}}
{
}
/**
* @brief Construct a new domain process range object
*
*/
constexpr domain_process_range() noexcept : domain_process_range{event_attributes{}} {}
/**
* @brief Destroy the `domain_process_range` ending the range.
*
*/
~domain_process_range() noexcept
{
if (not moved_from_) { nvtxRangeEnd(range_id_); }
}
domain_process_range(domain_process_range const&) = delete;
domain_process_range& operator=(domain_process_range const&) = delete;
domain_process_range(domain_process_range&& other) noexcept : range_id_{other.range_id_}
{
other.moved_from_ = true;
}
  domain_process_range& operator=(domain_process_range&& other) noexcept
  {
    range_id_         = other.range_id_;
    other.moved_from_ = true;
    return *this;
  }
private:
nvtxRangeId_t range_id_; ///< Range id used to correlate
///< the start/end of the range
bool moved_from_{false}; ///< Indicates if the object has had
                            ///< its contents moved from it,
///< indicating it should not attempt
///< to end the NVTX range.
};
/**
* @brief Alias for a `domain_process_range` in the global NVTX domain.
*/
using process_range = domain_process_range<>;
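/**
 * A minimal usage sketch of a range that starts and ends on different threads
 * (`work_queue` is a hypothetical hand-off mechanism, not part of this header):
 * \code{.cpp}
 * // Producer thread: start the range and hand the owning object off
 * auto r = std::make_unique<nvtx3::process_range>("async work");
 * work_queue.push(std::move(r));
 *
 * // Worker thread: finish the work, then end the range by destroying the object
 * std::unique_ptr<nvtx3::process_range> r2 = work_queue.pop();
 * r2.reset();
 * \endcode
 */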
/**
* @brief Annotates an instantaneous point in time with the attributes specified
* by `attr`.
*
* Unlike a "range", a mark is an instantaneous event in an application, e.g.,
* locking/unlocking a mutex.
*
* \code{.cpp}
* std::mutex global_lock;
* void lock_mutex(){
* global_lock.lock();
* nvtx3::mark("lock_mutex");
* }
* \endcode
*
* @tparam D Type containing `name` member used to identify the `domain`
 * to which the mark belongs. Else, `domain::global` to
* indicate that the global NVTX domain should be used.
* @param[in] attr `event_attributes` that describes the desired attributes
* of the mark.
*/
template <typename D = nvtx3::domain::global>
inline void mark(event_attributes const& attr) noexcept
{
nvtxDomainMarkEx(domain::get<D>(), attr.get());
}
} // namespace nvtx3
/**
* @brief Convenience macro for generating a range in the specified `domain`
* from the lifetime of a function
*
* This macro is useful for generating an NVTX range in `domain` from
* the entry point of a function to its exit. It is intended to be the first
* line of the function.
*
* Constructs a static `registered_message` using the name of the immediately
* enclosing function returned by `__func__` and constructs a
 * `nvtx3::domain_thread_range<D>` using the registered function name as the range's
* message.
*
* Example:
* ```
* struct my_domain{static constexpr char const* name{"my_domain"};};
*
* void foo(...){
* NVTX3_FUNC_RANGE_IN(my_domain); // Range begins on entry to foo()
* // do stuff
* ...
* } // Range ends on return from foo()
* ```
*
* @param[in] D Type containing `name` member used to identify the
* `domain` to which the `registered_message` belongs. Else,
* `domain::global` to indicate that the global NVTX domain should be used.
*/
#define NVTX3_FUNC_RANGE_IN(D) \
static ::nvtx3::registered_message<D> const nvtx3_func_name__{__func__}; \
static ::nvtx3::event_attributes const nvtx3_func_attr__{nvtx3_func_name__}; \
[[maybe_unused]] ::nvtx3::domain_thread_range<D> const nvtx3_range__{nvtx3_func_attr__};
/**
* @brief Convenience macro for generating a range in the global domain from the
* lifetime of a function.
*
* This macro is useful for generating an NVTX range in the global domain from
* the entry point of a function to its exit. It is intended to be the first
* line of the function.
*
* Constructs a static `registered_message` using the name of the immediately
* enclosing function returned by `__func__` and constructs a
* `nvtx3::thread_range` using the registered function name as the range's
* message.
*
* Example:
* ```
* void foo(...){
* NVTX3_FUNC_RANGE(); // Range begins on entry to foo()
* // do stuff
* ...
* } // Range ends on return from foo()
* ```
*/
#define NVTX3_FUNC_RANGE() NVTX3_FUNC_RANGE_IN(::nvtx3::domain::global)
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/tdigest/tdigest.hpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace tdigest {
namespace detail {
/**
* @brief Generate a tdigest column from a grouped, sorted set of numeric input values.
*
* The input is expected to be sorted in ascending order within each group, with
 * nulls at the end. The tdigest column produced has the following structure:
 *
 * struct {
* // centroids for the digest
* list {
* struct {
* double // mean
* double // weight
* },
* ...
* }
* // these are from the input stream, not the centroids. they are used
* // during the percentile_approx computation near the beginning or
* // end of the quantiles
* double // min
* double // max
* }
*
 * Each output row is a single tdigest. The length of each row's centroid list is the
 * "size" of the tdigest; each list element represents a weighted centroid (mean, weight).
*
 * @param values Grouped and sorted numeric values from which to compute the tdigests.
* @param group_offsets Offsets of groups' starting points within @p values.
* @param group_labels 0-based ID of group that the corresponding value belongs to
* @param group_valid_counts Per-group counts of valid elements.
* @param num_groups Number of groups.
* @param max_centroids Parameter controlling the level of compression of the tdigest. Higher
* values result in a larger, more precise tdigest.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory
*
* @returns tdigest column, with 1 tdigest per row
*/
std::unique_ptr<column> group_tdigest(column_view const& values,
cudf::device_span<size_type const> group_offsets,
cudf::device_span<size_type const> group_labels,
cudf::device_span<size_type const> group_valid_counts,
size_type num_groups,
int max_centroids,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
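/**
 * A minimal usage sketch (`sorted_vals`, `offsets`, `labels`, `valid_counts`, and
 * `num_groups` are hypothetical inputs produced by a sort-based groupby; the value
 * 1000 for max_centroids is arbitrary):
 * \code{.cpp}
 * auto tdigests = cudf::tdigest::detail::group_tdigest(
 *   sorted_vals, offsets, labels, valid_counts, num_groups,
 *   1000,  // max_centroids
 *   cudf::get_default_stream(),
 *   rmm::mr::get_current_device_resource());
 * \endcode
 */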
/**
* @brief Merges tdigests within the same group to generate a new tdigest.
*
* The tdigest column produced is of the following structure:
*
* struct {
* // centroids for the digest
* list {
* struct {
* double // mean
* double // weight
* },
* ...
* }
* // these are from the input stream, not the centroids. they are used
* // during the percentile_approx computation near the beginning or
* // end of the quantiles
* double // min
* double // max
* }
*
 * Each output row is a single tdigest. The length of each row's centroid list is the
 * "size" of the tdigest; each list element represents a weighted centroid (mean, weight).
*
* @param values Grouped tdigests to merge.
* @param group_offsets Offsets of groups' starting points within @p values.
* @param group_labels 0-based ID of group that the corresponding value belongs to
* @param num_groups Number of groups.
* @param max_centroids Parameter controlling the level of compression of the tdigest. Higher
* values result in a larger, more precise tdigest.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory
*
* @returns tdigest column, with 1 tdigest per row
*/
std::unique_ptr<column> group_merge_tdigest(column_view const& values,
cudf::device_span<size_type const> group_offsets,
cudf::device_span<size_type const> group_labels,
size_type num_groups,
int max_centroids,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @brief Create a tdigest column from its constituent components.
*
* @param num_rows The number of rows in the output column.
* @param centroid_means The inner means column. These values are partitioned into lists by the
* `tdigest_offsets` column.
* @param centroid_weights The inner weights column. These values are partitioned into lists by the
* `tdigest_offsets` column.
* @param tdigest_offsets Offsets representing each individual tdigest in the output column. The
* offsets partition the centroid means and weights.
* @param min_values Column representing the minimum input value for each tdigest.
* @param max_values Column representing the maximum input value for each tdigest.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory.
*
* @returns The constructed tdigest column.
*/
std::unique_ptr<column> make_tdigest_column(size_type num_rows,
std::unique_ptr<column>&& centroid_means,
std::unique_ptr<column>&& centroid_weights,
std::unique_ptr<column>&& tdigest_offsets,
std::unique_ptr<column>&& min_values,
std::unique_ptr<column>&& max_values,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
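/**
 * A minimal usage sketch (the component columns `means`, `weights`, `offsets`,
 * `mins`, and `maxes` are hypothetical and must follow the layout described above,
 * e.g. `offsets` has `num_rows + 1` entries partitioning the centroids):
 * \code{.cpp}
 * auto tdigest_col = cudf::tdigest::detail::make_tdigest_column(
 *   num_rows,
 *   std::move(means), std::move(weights), std::move(offsets),
 *   std::move(mins), std::move(maxes),
 *   cudf::get_default_stream(), rmm::mr::get_current_device_resource());
 * \endcode
 */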
/**
* @brief Create an empty tdigest column.
*
* An empty tdigest column contains a single row of length 0
*
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory.
*
* @returns An empty tdigest column.
*/
std::unique_ptr<column> make_empty_tdigest_column(rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @brief Create an empty tdigest scalar.
*
* An empty tdigest scalar is a struct_scalar that contains a single row of length 0
*
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory.
*
* @returns An empty tdigest scalar.
*/
std::unique_ptr<scalar> make_empty_tdigest_scalar(rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @brief Generate a tdigest scalar from a set of numeric input values.
*
* The tdigest scalar produced is of the following structure:
 *
 * struct {
* // centroids for the digest
* list {
* struct {
* double // mean
* double // weight
* },
* ...
* }
* // these are from the input stream, not the centroids. they are used
* // during the percentile_approx computation near the beginning or
* // end of the quantiles
* double // min
* double // max
* }
*
 * @param values Numeric values from which to compute the tdigest.
* @param max_centroids Parameter controlling the level of compression of the tdigest. Higher
* values result in a larger, more precise tdigest.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned scalar's device memory
*
* @returns tdigest scalar
*/
std::unique_ptr<scalar> reduce_tdigest(column_view const& values,
int max_centroids,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
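/**
 * A minimal usage sketch (`vals` is a hypothetical numeric column_view; the value
 * 1000 for max_centroids is arbitrary):
 * \code{.cpp}
 * auto digest = cudf::tdigest::detail::reduce_tdigest(
 *   vals,
 *   1000,  // max_centroids
 *   cudf::get_default_stream(),
 *   rmm::mr::get_current_device_resource());
 * \endcode
 */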
/**
* @brief Merges multiple tdigest columns to generate a new tdigest scalar.
*
* The tdigest scalar produced is of the following structure:
*
* struct {
* // centroids for the digest
* list {
* struct {
* double // mean
* double // weight
* },
* ...
* }
* // these are from the input stream, not the centroids. they are used
* // during the percentile_approx computation near the beginning or
* // end of the quantiles
* double // min
* double // max
* }
*
 * @param input tdigests to merge.
* @param max_centroids Parameter controlling the level of compression of the tdigest. Higher
* values result in a larger, more precise tdigest.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned scalar's device memory
*
 * @returns tdigest scalar containing the merged result
*/
std::unique_ptr<scalar> reduce_merge_tdigest(column_view const& input,
int max_centroids,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace tdigest
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/groupby/sort_helper.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
namespace cudf {
namespace groupby {
namespace detail {
namespace sort {
/**
* @brief Helper class for computing sort-based groupby
*
* This class serves the purpose of sorting the keys and values and provides
* building blocks for aggregations. It can provide:
* 1. On-demand grouping or sorting of a value column based on `keys`
* which is provided at construction
* 2. Group offsets: starting offsets of all groups in sorted key table
* 3. Group valid sizes: The number of valid values in each group in a sorted
* value column
*/
struct sort_groupby_helper {
using index_vector = rmm::device_uvector<size_type>;
using bitmask_vector = rmm::device_uvector<bitmask_type>;
using column_ptr = std::unique_ptr<column>;
using index_vector_ptr = std::unique_ptr<index_vector>;
using bitmask_vector_ptr = std::unique_ptr<bitmask_vector>;
/**
* @brief Construct a new helper object
*
* If `include_null_keys == NO`, then any row in `keys` containing a null
* value will effectively be discarded. I.e., any values corresponding to
* discarded rows in `keys` will not contribute to any aggregation.
*
* @param keys table to group by
* @param include_null_keys Include rows in keys with nulls
* @param keys_pre_sorted Indicate if the keys are already sorted. Enables
* optimizations to help skip re-sorting keys.
* @param null_precedence Indicates the ordering of nulls in each column.
* Default behavior for each column is
* `null_order::AFTER`
*/
sort_groupby_helper(table_view const& keys,
null_policy include_null_keys,
sorted keys_pre_sorted,
std::vector<null_order> const& null_precedence);
~sort_groupby_helper() = default;
sort_groupby_helper(sort_groupby_helper const&) = delete;
sort_groupby_helper& operator=(sort_groupby_helper const&) = delete;
sort_groupby_helper(sort_groupby_helper&&) = default;
sort_groupby_helper& operator=(sort_groupby_helper&&) = default;
/**
* @brief Groups a column of values according to `keys` and sorts within each
* group.
*
* Groups the @p values where the groups are dictated by key table and each
* group is sorted in ascending order, with NULL elements positioned at the
* end of each group.
*
* @throw cudf::logic_error if `values.size() != keys.num_rows()`
*
* @param values The value column to group and sort
* @return the sorted and grouped column
*/
std::unique_ptr<column> sorted_values(column_view const& values,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @brief Groups a column of values according to `keys`
*
* The values within each group maintain their original order.
*
* @throw cudf::logic_error if `values.size() != keys.num_rows()`
*
* @param values The value column to group
* @return the grouped column
*/
std::unique_ptr<column> grouped_values(column_view const& values,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @brief Get a table of sorted unique keys
*
* @return a new table in which each row is a unique row in the sorted key table.
*/
std::unique_ptr<table> unique_keys(rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @brief Get a table of sorted keys
*
* @return a new table containing the sorted keys.
*/
std::unique_ptr<table> sorted_keys(rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @brief Get the number of groups in `keys`
*/
size_type num_groups(rmm::cuda_stream_view stream) { return group_offsets(stream).size() - 1; }
/**
* @brief check if the groupby keys are presorted
*/
bool is_presorted() { return _keys_pre_sorted == sorted::YES; }
/**
* @brief Return the effective number of keys
*
   * When `include_null_keys == YES`, the returned value is the same as `keys.num_rows()`.
   * When `include_null_keys == NO`, the returned value is the number of rows in `keys`
   * in which no element is null.
*/
size_type num_keys(rmm::cuda_stream_view stream);
/**
* @brief Get the sorted order of `keys`.
*
* Gathering `keys` by sort order indices will produce the sorted key table.
*
   * When `include_null_keys == NO`, the result will not include indices
   * for null keys.
*
* Computes and stores the key sorted order on first invocation, and returns
* the stored order on subsequent calls.
*
* @return the sort order indices for `keys`.
*/
column_view key_sort_order(rmm::cuda_stream_view stream);
/**
* @brief Get each group's offset into the sorted order of `keys`.
*
* Computes and stores the group offsets on first invocation and returns
* the stored group offsets on subsequent calls.
* This returns a vector of size `num_groups() + 1` such that the size of
* group `i` is `group_offsets[i+1] - group_offsets[i]`
*
* @return vector of offsets of the starting point of each group in the sorted
* key table
*/
index_vector const& group_offsets(rmm::cuda_stream_view stream);
/**
* @brief Get the group labels corresponding to the sorted order of `keys`.
*
* Each group is assigned a unique numerical "label" in
   * the half-open range `[0, num_groups(stream))`.
* For a row in sorted `keys`, its corresponding group label indicates which
* group it belongs to.
*
* Computes and stores labels on first invocation and returns stored labels on
* subsequent calls.
*
* @return vector of group labels for each row in the sorted key column
*/
index_vector const& group_labels(rmm::cuda_stream_view stream);
private:
/**
* @brief Get the group labels for unsorted keys
*
* Returns the group label for every row in the original `keys` table. For a
* given unique key row, its group label is equivalent to what is returned by
* `group_labels(stream)`. However, if a row contains a null value, and
* `include_null_keys == NO`, then its label is NULL.
*
* Computes and stores unsorted labels on first invocation and returns stored
* labels on subsequent calls.
*
* @return A nullable column of `INT32` containing group labels in the order
* of the unsorted key table
*/
column_view unsorted_keys_labels(rmm::cuda_stream_view stream);
/**
* @brief Get the column representing the row bitmask for the `keys`
*
* Computes a bitmask corresponding to the rows of `keys` where if bit `i` is
* zero, then row `i` contains one or more null values. If bit `i` is one,
* then row `i` does not contain null values. This bitmask is added as null
* mask of a column of type `INT8` where all the data values are the same and
* the elements differ only in validity.
*
* Computes and stores bitmask on first invocation and returns stored column
* on subsequent calls.
*/
column_view keys_bitmask_column(rmm::cuda_stream_view stream);
private:
column_ptr _key_sorted_order; ///< Indices to produce _keys in sorted order
column_ptr _unsorted_keys_labels; ///< Group labels for unsorted _keys
  column_ptr _keys_bitmask_column;  ///< Column representing rows with one or more null values
table_view _keys; ///< Input keys to sort by
index_vector_ptr
    _group_offsets;  ///< Indices into sorted _keys indicating the starting index of each group
index_vector_ptr _group_labels; ///< Group labels for sorted _keys
size_type _num_keys; ///< Number of effective rows in _keys (adjusted for _include_null_keys)
sorted _keys_pre_sorted; ///< Whether _keys are pre-sorted
null_policy _include_null_keys; ///< Whether to use rows with nulls in _keys for grouping
std::vector<null_order> _null_precedence; ///< How to sort NULLs
};
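/**
 * A minimal usage sketch (`keys` is a hypothetical table_view and `vals` a
 * hypothetical column_view with `keys.num_rows()` rows):
 * \code{.cpp}
 * sort_groupby_helper helper{keys, null_policy::EXCLUDE, sorted::NO, {}};
 * auto stream  = cudf::get_default_stream();
 * auto mr      = rmm::mr::get_current_device_resource();
 * auto grouped = helper.sorted_values(vals, stream, mr);   // grouped and sorted values
 * auto const& offsets = helper.group_offsets(stream);      // per-group start offsets
 * auto const n_groups = helper.num_groups(stream);
 * \endcode
 */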
} // namespace sort
} // namespace detail
} // namespace groupby
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/groupby/group_replace_nulls.hpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_view.hpp>
#include <cudf/replace.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/exec_policy.hpp>
namespace cudf {
namespace groupby {
namespace detail {
/**
 * @brief Internal API to replace nulls with preceding/following non-null values in
 * @p grouped_value
*
* @param[in] grouped_value A column whose null values will be replaced.
* @param[in] group_labels Group labels for @p grouped_value, corresponding to group keys.
* @param[in] replace_policy Specify the position of replacement values relative to null values.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param[in] mr Device memory resource used to allocate device memory of the returned column.
*/
std::unique_ptr<column> group_replace_nulls(cudf::column_view const& grouped_value,
device_span<size_type const> group_labels,
cudf::replace_policy replace_policy,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
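/**
 * A minimal usage sketch (`grouped_vals` and `labels` are hypothetical inputs,
 * e.g. the grouped values and group labels produced by a sort-based groupby):
 * \code{.cpp}
 * auto filled = cudf::groupby::detail::group_replace_nulls(
 *   grouped_vals,
 *   labels,
 *   cudf::replace_policy::PRECEDING,  // fill nulls from the preceding non-null value
 *   cudf::get_default_stream(),
 *   rmm::mr::get_current_device_resource());
 * \endcode
 */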
} // namespace detail
} // namespace groupby
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/aggregation/result_cache.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/hashing/detail/hashing.hpp>
#include <cudf/types.hpp>
#include <unordered_map>
namespace cudf {
namespace detail {
struct pair_column_aggregation_equal_to {
bool operator()(std::pair<column_view, aggregation const&> const& lhs,
std::pair<column_view, aggregation const&> const& rhs) const
{
return is_shallow_equivalent(lhs.first, rhs.first) and lhs.second.is_equal(rhs.second);
}
};
struct pair_column_aggregation_hash {
size_t operator()(std::pair<column_view, aggregation const&> const& key) const
{
return cudf::hashing::detail::hash_combine(shallow_hash(key.first), key.second.do_hash());
}
};
class result_cache {
public:
result_cache() = delete;
~result_cache() = default;
result_cache(result_cache const&) = delete;
result_cache& operator=(result_cache const& other) = delete;
result_cache(size_t num_columns) : _cache(num_columns) {}
[[nodiscard]] bool has_result(column_view const& input, aggregation const& agg) const;
void add_result(column_view const& input, aggregation const& agg, std::unique_ptr<column>&& col);
[[nodiscard]] column_view get_result(column_view const& input, aggregation const& agg) const;
std::unique_ptr<column> release_result(column_view const& input, aggregation const& agg);
private:
std::unordered_map<std::pair<column_view, std::reference_wrapper<aggregation const>>,
std::pair<std::unique_ptr<aggregation>, std::unique_ptr<column>>,
pair_column_aggregation_hash,
pair_column_aggregation_equal_to>
_cache;
};
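/**
 * A minimal usage sketch (`num_requests`, `col`, `agg`, and `result` are hypothetical;
 * `agg` is an aggregation and `result` a previously computed column):
 * \code{.cpp}
 * cudf::detail::result_cache cache{num_requests};
 * if (not cache.has_result(col, agg)) { cache.add_result(col, agg, std::move(result)); }
 * cudf::column_view cached = cache.get_result(col, agg);                  // non-owning view
 * std::unique_ptr<cudf::column> owned = cache.release_result(col, agg);   // take ownership
 * \endcode
 */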
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/aggregation/aggregation.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/aggregation.hpp>
#include <cudf/detail/utilities/assert.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <functional>
#include <numeric>
namespace cudf {
namespace detail {
// Visitor pattern
class simple_aggregations_collector { // Declares the interface for the simple aggregations
// collector
public:
  // Declare overloads for each kind of aggregation to dispatch
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class sum_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class product_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class min_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class max_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class count_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class histogram_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class any_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class all_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(
data_type col_type, class sum_of_squares_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class mean_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class m2_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class var_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class std_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class median_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class quantile_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class argmax_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class argmin_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class nunique_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class nth_element_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class row_number_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class rank_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(
data_type col_type, class collect_list_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class collect_set_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class lead_lag_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class udf_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class merge_lists_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class merge_sets_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class merge_m2_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(
data_type col_type, class merge_histogram_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class covariance_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class correlation_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(data_type col_type,
class tdigest_aggregation const& agg);
virtual std::vector<std::unique_ptr<aggregation>> visit(
data_type col_type, class merge_tdigest_aggregation const& agg);
};
class aggregation_finalizer { // Declares the interface for the finalizer
public:
  // Declare overloads for each kind of aggregation to dispatch
virtual void visit(aggregation const& agg);
virtual void visit(class sum_aggregation const& agg);
virtual void visit(class product_aggregation const& agg);
virtual void visit(class min_aggregation const& agg);
virtual void visit(class max_aggregation const& agg);
virtual void visit(class count_aggregation const& agg);
virtual void visit(class histogram_aggregation const& agg);
virtual void visit(class any_aggregation const& agg);
virtual void visit(class all_aggregation const& agg);
virtual void visit(class sum_of_squares_aggregation const& agg);
virtual void visit(class mean_aggregation const& agg);
virtual void visit(class m2_aggregation const& agg);
virtual void visit(class var_aggregation const& agg);
virtual void visit(class std_aggregation const& agg);
virtual void visit(class median_aggregation const& agg);
virtual void visit(class quantile_aggregation const& agg);
virtual void visit(class argmax_aggregation const& agg);
virtual void visit(class argmin_aggregation const& agg);
virtual void visit(class nunique_aggregation const& agg);
virtual void visit(class nth_element_aggregation const& agg);
virtual void visit(class row_number_aggregation const& agg);
virtual void visit(class rank_aggregation const& agg);
virtual void visit(class collect_list_aggregation const& agg);
virtual void visit(class collect_set_aggregation const& agg);
virtual void visit(class lead_lag_aggregation const& agg);
virtual void visit(class udf_aggregation const& agg);
virtual void visit(class merge_lists_aggregation const& agg);
virtual void visit(class merge_sets_aggregation const& agg);
virtual void visit(class merge_m2_aggregation const& agg);
virtual void visit(class merge_histogram_aggregation const& agg);
virtual void visit(class covariance_aggregation const& agg);
virtual void visit(class correlation_aggregation const& agg);
virtual void visit(class tdigest_aggregation const& agg);
virtual void visit(class merge_tdigest_aggregation const& agg);
};
/**
* @brief Derived class for specifying a sum aggregation
*/
class sum_aggregation final : public rolling_aggregation,
public groupby_aggregation,
public groupby_scan_aggregation,
public reduce_aggregation,
public scan_aggregation,
public segmented_reduce_aggregation {
public:
sum_aggregation() : aggregation(SUM) {}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<sum_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
};
/**
* @brief Derived class for specifying a product aggregation
*/
class product_aggregation final : public groupby_aggregation,
public reduce_aggregation,
public scan_aggregation,
public segmented_reduce_aggregation {
public:
product_aggregation() : aggregation(PRODUCT) {}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<product_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
};
/**
* @brief Derived class for specifying a min aggregation
*/
class min_aggregation final : public rolling_aggregation,
public groupby_aggregation,
public groupby_scan_aggregation,
public reduce_aggregation,
public scan_aggregation,
public segmented_reduce_aggregation {
public:
min_aggregation() : aggregation(MIN) {}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<min_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
};
/**
* @brief Derived class for specifying a max aggregation
*/
class max_aggregation final : public rolling_aggregation,
public groupby_aggregation,
public groupby_scan_aggregation,
public reduce_aggregation,
public scan_aggregation,
public segmented_reduce_aggregation {
public:
max_aggregation() : aggregation(MAX) {}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<max_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
};
/**
* @brief Derived class for specifying a count aggregation
*/
class count_aggregation final : public rolling_aggregation,
public groupby_aggregation,
public groupby_scan_aggregation {
public:
count_aggregation(aggregation::Kind kind) : aggregation(kind) {}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<count_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
};
/**
* @brief Derived class for specifying a histogram aggregation
*/
class histogram_aggregation final : public groupby_aggregation, public reduce_aggregation {
public:
histogram_aggregation() : aggregation(HISTOGRAM) {}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<histogram_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
};
/**
* @brief Derived class for specifying an any aggregation
*/
class any_aggregation final : public reduce_aggregation, public segmented_reduce_aggregation {
public:
any_aggregation() : aggregation(ANY) {}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<any_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
};
/**
* @brief Derived class for specifying an all aggregation
*/
class all_aggregation final : public reduce_aggregation, public segmented_reduce_aggregation {
public:
all_aggregation() : aggregation(ALL) {}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<all_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
};
/**
* @brief Derived class for specifying a sum_of_squares aggregation
*/
class sum_of_squares_aggregation final : public groupby_aggregation,
public reduce_aggregation,
public segmented_reduce_aggregation {
public:
sum_of_squares_aggregation() : aggregation(SUM_OF_SQUARES) {}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<sum_of_squares_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
};
/**
* @brief Derived class for specifying a mean aggregation
*/
class mean_aggregation final : public rolling_aggregation,
public groupby_aggregation,
public reduce_aggregation,
public segmented_reduce_aggregation {
public:
mean_aggregation() : aggregation(MEAN) {}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<mean_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
};
/**
* @brief Derived class for specifying a m2 aggregation
*/
class m2_aggregation : public groupby_aggregation {
public:
m2_aggregation() : aggregation{M2} {}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<m2_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
};
/**
* @brief Derived class for specifying a standard deviation/variance aggregation
*/
class std_var_aggregation : public rolling_aggregation,
public groupby_aggregation,
public reduce_aggregation,
public segmented_reduce_aggregation {
public:
size_type _ddof; ///< Delta degrees of freedom
[[nodiscard]] bool is_equal(aggregation const& _other) const override
{
if (!this->aggregation::is_equal(_other)) { return false; }
auto const& other = dynamic_cast<std_var_aggregation const&>(_other);
return _ddof == other._ddof;
}
[[nodiscard]] size_t do_hash() const override
{
return this->aggregation::do_hash() ^ hash_impl();
}
protected:
std_var_aggregation(aggregation::Kind k, size_type ddof) : rolling_aggregation(k), _ddof{ddof}
{
CUDF_EXPECTS(k == aggregation::STD or k == aggregation::VARIANCE,
"std_var_aggregation can accept only STD, VARIANCE");
}
[[nodiscard]] size_type hash_impl() const { return std::hash<size_type>{}(_ddof); }
};
/**
* @brief Derived class for specifying a variance aggregation
*/
class var_aggregation final : public std_var_aggregation {
public:
var_aggregation(size_type ddof)
: aggregation{aggregation::VARIANCE}, std_var_aggregation{aggregation::VARIANCE, ddof}
{
}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<var_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
};
/**
* @brief Derived class for specifying a standard deviation aggregation
*/
class std_aggregation final : public std_var_aggregation {
public:
std_aggregation(size_type ddof)
: aggregation{aggregation::STD}, std_var_aggregation{aggregation::STD, ddof}
{
}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<std_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
};
/**
* @brief Derived class for specifying a median aggregation
*/
class median_aggregation final : public groupby_aggregation, public reduce_aggregation {
public:
median_aggregation() : aggregation(MEDIAN) {}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<median_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
};
/**
* @brief Derived class for specifying a quantile aggregation
*/
class quantile_aggregation final : public groupby_aggregation, public reduce_aggregation {
public:
quantile_aggregation(std::vector<double> const& q, interpolation i)
: aggregation{QUANTILE}, _quantiles{q}, _interpolation{i}
{
}
std::vector<double> _quantiles; ///< Desired quantile(s)
interpolation _interpolation; ///< Desired interpolation
[[nodiscard]] bool is_equal(aggregation const& _other) const override
{
if (!this->aggregation::is_equal(_other)) { return false; }
auto const& other = dynamic_cast<quantile_aggregation const&>(_other);
return _interpolation == other._interpolation &&
std::equal(_quantiles.begin(), _quantiles.end(), other._quantiles.begin());
}
[[nodiscard]] size_t do_hash() const override
{
return this->aggregation::do_hash() ^ hash_impl();
}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<quantile_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
private:
size_t hash_impl() const
{
return std::hash<int>{}(static_cast<int>(_interpolation)) ^
std::accumulate(
_quantiles.cbegin(), _quantiles.cend(), size_t{0}, [](size_t a, double b) {
return a ^ std::hash<double>{}(b);
});
}
};
/**
* @brief Derived class for specifying an argmax aggregation
*/
class argmax_aggregation final : public rolling_aggregation, public groupby_aggregation {
public:
argmax_aggregation() : aggregation(ARGMAX) {}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<argmax_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
};
/**
* @brief Derived class for specifying an argmin aggregation
*/
class argmin_aggregation final : public rolling_aggregation, public groupby_aggregation {
public:
argmin_aggregation() : aggregation(ARGMIN) {}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<argmin_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
};
/**
* @brief Derived class for specifying a nunique aggregation
*/
class nunique_aggregation final : public groupby_aggregation,
public reduce_aggregation,
public segmented_reduce_aggregation {
public:
nunique_aggregation(null_policy null_handling)
: aggregation{NUNIQUE}, _null_handling{null_handling}
{
}
null_policy _null_handling; ///< include or exclude nulls
[[nodiscard]] bool is_equal(aggregation const& _other) const override
{
if (!this->aggregation::is_equal(_other)) { return false; }
auto const& other = dynamic_cast<nunique_aggregation const&>(_other);
return _null_handling == other._null_handling;
}
[[nodiscard]] size_t do_hash() const override
{
return this->aggregation::do_hash() ^ hash_impl();
}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<nunique_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
private:
size_t hash_impl() const { return std::hash<int>{}(static_cast<int>(_null_handling)); }
};
/**
* @brief Derived class for specifying a nth element aggregation
*/
class nth_element_aggregation final : public groupby_aggregation,
public reduce_aggregation,
public rolling_aggregation {
public:
nth_element_aggregation(size_type n, null_policy null_handling)
: aggregation{NTH_ELEMENT}, _n{n}, _null_handling{null_handling}
{
}
size_type _n; ///< nth index to return
null_policy _null_handling; ///< include or exclude nulls
[[nodiscard]] bool is_equal(aggregation const& _other) const override
{
if (!this->aggregation::is_equal(_other)) { return false; }
auto const& other = dynamic_cast<nth_element_aggregation const&>(_other);
return _n == other._n and _null_handling == other._null_handling;
}
[[nodiscard]] size_t do_hash() const override
{
return this->aggregation::do_hash() ^ hash_impl();
}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<nth_element_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
private:
size_t hash_impl() const
{
return std::hash<size_type>{}(_n) ^ std::hash<int>{}(static_cast<int>(_null_handling));
}
};
/**
* @brief Derived class for specifying a row_number aggregation
*/
class row_number_aggregation final : public rolling_aggregation {
public:
row_number_aggregation() : aggregation(ROW_NUMBER) {}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<row_number_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
};
/**
* @brief Derived class for specifying a rank aggregation
*/
class rank_aggregation final : public rolling_aggregation,
public groupby_scan_aggregation,
public scan_aggregation {
public:
rank_aggregation(rank_method method,
order column_order,
null_policy null_handling,
null_order null_precedence,
rank_percentage percentage)
: aggregation{RANK},
_method{method},
_column_order{column_order},
_null_handling{null_handling},
_null_precedence{null_precedence},
_percentage(percentage)
{
}
rank_method const _method; ///< rank method
order const _column_order; ///< order of the column to rank
null_policy const _null_handling; ///< include or exclude nulls in ranks
null_order const _null_precedence; ///< order of nulls in ranks
rank_percentage const _percentage; ///< whether to return percentage ranks
[[nodiscard]] bool is_equal(aggregation const& _other) const override
{
if (!this->aggregation::is_equal(_other)) { return false; }
auto const& other = dynamic_cast<rank_aggregation const&>(_other);
return _method == other._method and _null_handling == other._null_handling and
_column_order == other._column_order and _null_precedence == other._null_precedence and
_percentage == other._percentage;
}
[[nodiscard]] size_t do_hash() const override
{
return this->aggregation::do_hash() ^ hash_impl();
}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<rank_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
private:
[[nodiscard]] size_t hash_impl() const
{
return std::hash<int>{}(static_cast<int>(_method)) ^
std::hash<int>{}(static_cast<int>(_column_order)) ^
std::hash<int>{}(static_cast<int>(_null_handling)) ^
std::hash<int>{}(static_cast<int>(_null_precedence)) ^
std::hash<int>{}(static_cast<int>(_percentage));
}
};
/**
* @brief Derived aggregation class for specifying COLLECT_LIST aggregation
*/
class collect_list_aggregation final : public rolling_aggregation,
public groupby_aggregation,
public reduce_aggregation {
public:
explicit collect_list_aggregation(null_policy null_handling = null_policy::INCLUDE)
: aggregation{COLLECT_LIST}, _null_handling{null_handling}
{
}
null_policy _null_handling; ///< include or exclude nulls
[[nodiscard]] bool is_equal(aggregation const& _other) const override
{
if (!this->aggregation::is_equal(_other)) { return false; }
auto const& other = dynamic_cast<collect_list_aggregation const&>(_other);
return (_null_handling == other._null_handling);
}
[[nodiscard]] size_t do_hash() const override
{
return this->aggregation::do_hash() ^ hash_impl();
}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<collect_list_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
private:
size_t hash_impl() const { return std::hash<int>{}(static_cast<int>(_null_handling)); }
};
/**
* @brief Derived aggregation class for specifying COLLECT_SET aggregation
*/
class collect_set_aggregation final : public rolling_aggregation,
public groupby_aggregation,
public reduce_aggregation {
public:
explicit collect_set_aggregation(null_policy null_handling = null_policy::INCLUDE,
null_equality nulls_equal = null_equality::EQUAL,
nan_equality nans_equal = nan_equality::UNEQUAL)
: aggregation{COLLECT_SET},
_null_handling{null_handling},
_nulls_equal(nulls_equal),
_nans_equal(nans_equal)
{
}
null_policy _null_handling; ///< include or exclude nulls
null_equality _nulls_equal; ///< whether to consider nulls as equal values
nan_equality _nans_equal; ///< whether to consider NaNs as equal value (applicable only to
///< floating point types)
[[nodiscard]] bool is_equal(aggregation const& _other) const override
{
if (!this->aggregation::is_equal(_other)) { return false; }
auto const& other = dynamic_cast<collect_set_aggregation const&>(_other);
return (_null_handling == other._null_handling && _nulls_equal == other._nulls_equal &&
_nans_equal == other._nans_equal);
}
[[nodiscard]] size_t do_hash() const override
{
return this->aggregation::do_hash() ^ hash_impl();
}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<collect_set_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
protected:
size_t hash_impl() const
{
return std::hash<int>{}(static_cast<int>(_null_handling) ^ static_cast<int>(_nulls_equal) ^
static_cast<int>(_nans_equal));
}
};
/**
* @brief Derived aggregation class for specifying LEAD/LAG window aggregations
*/
class lead_lag_aggregation final : public rolling_aggregation {
public:
lead_lag_aggregation(Kind kind, size_type offset)
: aggregation{offset < 0 ? (kind == LAG ? LEAD : LAG) : kind}, row_offset{std::abs(offset)}
{
}
[[nodiscard]] bool is_equal(aggregation const& _other) const override
{
if (!this->aggregation::is_equal(_other)) { return false; }
auto const& other = dynamic_cast<lead_lag_aggregation const&>(_other);
return (row_offset == other.row_offset);
}
[[nodiscard]] size_t do_hash() const override
{
return this->aggregation::do_hash() ^ hash_impl();
}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<lead_lag_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
size_type row_offset;
private:
[[nodiscard]] size_t hash_impl() const { return std::hash<size_type>()(row_offset); }
};
/**
* @brief Derived class for specifying a custom aggregation
* specified in udf
*/
class udf_aggregation final : public rolling_aggregation {
public:
udf_aggregation(aggregation::Kind type,
std::string const& user_defined_aggregator,
data_type output_type)
: aggregation{type},
_source{user_defined_aggregator},
_operator_name{(type == aggregation::PTX) ? "rolling_udf_ptx" : "rolling_udf_cuda"},
_function_name{"rolling_udf"},
_output_type{output_type}
{
CUDF_EXPECTS(type == aggregation::PTX or type == aggregation::CUDA,
"udf_aggregation can accept only PTX, CUDA");
}
[[nodiscard]] bool is_equal(aggregation const& _other) const override
{
if (!this->aggregation::is_equal(_other)) { return false; }
auto const& other = dynamic_cast<udf_aggregation const&>(_other);
return (_source == other._source and _operator_name == other._operator_name and
_function_name == other._function_name and _output_type == other._output_type);
}
[[nodiscard]] size_t do_hash() const override
{
return this->aggregation::do_hash() ^ hash_impl();
}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<udf_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
std::string const _source;
std::string const _operator_name;
std::string const _function_name;
data_type _output_type;
protected:
[[nodiscard]] size_t hash_impl() const
{
return std::hash<std::string>{}(_source) ^ std::hash<std::string>{}(_operator_name) ^
std::hash<std::string>{}(_function_name) ^
std::hash<int>{}(static_cast<int32_t>(_output_type.id()));
}
};
/**
* @brief Derived aggregation class for specifying MERGE_LISTS aggregation
*/
class merge_lists_aggregation final : public groupby_aggregation, public reduce_aggregation {
public:
explicit merge_lists_aggregation() : aggregation{MERGE_LISTS} {}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<merge_lists_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
};
/**
* @brief Derived aggregation class for specifying MERGE_SETS aggregation
*/
class merge_sets_aggregation final : public groupby_aggregation, public reduce_aggregation {
public:
explicit merge_sets_aggregation(null_equality nulls_equal, nan_equality nans_equal)
: aggregation{MERGE_SETS}, _nulls_equal(nulls_equal), _nans_equal(nans_equal)
{
}
null_equality _nulls_equal; ///< whether to consider nulls as equal value
nan_equality _nans_equal; ///< whether to consider NaNs as equal value (applicable only to
///< floating point types)
[[nodiscard]] bool is_equal(aggregation const& _other) const override
{
if (!this->aggregation::is_equal(_other)) { return false; }
auto const& other = dynamic_cast<merge_sets_aggregation const&>(_other);
return (_nulls_equal == other._nulls_equal && _nans_equal == other._nans_equal);
}
[[nodiscard]] size_t do_hash() const override
{
return this->aggregation::do_hash() ^ hash_impl();
}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<merge_sets_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
protected:
size_t hash_impl() const
{
return std::hash<int>{}(static_cast<int>(_nulls_equal) ^ static_cast<int>(_nans_equal));
}
};
/**
* @brief Derived aggregation class for specifying MERGE_M2 aggregation
*/
class merge_m2_aggregation final : public groupby_aggregation {
public:
explicit merge_m2_aggregation() : aggregation{MERGE_M2} {}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<merge_m2_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
};
/**
* @brief Derived aggregation class for specifying MERGE_HISTOGRAM aggregation
*/
class merge_histogram_aggregation final : public groupby_aggregation, public reduce_aggregation {
public:
explicit merge_histogram_aggregation() : aggregation{MERGE_HISTOGRAM} {}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<merge_histogram_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
};
/**
* @brief Derived aggregation class for specifying COVARIANCE aggregation
*/
class covariance_aggregation final : public groupby_aggregation {
public:
explicit covariance_aggregation(size_type min_periods, size_type ddof)
: aggregation{COVARIANCE}, _min_periods{min_periods}, _ddof(ddof)
{
}
size_type _min_periods;
size_type _ddof;
[[nodiscard]] size_t do_hash() const override
{
return this->aggregation::do_hash() ^ hash_impl();
}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<covariance_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
protected:
size_t hash_impl() const
{
return std::hash<size_type>{}(_min_periods) ^ std::hash<size_type>{}(_ddof);
}
};
/**
* @brief Derived aggregation class for specifying CORRELATION aggregation
*/
class correlation_aggregation final : public groupby_aggregation {
public:
explicit correlation_aggregation(correlation_type type, size_type min_periods)
: aggregation{CORRELATION}, _type{type}, _min_periods{min_periods}
{
}
correlation_type _type;
size_type _min_periods;
[[nodiscard]] bool is_equal(aggregation const& _other) const override
{
if (!this->aggregation::is_equal(_other)) { return false; }
auto const& other = dynamic_cast<correlation_aggregation const&>(_other);
return (_type == other._type);
}
[[nodiscard]] size_t do_hash() const override
{
return this->aggregation::do_hash() ^ hash_impl();
}
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<correlation_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
protected:
size_t hash_impl() const
{
return std::hash<int>{}(static_cast<int>(_type)) ^ std::hash<size_type>{}(_min_periods);
}
};
/**
* @brief Derived aggregation class for specifying TDIGEST aggregation
*/
class tdigest_aggregation final : public groupby_aggregation, public reduce_aggregation {
public:
explicit tdigest_aggregation(int max_centroids_)
: aggregation{TDIGEST}, max_centroids{max_centroids_}
{
}
int const max_centroids;
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<tdigest_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
};
/**
* @brief Derived aggregation class for specifying MERGE_TDIGEST aggregation
*/
class merge_tdigest_aggregation final : public groupby_aggregation, public reduce_aggregation {
public:
explicit merge_tdigest_aggregation(int max_centroids_)
: aggregation{MERGE_TDIGEST}, max_centroids{max_centroids_}
{
}
int const max_centroids;
[[nodiscard]] std::unique_ptr<aggregation> clone() const override
{
return std::make_unique<merge_tdigest_aggregation>(*this);
}
std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
data_type col_type, simple_aggregations_collector& collector) const override
{
return collector.visit(col_type, *this);
}
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }
};
/**
* @brief Sentinel value used for `ARGMAX` aggregation.
*
* The output column for an `ARGMAX` aggregation is initialized with the
* sentinel value to indicate an unused element.
*/
constexpr size_type ARGMAX_SENTINEL{-1};
/**
* @brief Sentinel value used for `ARGMIN` aggregation.
*
* The output column for an `ARGMIN` aggregation is initialized with the
* sentinel value to indicate an unused element.
*/
constexpr size_type ARGMIN_SENTINEL{-1};
/**
* @brief Determines accumulator type based on input type and aggregation.
*
* @tparam Source The type on which the aggregation is computed
* @tparam k The aggregation performed
*/
template <typename Source, aggregation::Kind k, typename Enable = void>
struct target_type_impl {
using type = void;
};
// Computing MIN of Source, use Source accumulator
template <typename Source>
struct target_type_impl<Source, aggregation::MIN> {
using type = Source;
};
// Computing MAX of Source, use Source accumulator
template <typename Source>
struct target_type_impl<Source, aggregation::MAX> {
using type = Source;
};
// Always use size_type accumulator for COUNT_VALID
template <typename Source>
struct target_type_impl<Source, aggregation::COUNT_VALID> {
using type = size_type;
};
// Always use size_type accumulator for COUNT_ALL
template <typename Source>
struct target_type_impl<Source, aggregation::COUNT_ALL> {
using type = size_type;
};
// Use list for HISTOGRAM
template <typename SourceType>
struct target_type_impl<SourceType, aggregation::HISTOGRAM> {
using type = list_view;
};
// Computing ANY of any type, use bool accumulator
template <typename Source>
struct target_type_impl<Source, aggregation::ANY> {
using type = bool;
};
// Computing ALL of any type, use bool accumulator
template <typename Source>
struct target_type_impl<Source, aggregation::ALL> {
using type = bool;
};
// Always use `double` for MEAN except for durations and fixed point types.
template <typename Source, aggregation::Kind k>
struct target_type_impl<
Source,
k,
std::enable_if_t<is_fixed_width<Source>() and not is_chrono<Source>() and
not is_fixed_point<Source>() and (k == aggregation::MEAN)>> {
using type = double;
};
template <typename Source, aggregation::Kind k>
struct target_type_impl<Source,
k,
std::enable_if_t<(is_duration<Source>() or is_fixed_point<Source>()) &&
(k == aggregation::MEAN)>> {
using type = Source;
};
constexpr bool is_sum_product_agg(aggregation::Kind k)
{
return (k == aggregation::SUM) || (k == aggregation::PRODUCT) ||
(k == aggregation::SUM_OF_SQUARES);
}
// Summing/Multiplying integers of any type, always use int64_t accumulator
template <typename Source, aggregation::Kind k>
struct target_type_impl<Source,
k,
std::enable_if_t<std::is_integral_v<Source> && is_sum_product_agg(k)>> {
using type = int64_t;
};
// Summing fixed_point numbers
template <typename Source, aggregation::Kind k>
struct target_type_impl<
Source,
k,
std::enable_if_t<cudf::is_fixed_point<Source>() && (k == aggregation::SUM)>> {
using type = Source;
};
// Summing/Multiplying float/doubles, use same type accumulator
template <typename Source, aggregation::Kind k>
struct target_type_impl<
Source,
k,
std::enable_if_t<std::is_floating_point_v<Source> && is_sum_product_agg(k)>> {
using type = Source;
};
// Summing duration types, use same type accumulator
template <typename Source, aggregation::Kind k>
struct target_type_impl<Source,
k,
std::enable_if_t<is_duration<Source>() && (k == aggregation::SUM)>> {
using type = Source;
};
// Always use `double` for M2
template <typename SourceType>
struct target_type_impl<SourceType, aggregation::M2> {
using type = double;
};
// Always use `double` for VARIANCE
template <typename SourceType>
struct target_type_impl<SourceType, aggregation::VARIANCE> {
using type = double;
};
// Always use `double` for STD
template <typename SourceType>
struct target_type_impl<SourceType, aggregation::STD> {
using type = double;
};
// Always use `double` for quantile
template <typename Source>
struct target_type_impl<Source, aggregation::QUANTILE> {
using type = double;
};
// MEDIAN is a special case of a QUANTILE
template <typename Source>
struct target_type_impl<Source, aggregation::MEDIAN> {
using type = typename target_type_impl<Source, aggregation::QUANTILE>::type;
};
// Always use `size_type` for ARGMAX index
template <typename Source>
struct target_type_impl<Source, aggregation::ARGMAX> {
using type = size_type;
};
// Always use `size_type` for ARGMIN index
template <typename Source>
struct target_type_impl<Source, aggregation::ARGMIN> {
using type = size_type;
};
// Always use size_type accumulator for NUNIQUE
template <typename Source>
struct target_type_impl<Source, aggregation::NUNIQUE> {
using type = size_type;
};
// Always use Source for NTH_ELEMENT
template <typename Source>
struct target_type_impl<Source, aggregation::NTH_ELEMENT> {
using type = Source;
};
// Always use size_type accumulator for ROW_NUMBER
template <typename Source>
struct target_type_impl<Source, aggregation::ROW_NUMBER> {
using type = size_type;
};
// Always use size_type accumulator for RANK
template <typename Source>
struct target_type_impl<Source, aggregation::RANK> {
using type = size_type; // double for percentage=true.
};
// Always use list for COLLECT_LIST
template <typename Source>
struct target_type_impl<Source, aggregation::COLLECT_LIST> {
using type = list_view;
};
// Always use list for COLLECT_SET
template <typename Source>
struct target_type_impl<Source, aggregation::COLLECT_SET> {
using type = list_view;
};
// Always use Source for LEAD
template <typename Source>
struct target_type_impl<Source, aggregation::LEAD> {
using type = Source;
};
// Always use Source for LAG
template <typename Source>
struct target_type_impl<Source, aggregation::LAG> {
using type = Source;
};
// Always use list for MERGE_LISTS
template <typename Source>
struct target_type_impl<Source, aggregation::MERGE_LISTS> {
using type = list_view;
};
// Always use list for MERGE_SETS
template <typename Source>
struct target_type_impl<Source, aggregation::MERGE_SETS> {
using type = list_view;
};
// Always use struct for MERGE_M2
template <typename SourceType>
struct target_type_impl<SourceType, aggregation::MERGE_M2> {
using type = struct_view;
};
// Use list for MERGE_HISTOGRAM
template <typename SourceType>
struct target_type_impl<SourceType, aggregation::MERGE_HISTOGRAM> {
using type = list_view;
};
// Always use double for COVARIANCE
template <typename SourceType>
struct target_type_impl<SourceType, aggregation::COVARIANCE> {
using type = double;
};
// Always use double for CORRELATION
template <typename SourceType>
struct target_type_impl<SourceType, aggregation::CORRELATION> {
using type = double;
};
// Always use numeric types for TDIGEST
template <typename Source>
struct target_type_impl<Source,
aggregation::TDIGEST,
std::enable_if_t<(is_numeric<Source>() || is_fixed_point<Source>())>> {
using type = struct_view;
};
// TDIGEST_MERGE. The root column type for a tdigest column is a struct_view. Strictly
// speaking, this check is not sufficient to guarantee we are actually being given a
// real tdigest column, but we will do further verification inside the aggregation code.
template <typename Source>
struct target_type_impl<Source,
aggregation::MERGE_TDIGEST,
std::enable_if_t<std::is_same_v<Source, cudf::struct_view>>> {
using type = struct_view;
};
/**
* @brief Helper alias to get the accumulator type for performing aggregation
* `k` on elements of type `Source`
*
* @tparam Source The type on which the aggregation is computed
* @tparam k The aggregation performed
*/
template <typename Source, aggregation::Kind k>
using target_type_t = typename target_type_impl<Source, k>::type;
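// Illustrative compile-time checks of the mapping above (usage sketch; each follows
// directly from the specializations in this file):
static_assert(std::is_same_v<target_type_t<int32_t, aggregation::SUM>, int64_t>,
              "SUM over 32-bit integers accumulates in int64_t");
static_assert(std::is_same_v<target_type_t<float, aggregation::MEAN>, double>,
              "MEAN over floating-point input accumulates in double");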
template <aggregation::Kind k>
struct kind_to_type_impl {
using type = aggregation;
};
template <aggregation::Kind k>
using kind_to_type = typename kind_to_type_impl<k>::type;
#ifndef AGG_KIND_MAPPING
#define AGG_KIND_MAPPING(k, Type) \
template <> \
struct kind_to_type_impl<k> { \
using type = Type; \
}
#endif
AGG_KIND_MAPPING(aggregation::QUANTILE, quantile_aggregation);
AGG_KIND_MAPPING(aggregation::STD, std_aggregation);
AGG_KIND_MAPPING(aggregation::VARIANCE, var_aggregation);
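// Usage sketch: `kind_to_type<aggregation::VARIANCE>` resolves to `var_aggregation`,
// while kinds without an explicit mapping fall back to the base `aggregation` type.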
/**
* @brief Dispatches `k` as a non-type template parameter to a callable, `f`.
*
* @tparam F Type of callable
* @param k The `aggregation::Kind` value to dispatch
 * @param f The callable that accepts an `aggregation::Kind` non-type template
* argument.
* @param args Parameter pack forwarded to the `operator()` invocation
* @return Forwards the return value of the callable.
*/
#ifdef __CUDACC__
#pragma nv_exec_check_disable
#endif
template <typename F, typename... Ts>
CUDF_HOST_DEVICE inline decltype(auto) aggregation_dispatcher(aggregation::Kind k,
F&& f,
Ts&&... args)
{
switch (k) {
case aggregation::SUM:
return f.template operator()<aggregation::SUM>(std::forward<Ts>(args)...);
case aggregation::PRODUCT:
return f.template operator()<aggregation::PRODUCT>(std::forward<Ts>(args)...);
case aggregation::MIN:
return f.template operator()<aggregation::MIN>(std::forward<Ts>(args)...);
case aggregation::MAX:
return f.template operator()<aggregation::MAX>(std::forward<Ts>(args)...);
case aggregation::COUNT_VALID:
return f.template operator()<aggregation::COUNT_VALID>(std::forward<Ts>(args)...);
case aggregation::COUNT_ALL:
return f.template operator()<aggregation::COUNT_ALL>(std::forward<Ts>(args)...);
case aggregation::HISTOGRAM:
return f.template operator()<aggregation::HISTOGRAM>(std::forward<Ts>(args)...);
case aggregation::ANY:
return f.template operator()<aggregation::ANY>(std::forward<Ts>(args)...);
case aggregation::ALL:
return f.template operator()<aggregation::ALL>(std::forward<Ts>(args)...);
case aggregation::SUM_OF_SQUARES:
return f.template operator()<aggregation::SUM_OF_SQUARES>(std::forward<Ts>(args)...);
case aggregation::MEAN:
return f.template operator()<aggregation::MEAN>(std::forward<Ts>(args)...);
case aggregation::M2: return f.template operator()<aggregation::M2>(std::forward<Ts>(args)...);
case aggregation::VARIANCE:
return f.template operator()<aggregation::VARIANCE>(std::forward<Ts>(args)...);
case aggregation::STD:
return f.template operator()<aggregation::STD>(std::forward<Ts>(args)...);
case aggregation::MEDIAN:
return f.template operator()<aggregation::MEDIAN>(std::forward<Ts>(args)...);
case aggregation::QUANTILE:
return f.template operator()<aggregation::QUANTILE>(std::forward<Ts>(args)...);
case aggregation::ARGMAX:
return f.template operator()<aggregation::ARGMAX>(std::forward<Ts>(args)...);
case aggregation::ARGMIN:
return f.template operator()<aggregation::ARGMIN>(std::forward<Ts>(args)...);
case aggregation::NUNIQUE:
return f.template operator()<aggregation::NUNIQUE>(std::forward<Ts>(args)...);
case aggregation::NTH_ELEMENT:
return f.template operator()<aggregation::NTH_ELEMENT>(std::forward<Ts>(args)...);
case aggregation::ROW_NUMBER:
return f.template operator()<aggregation::ROW_NUMBER>(std::forward<Ts>(args)...);
case aggregation::RANK:
return f.template operator()<aggregation::RANK>(std::forward<Ts>(args)...);
case aggregation::COLLECT_LIST:
return f.template operator()<aggregation::COLLECT_LIST>(std::forward<Ts>(args)...);
case aggregation::COLLECT_SET:
return f.template operator()<aggregation::COLLECT_SET>(std::forward<Ts>(args)...);
case aggregation::LEAD:
return f.template operator()<aggregation::LEAD>(std::forward<Ts>(args)...);
case aggregation::LAG:
return f.template operator()<aggregation::LAG>(std::forward<Ts>(args)...);
case aggregation::MERGE_LISTS:
return f.template operator()<aggregation::MERGE_LISTS>(std::forward<Ts>(args)...);
case aggregation::MERGE_SETS:
return f.template operator()<aggregation::MERGE_SETS>(std::forward<Ts>(args)...);
case aggregation::MERGE_M2:
return f.template operator()<aggregation::MERGE_M2>(std::forward<Ts>(args)...);
case aggregation::MERGE_HISTOGRAM:
return f.template operator()<aggregation::MERGE_HISTOGRAM>(std::forward<Ts>(args)...);
case aggregation::COVARIANCE:
return f.template operator()<aggregation::COVARIANCE>(std::forward<Ts>(args)...);
case aggregation::CORRELATION:
return f.template operator()<aggregation::CORRELATION>(std::forward<Ts>(args)...);
case aggregation::TDIGEST:
return f.template operator()<aggregation::TDIGEST>(std::forward<Ts>(args)...);
case aggregation::MERGE_TDIGEST:
return f.template operator()<aggregation::MERGE_TDIGEST>(std::forward<Ts>(args)...);
default: {
#ifndef __CUDA_ARCH__
CUDF_FAIL("Unsupported aggregation.");
#else
CUDF_UNREACHABLE("Unsupported aggregation.");
#endif
}
}
}
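// Example usage (illustrative sketch; `is_count_like_fn` is a hypothetical callable,
// not part of this header). The functor's operator() is templated on the dispatched
// aggregation::Kind and must be instantiable for every kind:
//
//   struct is_count_like_fn {
//     template <aggregation::Kind k>
//     constexpr bool operator()() const
//     {
//       return k == aggregation::COUNT_VALID or k == aggregation::COUNT_ALL;
//     }
//   };
//   bool const count_like = aggregation_dispatcher(aggregation::COUNT_ALL, is_count_like_fn{});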
template <typename Element>
struct dispatch_aggregation {
#ifdef __CUDACC__
#pragma nv_exec_check_disable
#endif
template <aggregation::Kind k, typename F, typename... Ts>
CUDF_HOST_DEVICE inline decltype(auto) operator()(F&& f, Ts&&... args) const
{
return f.template operator()<Element, k>(std::forward<Ts>(args)...);
}
};
struct dispatch_source {
#ifdef __CUDACC__
#pragma nv_exec_check_disable
#endif
template <typename Element, typename F, typename... Ts>
CUDF_HOST_DEVICE inline decltype(auto) operator()(aggregation::Kind k, F&& f, Ts&&... args) const
{
return aggregation_dispatcher(
k, dispatch_aggregation<Element>{}, std::forward<F>(f), std::forward<Ts>(args)...);
}
};
/**
* @brief Dispatches both a type and `aggregation::Kind` template parameters to
* a callable.
*
* This function expects a callable `f` with an `operator()` template accepting
* two template parameters. The first is a type dispatched from `type`. The
* second is an `aggregation::Kind` dispatched from `k`.
*
* @param type The `data_type` used to dispatch a type for the first template
* parameter of the callable `F`
* @param k The `aggregation::Kind` used to dispatch an `aggregation::Kind`
 * non-type template parameter for the second template parameter of the callable `F`
 * @param args Parameter pack forwarded to the `operator()` invocation
*/
#ifdef __CUDACC__
#pragma nv_exec_check_disable
#endif
template <typename F, typename... Ts>
CUDF_HOST_DEVICE inline constexpr decltype(auto) dispatch_type_and_aggregation(data_type type,
aggregation::Kind k,
F&& f,
Ts&&... args)
{
return type_dispatcher(type, dispatch_source{}, k, std::forward<F>(f), std::forward<Ts>(args)...);
}
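// Example usage (illustrative sketch; `target_size_fn` is hypothetical): dispatch both the
// element type and the aggregation kind in a single call.
//
//   struct target_size_fn {
//     template <typename Source, aggregation::Kind k>
//     constexpr std::size_t operator()() const
//     {
//       if constexpr (is_valid_aggregation<Source, k>()) {
//         return sizeof(target_type_t<Source, k>);
//       } else {
//         return 0;
//       }
//     }
//   };
//   auto const bytes = dispatch_type_and_aggregation(
//     data_type{type_id::INT32}, aggregation::SUM, target_size_fn{});  // sizeof(int64_t)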
/**
* @brief Returns the target `data_type` for the specified aggregation k
* performed on elements of type source_type.
*
 * @param source_type The element type to be aggregated
 * @param k The aggregation kind
* @return data_type The target_type of k performed on source_type
* elements
*/
data_type target_type(data_type source_type, aggregation::Kind k);
/**
* @brief Indicates whether the specified aggregation `k` is valid to perform on
* the type `Source`.
*
* @tparam Source Type on which the aggregation is performed
* @tparam k The aggregation to perform
*/
template <typename Source, aggregation::Kind k>
constexpr inline bool is_valid_aggregation()
{
return (not std::is_void_v<target_type_t<Source, k>>);
}
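// For example (usage sketch), `is_valid_aggregation<int32_t, aggregation::SUM>()` is true,
// while `is_valid_aggregation<list_view, aggregation::SUM>()` is false because no
// accumulator type is defined for that combination.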
/**
* @brief Indicates whether the specified aggregation `k` is valid to perform on
* the `data_type` `source`.
*
* @param source Source `data_type` on which the aggregation is performed
* @param k The aggregation to perform
*/
bool is_valid_aggregation(data_type source, aggregation::Kind k);
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/aggregation/aggregation.cuh
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/aggregation.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/detail/utilities/assert.cuh>
#include <cudf/detail/utilities/device_atomics.cuh>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/traits.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/fill.h>
namespace cudf {
namespace detail {
/**
 * @brief Maps an `aggregation::Kind` value to its corresponding binary
* operator.
*
* @note Not all values of `aggregation::Kind` have a valid corresponding binary
* operator. For these values `E`,
 * `std::is_same_v<corresponding_operator<E>::type, void>` is true.
*
* @tparam k The `aggregation::Kind` value to map to its corresponding operator
*/
template <aggregation::Kind k>
struct corresponding_operator {
using type = void;
};
template <>
struct corresponding_operator<aggregation::MIN> {
using type = DeviceMin;
};
template <>
struct corresponding_operator<aggregation::MAX> {
using type = DeviceMax;
};
template <>
struct corresponding_operator<aggregation::ARGMIN> {
using type = DeviceMin;
};
template <>
struct corresponding_operator<aggregation::ARGMAX> {
using type = DeviceMax;
};
template <>
struct corresponding_operator<aggregation::ANY> {
using type = DeviceMax;
};
template <>
struct corresponding_operator<aggregation::ALL> {
using type = DeviceMin;
};
template <>
struct corresponding_operator<aggregation::SUM> {
using type = DeviceSum;
};
template <>
struct corresponding_operator<aggregation::PRODUCT> {
using type = DeviceProduct;
};
template <>
struct corresponding_operator<aggregation::SUM_OF_SQUARES> {
using type = DeviceSum;
};
template <>
struct corresponding_operator<aggregation::STD> {
using type = DeviceSum;
};
template <>
struct corresponding_operator<aggregation::VARIANCE> {
using type = DeviceSum;
};
template <>
struct corresponding_operator<aggregation::MEAN> {
using type = DeviceSum;
};
template <>
struct corresponding_operator<aggregation::COUNT_VALID> {
using type = DeviceCount;
};
template <>
struct corresponding_operator<aggregation::COUNT_ALL> {
using type = DeviceCount;
};
template <aggregation::Kind k>
using corresponding_operator_t = typename corresponding_operator<k>::type;
template <aggregation::Kind k>
constexpr bool has_corresponding_operator()
{
return !std::is_same_v<typename corresponding_operator<k>::type, void>;
}
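// Usage sketch: `corresponding_operator_t<aggregation::MIN>` is `DeviceMin`, and
// `has_corresponding_operator<aggregation::MEDIAN>()` is false since MEDIAN has no
// single binary combining operator.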
template <typename Source,
aggregation::Kind k,
bool target_has_nulls,
bool source_has_nulls,
typename Enable = void>
struct update_target_element {
__device__ void operator()(mutable_column_device_view target,
size_type target_index,
column_device_view source,
size_type source_index) const noexcept
{
CUDF_UNREACHABLE("Invalid source type and aggregation combination.");
}
};
template <typename Source, bool target_has_nulls, bool source_has_nulls>
struct update_target_element<
Source,
aggregation::MIN,
target_has_nulls,
source_has_nulls,
std::enable_if_t<is_fixed_width<Source>() && cudf::has_atomic_support<Source>() &&
!is_fixed_point<Source>()>> {
__device__ void operator()(mutable_column_device_view target,
size_type target_index,
column_device_view source,
size_type source_index) const noexcept
{
if (source_has_nulls and source.is_null(source_index)) { return; }
using Target = target_type_t<Source, aggregation::MIN>;
atomicMin(&target.element<Target>(target_index),
static_cast<Target>(source.element<Source>(source_index)));
if (target_has_nulls and target.is_null(target_index)) { target.set_valid(target_index); }
}
};
template <typename Source, bool target_has_nulls, bool source_has_nulls>
struct update_target_element<
Source,
aggregation::MIN,
target_has_nulls,
source_has_nulls,
std::enable_if_t<is_fixed_point<Source>() &&
cudf::has_atomic_support<device_storage_type_t<Source>>()>> {
__device__ void operator()(mutable_column_device_view target,
size_type target_index,
column_device_view source,
size_type source_index) const noexcept
{
if (source_has_nulls and source.is_null(source_index)) { return; }
using Target = target_type_t<Source, aggregation::MIN>;
using DeviceTarget = device_storage_type_t<Target>;
using DeviceSource = device_storage_type_t<Source>;
atomicMin(&target.element<DeviceTarget>(target_index),
static_cast<DeviceTarget>(source.element<DeviceSource>(source_index)));
if (target_has_nulls and target.is_null(target_index)) { target.set_valid(target_index); }
}
};
template <typename Source, bool target_has_nulls, bool source_has_nulls>
struct update_target_element<
Source,
aggregation::MAX,
target_has_nulls,
source_has_nulls,
std::enable_if_t<is_fixed_width<Source>() && cudf::has_atomic_support<Source>() &&
!is_fixed_point<Source>()>> {
__device__ void operator()(mutable_column_device_view target,
size_type target_index,
column_device_view source,
size_type source_index) const noexcept
{
if (source_has_nulls and source.is_null(source_index)) { return; }
using Target = target_type_t<Source, aggregation::MAX>;
atomicMax(&target.element<Target>(target_index),
static_cast<Target>(source.element<Source>(source_index)));
if (target_has_nulls and target.is_null(target_index)) { target.set_valid(target_index); }
}
};
template <typename Source, bool target_has_nulls, bool source_has_nulls>
struct update_target_element<
Source,
aggregation::MAX,
target_has_nulls,
source_has_nulls,
std::enable_if_t<is_fixed_point<Source>() &&
cudf::has_atomic_support<device_storage_type_t<Source>>()>> {
__device__ void operator()(mutable_column_device_view target,
size_type target_index,
column_device_view source,
size_type source_index) const noexcept
{
if (source_has_nulls and source.is_null(source_index)) { return; }
using Target = target_type_t<Source, aggregation::MAX>;
using DeviceTarget = device_storage_type_t<Target>;
using DeviceSource = device_storage_type_t<Source>;
atomicMax(&target.element<DeviceTarget>(target_index),
static_cast<DeviceTarget>(source.element<DeviceSource>(source_index)));
if (target_has_nulls and target.is_null(target_index)) { target.set_valid(target_index); }
}
};
template <typename Source, bool target_has_nulls, bool source_has_nulls>
struct update_target_element<
Source,
aggregation::SUM,
target_has_nulls,
source_has_nulls,
std::enable_if_t<cudf::is_fixed_width<Source>() && cudf::has_atomic_support<Source>() &&
!cudf::is_fixed_point<Source>() && !cudf::is_timestamp<Source>()>> {
__device__ void operator()(mutable_column_device_view target,
size_type target_index,
column_device_view source,
size_type source_index) const noexcept
{
if (source_has_nulls and source.is_null(source_index)) { return; }
using Target = target_type_t<Source, aggregation::SUM>;
atomicAdd(&target.element<Target>(target_index),
static_cast<Target>(source.element<Source>(source_index)));
if (target_has_nulls and target.is_null(target_index)) { target.set_valid(target_index); }
}
};
template <typename Source, bool target_has_nulls, bool source_has_nulls>
struct update_target_element<
Source,
aggregation::SUM,
target_has_nulls,
source_has_nulls,
std::enable_if_t<is_fixed_point<Source>() &&
cudf::has_atomic_support<device_storage_type_t<Source>>()>> {
__device__ void operator()(mutable_column_device_view target,
size_type target_index,
column_device_view source,
size_type source_index) const noexcept
{
if (source_has_nulls and source.is_null(source_index)) { return; }
using Target = target_type_t<Source, aggregation::SUM>;
using DeviceTarget = device_storage_type_t<Target>;
using DeviceSource = device_storage_type_t<Source>;
atomicAdd(&target.element<DeviceTarget>(target_index),
static_cast<DeviceTarget>(source.element<DeviceSource>(source_index)));
if (target_has_nulls and target.is_null(target_index)) { target.set_valid(target_index); }
}
};
/**
* @brief Function object to update a single element in a target column using
* the dictionary key addressed by the specific index.
*
* SFINAE is used to prevent recursion for dictionary type. Dictionary keys cannot be a
* dictionary.
*
*/
template <bool target_has_nulls = true>
struct update_target_from_dictionary {
template <typename Source,
aggregation::Kind k,
std::enable_if_t<!is_dictionary<Source>()>* = nullptr>
__device__ void operator()(mutable_column_device_view target,
size_type target_index,
column_device_view source,
size_type source_index) const noexcept
{
update_target_element<Source, k, target_has_nulls, false>{}(
target, target_index, source, source_index);
}
template <typename Source,
aggregation::Kind k,
std::enable_if_t<is_dictionary<Source>()>* = nullptr>
__device__ void operator()(mutable_column_device_view target,
size_type target_index,
column_device_view source,
size_type source_index) const noexcept
{
}
};
/**
* @brief Specialization function for dictionary type and aggregations.
*
* The `source` column is a dictionary type. This functor de-references the
* dictionary's keys child column and maps the input source index through
* the dictionary's indices child column to pass to the `update_target_element`
* in the above `update_target_from_dictionary` using the type-dispatcher to
* resolve the keys column type.
*
* `update_target_element( target, target_index, source.keys(), source.indices()[source_index] )`
*
* @tparam target_has_nulls Indicates presence of null elements in `target`
* @tparam source_has_nulls Indicates presence of null elements in `source`.
*/
template <aggregation::Kind k, bool target_has_nulls, bool source_has_nulls>
struct update_target_element<
dictionary32,
k,
target_has_nulls,
source_has_nulls,
std::enable_if_t<not(k == aggregation::ARGMIN or k == aggregation::ARGMAX or
k == aggregation::COUNT_VALID or k == aggregation::COUNT_ALL)>> {
__device__ void operator()(mutable_column_device_view target,
size_type target_index,
column_device_view source,
size_type source_index) const noexcept
{
if (source_has_nulls and source.is_null(source_index)) { return; }
dispatch_type_and_aggregation(
source.child(cudf::dictionary_column_view::keys_column_index).type(),
k,
update_target_from_dictionary<target_has_nulls>{},
target,
target_index,
source.child(cudf::dictionary_column_view::keys_column_index),
static_cast<cudf::size_type>(source.element<dictionary32>(source_index)));
}
};
template <typename T>
constexpr bool is_product_supported()
{
return is_numeric<T>();
}
template <typename Source, bool target_has_nulls, bool source_has_nulls>
struct update_target_element<Source,
aggregation::SUM_OF_SQUARES,
target_has_nulls,
source_has_nulls,
std::enable_if_t<is_product_supported<Source>()>> {
__device__ void operator()(mutable_column_device_view target,
size_type target_index,
column_device_view source,
size_type source_index) const noexcept
{
if (source_has_nulls and source.is_null(source_index)) { return; }
using Target = target_type_t<Source, aggregation::SUM_OF_SQUARES>;
auto value = static_cast<Target>(source.element<Source>(source_index));
atomicAdd(&target.element<Target>(target_index), value * value);
if (target_has_nulls and target.is_null(target_index)) { target.set_valid(target_index); }
}
};
template <typename Source, bool target_has_nulls, bool source_has_nulls>
struct update_target_element<Source,
aggregation::PRODUCT,
target_has_nulls,
source_has_nulls,
std::enable_if_t<is_product_supported<Source>()>> {
__device__ void operator()(mutable_column_device_view target,
size_type target_index,
column_device_view source,
size_type source_index) const noexcept
{
if (source_has_nulls and source.is_null(source_index)) { return; }
using Target = target_type_t<Source, aggregation::PRODUCT>;
atomicMul(&target.element<Target>(target_index),
static_cast<Target>(source.element<Source>(source_index)));
if (target_has_nulls and target.is_null(target_index)) { target.set_valid(target_index); }
}
};
template <typename Source, bool target_has_nulls, bool source_has_nulls>
struct update_target_element<
Source,
aggregation::COUNT_VALID,
target_has_nulls,
source_has_nulls,
std::enable_if_t<is_valid_aggregation<Source, aggregation::COUNT_VALID>()>> {
__device__ void operator()(mutable_column_device_view target,
size_type target_index,
column_device_view source,
size_type source_index) const noexcept
{
if (source_has_nulls and source.is_null(source_index)) { return; }
using Target = target_type_t<Source, aggregation::COUNT_VALID>;
atomicAdd(&target.element<Target>(target_index), Target{1});
// It is assumed the output for COUNT_VALID is initialized to be all valid
}
};
template <typename Source, bool target_has_nulls, bool source_has_nulls>
struct update_target_element<
Source,
aggregation::COUNT_ALL,
target_has_nulls,
source_has_nulls,
std::enable_if_t<is_valid_aggregation<Source, aggregation::COUNT_ALL>()>> {
__device__ void operator()(mutable_column_device_view target,
size_type target_index,
column_device_view source,
size_type source_index) const noexcept
{
using Target = target_type_t<Source, aggregation::COUNT_ALL>;
atomicAdd(&target.element<Target>(target_index), Target{1});
// It is assumed the output for COUNT_ALL is initialized to be all valid
}
};
template <typename Source, bool target_has_nulls, bool source_has_nulls>
struct update_target_element<
Source,
aggregation::ARGMAX,
target_has_nulls,
source_has_nulls,
std::enable_if_t<is_valid_aggregation<Source, aggregation::ARGMAX>() and
cudf::is_relationally_comparable<Source, Source>()>> {
__device__ void operator()(mutable_column_device_view target,
size_type target_index,
column_device_view source,
size_type source_index) const noexcept
{
if (source_has_nulls and source.is_null(source_index)) { return; }
using Target = target_type_t<Source, aggregation::ARGMAX>;
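    // Claim the slot if it still holds the sentinel; otherwise keep CAS-looping while this
    // thread's element compares greater than the element at the currently stored index.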
auto old = atomicCAS(&target.element<Target>(target_index), ARGMAX_SENTINEL, source_index);
if (old != ARGMAX_SENTINEL) {
while (source.element<Source>(source_index) > source.element<Source>(old)) {
old = atomicCAS(&target.element<Target>(target_index), old, source_index);
}
}
if (target_has_nulls and target.is_null(target_index)) { target.set_valid(target_index); }
}
};
template <typename Source, bool target_has_nulls, bool source_has_nulls>
struct update_target_element<
Source,
aggregation::ARGMIN,
target_has_nulls,
source_has_nulls,
std::enable_if_t<is_valid_aggregation<Source, aggregation::ARGMIN>() and
cudf::is_relationally_comparable<Source, Source>()>> {
__device__ void operator()(mutable_column_device_view target,
size_type target_index,
column_device_view source,
size_type source_index) const noexcept
{
if (source_has_nulls and source.is_null(source_index)) { return; }
using Target = target_type_t<Source, aggregation::ARGMIN>;
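    // Claim the slot if it still holds the sentinel; otherwise keep CAS-looping while this
    // thread's element compares less than the element at the currently stored index.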
auto old = atomicCAS(&target.element<Target>(target_index), ARGMIN_SENTINEL, source_index);
if (old != ARGMIN_SENTINEL) {
while (source.element<Source>(source_index) < source.element<Source>(old)) {
old = atomicCAS(&target.element<Target>(target_index), old, source_index);
}
}
if (target_has_nulls and target.is_null(target_index)) { target.set_valid(target_index); }
}
};
/**
* @brief Function object to update a single element in a target column by
* performing an aggregation operation with a single element from a source
* column.
*
* @tparam target_has_nulls Indicates presence of null elements in `target`
* @tparam source_has_nulls Indicates presence of null elements in `source`.
*/
template <bool target_has_nulls = true, bool source_has_nulls = true>
struct elementwise_aggregator {
template <typename Source, aggregation::Kind k>
__device__ void operator()(mutable_column_device_view target,
size_type target_index,
column_device_view source,
size_type source_index) const noexcept
{
update_target_element<Source, k, target_has_nulls, source_has_nulls>{}(
target, target_index, source, source_index);
}
};
/**
* @brief Updates a row in `target` by performing elementwise aggregation
* operations with a row in `source`.
*
* For the row in `target` specified by `target_index`, each element at `i` is
* updated by:
* ```c++
* target_row[i] = aggs[i](target_row[i], source_row[i])
* ```
*
* This function only supports aggregations that can be done in a "single pass",
* i.e., given an initial value `R`, the aggregation `op` can be computed on a series
* of elements `e[i] for i in [0,n)` by computing `R = op(e[i],R)` for any order
* of the values of `i`.
*
* The initial value and validity of `R` depends on the aggregation:
* SUM: 0 and NULL
* MIN: Max value of type and NULL
* MAX: Min value of type and NULL
* COUNT_VALID: 0 and VALID
* COUNT_ALL: 0 and VALID
* ARGMAX: `ARGMAX_SENTINEL` and NULL
* ARGMIN: `ARGMIN_SENTINEL` and NULL
*
* It is required that the elements of `target` be initialized with the corresponding
* initial values and validity specified above.
*
* Handling of null elements in both `source` and `target` depends on the aggregation:
* SUM, MIN, MAX, ARGMIN, ARGMAX:
* - `source`: Skipped
* - `target`: Updated from null to valid upon first successful aggregation
* COUNT_VALID, COUNT_ALL:
* - `source`: Skipped
* - `target`: Cannot be null
*
* @param target Table containing the row to update
* @param target_index Index of the row to update in `target`
* @param source Table containing the row used to update the row in `target`.
* The invariant `source.num_columns() >= target.num_columns()` must hold.
* @param source_index Index of the row to use in `source`
* @param aggs Array of aggregations to perform between elements of the `target`
* and `source` rows. Must contain at least `target.num_columns()` valid
* `aggregation::Kind` values.
*/
template <bool target_has_nulls = true, bool source_has_nulls = true>
__device__ inline void aggregate_row(mutable_table_device_view target,
size_type target_index,
table_device_view source,
size_type source_index,
aggregation::Kind const* aggs)
{
for (auto i = 0; i < target.num_columns(); ++i) {
dispatch_type_and_aggregation(source.column(i).type(),
aggs[i],
elementwise_aggregator<target_has_nulls, source_has_nulls>{},
target.column(i),
target_index,
source.column(i),
source_index);
}
}
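// Usage sketch (hypothetical kernel, not part of this header): one thread aggregates one
// source row into its destination row.
//
//   __global__ void aggregate_rows_kernel(mutable_table_device_view target,
//                                         table_device_view source,
//                                         cudf::size_type const* target_indices,
//                                         aggregation::Kind const* aggs)
//   {
//     auto const i =
//       static_cast<cudf::size_type>(blockIdx.x * blockDim.x + threadIdx.x);
//     if (i < source.num_rows()) {
//       cudf::detail::aggregate_row<true, true>(target, target_indices[i], source, i, aggs);
//     }
//   }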
/**
* @brief Dispatched functor to initialize a column with the identity of an
* aggregation operation.
*
* Given a type `T` and `aggregation kind k`, determines and sets the value of
* each element of the passed column to the appropriate initial value for the
* aggregation.
*
* The initial values set as per aggregation are:
* SUM: 0
* COUNT_VALID: 0 and VALID
* COUNT_ALL: 0 and VALID
* MIN: Max value of type `T`
* MAX: Min value of type `T`
* ARGMAX: `ARGMAX_SENTINEL`
* ARGMIN: `ARGMIN_SENTINEL`
*
* Only works on columns of fixed-width types.
*/
struct identity_initializer {
private:
template <typename T, aggregation::Kind k>
static constexpr bool is_supported()
{
return cudf::is_fixed_width<T>() and
(k == aggregation::SUM or k == aggregation::MIN or k == aggregation::MAX or
k == aggregation::COUNT_VALID or k == aggregation::COUNT_ALL or
k == aggregation::ARGMAX or k == aggregation::ARGMIN or
k == aggregation::SUM_OF_SQUARES or k == aggregation::STD or
k == aggregation::VARIANCE or
(k == aggregation::PRODUCT and is_product_supported<T>()));
}
template <typename T, aggregation::Kind k>
std::enable_if_t<not std::is_same_v<corresponding_operator_t<k>, void>, T>
identity_from_operator()
{
using DeviceType = device_storage_type_t<T>;
return corresponding_operator_t<k>::template identity<DeviceType>();
}
template <typename T, aggregation::Kind k>
std::enable_if_t<std::is_same_v<corresponding_operator_t<k>, void>, T> identity_from_operator()
{
CUDF_FAIL("Unable to get identity/sentinel from device operator");
}
template <typename T, aggregation::Kind k>
T get_identity()
{
if (k == aggregation::ARGMAX || k == aggregation::ARGMIN) {
if constexpr (cudf::is_timestamp<T>())
return k == aggregation::ARGMAX ? T{typename T::duration(ARGMAX_SENTINEL)}
: T{typename T::duration(ARGMIN_SENTINEL)};
else {
using DeviceType = device_storage_type_t<T>;
return k == aggregation::ARGMAX ? static_cast<DeviceType>(ARGMAX_SENTINEL)
: static_cast<DeviceType>(ARGMIN_SENTINEL);
}
}
return identity_from_operator<T, k>();
}
public:
template <typename T, aggregation::Kind k>
std::enable_if_t<is_supported<T, k>(), void> operator()(mutable_column_view const& col,
rmm::cuda_stream_view stream)
{
using DeviceType = device_storage_type_t<T>;
thrust::fill(rmm::exec_policy(stream),
col.begin<DeviceType>(),
col.end<DeviceType>(),
get_identity<DeviceType, k>());
}
template <typename T, aggregation::Kind k>
std::enable_if_t<not is_supported<T, k>(), void> operator()(mutable_column_view const& col,
rmm::cuda_stream_view stream)
{
CUDF_FAIL("Unsupported aggregation for initializing values");
}
};
/**
* @brief Initializes each column in a table with a corresponding identity value
* of an aggregation operation.
*
* The `i`th column will be initialized with the identity value of the `i`th
* aggregation operation in `aggs`.
*
* @throw cudf::logic_error if column type and corresponding agg are incompatible
* @throw cudf::logic_error if column type is not fixed-width
*
* @param table The table of columns to initialize.
* @param aggs A vector of aggregation operations corresponding to the table
* columns. The aggregations determine the identity value for each column.
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
void initialize_with_identity(mutable_table_view& table,
std::vector<aggregation::Kind> const& aggs,
rmm::cuda_stream_view stream);
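// Usage sketch (illustrative; `out_table` and `stream` are assumed to exist):
//
//   std::vector<cudf::aggregation::Kind> aggs{cudf::aggregation::SUM, cudf::aggregation::MIN};
//   cudf::detail::initialize_with_identity(out_table, aggs, stream);
//   // column 0 now holds zeros; column 1 holds the maximum value of its type.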
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/structs/utilities.hpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/structs/structs_column_view.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
namespace cudf::structs::detail {
enum class column_nullability {
MATCH_INCOMING, ///< generate a null column if the incoming column has nulls
FORCE ///< always generate a null column
};
/**
* @brief The struct to hold temporary data that is not directly used but needs to be kept alive to
* support the output generated by `superimpose_nulls(column_view)` and
* `superimpose_nulls(table_view)`.
*/
struct temporary_nullable_data {
/**
* @brief Store the newly generated null masks (if any).
*/
std::vector<rmm::device_buffer> new_null_masks;
/**
* @brief Store the newly generated columns with new null masks (if any).
*/
std::vector<std::unique_ptr<column>> new_columns;
/**
* @brief Take over the content of another instance and append it into the internal data.
*
* @param other The other instance to take over its content
*/
void emplace_back(temporary_nullable_data&& other);
};
/**
* @brief Flatten the children of the input columns into a vector where the i'th element
* is a vector of column_views representing the i'th child from each input column_view.
*
* @code{.pseudo}
* s1 = [ col0 : {0, 1}
* col1 : {2, 3, 4, 5, 6}
* col2 : {"abc", "def", "ghi"} ]
*
* s2 = [ col0 : {7, 8}
* col1 : {-4, -5, -6}
* col2 : {"uvw", "xyz"} ]
*
* e = extract_ordered_struct_children({s1, s2})
*
* e is now [ {{0, 1}, {7, 8}}
* {{2, 3, 4, 5, 6}, {-4, -5, -6}}
* {{"abc", "def", "ghi"}, {"uvw", "xyz"} ]
* @endcode
*
 * @param struct_cols Vector of struct columns to extract from
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @return Vector of vectors of child column_views, one inner vector per child index
*/
std::vector<std::vector<column_view>> extract_ordered_struct_children(
host_span<column_view const> struct_cols, rmm::cuda_stream_view stream);
/**
 * @brief Check whether the specified column is of type LIST, or contains LIST columns anywhere
 * among its descendants.
 * @param col column to check for lists.
 * @return true if the column or any of its descendants is a list, false otherwise.
*/
bool is_or_has_nested_lists(cudf::column_view const& col);
/**
* @brief Result of `flatten_nested_columns()`, where all `STRUCT` columns are replaced with
* their non-nested member columns, and `BOOL8` columns for their null masks.
*
* `flatten_nested_columns()` produces a "flattened" table_view with all `STRUCT` columns
* replaced with their child column_views, preceded by their null masks.
 * All newly allocated columns and device_buffers that back the returned table_view
 * are also encapsulated in the `flattened_table`.
 *
 * Objects of `flattened_table` need to be kept alive while their table_view is accessed.
*/
class flattened_table {
public:
/**
* @brief Constructor, to be used from `flatten_nested_columns()`.
*
* @param flattened_columns_ table_view resulting from `flatten_nested_columns()`
* @param orders_ Per-column ordering of the table_view
* @param null_orders_ Per-column null_order of the table_view
* @param columns_ Newly allocated columns to back the table_view
* @param nullable_data_ Newly generated temporary data that needs to be kept alive
*/
flattened_table(table_view const& flattened_columns_,
std::vector<order> const& orders_,
std::vector<null_order> const& null_orders_,
std::vector<std::unique_ptr<column>>&& columns_,
temporary_nullable_data&& nullable_data_)
: _flattened_columns{flattened_columns_},
_orders{orders_},
_null_orders{null_orders_},
_columns{std::move(columns_)},
_nullable_data{std::move(nullable_data_)}
{
}
flattened_table() = default;
/**
* @brief Getter for the flattened columns, as a `table_view`.
*/
[[nodiscard]] table_view flattened_columns() const { return _flattened_columns; }
/**
* @brief Getter for the cudf::order of the table_view's columns.
*/
[[nodiscard]] std::vector<order> orders() const { return _orders; }
/**
* @brief Getter for the cudf::null_order of the table_view's columns.
*/
[[nodiscard]] std::vector<null_order> null_orders() const { return _null_orders; }
/**
* @brief Conversion to `table_view`, to fetch flattened columns.
*/
operator table_view() const { return flattened_columns(); }
private:
table_view _flattened_columns;
std::vector<order> _orders;
std::vector<null_order> _null_orders;
std::vector<std::unique_ptr<column>> _columns;
temporary_nullable_data _nullable_data;
};
/**
* @brief Flatten table with struct columns to table with constituent columns of struct columns.
*
 * If the table does not have struct columns, the input arguments are returned unchanged.
*
* @param input input table to be flattened
* @param column_order column order for input table
* @param null_precedence null order for input table
 * @param nullability force the output to contain null-mask columns even if the input columns are all valid
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate new device memory
 * @return A pointer to a `flattened_table` containing flattened columns, flattened column
* orders, flattened null precedence, alongside the supporting columns and device_buffers
* for the flattened table.
*/
[[nodiscard]] std::unique_ptr<flattened_table> flatten_nested_columns(
table_view const& input,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
column_nullability nullability,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
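// Illustrative usage sketch (assumed inputs `input` and `stream`; passing empty order vectors is
// an assumption for the example): flattening a table with STRUCT columns before a row-wise
// comparison.
//
//   auto const flattened = cudf::structs::detail::flatten_nested_columns(
//     input, {}, {}, cudf::structs::detail::column_nullability::MATCH_INCOMING,
//     stream, rmm::mr::get_current_device_resource());
//   cudf::table_view const flat_view = flattened->flattened_columns();
//   // flat_view contains the struct children (plus BOOL8 null-mask columns where generated)
//   // in place of each struct column; `flattened` must stay alive while flat_view is in use.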
/**
* @brief Superimpose nulls from a given null mask into the input column, using bitwise AND.
*
* This function will recurse through all struct descendants. It is expected that the size of
* the given null mask in bits is the same as size of the input column.
*
* Any null strings/lists in the input (if any) will also be sanitized to make sure nulls in the
* output always have their sizes equal to 0.
*
* @param null_mask Null mask to be applied to the input column
* @param null_count Null count in the given null mask
* @param input Column to apply the null mask to
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate new device memory
* @return A new column with potentially new null mask
*/
[[nodiscard]] std::unique_ptr<column> superimpose_nulls(bitmask_type const* null_mask,
size_type null_count,
std::unique_ptr<column>&& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
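// Illustrative usage sketch (assumed inputs): pushing a parent struct's null mask into one of
// its materialized children.
//
//   auto child = std::make_unique<cudf::column>(child_view, stream, mr);
//   child      = cudf::structs::detail::superimpose_nulls(
//     parent_null_mask, parent_null_count, std::move(child), stream, mr);
//   // Rows that are null in `parent_null_mask` are now also null in `child` (and in its
//   // struct descendants), with null strings/lists sanitized to zero size.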
/**
* @brief Push down nulls from the given input column into its children columns, using bitwise AND.
*
* This function constructs a new column_view instance equivalent to the input column_view,
* with possibly new child column_view, all with possibly new null mask reflecting null rows from
* the parent column:
* 1. If the specified column is not STRUCT, the column_view is returned unmodified, with no
* temporary data allocated.
* 2. If the column is STRUCT, the null masks of the parent and child are bitwise-ANDed, and a
* modified column_view is returned. This applies recursively.
*
* Any null strings/lists in the input (if any) will also be sanitized to make sure nulls in the
* output always have their sizes equal to 0.
*
* @param input The input (possibly STRUCT) column whose nulls need to be pushed to its children
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate new device memory
* @return A pair of:
* 1. column_view with nulls pushed down to child columns, as appropriate.
* 2. An instance of `temporary_nullable_data` holding the temporary data that needs
* to be kept alive.
*/
[[nodiscard]] std::pair<column_view, temporary_nullable_data> push_down_nulls(
column_view const& input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr);
/**
* @brief Push down nulls from columns of the input table into their children columns, using
* bitwise AND.
*
* This function constructs a new table_view containing new column_view instances equivalent to
* the corresponding column_views in the input table. Each column_view might contain possibly new
* child column_views, all with possibly new null mask reflecting null rows from the parent column:
* 1. If the specified column is not STRUCT, the column_view is returned unmodified, with no
* temporary data allocated.
* 2. If the column is STRUCT, the null masks of the parent and child are bitwise-ANDed, and a
* modified column_view is returned. This applies recursively.
*
* Any null strings/lists in the input (if any) will also be sanitized to make sure nulls in the
* output always have their sizes equal to 0.
*
* @param input The table_view of (possibly STRUCT) columns whose nulls need to be pushed to their
* children
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate new device memory
* @return A pair of:
* 1. table_view of columns with nulls pushed down to child columns, as appropriate.
* 2. An instance of `temporary_nullable_data` holding the temporary data that needs
* to be kept alive.
*/
[[nodiscard]] std::pair<table_view, temporary_nullable_data> push_down_nulls(
table_view const& input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr);
/**
* @brief Checks if a column or any of its children is a struct column with structs that are null.
*
 * This function searches for structs that are null -- differentiating between structs that are null
 * and structs containing null values. Null structs add a column to the result of the flatten column
 * utility and necessitate column_nullability::FORCE when flattening the column for comparison
 * operations.
*
* @param col Column to check for null structs
* @return A boolean indicating if the column is or contains a struct column that contains a null
* struct.
*/
bool contains_null_structs(column_view const& col);
} // namespace cudf::structs::detail
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/utilities/linked_column.hpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_view.hpp>
#include <cudf/table/table_view.hpp>
#include <memory>
#include <vector>
namespace cudf::detail {
struct linked_column_view;
using LinkedColPtr = std::shared_ptr<linked_column_view>;
using LinkedColVector = std::vector<LinkedColPtr>;
/**
* @brief A column_view class with pointer to parent's column_view
*/
struct linked_column_view : public column_view_base {
linked_column_view(linked_column_view const&) = delete;
linked_column_view& operator=(linked_column_view const&) = delete;
/**
* @brief Construct from column_view
*
* @param col column_view to wrap
*/
linked_column_view(column_view const& col);
/**
 * @brief Construct from column_view with its parent
*
* @param parent Pointer to the column_view's parent column_view
* @param col column_view to wrap
*/
linked_column_view(linked_column_view* parent, column_view const& col);
/**
 * @brief Conversion operator to cast this instance to its column_view
*/
operator column_view() const;
linked_column_view* parent; ///< Pointer to parent of this column; nullptr if root
LinkedColVector children; ///< Vector of children of this instance
};
/**
* @brief Converts all column_views of a table into linked_column_views
*
* @param table table of columns to convert
* @return Vector of converted linked_column_views
*/
LinkedColVector table_to_linked_columns(table_view const& table);
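// Illustrative usage sketch (assumed input `tbl`): walking the parent/child links.
//
//   auto const linked = cudf::detail::table_to_linked_columns(tbl);
//   for (auto const& col : linked) {
//     // col->parent is nullptr for every root column; each entry in col->children
//     // points back to `col` through its own parent member.
//   }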
} // namespace cudf::detail
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/utilities/device_operators.cuh
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
/**
* @brief Definition of the device operators
* @file
*/
#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/fixed_point/temporary.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <type_traits>
namespace cudf {
namespace detail {
/**
* @brief SFINAE enabled min function suitable for std::is_invocable
*/
template <typename LHS,
typename RHS,
std::enable_if_t<cudf::is_relationally_comparable<LHS, RHS>()>* = nullptr>
CUDF_HOST_DEVICE inline auto min(LHS const& lhs, RHS const& rhs)
{
return std::min(lhs, rhs);
}
/**
* @brief SFINAE enabled max function suitable for std::is_invocable
*/
template <typename LHS,
typename RHS,
std::enable_if_t<cudf::is_relationally_comparable<LHS, RHS>()>* = nullptr>
CUDF_HOST_DEVICE inline auto max(LHS const& lhs, RHS const& rhs)
{
return std::max(lhs, rhs);
}
} // namespace detail
/**
* @brief Binary `sum` operator
*/
struct DeviceSum {
template <typename T, std::enable_if_t<!cudf::is_timestamp<T>()>* = nullptr>
CUDF_HOST_DEVICE inline auto operator()(T const& lhs, T const& rhs) -> decltype(lhs + rhs)
{
return lhs + rhs;
}
template <typename T, std::enable_if_t<cudf::is_timestamp<T>()>* = nullptr>
static constexpr T identity()
{
return T{typename T::duration{0}};
}
template <typename T,
std::enable_if_t<!cudf::is_timestamp<T>() && !cudf::is_fixed_point<T>()>* = nullptr>
static constexpr T identity()
{
return T{0};
}
template <typename T, std::enable_if_t<cudf::is_fixed_point<T>()>* = nullptr>
static constexpr T identity()
{
CUDF_FAIL("fixed_point does not yet support device operator identity");
return T{};
}
};
/**
* @brief `count` operator - used in rolling windows
*/
struct DeviceCount {
template <typename T, std::enable_if_t<cudf::is_timestamp<T>()>* = nullptr>
CUDF_HOST_DEVICE inline T operator()(T const& lhs, T const& rhs)
{
return T{DeviceCount{}(lhs.time_since_epoch(), rhs.time_since_epoch())};
}
template <typename T, std::enable_if_t<!cudf::is_timestamp<T>()>* = nullptr>
CUDF_HOST_DEVICE inline T operator()(T const&, T const& rhs)
{
return rhs + T{1};
}
template <typename T>
static constexpr T identity()
{
return T{};
}
};
/**
* @brief binary `min` operator
*/
struct DeviceMin {
template <typename T>
CUDF_HOST_DEVICE inline auto operator()(T const& lhs, T const& rhs)
-> decltype(cudf::detail::min(lhs, rhs))
{
return numeric::detail::min(lhs, rhs);
}
template <typename T,
std::enable_if_t<!std::is_same_v<T, cudf::string_view> && !cudf::is_dictionary<T>() &&
!cudf::is_fixed_point<T>()>* = nullptr>
static constexpr T identity()
{
// chrono types do not have std::numeric_limits specializations and should use T::max()
// https://eel.is/c++draft/numeric.limits.general#6
if constexpr (cudf::is_chrono<T>()) {
return T::max();
} else if constexpr (cuda::std::numeric_limits<T>::has_infinity) {
return cuda::std::numeric_limits<T>::infinity();
} else {
return cuda::std::numeric_limits<T>::max();
}
}
template <typename T, std::enable_if_t<cudf::is_fixed_point<T>()>* = nullptr>
static constexpr T identity()
{
CUDF_FAIL("fixed_point does not yet support DeviceMin identity");
return cuda::std::numeric_limits<T>::max();
}
// @brief identity specialized for string_view
template <typename T, std::enable_if_t<std::is_same_v<T, cudf::string_view>>* = nullptr>
CUDF_HOST_DEVICE inline static constexpr T identity()
{
return string_view::max();
}
template <typename T, std::enable_if_t<cudf::is_dictionary<T>()>* = nullptr>
static constexpr T identity()
{
return static_cast<T>(T::max_value());
}
};
/**
* @brief binary `max` operator
*/
struct DeviceMax {
template <typename T>
CUDF_HOST_DEVICE inline auto operator()(T const& lhs, T const& rhs)
-> decltype(cudf::detail::max(lhs, rhs))
{
return numeric::detail::max(lhs, rhs);
}
template <typename T,
std::enable_if_t<!std::is_same_v<T, cudf::string_view> && !cudf::is_dictionary<T>() &&
!cudf::is_fixed_point<T>()>* = nullptr>
static constexpr T identity()
{
// chrono types do not have std::numeric_limits specializations and should use T::min()
// https://eel.is/c++draft/numeric.limits.general#6
if constexpr (cudf::is_chrono<T>()) {
return T::min();
} else if constexpr (cuda::std::numeric_limits<T>::has_infinity) {
return -cuda::std::numeric_limits<T>::infinity();
} else {
return cuda::std::numeric_limits<T>::lowest();
}
}
template <typename T, std::enable_if_t<cudf::is_fixed_point<T>()>* = nullptr>
static constexpr T identity()
{
CUDF_FAIL("fixed_point does not yet support DeviceMax identity");
return cuda::std::numeric_limits<T>::lowest();
}
template <typename T, std::enable_if_t<std::is_same_v<T, cudf::string_view>>* = nullptr>
CUDF_HOST_DEVICE inline static constexpr T identity()
{
return string_view::min();
}
template <typename T, std::enable_if_t<cudf::is_dictionary<T>()>* = nullptr>
static constexpr T identity()
{
return static_cast<T>(T::lowest_value());
}
};
/**
* @brief binary `product` operator
*/
struct DeviceProduct {
template <typename T, std::enable_if_t<!cudf::is_timestamp<T>()>* = nullptr>
CUDF_HOST_DEVICE inline auto operator()(T const& lhs, T const& rhs) -> decltype(lhs * rhs)
{
return lhs * rhs;
}
template <typename T, std::enable_if_t<!cudf::is_fixed_point<T>()>* = nullptr>
static constexpr T identity()
{
return T{1};
}
template <typename T, std::enable_if_t<cudf::is_fixed_point<T>()>* = nullptr>
static constexpr T identity()
{
CUDF_FAIL("fixed_point does not yet support DeviceProduct identity");
return T{1, numeric::scale_type{0}};
}
};
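// Illustrative usage sketch: the operators above pair with their identities in device
// reductions. `d_vec` (an rmm::device_uvector<int32_t>) and `stream` are assumptions.
//
//   auto const min_value = thrust::reduce(rmm::exec_policy(stream),
//                                         d_vec.begin(),
//                                         d_vec.end(),
//                                         cudf::DeviceMin::identity<int32_t>(),
//                                         cudf::DeviceMin{});
//   // Starting from DeviceMin::identity() (the type's maximum) keeps the reduction correct
//   // even for an empty range.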
/**
* @brief Operator for calculating Lead/Lag window function.
*/
struct DeviceLeadLag {
size_type const row_offset;
explicit CUDF_HOST_DEVICE inline DeviceLeadLag(size_type offset_) : row_offset(offset_) {}
};
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/utilities/stream_pool.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <cstddef>
#include <vector>
namespace cudf::detail {
/**
* @brief Acquire a set of `cuda_stream_view` objects and synchronize them to an event on another
* stream.
*
* By default an underlying `rmm::cuda_stream_pool` is used to obtain the streams. The only other
* implementation at present is a debugging version that always returns the stream returned by
* `cudf::get_default_stream()`. To use this debugging version, set the environment variable
* `LIBCUDF_USE_DEBUG_STREAM_POOL`.
*
* Example usage:
* @code{.cpp}
* auto stream = cudf::get_default_stream();
* auto const num_streams = 2;
* // do work on stream
* // allocate streams and wait for an event on stream before executing on any of streams
 * auto streams = cudf::detail::fork_streams(stream, num_streams);
* // do work on streams[0] and streams[1]
* // wait for event on streams before continuing to do work on stream
* cudf::detail::join_streams(streams, stream);
* @endcode
*
* @param stream Stream that the returned streams will wait on.
* @param count The number of `cuda_stream_view` objects to return.
* @return Vector containing `count` stream views.
*/
[[nodiscard]] std::vector<rmm::cuda_stream_view> fork_streams(rmm::cuda_stream_view stream,
std::size_t count);
/**
* @brief Synchronize a stream to an event on a set of streams.
*
* @param streams Streams to wait on.
* @param stream Joined stream that synchronizes with the waited-on streams.
*/
void join_streams(host_span<rmm::cuda_stream_view const> streams, rmm::cuda_stream_view stream);
} // namespace cudf::detail
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/utilities/element_argminmax.cuh
|
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_device_view.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <type_traits>
namespace cudf {
namespace detail {
/**
* @brief Binary `argmin`/`argmax` operator
*
* @tparam T Type of the underlying column. Must support '<' operator.
*/
template <typename T>
struct element_argminmax_fn {
column_device_view const d_col;
bool const has_nulls;
bool const arg_min;
__device__ inline auto operator()(size_type const& lhs_idx, size_type const& rhs_idx) const
{
// The extra bounds checking is due to issue github.com/rapidsai/cudf/9156 and
// github.com/NVIDIA/thrust/issues/1525
// where invalid random values may be passed here by thrust::reduce_by_key
auto out_of_bound_or_null = [this] __device__(size_type const& idx) {
return idx < 0 || idx >= this->d_col.size() ||
(this->has_nulls && this->d_col.is_null_nocheck(idx));
};
if (out_of_bound_or_null(lhs_idx)) { return rhs_idx; }
if (out_of_bound_or_null(rhs_idx)) { return lhs_idx; }
// Return `lhs_idx` iff:
// row(lhs_idx) < row(rhs_idx) and finding ArgMin, or
// row(lhs_idx) >= row(rhs_idx) and finding ArgMax.
auto const less = d_col.element<T>(lhs_idx) < d_col.element<T>(rhs_idx);
return less == arg_min ? lhs_idx : rhs_idx;
}
};
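// Illustrative usage sketch (assumed inputs `input` and `stream`): computing the row index of
// the minimum element of an INT32 column with a plain reduction.
//
//   auto const d_col   = cudf::column_device_view::create(input, stream);
//   auto const arg_min = thrust::reduce(
//     rmm::exec_policy(stream),
//     thrust::make_counting_iterator<cudf::size_type>(0),
//     thrust::make_counting_iterator<cudf::size_type>(input.size()),
//     cudf::size_type{-1},  // out-of-bounds initial index; the functor skips it
//     cudf::detail::element_argminmax_fn<int32_t>{*d_col, input.has_nulls(), true});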
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/utilities/cuda.cuh
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <cub/cub.cuh>
#include <type_traits>
namespace cudf {
namespace detail {
/**
* @brief Size of a warp in a CUDA kernel.
*/
static constexpr size_type warp_size{32};
/**
* @brief A kernel grid configuration construction gadget for simple
* one-dimensional kernels, with protection against integer overflow.
*/
class grid_1d {
public:
int const num_threads_per_block;
int const num_blocks;
/**
* @param overall_num_elements The number of elements the kernel needs to
* handle/process, in its main, one-dimensional/linear input (e.g. one or more
* cuDF columns)
* @param num_threads_per_block The grid block size, determined according to
* the kernel's specific features (amount of shared memory necessary, SM
* functional units use pattern etc.); this can't be determined
* generically/automatically (as opposed to the number of blocks)
* @param elements_per_thread Typically, a single kernel thread processes more
* than a single element; this affects the number of threads the grid must
* contain
*/
grid_1d(cudf::size_type overall_num_elements,
cudf::size_type num_threads_per_block,
cudf::size_type elements_per_thread = 1)
: num_threads_per_block(num_threads_per_block),
num_blocks(util::div_rounding_up_safe(overall_num_elements,
elements_per_thread * num_threads_per_block))
{
CUDF_EXPECTS(num_threads_per_block > 0, "num_threads_per_block must be > 0");
CUDF_EXPECTS(num_blocks > 0, "num_blocks must be > 0");
}
/**
* @brief Returns the global thread index in a 1D grid.
*
* The returned index is unique across the entire grid.
*
* @param thread_id The thread index within the block
* @param block_id The block index within the grid
* @param num_threads_per_block The number of threads per block
* @return thread_index_type The global thread index
*/
static constexpr thread_index_type global_thread_id(thread_index_type thread_id,
thread_index_type block_id,
thread_index_type num_threads_per_block)
{
return thread_id + block_id * num_threads_per_block;
}
/**
* @brief Returns the global thread index of the current thread in a 1D grid.
*
* @return thread_index_type The global thread index
*/
static __device__ thread_index_type global_thread_id()
{
return global_thread_id(threadIdx.x, blockIdx.x, blockDim.x);
}
/**
* @brief Returns the stride of a 1D grid.
*
* The returned stride is the total number of threads in the grid.
*
   * @param num_threads_per_block The number of threads per block
   * @param num_blocks_per_grid The number of blocks in the grid
   * @return thread_index_type The total number of threads in the grid
*/
static constexpr thread_index_type grid_stride(thread_index_type num_threads_per_block,
thread_index_type num_blocks_per_grid)
{
return num_threads_per_block * num_blocks_per_grid;
}
/**
* @brief Returns the stride of the current 1D grid.
*
* @return thread_index_type The number of threads in the grid.
*/
static __device__ thread_index_type grid_stride() { return grid_stride(blockDim.x, gridDim.x); }
};
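// Illustrative usage sketch: launching a simple elementwise kernel. `my_kernel`, `d_data`,
// `num_elements`, and `stream` are assumptions for the example.
//
//   constexpr cudf::size_type block_size = 256;
//   cudf::detail::grid_1d const config(num_elements, block_size);
//   my_kernel<<<config.num_blocks, config.num_threads_per_block, 0, stream.value()>>>(
//     d_data, num_elements);
//
// Inside the kernel, grid_1d::global_thread_id() and grid_1d::grid_stride() provide the usual
// grid-stride loop indices.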
/**
* @brief Performs a sum reduction of values from the same lane across all
* warps in a thread block and returns the result on thread 0 of the block.
*
* All threads in a block must call this function, but only values from the
* threads indicated by `leader_lane` will contribute to the result. Similarly,
* the returned result is only defined on `threadIdx.x==0`.
*
* @tparam block_size The number of threads in the thread block (must be less
* than or equal to 1024)
* @tparam leader_lane The id of the lane in the warp whose value contributes to
* the reduction
* @tparam T Arithmetic type
* @param lane_value The value from the lane that contributes to the reduction
* @return The sum reduction of the values from each lane. Only valid on
* `threadIdx.x == 0`. The returned value on all other threads is undefined.
*/
template <int32_t block_size, int32_t leader_lane = 0, typename T>
__device__ T single_lane_block_sum_reduce(T lane_value)
{
static_assert(block_size <= 1024, "Invalid block size.");
static_assert(std::is_arithmetic_v<T>, "Invalid non-arithmetic type.");
constexpr auto warps_per_block{block_size / warp_size};
auto const lane_id{threadIdx.x % warp_size};
auto const warp_id{threadIdx.x / warp_size};
__shared__ T lane_values[warp_size];
// Load each lane's value into a shared memory array
if (lane_id == leader_lane) { lane_values[warp_id] = lane_value; }
__syncthreads();
// Use a single warp to do the reduction, result is only defined on
// threadId.x == 0
T result{0};
if (warp_id == 0) {
__shared__ typename cub::WarpReduce<T>::TempStorage temp;
lane_value = (lane_id < warps_per_block) ? lane_values[lane_id] : T{0};
result = cub::WarpReduce<T>(temp).Sum(lane_value);
}
// Shared memory has block scope, so sync here to ensure no data
// races between successive calls to this function in the same
// kernel.
__syncthreads();
return result;
}
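// Illustrative usage sketch: summing one value per warp inside a 256-thread block.
//
//   __global__ void per_warp_count_kernel(int* d_result)
//   {
//     int const my_value    = 1;  // assumed per-warp contribution, read from lane 0 only
//     int const block_total = cudf::detail::single_lane_block_sum_reduce<256>(my_value);
//     if (threadIdx.x == 0) { atomicAdd(d_result, block_total); }
//   }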
/**
* @brief Get the number of elements that can be processed per thread.
*
* @param[in] kernel The kernel for which the elements per thread needs to be assessed
* @param[in] total_size Number of elements
 * @param[in] block_size Expected block size
 * @param[in] max_per_thread Maximum number of elements to be processed by a single thread
 *
 * @return cudf::size_type Elements per thread that can be processed for given specification.
*/
template <typename Kernel>
cudf::size_type elements_per_thread(Kernel kernel,
cudf::size_type total_size,
cudf::size_type block_size,
cudf::size_type max_per_thread = 32)
{
CUDF_FUNC_RANGE();
// calculate theoretical occupancy
int max_blocks = 0;
CUDF_CUDA_TRY(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&max_blocks, kernel, block_size, 0));
int device = 0;
CUDF_CUDA_TRY(cudaGetDevice(&device));
int num_sms = 0;
CUDF_CUDA_TRY(cudaDeviceGetAttribute(&num_sms, cudaDevAttrMultiProcessorCount, device));
int per_thread = total_size / (max_blocks * num_sms * block_size);
return std::clamp(per_thread, 1, max_per_thread);
}
/**
 * @brief Finds the smallest value that is not less than `number_to_round` and is a multiple of
 * `modulus`. Expects `modulus` to be a power of 2.
*
* @note Does not throw or otherwise verify the user has passed in a modulus that is a
* power of 2.
*
* @param[in] number_to_round The value to be rounded up
* @param[in] modulus The modulus to be rounded up to. Must be a power of 2.
*
 * @return The rounded-up value
*/
template <typename T>
__device__ inline T round_up_pow2(T number_to_round, T modulus)
{
return (number_to_round + (modulus - 1)) & -modulus;
}
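// Example: round_up_pow2(13, 8) == 16 and round_up_pow2(16, 8) == 16; the result is undefined
// if `modulus` is not a power of 2.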
template <class F>
__global__ void single_thread_kernel(F f)
{
f();
}
/**
* @brief single thread cuda kernel
*
* @tparam Functor Device functor type
* @param functor device functor object or device lambda function
* @param stream CUDA stream used for the kernel launch
*/
template <class Functor>
void device_single_thread(Functor functor, rmm::cuda_stream_view stream)
{
single_thread_kernel<<<1, 1, 0, stream.value()>>>(functor);
}
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/utilities/integer_utils.hpp
|
/*
* Copyright 2019 BlazingDB, Inc.
* Copyright 2019 Eyal Rozenberg <[email protected]>
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
/**
* @file Utility code involving integer arithmetic
*/
#include <cudf/fixed_point/temporary.hpp>
#include <cmath>
#include <cstdlib>
#include <stdexcept>
#include <type_traits>
namespace cudf {
//! Utility functions
namespace util {
/**
* @brief Rounds `number_to_round` up to the next multiple of modulus
*
* @tparam S type to return
* @param number_to_round number that is being rounded
* @param modulus value to which to round
 * @return smallest integer not less than `number_to_round` whose remainder modulo `modulus` is zero.
*
* @note This function assumes that `number_to_round` is non-negative and
* `modulus` is positive. The safety is in regard to rollover.
*/
template <typename S>
constexpr S round_up_safe(S number_to_round, S modulus)
{
auto remainder = number_to_round % modulus;
if (remainder == 0) { return number_to_round; }
auto rounded_up = number_to_round - remainder + modulus;
if (rounded_up < number_to_round) {
throw std::invalid_argument("Attempt to round up beyond the type's maximum value");
}
return rounded_up;
}
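// Example: round_up_safe(17, 8) == 24 and round_up_safe(24, 8) == 24; rounding that would
// exceed the maximum value of the type throws std::invalid_argument instead of wrapping.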
/**
* @brief Rounds `number_to_round` down to the last multiple of modulus
*
* @tparam S type to return
* @param number_to_round number that is being rounded
* @param modulus value to which to round
 * @return largest integer not greater than `number_to_round` whose remainder modulo `modulus` is zero.
*
* @note This function assumes that `number_to_round` is non-negative and
* `modulus` is positive and does not check for overflow.
*/
template <typename S>
constexpr S round_down_safe(S number_to_round, S modulus) noexcept
{
auto remainder = number_to_round % modulus;
auto rounded_down = number_to_round - remainder;
return rounded_down;
}
/**
* @brief Rounds `number_to_round` up to the next multiple of modulus
*
* @tparam S type to return
* @param number_to_round number that is being rounded
* @param modulus value to which to round
 * @return smallest integer not less than `number_to_round` whose remainder modulo `modulus` is zero.
*
* @note This function assumes that `number_to_round` is non-negative and
* `modulus` is positive and does not check for overflow.
*/
template <typename S>
constexpr S round_up_unsafe(S number_to_round, S modulus) noexcept
{
auto remainder = number_to_round % modulus;
if (remainder == 0) { return number_to_round; }
auto rounded_up = number_to_round - remainder + modulus;
return rounded_up;
}
/**
 * Divides the left-hand-side by the right-hand-side, rounding the quotient up
 * to the nearest integer, e.g. (9,5) -> 2 , (10,5) -> 2, (11,5) -> 3.
*
* @param dividend the number to divide
* @param divisor the number by which to divide
 * @return The smallest integer that is greater than or equal to the exact (non-integral)
 * quotient dividend/divisor.
*
* @note sensitive to overflow, i.e. if dividend > std::numeric_limits<S>::max() - divisor,
* the result will be incorrect
*/
template <typename S, typename T>
constexpr S div_rounding_up_unsafe(S const& dividend, T const& divisor) noexcept
{
return (dividend + divisor - 1) / divisor;
}
namespace detail {
template <typename I>
constexpr I div_rounding_up_safe(std::integral_constant<bool, false>,
I dividend,
I divisor) noexcept
{
// TODO: This could probably be implemented faster
return (dividend > divisor) ? 1 + div_rounding_up_unsafe(dividend - divisor, divisor)
: (dividend > 0);
}
template <typename I>
constexpr I div_rounding_up_safe(std::integral_constant<bool, true>, I dividend, I divisor) noexcept
{
auto quotient = dividend / divisor;
auto remainder = dividend % divisor;
return quotient + (remainder != 0);
}
} // namespace detail
/**
 * Divides the left-hand-side by the right-hand-side, rounding the quotient up
 * to the nearest integer, e.g. (9,5) -> 2 , (10,5) -> 2, (11,5) -> 3.
*
* @param dividend the number to divide
 * @param divisor the number by which to divide
 * @return The smallest integer that is greater than or equal to the exact (non-integral)
 * quotient dividend/divisor.
*
* @note will not overflow, and may _or may not_ be slower than the intuitive
* approach of using (dividend + divisor - 1) / divisor
*/
template <typename I>
constexpr I div_rounding_up_safe(I dividend, I divisor) noexcept
{
using i_is_a_signed_type = std::integral_constant<bool, std::is_signed_v<I>>;
return detail::div_rounding_up_safe(i_is_a_signed_type{}, dividend, divisor);
}
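// Example: div_rounding_up_safe(9, 5) == 2, div_rounding_up_safe(10, 5) == 2, and
// div_rounding_up_safe(11, 5) == 3; unlike div_rounding_up_unsafe, the result stays correct
// even when `dividend` is near the maximum value of its type.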
template <typename I>
constexpr bool is_a_power_of_two(I val) noexcept
{
static_assert(std::is_integral_v<I>, "This function only applies to integral types");
return ((val - 1) & val) == 0;
}
/**
* @brief Return the absolute value of a number.
*
 * This calls `std::abs()`, which performs the equivalent of `(value < 0) ? -value : value`.
*
* This was created to prevent compile errors calling `std::abs()` with unsigned integers.
* An example compile error appears as follows:
* @code{.pseudo}
* error: more than one instance of overloaded function "std::abs" matches the argument list:
* function "abs(int)"
* function "std::abs(long)"
* function "std::abs(long long)"
* function "std::abs(double)"
* function "std::abs(float)"
* function "std::abs(long double)"
* argument types are: (uint64_t)
* @endcode
*
* Not all cases could be if-ed out using `std::is_signed_v<T>` and satisfy the compiler.
*
 * @param value Numeric value, either an integer or a floating-point type.
 * @return Absolute value if the value type is signed; the value unchanged otherwise.
*/
template <typename T>
constexpr auto absolute_value(T value) -> T
{
if constexpr (cuda::std::is_signed<T>()) return numeric::detail::abs(value);
return value;
}
} // namespace util
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/utilities/pinned_host_vector.hpp
|
/*
* Copyright 2008-2023 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstddef>
#include <limits>
#include <new> // for bad_alloc
#include <cudf/utilities/error.hpp>
#include <thrust/host_vector.h>
namespace cudf::detail {
/*! \p pinned_allocator is a CUDA-specific host memory allocator
* that employs \c cudaMallocHost for allocation.
*
* This implementation is ported from the experimental/pinned_allocator
* that Thrust used to provide.
*
* \see https://en.cppreference.com/w/cpp/memory/allocator
*/
template <typename T>
class pinned_allocator;
/*! \p pinned_allocator is a CUDA-specific host memory allocator
* that employs \c cudaMallocHost for allocation.
*
* This implementation is ported from the experimental/pinned_allocator
* that Thrust used to provide.
*
* \see https://en.cppreference.com/w/cpp/memory/allocator
*/
template <>
class pinned_allocator<void> {
public:
using value_type = void; ///< The type of the elements in the allocator
using pointer = void*; ///< The type returned by address() / allocate()
using const_pointer = void const*; ///< The type returned by address()
using size_type = std::size_t; ///< The type used for the size of the allocation
using difference_type = std::ptrdiff_t; ///< The type of the distance between two pointers
/**
* @brief converts a `pinned_allocator<void>` to `pinned_allocator<U>`
*/
template <typename U>
struct rebind {
using other = pinned_allocator<U>; ///< The rebound type
};
};
/*! \p pinned_allocator is a CUDA-specific host memory allocator
* that employs \c cudaMallocHost for allocation.
*
* This implementation is ported from the experimental/pinned_allocator
* that Thrust used to provide.
*
* \see https://en.cppreference.com/w/cpp/memory/allocator
*/
template <typename T>
class pinned_allocator {
public:
using value_type = T; ///< The type of the elements in the allocator
using pointer = T*; ///< The type returned by address() / allocate()
using const_pointer = T const*; ///< The type returned by address()
using reference = T&; ///< The parameter type for address()
using const_reference = T const&; ///< The parameter type for address()
using size_type = std::size_t; ///< The type used for the size of the allocation
using difference_type = std::ptrdiff_t; ///< The type of the distance between two pointers
/**
* @brief converts a `pinned_allocator<T>` to `pinned_allocator<U>`
*/
template <typename U>
struct rebind {
using other = pinned_allocator<U>; ///< The rebound type
};
/**
* @brief pinned_allocator's null constructor does nothing.
*/
__host__ __device__ inline pinned_allocator() {}
/**
* @brief pinned_allocator's null destructor does nothing.
*/
__host__ __device__ inline ~pinned_allocator() {}
/**
* @brief pinned_allocator's copy constructor does nothing.
*/
__host__ __device__ inline pinned_allocator(pinned_allocator const&) {}
/**
* @brief pinned_allocator's copy constructor does nothing.
*
* This version of pinned_allocator's copy constructor
* is templated on the \c value_type of the pinned_allocator
* to copy from. It is provided merely for convenience; it
* does nothing.
*/
template <typename U>
__host__ __device__ inline pinned_allocator(pinned_allocator<U> const&)
{
}
/**
* @brief This method returns the address of a \c reference of
* interest.
*
* @param r The \c reference of interest.
* @return \c r's address.
*/
__host__ __device__ inline pointer address(reference r) { return &r; }
/**
* @brief This method returns the address of a \c const_reference
* of interest.
*
* @param r The \c const_reference of interest.
* @return \c r's address.
*/
__host__ __device__ inline const_pointer address(const_reference r) { return &r; }
/**
* @brief This method allocates storage for objects in pinned host
* memory.
*
* @param cnt The number of objects to allocate.
* @return a \c pointer to the newly allocated objects.
* @note The second parameter to this function is meant as a
* hint pointer to a nearby memory location, but is
* not used by this allocator.
* @note This method does not invoke \p value_type's constructor.
* It is the responsibility of the caller to initialize the
* objects at the returned \c pointer.
*/
__host__ inline pointer allocate(size_type cnt, const_pointer /*hint*/ = 0)
{
if (cnt > this->max_size()) { throw std::bad_alloc(); } // end if
pointer result(0);
CUDF_CUDA_TRY(cudaMallocHost(reinterpret_cast<void**>(&result), cnt * sizeof(value_type)));
return result;
}
/**
* @brief This method deallocates pinned host memory previously allocated
* with this \c pinned_allocator.
*
* @param p A \c pointer to the previously allocated memory.
* @note The second parameter is the number of objects previously allocated
* but is ignored by this allocator.
* @note This method does not invoke \p value_type's destructor.
* It is the responsibility of the caller to destroy
* the objects stored at \p p.
*/
__host__ inline void deallocate(pointer p, size_type /*cnt*/)
{
auto dealloc_worked = cudaFreeHost(p);
(void)dealloc_worked;
assert(dealloc_worked == cudaSuccess);
}
/**
* @brief This method returns the maximum size of the \c cnt parameter
* accepted by the \p allocate() method.
*
* @return The maximum number of objects that may be allocated
* by a single call to \p allocate().
*/
inline size_type max_size() const { return (std::numeric_limits<size_type>::max)() / sizeof(T); }
/**
* @brief This method tests this \p pinned_allocator for equality to
* another.
*
* @param x The other \p pinned_allocator of interest.
* @return This method always returns \c true.
*/
__host__ __device__ inline bool operator==(pinned_allocator const& x) const { return true; }
/**
* @brief This method tests this \p pinned_allocator for inequality
* to another.
*
* @param x The other \p pinned_allocator of interest.
* @return This method always returns \c false.
*/
__host__ __device__ inline bool operator!=(pinned_allocator const& x) const
{
return !operator==(x);
}
};
/**
* @brief A vector class with pinned host memory allocator
*/
template <typename T>
using pinned_host_vector = thrust::host_vector<T, pinned_allocator<T>>;
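// Illustrative usage sketch (assumed `stream`): staging host data in pinned memory so the
// host-to-device copy can be issued asynchronously.
//
//   cudf::detail::pinned_host_vector<int> staging(1024, 0);
//   rmm::device_uvector<int> d_data(staging.size(), stream);
//   CUDF_CUDA_TRY(cudaMemcpyAsync(d_data.data(),
//                                 staging.data(),
//                                 staging.size() * sizeof(int),
//                                 cudaMemcpyHostToDevice,
//                                 stream.value()));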
} // namespace cudf::detail
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/utilities/stacktrace.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <string>
namespace cudf::detail {
/**
* @addtogroup utility_stacktrace
* @{
* @file
*/
/**
* @brief Specify whether the last stackframe is included in the stacktrace.
*/
enum class capture_last_stackframe : bool { YES, NO };
/**
* @brief Query the current stacktrace and return the whole stacktrace as one string.
*
* Depending on the value of the flag `capture_last_frame`, the caller that executes stacktrace
* retrieval can be included in the output result.
*
* @param capture_last_frame Flag to specify if the current stackframe will be included into
* the output
* @return A string storing the whole current stacktrace
*/
std::string get_stacktrace(capture_last_stackframe capture_last_frame);
/** @} */ // end of group
} // namespace cudf::detail
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/utilities/transform_unary_functions.cuh
|
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @brief unary functions for thrust::transform_iterator
* @file transform_unary_functions.cuh
*
* These are designed for using as AdaptableUnaryFunction
* for thrust::transform_iterator.
* For the detail of example cases,
* @see iterator.cuh iterator_test.cu
*/
#pragma once
#include <thrust/iterator/transform_iterator.h>
#include <thrust/pair.h>
namespace cudf {
/**
* @brief Transforms non-null input using `Functor`, and for null, returns `null_replacement`.
*
 * The functor's argument is considered null if the second value of the argument pair is false.
*
* @tparam ResultType Output type of `Functor` and null replacement type.
* @tparam Functor functor to transform first value of argument pair to ResultType.
*/
template <typename ResultType, typename Functor>
struct null_replacing_transformer {
using type = ResultType;
Functor f;
type replacement;
CUDF_HOST_DEVICE inline null_replacing_transformer(type null_replacement, Functor transformer)
: f(transformer), replacement(null_replacement)
{
}
template <typename ElementType>
CUDF_HOST_DEVICE inline type operator()(thrust::pair<ElementType, bool> const& pair_value)
{
if (pair_value.second)
return f(pair_value.first);
else
return replacement;
}
};
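// Illustrative usage sketch (assumed `pair_iter`, an iterator dereferencing to
// thrust::pair<double, bool>): replacing null rows with 0.0 while passing valid values through.
//
//   auto value_or_zero = cudf::null_replacing_transformer<double, thrust::identity<double>>{
//     0.0, thrust::identity<double>{}};
//   auto it = thrust::make_transform_iterator(pair_iter, value_or_zero);
//   // *it yields the element value for valid rows and 0.0 for null rows.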
/**
 * @brief Intermediate struct to calculate mean and variance.
 * This is an example case to output a struct from column input.
 *
 * This will be used to calculate and hold the `sum of values`, `sum of squares`,
 * and `count of valid values`.
 * Those will be used to compute `mean` (= sum / count)
 * and `variance` (= sum of squares / count - mean^2).
*
* @tparam ElementType element data type of value and value_squared.
*/
template <typename ElementType>
struct meanvar {
ElementType value; /// the value
ElementType value_squared; /// the value of squared
cudf::size_type count; /// the count
CUDF_HOST_DEVICE inline meanvar(ElementType _value = 0,
ElementType _value_squared = 0,
cudf::size_type _count = 0)
: value(_value), value_squared(_value_squared), count(_count){};
using this_t = cudf::meanvar<ElementType>;
CUDF_HOST_DEVICE inline this_t operator+(this_t const& rhs) const
{
return this_t((this->value + rhs.value),
(this->value_squared + rhs.value_squared),
(this->count + rhs.count));
};
CUDF_HOST_DEVICE inline bool operator==(this_t const& rhs) const
{
return ((this->value == rhs.value) && (this->value_squared == rhs.value_squared) &&
(this->count == rhs.count));
};
};
// --------------------------------------------------------------------------
// transformers
/**
 * @brief Transforms a scalar by squaring it.
 *
 * This struct transforms the input value as
 * `value * value`.
 *
 * This will be used to compute the "sum of squares".
 *
 * @tparam ElementType scalar data type of the input and output
*/
template <typename ElementType>
struct transformer_squared {
CUDF_HOST_DEVICE inline ElementType operator()(ElementType const& value)
{
return (value * value);
};
};
/**
* @brief Uses a scalar value to construct a `meanvar` object.
* This transforms `thrust::pair<ElementType, bool>` into
* `ResultType = meanvar<ElementType>` form.
*
* This struct transforms the value and the squared value and the count at once.
*
* @tparam ElementType scalar data type of input
*/
template <typename ElementType>
struct transformer_meanvar {
using ResultType = meanvar<ElementType>;
CUDF_HOST_DEVICE inline ResultType operator()(thrust::pair<ElementType, bool> const& pair)
{
ElementType v = pair.first;
return meanvar<ElementType>(v, v * v, (pair.second) ? 1 : 0);
};
};
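// Illustrative usage sketch (assumed `pair_iter`, `num_rows`, and `stream`): reducing meanvar
// partials to compute the mean and variance of a column of doubles.
//
//   auto it = thrust::make_transform_iterator(pair_iter, cudf::transformer_meanvar<double>{});
//   auto const partial = thrust::reduce(rmm::exec_policy(stream),
//                                       it,
//                                       it + num_rows,
//                                       cudf::meanvar<double>{},
//                                       thrust::plus<cudf::meanvar<double>>{});
//   double const mean     = partial.value / partial.count;
//   double const variance = partial.value_squared / partial.count - mean * mean;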
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/utilities/logger.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/utilities/logger.hpp>
// Log messages that require computation should only be used at level TRACE and DEBUG
#define CUDF_LOG_TRACE(...) SPDLOG_LOGGER_TRACE(&cudf::logger(), __VA_ARGS__)
#define CUDF_LOG_DEBUG(...) SPDLOG_LOGGER_DEBUG(&cudf::logger(), __VA_ARGS__)
#define CUDF_LOG_INFO(...) SPDLOG_LOGGER_INFO(&cudf::logger(), __VA_ARGS__)
#define CUDF_LOG_WARN(...) SPDLOG_LOGGER_WARN(&cudf::logger(), __VA_ARGS__)
#define CUDF_LOG_ERROR(...) SPDLOG_LOGGER_ERROR(&cudf::logger(), __VA_ARGS__)
#define CUDF_LOG_CRITICAL(...) SPDLOG_LOGGER_CRITICAL(&cudf::logger(), __VA_ARGS__)
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/utilities/default_stream.hpp
|
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <rmm/cuda_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace detail {
/**
* @brief Default stream for cudf
*
* Use this value to ensure the correct stream is used when compiled with per
* thread default stream.
*/
extern rmm::cuda_stream_view const default_stream_value;
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/utilities/int_fastdiv.h
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Copyright 2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
class int_fastdiv {
public:
// divisor != 0
__host__ __device__ __forceinline__ int_fastdiv(int divisor = 0) : d(divisor)
{
update_magic_numbers();
}
__host__ __device__ __forceinline__ int_fastdiv& operator=(int divisor)
{
this->d = divisor;
update_magic_numbers();
return *this;
}
__host__ __device__ __forceinline__ operator int() const { return d; }
private:
int d;
int M;
int s;
int n_add_sign;
// Hacker's Delight, Second Edition, Chapter 10, Integer Division By Constants
__host__ __device__ __forceinline__ void update_magic_numbers()
{
if (d == 1) {
M = 0;
s = -1;
n_add_sign = 1;
return;
} else if (d == -1) {
M = 0;
s = -1;
n_add_sign = -1;
return;
}
int p;
unsigned int ad, anc, delta, q1, r1, q2, r2, t;
unsigned const two31 = 0x8000'0000u;
ad = (d == 0) ? 1 : abs(d);
t = two31 + ((unsigned int)d >> 31);
anc = t - 1 - t % ad;
p = 31;
q1 = two31 / anc;
r1 = two31 - q1 * anc;
q2 = two31 / ad;
r2 = two31 - q2 * ad;
do {
++p;
q1 = 2 * q1;
r1 = 2 * r1;
if (r1 >= anc) {
++q1;
r1 -= anc;
}
q2 = 2 * q2;
r2 = 2 * r2;
if (r2 >= ad) {
++q2;
r2 -= ad;
}
delta = ad - r2;
} while (q1 < delta || (q1 == delta && r1 == 0));
this->M = q2 + 1;
if (d < 0) this->M = -this->M;
this->s = p - 32;
if ((d > 0) && (M < 0))
n_add_sign = 1;
else if ((d < 0) && (M > 0))
n_add_sign = -1;
else
n_add_sign = 0;
}
  __host__ __device__ __forceinline__ friend int operator/(int const dividend,
int_fastdiv const& divisor);
};
__host__ __device__ __forceinline__ int operator/(int const n, int_fastdiv const& divisor)
{
int q;
#ifdef __CUDA_ARCH__
asm("mul.hi.s32 %0, %1, %2;" : "=r"(q) : "r"(divisor.M), "r"(n));
#else
q = (((unsigned long long)((long long)divisor.M * (long long)n)) >> 32);
#endif
q += n * divisor.n_add_sign;
if (divisor.s >= 0) {
q >>= divisor.s; // we rely on this to be implemented as arithmetic shift
q += (((unsigned int)q) >> 31);
}
return q;
}
__host__ __device__ __forceinline__ int operator%(int const n, int_fastdiv const& divisor)
{
int quotient = n / divisor;
int remainder = n - quotient * divisor;
return remainder;
}
__host__ __device__ __forceinline__ int operator/(unsigned int const n, int_fastdiv const& divisor)
{
return ((int)n) / divisor;
}
__host__ __device__ __forceinline__ int operator%(unsigned int const n, int_fastdiv const& divisor)
{
return ((int)n) % divisor;
}
__host__ __device__ __forceinline__ int operator/(short const n, int_fastdiv const& divisor)
{
return ((int)n) / divisor;
}
__host__ __device__ __forceinline__ int operator%(short const n, int_fastdiv const& divisor)
{
return ((int)n) % divisor;
}
__host__ __device__ __forceinline__ int operator/(unsigned short const n,
int_fastdiv const& divisor)
{
return ((int)n) / divisor;
}
__host__ __device__ __forceinline__ int operator%(unsigned short const n,
int_fastdiv const& divisor)
{
return ((int)n) % divisor;
}
__host__ __device__ __forceinline__ int operator/(char const n, int_fastdiv const& divisor)
{
return ((int)n) / divisor;
}
__host__ __device__ __forceinline__ int operator%(char const n, int_fastdiv const& divisor)
{
return ((int)n) % divisor;
}
__host__ __device__ __forceinline__ int operator/(unsigned char const n, int_fastdiv const& divisor)
{
return ((int)n) / divisor;
}
__host__ __device__ __forceinline__ int operator%(unsigned char const n, int_fastdiv const& divisor)
{
return ((int)n) % divisor;
}
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/utilities/device_atomics.cuh
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
/**
 * @brief Overloads for CUDA atomic operations
 * @file device_atomics.cuh
 *
 * Provides overloads of the CUDA atomic operations `atomicAdd`, `atomicMin`,
 * `atomicMax`, and `atomicCAS` for all of cudf's data types:
 * int8_t, int16_t, int32_t, int64_t, float, double,
 * cudf::timestamp_D, cudf::timestamp_s, cudf::timestamp_ms, cudf::timestamp_us,
 * cudf::timestamp_ns, cudf::duration_D, cudf::duration_s, cudf::duration_ms,
 * cudf::duration_us, cudf::duration_ns, and bool.
 * Also provides `cudf::genericAtomicOperation`, which performs an atomic
 * operation with a given binary operator.
*/
#include <cudf/detail/utilities/device_operators.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/wrappers/durations.hpp>
#include <cudf/wrappers/timestamps.hpp>
#include <type_traits>
namespace cudf {
namespace detail {
template <typename T_output, typename T_input>
__forceinline__ __device__ T_output type_reinterpret(T_input value)
{
static_assert(sizeof(T_output) == sizeof(T_input), "type_reinterpret for different size");
return *(reinterpret_cast<T_output*>(&value));
}
// -----------------------------------------------------------------------
// the implementation of `genericAtomicOperation`
template <typename T, typename Op, size_t N = sizeof(T)>
struct genericAtomicOperationImpl;
// single byte atomic operation
template <typename T, typename Op>
struct genericAtomicOperationImpl<T, Op, 1> {
__forceinline__ __device__ T operator()(T* addr, T const& update_value, Op op)
{
using T_int = unsigned int;
auto* address_uint32 = reinterpret_cast<T_int*>(addr - (reinterpret_cast<size_t>(addr) & 3));
T_int shift = ((reinterpret_cast<size_t>(addr) & 3) * 8);
T_int old = *address_uint32;
T_int assumed;
do {
assumed = old;
T target_value = T((old >> shift) & 0xff);
uint8_t updating_value = type_reinterpret<uint8_t, T>(op(target_value, update_value));
T_int new_value = (old & ~(0x0000'00ff << shift)) | (T_int(updating_value) << shift);
old = atomicCAS(address_uint32, assumed, new_value);
} while (assumed != old);
return T((old >> shift) & 0xff);
}
};
// 2 bytes atomic operation
template <typename T, typename Op>
struct genericAtomicOperationImpl<T, Op, 2> {
__forceinline__ __device__ T operator()(T* addr, T const& update_value, Op op)
{
using T_int = unsigned int;
bool is_32_align = (reinterpret_cast<size_t>(addr) & 2) == 0;
auto* address_uint32 =
reinterpret_cast<T_int*>(reinterpret_cast<size_t>(addr) - (is_32_align ? 0 : 2));
T_int old = *address_uint32;
T_int assumed;
do {
assumed = old;
T const target_value = (is_32_align) ? T(old & 0xffff) : T(old >> 16);
uint16_t updating_value = type_reinterpret<uint16_t, T>(op(target_value, update_value));
T_int const new_value = (is_32_align) ? (old & 0xffff'0000) | updating_value
: (old & 0xffff) | (T_int(updating_value) << 16);
old = atomicCAS(address_uint32, assumed, new_value);
} while (assumed != old);
    return (is_32_align) ? T(old & 0xffff) : T(old >> 16);
}
};
// 4 bytes atomic operation
template <typename T, typename Op>
struct genericAtomicOperationImpl<T, Op, 4> {
__forceinline__ __device__ T operator()(T* addr, T const& update_value, Op op)
{
using T_int = unsigned int;
T old_value = *addr;
T_int assumed;
T_int ret;
do {
T_int const new_value = type_reinterpret<T_int, T>(op(old_value, update_value));
assumed = type_reinterpret<T_int, T>(old_value);
ret = atomicCAS(reinterpret_cast<T_int*>(addr), assumed, new_value);
old_value = type_reinterpret<T, T_int>(ret);
} while (assumed != ret);
return old_value;
}
};
// 8 bytes atomic operation
template <typename T, typename Op>
struct genericAtomicOperationImpl<T, Op, 8> {
__forceinline__ __device__ T operator()(T* addr, T const& update_value, Op op)
{
using T_int = unsigned long long int;
static_assert(sizeof(T) == sizeof(T_int));
T old_value = *addr;
T_int assumed;
T_int ret;
do {
T_int const new_value = type_reinterpret<T_int, T>(op(old_value, update_value));
assumed = type_reinterpret<T_int, T>(old_value);
ret = atomicCAS(reinterpret_cast<T_int*>(addr), assumed, new_value);
old_value = type_reinterpret<T, T_int>(ret);
} while (assumed != ret);
return old_value;
}
};
// -----------------------------------------------------------------------
// specialized functions for operators
// `atomicAdd` natively supports int32, float, and double (signed int64 is not supported)
// `atomicMin` and `atomicMax` natively support int32_t and int64_t
template <>
struct genericAtomicOperationImpl<float, DeviceSum, 4> {
using T = float;
__forceinline__ __device__ T operator()(T* addr, T const& update_value, DeviceSum op)
{
return atomicAdd(addr, update_value);
}
};
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)
// `atomicAdd(double)` is natively supported on CUDA architectures 6.0 and higher
template <>
struct genericAtomicOperationImpl<double, DeviceSum, 8> {
using T = double;
__forceinline__ __device__ T operator()(T* addr, T const& update_value, DeviceSum op)
{
return atomicAdd(addr, update_value);
}
};
#endif
template <>
struct genericAtomicOperationImpl<int32_t, DeviceSum, 4> {
using T = int32_t;
__forceinline__ __device__ T operator()(T* addr, T const& update_value, DeviceSum op)
{
return atomicAdd(addr, update_value);
}
};
// CUDA natively supports `unsigned long long int` for `atomicAdd`,
// but does not support `signed long long int`.
// However, since signed integers are represented in two's complement,
// addition is bit-identical for signed and unsigned operands,
// so this computes the sum as `unsigned long long int` with `atomicAdd`.
// @sa https://en.wikipedia.org/wiki/Two%27s_complement
template <>
struct genericAtomicOperationImpl<int64_t, DeviceSum, 8> {
using T = int64_t;
__forceinline__ __device__ T operator()(T* addr, T const& update_value, DeviceSum op)
{
using T_int = unsigned long long int;
static_assert(sizeof(T) == sizeof(T_int));
T ret = atomicAdd(reinterpret_cast<T_int*>(addr), type_reinterpret<T_int, T>(update_value));
return ret;
}
};
template <>
struct genericAtomicOperationImpl<int32_t, DeviceMin, 4> {
using T = int32_t;
__forceinline__ __device__ T operator()(T* addr, T const& update_value, DeviceMin op)
{
return atomicMin(addr, update_value);
}
};
template <>
struct genericAtomicOperationImpl<int32_t, DeviceMax, 4> {
using T = int32_t;
__forceinline__ __device__ T operator()(T* addr, T const& update_value, DeviceMax op)
{
return atomicMax(addr, update_value);
}
};
template <>
struct genericAtomicOperationImpl<int64_t, DeviceMin, 8> {
using T = int64_t;
__forceinline__ __device__ T operator()(T* addr, T const& update_value, DeviceMin op)
{
using T_int = long long int;
static_assert(sizeof(T) == sizeof(T_int));
T ret = atomicMin(reinterpret_cast<T_int*>(addr), type_reinterpret<T_int, T>(update_value));
return ret;
}
};
template <>
struct genericAtomicOperationImpl<int64_t, DeviceMax, 8> {
using T = int64_t;
__forceinline__ __device__ T operator()(T* addr, T const& update_value, DeviceMax op)
{
using T_int = long long int;
static_assert(sizeof(T) == sizeof(T_int));
T ret = atomicMax(reinterpret_cast<T_int*>(addr), type_reinterpret<T_int, T>(update_value));
return ret;
}
};
// -----------------------------------------------------------------------
// the implementation of `typesAtomicCASImpl`
template <typename T, size_t N = sizeof(T)>
struct typesAtomicCASImpl;
template <typename T>
struct typesAtomicCASImpl<T, 1> {
__forceinline__ __device__ T operator()(T* addr, T const& compare, T const& update_value)
{
using T_int = unsigned int;
T_int shift = ((reinterpret_cast<size_t>(addr) & 3) * 8);
auto* address_uint32 = reinterpret_cast<T_int*>(addr - (reinterpret_cast<size_t>(addr) & 3));
// the `target_value` in `old` can be different from `compare`
// because another thread may have updated the value
// before this function fetched it from `address_uint32`
T_int old = *address_uint32;
T_int assumed;
T target_value;
uint8_t u_val = type_reinterpret<uint8_t, T>(update_value);
do {
assumed = old;
target_value = T((old >> shift) & 0xff);
// compare `target_value` with `compare` before calling atomicCAS,
// since the `target_value` in `old` may differ from `compare`
if (target_value != compare) break;
T_int new_value = (old & ~(0x0000'00ff << shift)) | (T_int(u_val) << shift);
old = atomicCAS(address_uint32, assumed, new_value);
} while (assumed != old);
return target_value;
}
};
template <typename T>
struct typesAtomicCASImpl<T, 2> {
__forceinline__ __device__ T operator()(T* addr, T const& compare, T const& update_value)
{
using T_int = unsigned int;
bool is_32_align = (reinterpret_cast<size_t>(addr) & 2) == 0;
auto* address_uint32 =
reinterpret_cast<T_int*>(reinterpret_cast<size_t>(addr) - (is_32_align ? 0 : 2));
T_int old = *address_uint32;
T_int assumed;
T target_value;
uint16_t u_val = type_reinterpret<uint16_t, T>(update_value);
do {
assumed = old;
target_value = (is_32_align) ? T(old & 0xffff) : T(old >> 16);
if (target_value != compare) break;
T_int new_value =
(is_32_align) ? (old & 0xffff'0000) | u_val : (old & 0xffff) | (T_int(u_val) << 16);
old = atomicCAS(address_uint32, assumed, new_value);
} while (assumed != old);
return target_value;
}
};
template <typename T>
struct typesAtomicCASImpl<T, 4> {
__forceinline__ __device__ T operator()(T* addr, T const& compare, T const& update_value)
{
using T_int = unsigned int;
T_int ret = atomicCAS(reinterpret_cast<T_int*>(addr),
type_reinterpret<T_int, T>(compare),
type_reinterpret<T_int, T>(update_value));
return type_reinterpret<T, T_int>(ret);
}
};
// 8 bytes atomic operation
template <typename T>
struct typesAtomicCASImpl<T, 8> {
__forceinline__ __device__ T operator()(T* addr, T const& compare, T const& update_value)
{
using T_int = unsigned long long int;
static_assert(sizeof(T) == sizeof(T_int));
T_int ret = atomicCAS(reinterpret_cast<T_int*>(addr),
type_reinterpret<T_int, T>(compare),
type_reinterpret<T_int, T>(update_value));
return type_reinterpret<T, T_int>(ret);
}
};
} // namespace detail
/**
 * @brief Computes an atomic binary operation
 *
 * Reads the `old` value located at `address` in global or shared memory,
 * computes `BinaryOp(old, update_value)`, and stores the result back to
 * memory at the same address. These three steps are performed in one atomic
 * transaction.
 *
 * The supported cudf types for `genericAtomicOperation` are:
 * int8_t, int16_t, int32_t, int64_t, float, double
 *
 * @param[in] address The address of the old value in global or shared memory
 * @param[in] update_value The value used to compute the new value
 * @param[in] op The binary operator to apply
*
* @returns The old value at `address`
*/
template <typename T, typename BinaryOp>
std::enable_if_t<cudf::is_numeric<T>(), T> __forceinline__ __device__
genericAtomicOperation(T* address, T const& update_value, BinaryOp op)
{
auto fun = cudf::detail::genericAtomicOperationImpl<T, BinaryOp>{};
return T(fun(address, update_value, op));
}
// specialization for cudf::detail::timestamp types
template <typename T, typename BinaryOp>
std::enable_if_t<cudf::is_timestamp<T>(), T> __forceinline__ __device__
genericAtomicOperation(T* address, T const& update_value, BinaryOp op)
{
using R = typename T::rep;
// Unwrap the input timestamp to its underlying duration value representation.
// Use the underlying representation's type to apply operation for the cudf::detail::timestamp
auto update_value_rep = update_value.time_since_epoch().count();
auto fun = cudf::detail::genericAtomicOperationImpl<R, BinaryOp>{};
return T{T::duration(fun(reinterpret_cast<R*>(address), update_value_rep, op))};
}
// specialization for cudf::detail::duration types
template <typename T, typename BinaryOp>
std::enable_if_t<cudf::is_duration<T>(), T> __forceinline__ __device__
genericAtomicOperation(T* address, T const& update_value, BinaryOp op)
{
using R = typename T::rep;
// Unwrap the input duration to its underlying duration value representation.
// Use the underlying representation's type to apply operation for the cudf::detail::duration
auto update_value_rep = update_value.count();
auto fun = cudf::detail::genericAtomicOperationImpl<R, BinaryOp>{};
return T(fun(reinterpret_cast<R*>(address), update_value_rep, op));
}
// specialization for bool types
template <typename BinaryOp>
__forceinline__ __device__ bool genericAtomicOperation(bool* address,
bool const& update_value,
BinaryOp op)
{
using T = bool;
// don't use underlying type to apply operation for bool
auto fun = cudf::detail::genericAtomicOperationImpl<T, BinaryOp>{};
return T(fun(address, update_value, op));
}
} // namespace cudf
/**
* @brief Overloads for `atomicAdd`
* reads the `old` located at the `address` in global or shared memory,
* computes (old + val), and stores the result back to memory at the same
* address. These three operations are performed in one atomic transaction.
*
* The supported cudf types for `atomicAdd` are:
* int8_t, int16_t, int32_t, int64_t, float, double,
* cudf::timestamp_D, cudf::timestamp_s, cudf::timestamp_ms cudf::timestamp_us,
* cudf::timestamp_ns, cudf::duration_D, cudf::duration_s, cudf::duration_ms,
* cudf::duration_us, cudf::duration_ns and bool
*
 * CUDA natively supports `sint32`, `uint32`, `uint64`, `float`, and `double`
 * (`double` is supported on Pascal and later architectures).
 * Other types are implemented with `atomicCAS`.
*
* @param[in] address The address of old value in global or shared memory
* @param[in] val The value to be added
*
* @returns The old value at `address`
*/
template <typename T>
__forceinline__ __device__ T atomicAdd(T* address, T val)
{
return cudf::genericAtomicOperation(address, val, cudf::DeviceSum{});
}
/**
* @brief Overloads for `atomicMul`
* reads the `old` located at the `address` in global or shared memory,
* computes (old * val), and stores the result back to memory at the same
* address. These three operations are performed in one atomic transaction.
*
* The supported cudf types for `atomicMul` are:
* int8_t, int16_t, int32_t, int64_t, float, double, and bool
*
* All types are implemented by `atomicCAS`.
*
* @param[in] address The address of old value in global or shared memory
* @param[in] val The value to be multiplied
*
* @returns The old value at `address`
*/
template <typename T>
__forceinline__ __device__ T atomicMul(T* address, T val)
{
return cudf::genericAtomicOperation(address, val, cudf::DeviceProduct{});
}
/**
* @brief Overloads for `atomicMin`
* reads the `old` located at the `address` in global or shared memory,
* computes the minimum of old and val, and stores the result back to memory
* at the same address.
* These three operations are performed in one atomic transaction.
*
* The supported cudf types for `atomicMin` are:
* int8_t, int16_t, int32_t, int64_t, float, double,
* cudf::timestamp_D, cudf::timestamp_s, cudf::timestamp_ms, cudf::timestamp_us,
* cudf::timestamp_ns, cudf::duration_D, cudf::duration_s, cudf::duration_ms,
* cudf::duration_us, cudf::duration_ns and bool
 * CUDA natively supports `sint32`, `uint32`, `sint64`, and `uint64`.
 * Other types are implemented with `atomicCAS`.
*
* @param[in] address The address of old value in global or shared memory
* @param[in] val The value to be computed
*
* @returns The old value at `address`
*/
template <typename T>
__forceinline__ __device__ T atomicMin(T* address, T val)
{
return cudf::genericAtomicOperation(address, val, cudf::DeviceMin{});
}
/**
* @brief Overloads for `atomicMax`
* reads the `old` located at the `address` in global or shared memory,
* computes the maximum of old and val, and stores the result back to memory
* at the same address.
* These three operations are performed in one atomic transaction.
*
* The supported cudf types for `atomicMax` are:
* int8_t, int16_t, int32_t, int64_t, float, double,
* cudf::timestamp_D, cudf::timestamp_s, cudf::timestamp_ms, cudf::timestamp_us,
* cudf::timestamp_ns, cudf::duration_D, cudf::duration_s, cudf::duration_ms,
* cudf::duration_us, cudf::duration_ns and bool
 * CUDA natively supports `sint32`, `uint32`, `sint64`, and `uint64`.
 * Other types are implemented with `atomicCAS`.
*
* @param[in] address The address of old value in global or shared memory
* @param[in] val The value to be computed
*
* @returns The old value at `address`
*/
template <typename T>
__forceinline__ __device__ T atomicMax(T* address, T val)
{
return cudf::genericAtomicOperation(address, val, cudf::DeviceMax{});
}
/**
* @brief Overloads for `atomicCAS`
* reads the `old` located at the `address` in global or shared memory,
* computes (`old` == `compare` ? `val` : `old`),
* and stores the result back to memory at the same address.
* These three operations are performed in one atomic transaction.
*
* The supported cudf types for `atomicCAS` are:
* int8_t, int16_t, int32_t, int64_t, float, double,
* cudf::timestamp_D, cudf::timestamp_s, cudf::timestamp_ms, cudf::timestamp_us,
* cudf::timestamp_ns, cudf::duration_D, cudf::duration_s, cudf::duration_ms,
* cudf::duration_us, cudf::duration_ns and bool
 * CUDA natively supports `atomicCAS` for `sint32`, `uint32`, and `uint64`.
 * Other types are implemented on top of these native overloads.
*
* @param[in] address The address of old value in global or shared memory
* @param[in] compare The value to be compared
* @param[in] val The value to be computed
*
* @returns The old value at `address`
*/
template <typename T>
__forceinline__ __device__ T atomicCAS(T* address, T compare, T val)
{
return cudf::detail::typesAtomicCASImpl<T>()(address, compare, val);
}
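/*
 * Example usage (an illustrative sketch; the kernel below is hypothetical):
 *
 *   __global__ void reduce_kernel(int64_t* sum, cudf::timestamp_ms* latest,
 *                                 int64_t const* deltas, cudf::timestamp_ms const* stamps,
 *                                 cudf::size_type n)
 *   {
 *     auto const i = static_cast<cudf::size_type>(blockIdx.x * blockDim.x + threadIdx.x);
 *     if (i < n) {
 *       atomicAdd(sum, deltas[i]);     // int64_t: computed via the unsigned long long overload
 *       atomicMax(latest, stamps[i]);  // timestamp: applied to the underlying duration rep
 *     }
 *   }
 */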
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/utilities/vector_factories.hpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
/**
* @brief Convenience factories for creating device vectors from host spans
* @file vector_factories.hpp
*/
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <thrust/host_vector.h>
#include <vector>
namespace cudf {
namespace detail {
/**
* @brief Asynchronously construct a `device_uvector` and set all elements to zero.
*
* @note This function does not synchronize `stream`.
*
* @tparam T The type of the data to copy
* @param size The number of elements in the created vector
* @param stream The stream on which to allocate memory and perform the memset
* @param mr The memory resource to use for allocating the returned device_uvector
* @return A device_uvector containing zeros
*/
template <typename T>
rmm::device_uvector<T> make_zeroed_device_uvector_async(std::size_t size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
rmm::device_uvector<T> ret(size, stream, mr);
CUDF_CUDA_TRY(cudaMemsetAsync(ret.data(), 0, size * sizeof(T), stream.value()));
return ret;
}
/**
* @brief Synchronously construct a `device_uvector` and set all elements to zero.
*
* @note This function synchronizes `stream`.
*
* @tparam T The type of the data to copy
* @param size The number of elements in the created vector
* @param stream The stream on which to allocate memory and perform the memset
* @param mr The memory resource to use for allocating the returned device_uvector
* @return A device_uvector containing zeros
*/
template <typename T>
rmm::device_uvector<T> make_zeroed_device_uvector_sync(std::size_t size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
rmm::device_uvector<T> ret(size, stream, mr);
CUDF_CUDA_TRY(cudaMemsetAsync(ret.data(), 0, size * sizeof(T), stream.value()));
stream.synchronize();
return ret;
}
/**
* @brief Asynchronously construct a `device_uvector` containing a deep copy of data from a
* `host_span`
*
* @note This function does not synchronize `stream`.
*
* @tparam T The type of the data to copy
* @param source_data The host_span of data to deep copy
* @param stream The stream on which to allocate memory and perform the copy
* @param mr The memory resource to use for allocating the returned device_uvector
* @return A device_uvector containing the copied data
*/
template <typename T>
rmm::device_uvector<T> make_device_uvector_async(host_span<T const> source_data,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
rmm::device_uvector<T> ret(source_data.size(), stream, mr);
CUDF_CUDA_TRY(cudaMemcpyAsync(ret.data(),
source_data.data(),
source_data.size() * sizeof(T),
cudaMemcpyDefault,
stream.value()));
return ret;
}
/**
* @brief Asynchronously construct a `device_uvector` containing a deep copy of data from a host
* container
*
* @note This function does not synchronize `stream`.
*
* @tparam Container The type of the container to copy from
* @tparam T The type of the data to copy
* @param c The input host container from which to copy
* @param stream The stream on which to allocate memory and perform the copy
* @param mr The memory resource to use for allocating the returned device_uvector
* @return A device_uvector containing the copied data
*/
template <
typename Container,
std::enable_if_t<
std::is_convertible_v<Container, host_span<typename Container::value_type const>>>* = nullptr>
rmm::device_uvector<typename Container::value_type> make_device_uvector_async(
Container const& c, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
{
return make_device_uvector_async(host_span<typename Container::value_type const>{c}, stream, mr);
}
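/*
 * Example usage (an illustrative sketch):
 *
 *   std::vector<int32_t> host_values{1, 2, 3, 4};
 *   auto stream = cudf::get_default_stream();
 *   auto* mr    = rmm::mr::get_current_device_resource();
 *
 *   // The copy is only enqueued on `stream`; `host_values` must remain alive (and unmodified)
 *   // until the copy completes, e.g. until `stream` is synchronized.
 *   auto d_values = cudf::detail::make_device_uvector_async(host_values, stream, mr);
 */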
/**
* @brief Asynchronously construct a `device_uvector` containing a deep copy of data from a
* `device_span`
*
* @note This function does not synchronize `stream`.
*
* @tparam T The type of the data to copy
* @param source_data The device_span of data to deep copy
* @param stream The stream on which to allocate memory and perform the copy
* @param mr The memory resource to use for allocating the returned device_uvector
* @return A device_uvector containing the copied data
*/
template <typename T>
rmm::device_uvector<T> make_device_uvector_async(device_span<T const> source_data,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
rmm::device_uvector<T> ret(source_data.size(), stream, mr);
CUDF_CUDA_TRY(cudaMemcpyAsync(ret.data(),
source_data.data(),
source_data.size() * sizeof(T),
cudaMemcpyDefault,
stream.value()));
return ret;
}
/**
* @brief Asynchronously construct a `device_uvector` containing a deep copy of data from a device
* container
*
* @note This function does not synchronize `stream`.
*
* @tparam Container The type of the container to copy from
* @tparam T The type of the data to copy
* @param c The input device container from which to copy
* @param stream The stream on which to allocate memory and perform the copy
* @param mr The memory resource to use for allocating the returned device_uvector
* @return A device_uvector containing the copied data
*/
template <
typename Container,
std::enable_if_t<
std::is_convertible_v<Container, device_span<typename Container::value_type const>>>* = nullptr>
rmm::device_uvector<typename Container::value_type> make_device_uvector_async(
Container const& c, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
{
return make_device_uvector_async(
device_span<typename Container::value_type const>{c}, stream, mr);
}
/**
* @brief Synchronously construct a `device_uvector` containing a deep copy of data from a
* `host_span`
*
* @note This function synchronizes `stream`.
*
* @tparam T The type of the data to copy
* @param source_data The host_span of data to deep copy
* @param stream The stream on which to allocate memory and perform the copy
* @param mr The memory resource to use for allocating the returned device_uvector
* @return A device_uvector containing the copied data
*/
template <typename T>
rmm::device_uvector<T> make_device_uvector_sync(host_span<T const> source_data,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto ret = make_device_uvector_async(source_data, stream, mr);
stream.synchronize();
return ret;
}
/**
* @brief Synchronously construct a `device_uvector` containing a deep copy of data from a host
* container
*
* @note This function synchronizes `stream`.
*
* @tparam Container The type of the container to copy from
* @tparam T The type of the data to copy
* @param c The input host container from which to copy
* @param stream The stream on which to allocate memory and perform the copy
* @param mr The memory resource to use for allocating the returned device_uvector
* @return A device_uvector containing the copied data
*/
template <
typename Container,
std::enable_if_t<
std::is_convertible_v<Container, host_span<typename Container::value_type const>>>* = nullptr>
rmm::device_uvector<typename Container::value_type> make_device_uvector_sync(
Container const& c, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
{
return make_device_uvector_sync(host_span<typename Container::value_type const>{c}, stream, mr);
}
/**
* @brief Synchronously construct a `device_uvector` containing a deep copy of data from a
* `device_span`
*
* @note This function synchronizes `stream`.
*
* @tparam T The type of the data to copy
* @param source_data The device_span of data to deep copy
* @param stream The stream on which to allocate memory and perform the copy
* @param mr The memory resource to use for allocating the returned device_uvector
* @return A device_uvector containing the copied data
*/
template <typename T>
rmm::device_uvector<T> make_device_uvector_sync(device_span<T const> source_data,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto ret = make_device_uvector_async(source_data, stream, mr);
stream.synchronize();
return ret;
}
/**
* @brief Synchronously construct a `device_uvector` containing a deep copy of data from a device
* container
*
* @note This function synchronizes `stream`.
*
* @tparam Container The type of the container to copy from
* @tparam T The type of the data to copy
* @param c The input device container from which to copy
* @param stream The stream on which to allocate memory and perform the copy
* @param mr The memory resource to use for allocating the returned device_uvector
* @return A device_uvector containing the copied data
*/
template <
typename Container,
std::enable_if_t<
std::is_convertible_v<Container, device_span<typename Container::value_type const>>>* = nullptr>
rmm::device_uvector<typename Container::value_type> make_device_uvector_sync(
Container const& c, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
{
return make_device_uvector_sync(device_span<typename Container::value_type const>{c}, stream, mr);
}
// Utility function template to allow copying to either a thrust::host_vector or std::vector
template <typename T, typename OutContainer>
OutContainer make_vector_async(device_span<T const> v, rmm::cuda_stream_view stream)
{
OutContainer result(v.size());
CUDF_CUDA_TRY(cudaMemcpyAsync(
result.data(), v.data(), v.size() * sizeof(T), cudaMemcpyDefault, stream.value()));
return result;
}
/**
* @brief Asynchronously construct a `std::vector` containing a copy of data from a
* `device_span`
*
* @note This function does not synchronize `stream`.
*
* @tparam T The type of the data to copy
* @param source_data The device data to copy
* @param stream The stream on which to perform the copy
* @return The data copied to the host
*/
template <typename T>
std::vector<T> make_std_vector_async(device_span<T const> v, rmm::cuda_stream_view stream)
{
return make_vector_async<T, std::vector<T>>(v, stream);
}
/**
* @brief Asynchronously construct a `std::vector` containing a copy of data from a device
* container
*
 * @note This function does not synchronize `stream`.
*
* @tparam Container The type of the container to copy from
* @tparam T The type of the data to copy
* @param c The input device container from which to copy
* @param stream The stream on which to perform the copy
* @return The data copied to the host
*/
template <
typename Container,
std::enable_if_t<
std::is_convertible_v<Container, device_span<typename Container::value_type const>>>* = nullptr>
std::vector<typename Container::value_type> make_std_vector_async(Container const& c,
rmm::cuda_stream_view stream)
{
return make_std_vector_async(device_span<typename Container::value_type const>{c}, stream);
}
/**
* @brief Synchronously construct a `std::vector` containing a copy of data from a
* `device_span`
*
 * @note This function synchronizes `stream`.
*
* @tparam T The type of the data to copy
* @param source_data The device data to copy
* @param stream The stream on which to perform the copy
* @return The data copied to the host
*/
template <typename T>
std::vector<T> make_std_vector_sync(device_span<T const> v, rmm::cuda_stream_view stream)
{
auto result = make_std_vector_async(v, stream);
stream.synchronize();
return result;
}
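/*
 * Example usage (an illustrative sketch; `d_data` and `stream` are assumed to be provided by
 * the caller):
 *
 *   // Copy a device_uvector back to the host and block until the copy has completed.
 *   rmm::device_uvector<float> const& d_data = ...;
 *   std::vector<float> h_data = cudf::detail::make_std_vector_sync(d_data, stream);
 */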
/**
* @brief Synchronously construct a `std::vector` containing a copy of data from a device
* container
*
* @note This function synchronizes `stream`.
*
* @tparam Container The type of the container to copy from
* @tparam T The type of the data to copy
* @param c The input device container from which to copy
* @param stream The stream on which to perform the copy
* @return The data copied to the host
*/
template <
typename Container,
std::enable_if_t<
std::is_convertible_v<Container, device_span<typename Container::value_type const>>>* = nullptr>
std::vector<typename Container::value_type> make_std_vector_sync(Container const& c,
rmm::cuda_stream_view stream)
{
return make_std_vector_sync(device_span<typename Container::value_type const>{c}, stream);
}
/**
* @brief Asynchronously construct a `thrust::host_vector` containing a copy of data from a
* `device_span`
*
* @note This function does not synchronize `stream`.
*
* @tparam T The type of the data to copy
* @param source_data The device data to copy
* @param stream The stream on which to perform the copy
* @return The data copied to the host
*/
template <typename T>
thrust::host_vector<T> make_host_vector_async(device_span<T const> v, rmm::cuda_stream_view stream)
{
return make_vector_async<T, thrust::host_vector<T>>(v, stream);
}
/**
 * @brief Asynchronously construct a `thrust::host_vector` containing a copy of data from a device
 * container
 *
 * @note This function does not synchronize `stream`.
*
* @tparam Container The type of the container to copy from
* @tparam T The type of the data to copy
* @param c The input device container from which to copy
* @param stream The stream on which to perform the copy
* @return The data copied to the host
*/
template <
typename Container,
std::enable_if_t<
std::is_convertible_v<Container, device_span<typename Container::value_type const>>>* = nullptr>
thrust::host_vector<typename Container::value_type> make_host_vector_async(
Container const& c, rmm::cuda_stream_view stream)
{
return make_host_vector_async(device_span<typename Container::value_type const>{c}, stream);
}
/**
* @brief Synchronously construct a `thrust::host_vector` containing a copy of data from a
* `device_span`
*
 * @note This function synchronizes `stream`.
*
* @tparam T The type of the data to copy
* @param source_data The device data to copy
* @param stream The stream on which to perform the copy
* @return The data copied to the host
*/
template <typename T>
thrust::host_vector<T> make_host_vector_sync(device_span<T const> v, rmm::cuda_stream_view stream)
{
auto result = make_host_vector_async(v, stream);
stream.synchronize();
return result;
}
/**
* @brief Synchronously construct a `thrust::host_vector` containing a copy of data from a device
* container
*
* @note This function synchronizes `stream`.
*
* @tparam Container The type of the container to copy from
* @tparam T The type of the data to copy
* @param c The input device container from which to copy
* @param stream The stream on which to perform the copy
* @return The data copied to the host
*/
template <
typename Container,
std::enable_if_t<
std::is_convertible_v<Container, device_span<typename Container::value_type const>>>* = nullptr>
thrust::host_vector<typename Container::value_type> make_host_vector_sync(
Container const& c, rmm::cuda_stream_view stream)
{
return make_host_vector_sync(device_span<typename Container::value_type const>{c}, stream);
}
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/utilities/algorithm.cuh
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/copy.h>
namespace cudf::detail {
template <typename Iterator, typename T, typename BinaryOp>
__device__ __forceinline__ T accumulate(Iterator first, Iterator last, T init, BinaryOp op)
{
for (; first != last; ++first) {
init = op(std::move(init), *first);
}
return init;
}
/**
* @copydoc cudf::detail::copy_if_safe(rmm::exec_policy, InputIterator, InputIterator,
* OutputIterator, Predicate, rmm::cuda_stream_view)
*
* @tparam StencilIterator Type of the stencil iterator
* @param stencil The beginning of the stencil sequence
*/
template <typename InputIterator,
typename StencilIterator,
typename OutputIterator,
typename Predicate>
OutputIterator copy_if_safe(InputIterator first,
InputIterator last,
StencilIterator stencil,
OutputIterator result,
Predicate pred,
rmm::cuda_stream_view stream)
{
auto const copy_size = std::min(static_cast<std::size_t>(std::distance(first, last)),
static_cast<std::size_t>(std::numeric_limits<int>::max()));
auto itr = first;
while (itr != last) {
auto const copy_end =
static_cast<std::size_t>(std::distance(itr, last)) <= copy_size ? last : itr + copy_size;
result = thrust::copy_if(rmm::exec_policy(stream), itr, copy_end, stencil, result, pred);
stencil += std::distance(itr, copy_end);
itr = copy_end;
}
return result;
}
/**
* @brief Utility for calling `thrust::copy_if`.
*
 * This is a proxy for `thrust::copy_if` that works around its bug
 * (https://github.com/NVIDIA/thrust/issues/1302) where it cannot iterate over ranges with more
 * than int-max elements (`distance(first, last) > int-max`). It calls `thrust::copy_if` on
 * chunks of at most int-max elements instead.
*
* @tparam InputIterator Type of the input iterator
* @tparam OutputIterator Type of the output iterator
* @tparam Predicate Type of the binary predicate used to determine elements to copy
*
* @param first The beginning of the sequence from which to copy
* @param last The end of the sequence from which to copy
* @param result The beginning of the sequence into which to copy
* @param pred The predicate to test on every value of the range `[first, last)`
* @param stream CUDA stream used for device memory operations and kernel launches
* @return An iterator pointing to the position `result + n`, where `n` is equal to the number of
* times `pred` evaluated to `true` in the range `[first, last)`.
*/
template <typename InputIterator, typename OutputIterator, typename Predicate>
OutputIterator copy_if_safe(InputIterator first,
InputIterator last,
OutputIterator result,
Predicate pred,
rmm::cuda_stream_view stream)
{
return copy_if_safe(first, last, first, result, pred, stream);
}
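/*
 * Example usage (an illustrative sketch; `d_values`, `num_values`, and `stream` are assumed to
 * be provided by the caller):
 *
 *   rmm::device_uvector<float> out(num_values, stream);
 *   auto out_end = cudf::detail::copy_if_safe(
 *     d_values, d_values + num_values, out.begin(),
 *     [] __device__(float v) { return v > 0.0f; }, stream);
 *   out.resize(thrust::distance(out.begin(), out_end), stream);
 */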
} // namespace cudf::detail
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/utilities/assert.cuh
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda_runtime.h>
/**
* @brief `assert`-like macro for device code
*
* This is effectively the same as the standard `assert` macro, except it
* relies on the `__PRETTY_FUNCTION__` macro which is specific to GCC and Clang
* to produce better assert messages.
*/
#if !defined(NDEBUG) && defined(__CUDA_ARCH__) && (defined(__clang__) || defined(__GNUC__))
#define __ASSERT_STR_HELPER(x) #x
#define cudf_assert(e) \
((e) ? static_cast<void>(0) \
: __assert_fail(__ASSERT_STR_HELPER(e), __FILE__, __LINE__, __PRETTY_FUNCTION__))
#else
#define cudf_assert(e) (static_cast<void>(0))
#endif
/**
* @brief Macro indicating that a location in the code is unreachable.
*
* The CUDF_UNREACHABLE macro should only be used where CUDF_FAIL cannot be used
* due to performance or due to being used in device code. In the majority of
* host code situations, an exception should be thrown in "unreachable" code
* paths as those usually aren't tight inner loops like they are in device code.
*
* One example where this macro may be used is in conjunction with dispatchers
* to indicate that a function does not need to return a default value because
* it has already exhausted all possible cases in a `switch` statement.
*
* The assert in this macro can be used when compiling in debug mode to help
* debug functions that may reach the supposedly unreachable code.
*
* Example usage:
* ```
* CUDF_UNREACHABLE("Invalid type_id.");
* ```
*/
#define CUDF_UNREACHABLE(msg) \
do { \
assert(false && "Unreachable: " msg); \
__builtin_unreachable(); \
} while (0)
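/*
 * Example usage (an illustrative sketch; `size_of_id` is a hypothetical helper):
 *
 *   __device__ int size_of_id(cudf::type_id id)
 *   {
 *     switch (id) {
 *       case cudf::type_id::INT32: return 4;
 *       case cudf::type_id::INT64: return 8;
 *       default: CUDF_UNREACHABLE("Unsupported type_id.");
 *     }
 *   }
 */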
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/utilities/alignment.hpp
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <memory>
namespace cudf {
namespace detail {
/**
 * @brief Returns the aligned address for holding an array of type T in pre-allocated memory.
*
* @tparam T The data type to align upon.
*
* @param destination pointer to pre-allocated contiguous storage to store type T.
* @return Pointer of type T, aligned to alignment of type T.
*/
template <typename T>
T* align_ptr_for_type(void* destination)
{
constexpr std::size_t bytes_needed{sizeof(T)};
constexpr std::size_t alignment{alignof(T)};
// pad the allocation for aligning the first pointer
auto padded_bytes_needed = bytes_needed + (alignment - 1);
// std::align captures last argument by reference and modifies it, but we don't want it modified
return reinterpret_cast<T*>(
std::align(alignment, bytes_needed, destination, padded_bytes_needed));
}
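/*
 * Example usage (an illustrative sketch; the buffer below is a hypothetical allocation that is
 * large enough to hold a double after padding for alignment):
 *
 *   std::vector<std::byte> buffer(sizeof(double) + alignof(double) - 1);
 *   void* storage   = buffer.data();
 *   double* aligned = cudf::detail::align_ptr_for_type<double>(storage);
 */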
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/detail
|
rapidsai_public_repos/cudf/cpp/include/cudf/detail/utilities/visitor_overload.hpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
namespace cudf::detail {
/**
* @brief Helper class to support inline-overloading for all of a variant's alternative types
*/
template <class... Ts>
struct visitor_overload : Ts... {
using Ts::operator()...;
};
template <class... Ts>
visitor_overload(Ts...) -> visitor_overload<Ts...>;
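/*
 * Example usage (an illustrative sketch):
 *
 *   std::variant<int32_t, std::string> value = 42;
 *   auto name = std::visit(cudf::detail::visitor_overload{
 *                            [](int32_t) { return std::string{"int32"}; },
 *                            [](std::string const&) { return std::string{"string"}; }},
 *                          value);
 */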
} // namespace cudf::detail
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/scalar/scalar_factories.hpp
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/scalar/scalar.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
namespace cudf {
/**
* @addtogroup scalar_factories
* @{
* @file
* @brief Scalar factory APIs
*/
/**
* @brief Construct scalar with uninitialized storage to hold a value of the
* specified numeric `data_type`.
*
* @throws std::bad_alloc if device memory allocation fails
* @throws cudf::logic_error if `type` is not a numeric type
*
* @param type The desired numeric element type
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource used to allocate the scalar's `data` and `is_valid` bool.
* @returns An uninitialized numeric scalar
*/
std::unique_ptr<scalar> make_numeric_scalar(
data_type type,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct scalar with uninitialized storage to hold a value of the
* specified timestamp `data_type`.
*
* @throws std::bad_alloc if device memory allocation fails
* @throws cudf::logic_error if `type` is not a timestamp type
*
* @param type The desired timestamp element type
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource used to allocate the scalar's `data` and `is_valid` bool.
* @return An uninitialized timestamp scalar
*/
std::unique_ptr<scalar> make_timestamp_scalar(
data_type type,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct scalar with uninitialized storage to hold a value of the
* specified duration `data_type`.
*
* @throws std::bad_alloc if device memory allocation fails
* @throws cudf::logic_error if `type` is not a duration type
*
* @param type The desired duration element type
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource used to allocate the scalar's `data` and `is_valid` bool.
* @return An uninitialized duration scalar
*/
std::unique_ptr<scalar> make_duration_scalar(
data_type type,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct scalar with uninitialized storage to hold a value of the
* specified fixed-width `data_type`.
*
* @throws std::bad_alloc if device memory allocation fails
* @throws cudf::logic_error if `type` is not a fixed-width type
*
* @param type The desired fixed-width element type
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource used to allocate the scalar's `data` and `is_valid` bool.
* @return An uninitialized fixed-width scalar
*/
std::unique_ptr<scalar> make_fixed_width_scalar(
data_type type,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct STRING type scalar given a `std::string`.
 * The size of the `std::string` must not exceed the maximum value of size_type.
 * The string characters are expected to be a UTF-8 encoded sequence of char bytes.
*
* @throws std::bad_alloc if device memory allocation fails
*
* @param string The `std::string` to copy to device
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource used to allocate the scalar's `data` and `is_valid` bool.
* @returns A string scalar with the contents of `string`
*/
std::unique_ptr<scalar> make_string_scalar(
std::string const& string,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Constructs default constructed scalar of type `type`
*
* @throws std::bad_alloc if device memory allocation fails
*
* @param type The desired element type
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource used to allocate the scalar's `data` and `is_valid` bool.
* @returns A scalar of type `type`
*/
std::unique_ptr<scalar> make_default_constructed_scalar(
data_type type,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Creates an empty (invalid) scalar of the same type as the `input` column_view.
*
* @throw cudf::logic_error if the `input` column is struct type and empty
*
* @param input Immutable view of input column to emulate
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource used to allocate the scalar's `data` and `is_valid` bool.
* @returns A scalar of type of `input` column
*/
std::unique_ptr<scalar> make_empty_scalar_like(
column_view const& input,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct scalar using the given value of fixed width type
*
* @tparam T Datatype of the value to be represented by the scalar
* @param value The value to store in the scalar object
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource used to allocate the scalar's `data` and `is_valid` bool.
* @returns A scalar of type `T`
*/
template <typename T>
std::unique_ptr<scalar> make_fixed_width_scalar(
T value,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
return std::make_unique<scalar_type_t<T>>(value, true, stream, mr);
}
/**
* @brief Construct scalar using the given value of fixed_point type
*
* @tparam T Datatype of the value to be represented by the scalar
* @param value The value to store in the scalar object
* @param scale The scale of the fixed point value
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource used to allocate the scalar's `data` and `is_valid` bool.
* @returns A scalar of type `T`
*/
template <typename T>
std::unique_ptr<scalar> make_fixed_point_scalar(
typename T::rep value,
numeric::scale_type scale,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
return std::make_unique<scalar_type_t<T>>(value, scale, true, stream, mr);
}
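/*
 * Example usage (an illustrative sketch using the default stream and memory resource):
 *
 *   // A valid INT32 scalar holding the value 42.
 *   auto int_scalar = cudf::make_fixed_width_scalar<int32_t>(42);
 *
 *   // A decimal64 scalar representing 1.23, i.e. the value 123 at scale -2.
 *   auto dec_scalar =
 *     cudf::make_fixed_point_scalar<numeric::decimal64>(123, numeric::scale_type{-2});
 */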
/**
* @brief Construct scalar using the given column of elements
*
* @param elements Elements of the list
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource used to allocate the scalar's `data` and `is_valid` bool.
* @returns A list scalar
*/
std::unique_ptr<scalar> make_list_scalar(
column_view elements,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a struct scalar using the given table_view.
*
* The columns must have 1 row.
*
* @param data The columnar data to store in the scalar object
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource used to allocate the scalar's `data` and `is_valid` bool.
* @returns A struct scalar
*/
std::unique_ptr<scalar> make_struct_scalar(
table_view const& data,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a struct scalar using the given span of column views.
*
* The columns must have 1 row.
*
* @param data The columnar data to store in the scalar object
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource used to allocate the scalar's `data` and `is_valid` bool.
* @returns A struct scalar
*/
std::unique_ptr<scalar> make_struct_scalar(
host_span<column_view const> data,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/scalar/scalar_device_view.cuh
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/string_view.hpp>
#include <cudf/types.hpp>
/**
* @file scalar_device_view.cuh
* @brief Scalar device view class definitions
*/
namespace cudf {
namespace detail {
/**
 * @brief A non-owning view of a scalar that is trivially copyable
 * and usable in CUDA device code.
*/
class scalar_device_view_base {
public:
~scalar_device_view_base() = default;
/**
* @brief Returns the value type
*
* @returns The value type
*/
[[nodiscard]] __host__ __device__ data_type type() const noexcept { return _type; }
/**
* @brief Returns whether the scalar holds a valid value (i.e., not null).
*
* @return true The element is valid
* @return false The element is null
*/
[[nodiscard]] __device__ bool is_valid() const noexcept { return *_is_valid; }
/**
* @brief Updates the validity of the value
*
* @param is_valid true: set the value to valid. false: set it to null
*/
__device__ void set_valid(bool is_valid) noexcept { *_is_valid = is_valid; }
protected:
data_type _type{type_id::EMPTY}; ///< Value data type
bool* _is_valid{}; ///< Pointer to device memory containing
///< boolean representing validity of the value.
/**
* @brief Construct a new scalar device view base object from a device pointer
* and a validity boolean.
*
* @param type The data type of the scalar
* @param is_valid Pointer to device memory containing boolean representing
* validity of the scalar.
*/
scalar_device_view_base(data_type type, bool* is_valid) : _type(type), _is_valid(is_valid) {}
scalar_device_view_base() = default;
};
/**
* @brief A type-erased scalar_device_view where the value is a fixed width type
*/
class fixed_width_scalar_device_view_base : public detail::scalar_device_view_base {
public:
/**
* @brief Returns reference to stored value.
*
* @tparam T The desired type
* @returns Reference to stored value
*/
template <typename T>
__device__ T& value() noexcept
{
return *data<T>();
}
/**
* @brief Returns const reference to stored value.
*
* @tparam T The desired type
* @returns Const reference to stored value
*/
template <typename T>
__device__ T const& value() const noexcept
{
return *data<T>();
}
/**
* @brief Stores the value in scalar
*
* @tparam T The desired type
* @param value The value to store in scalar
*/
template <typename T>
__device__ void set_value(T value)
{
*static_cast<T*>(_data) = value;
}
/**
* @brief Returns a raw pointer to the value in device memory
*
* @tparam T The desired type
* @returns Raw pointer to the value in device memory
*/
template <typename T>
__device__ T* data() noexcept
{
return static_cast<T*>(_data);
}
/**
* @brief Returns a const raw pointer to the value in device memory
*
* @tparam T The desired type
* @returns Const raw pointer to the value in device memory
*/
template <typename T>
__device__ T const* data() const noexcept
{
return static_cast<T const*>(_data);
}
protected:
void* _data{}; ///< Pointer to device memory containing the value
/**
* @brief Construct a new fixed width scalar device view object
*
* This constructor should not be used directly. get_scalar_device_view
* should be used to get the view of an existing scalar
*
* @param type The data type of the value
* @param data The pointer to the data in device memory
* @param is_valid The pointer to the bool in device memory that indicates the
* validity of the stored value
*/
fixed_width_scalar_device_view_base(data_type type, void* data, bool* is_valid)
: detail::scalar_device_view_base(type, is_valid), _data(data)
{
}
};
/**
* @brief A type of scalar_device_view where the value is a fixed width type
*/
template <typename T>
class fixed_width_scalar_device_view : public detail::fixed_width_scalar_device_view_base {
public:
using value_type = T; ///< The value type of the scalar
/**
* @brief Returns reference to stored value.
*
* @returns Reference to stored value
*/
__device__ T& value() noexcept { return fixed_width_scalar_device_view_base::value<T>(); }
/**
* @brief Returns const reference to stored value.
*
* @returns Const reference to stored value
*/
__device__ T const& value() const noexcept
{
return fixed_width_scalar_device_view_base::value<T>();
}
/**
* @brief Stores the value in scalar
*
* @param value The value to store in scalar
*/
__device__ void set_value(T value) { fixed_width_scalar_device_view_base::set_value<T>(value); }
/**
* @brief Returns a raw pointer to the value in device memory
*
* @returns Raw pointer to the value in device memory
*/
__device__ T* data() noexcept { return fixed_width_scalar_device_view_base::data<T>(); }
/**
* @brief Returns a const raw pointer to the value in device memory
*
* @returns Const raw pointer to the value in device memory
*/
__device__ T const* data() const noexcept
{
return fixed_width_scalar_device_view_base::data<T>();
}
protected:
/**
* @brief Construct a new fixed width scalar device view object
*
* This constructor should not be used directly. get_scalar_device_view
* should be used to get the view of an existing scalar
*
* @param type The data type of the value
* @param data The pointer to the data in device memory
* @param is_valid The pointer to the bool in device memory that indicates the
* validity of the stored value
*/
fixed_width_scalar_device_view(data_type type, T* data, bool* is_valid)
: detail::fixed_width_scalar_device_view_base(type, data, is_valid)
{
}
};
} // namespace detail
/**
* @brief A type of scalar_device_view that stores a pointer to a numerical value
*/
template <typename T>
class numeric_scalar_device_view : public detail::fixed_width_scalar_device_view<T> {
public:
/**
* @brief Construct a new numeric scalar device view object from data and validity pointers.
*
* @param type The data type of the value
* @param data The pointer to the data in device memory
* @param is_valid The pointer to the bool in device memory that indicates the
* validity of the stored value
*/
numeric_scalar_device_view(data_type type, T* data, bool* is_valid)
: detail::fixed_width_scalar_device_view<T>(type, data, is_valid)
{
}
};
/**
* @brief A type of scalar_device_view that stores a pointer to a fixed_point value
*/
template <typename T>
class fixed_point_scalar_device_view : public detail::scalar_device_view_base {
public:
using rep_type = typename T::rep; ///< The representation type of the fixed_point value
/**
* @brief Construct a new fixed point scalar device view object from data and validity pointers.
*
* @param type The data type of the value
* @param data The pointer to the data in device memory
* @param is_valid The pointer to the bool in device memory that indicates the
* validity of the stored value
*/
fixed_point_scalar_device_view(data_type type, rep_type* data, bool* is_valid)
: detail::scalar_device_view_base(type, is_valid), _data(data)
{
}
/**
* @brief Stores the value in scalar
*
* @param value The value to store in scalar
*/
__device__ void set_value(rep_type value) { *_data = value; }
/**
* @brief Get the value of the scalar, as a `rep_type`.
*
* @returns The value of the scalar, as a `rep_type`
*/
__device__ rep_type const& rep() const noexcept { return *_data; }
private:
rep_type* _data{};
};
/**
* @brief A type of scalar_device_view that stores a pointer to a string value
*/
class string_scalar_device_view : public detail::scalar_device_view_base {
public:
using ValueType = cudf::string_view; ///< The value type of the string scalar
/**
* @brief Construct a new string scalar device view object from string data, size and validity
* pointers.
*
* @param type The data type of the value
* @param data The pointer to the string data in device memory
* @param is_valid The pointer to the bool in device memory that indicates the
* validity of the stored value
* @param size The pointer to the size of the string in device memory
*/
string_scalar_device_view(data_type type, char const* data, bool* is_valid, size_type size)
: detail::scalar_device_view_base(type, is_valid), _data(data), _size(size)
{
}
/**
* @brief Returns string_view of the value of this scalar.
*
* @returns string_view of the value of this scalar
*/
[[nodiscard]] __device__ ValueType value() const noexcept
{
return ValueType{this->data(), _size};
}
/**
* @brief Returns a raw pointer to the value in device memory
*
* @returns Raw pointer to the value in device memory
*/
[[nodiscard]] __device__ char const* data() const noexcept
{
return static_cast<char const*>(_data);
}
/**
* @brief Returns the size of the string in bytes.
*
* @returns The size of the string in bytes
*/
[[nodiscard]] __device__ size_type size() const noexcept { return _size; }
private:
char const* _data{}; ///< Pointer to device memory containing the value
size_type _size; ///< Size of the string in bytes
};
/**
* @brief A type of scalar_device_view that stores a pointer to a timestamp value
*/
template <typename T>
class timestamp_scalar_device_view : public detail::fixed_width_scalar_device_view<T> {
public:
/**
* @brief Construct a new timestamp scalar device view object
*
* @param type The data type of the value
* @param data The pointer to the data in device memory
* @param is_valid The pointer to the bool in device memory that indicates the
* validity of the stored value
*/
timestamp_scalar_device_view(data_type type, T* data, bool* is_valid)
: detail::fixed_width_scalar_device_view<T>(type, data, is_valid)
{
}
};
/**
* @brief A type of scalar_device_view that stores a pointer to a duration value
*/
template <typename T>
class duration_scalar_device_view : public detail::fixed_width_scalar_device_view<T> {
public:
/**
* @brief Construct a new duration scalar device view object from data and validity pointers.
*
* @param type The data type of the value
* @param data The pointer to the data in device memory
* @param is_valid The pointer to the bool in device memory that indicates the
* validity of the stored value
*/
duration_scalar_device_view(data_type type, T* data, bool* is_valid)
: detail::fixed_width_scalar_device_view<T>(type, data, is_valid)
{
}
};
/**
* @brief Get the device view of a numeric_scalar
*
* @param s The numeric_scalar to get the device view of
* @return A device view of a numeric_scalar
*/
template <typename T>
auto get_scalar_device_view(numeric_scalar<T>& s)
{
return numeric_scalar_device_view<T>(s.type(), s.data(), s.validity_data());
}
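/*
 * Illustrative sketch (editorial addition, not part of the original header): obtaining a
 * device view of a numeric_scalar on the host and reading it inside a kernel. The kernel
 * and helper function names, the launch configuration and the values are assumptions made
 * for this example only.
 *
 * ```cpp
 * #include <cudf/scalar/scalar.hpp>
 * #include <cudf/scalar/scalar_device_view.cuh>
 *
 * __global__ void add_one_kernel(cudf::numeric_scalar_device_view<int32_t> v, int32_t* out)
 * {
 *   *out = v.value() + 1;  // value() dereferences the device pointer held by the view
 * }
 *
 * void example()
 * {
 *   cudf::numeric_scalar<int32_t> s{42};            // owning scalar; value lives in device memory
 *   auto d_view = cudf::get_scalar_device_view(s);  // non-owning view, passed to the kernel by value
 *   int32_t* d_out{};
 *   cudaMalloc(&d_out, sizeof(int32_t));
 *   add_one_kernel<<<1, 1>>>(d_view, d_out);
 *   cudaDeviceSynchronize();
 *   cudaFree(d_out);
 * }
 * ```
 */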
/**
* @brief Get the device view of a string_scalar
*
* @param s The string_scalar to get the device view of
* @return A device view of a string_scalar
*/
inline auto get_scalar_device_view(string_scalar& s)
{
return string_scalar_device_view(s.type(), s.data(), s.validity_data(), s.size());
}
/**
* @brief Get the device view of a timestamp_scalar
*
* @param s The timestamp_scalar to get the device view of
* @return A device view of a timestamp_scalar
*/
template <typename T>
auto get_scalar_device_view(timestamp_scalar<T>& s)
{
return timestamp_scalar_device_view<T>(s.type(), s.data(), s.validity_data());
}
/**
* @brief Get the device view of a duration_scalar
*
* @param s The duration_scalar to get the device view of
* @return A device view of a duration_scalar
*/
template <typename T>
auto get_scalar_device_view(duration_scalar<T>& s)
{
return duration_scalar_device_view<T>(s.type(), s.data(), s.validity_data());
}
/**
* @brief Get the device view of a fixed_point_scalar
*
* @param s The fixed_point_scalar to get the device view of
* @return The device view of the fixed_point_scalar
*/
template <typename T>
auto get_scalar_device_view(fixed_point_scalar<T>& s)
{
return fixed_point_scalar_device_view<T>(s.type(), s.data(), s.validity_data());
}
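/*
 * Illustrative sketch (editorial addition, not part of the original header): a
 * fixed_point_scalar_device_view exposes the already-shifted representation through rep()
 * and set_value() in device code. numeric::decimal32 and numeric::scale_type come from
 * <cudf/fixed_point/fixed_point.hpp>; the value shown is arbitrary.
 *
 * ```cpp
 * #include <cudf/fixed_point/fixed_point.hpp>
 * #include <cudf/scalar/scalar.hpp>
 * #include <cudf/scalar/scalar_device_view.cuh>
 *
 * void example()
 * {
 *   // 1.001 stored as the already-shifted integer 1001 with scale -3
 *   cudf::fixed_point_scalar<numeric::decimal32> s{1001, numeric::scale_type{-3}};
 *   auto d_view = cudf::get_scalar_device_view(s);  // rep() returns 1001 in device code
 * }
 * ```
 */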
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/scalar/scalar.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/table/table.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
/**
* @file
* @brief Class definitions for cudf::scalar
*/
namespace cudf {
/**
* @addtogroup scalar_classes
* @{
*/
/**
* @brief An owning class to represent a singular value.
*
* A scalar is a singular value of any of the supported datatypes in cudf.
* Classes derived from this class are used to represent a scalar. Objects of
* derived classes should be upcasted to this class while passing to an
* external libcudf API.
*/
class scalar {
public:
virtual ~scalar() = default;
scalar& operator=(scalar const& other) = delete;
scalar& operator=(scalar&& other) = delete;
/**
* @brief Returns the scalar's logical value type.
*
* @return The scalar's logical value type
*/
[[nodiscard]] data_type type() const noexcept;
/**
* @brief Updates the validity of the value.
*
* @param is_valid true: set the value to valid. false: set it to null.
* @param stream CUDA stream used for device memory operations.
*/
void set_valid_async(bool is_valid, rmm::cuda_stream_view stream = cudf::get_default_stream());
/**
* @brief Indicates whether the scalar contains a valid value.
*
* @note Using the value when `is_valid() == false` is undefined behavior. In addition, this
* function does a stream synchronization.
*
* @param stream CUDA stream used for device memory operations.
* @return true Value is valid
* @return false Value is invalid/null
*/
[[nodiscard]] bool is_valid(rmm::cuda_stream_view stream = cudf::get_default_stream()) const;
/**
* @brief Returns a raw pointer to the validity bool in device memory.
*
* @return Raw pointer to the validity bool in device memory
*/
bool* validity_data();
/**
* @brief Return a const raw pointer to the validity bool in device memory.
*
* @return Raw pointer to the validity bool in device memory
*/
[[nodiscard]] bool const* validity_data() const;
protected:
data_type _type{type_id::EMPTY}; ///< Logical type of value in the scalar
rmm::device_scalar<bool> _is_valid; ///< Device bool signifying validity
scalar() = delete;
/**
* @brief Move constructor for scalar.
* @param other The other scalar to move from.
*/
scalar(scalar&& other) = default;
/**
* @brief Construct a new scalar object by deep copying another.
*
* @param other The scalar to copy.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
scalar(scalar const& other,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a new scalar object.
*
* @note Do not use this constructor directly. Instead, use a factory method
* like make_numeric_scalar or make_string_scalar
*
* @param type Data type of the scalar.
* @param is_valid Whether the value held by the scalar is valid.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
scalar(data_type type,
bool is_valid = false,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
};
namespace detail {
/**
* @brief An owning class to represent a fixed-width type value in device memory.
*
* @tparam T the data type of the fixed-width type value.
*/
template <typename T>
class fixed_width_scalar : public scalar {
static_assert(is_fixed_width<T>(), "Unexpected non-fixed-width type.");
public:
using value_type = T; ///< Type of the value held by the scalar.
~fixed_width_scalar() override = default;
/**
* @brief Move constructor for fixed_width_scalar.
* @param other The other fixed_width_scalar to move from.
*/
fixed_width_scalar(fixed_width_scalar&& other) = default;
fixed_width_scalar& operator=(fixed_width_scalar const& other) = delete;
fixed_width_scalar& operator=(fixed_width_scalar&& other) = delete;
/**
* @brief Construct a new fixed-width scalar object by deep copying another.
*
* @param other The scalar to copy.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
fixed_width_scalar(fixed_width_scalar const& other,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Set the value of the scalar.
*
* @param value New value of scalar.
* @param stream CUDA stream used for device memory operations.
*/
void set_value(T value, rmm::cuda_stream_view stream = cudf::get_default_stream());
/**
* @brief Explicit conversion operator to get the value of the scalar on the host.
*/
explicit operator value_type() const;
/**
* @brief Get the value of the scalar.
*
* @param stream CUDA stream used for device memory operations.
* @return Value of the scalar
*/
T value(rmm::cuda_stream_view stream = cudf::get_default_stream()) const;
/**
* @brief Returns a raw pointer to the value in device memory.
* @return A raw pointer to the value in device memory
*/
T* data();
/**
* @brief Returns a const raw pointer to the value in device memory.
* @return A const raw pointer to the value in device memory
*/
T const* data() const;
protected:
rmm::device_scalar<T> _data; ///< device memory containing the value
fixed_width_scalar() = delete;
/**
* @brief Construct a new fixed width scalar object.
*
* @param value The initial value of the scalar.
* @param is_valid Whether the value held by the scalar is valid.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
fixed_width_scalar(T value,
bool is_valid = true,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a new fixed width scalar object from existing device memory.
*
* @param data The scalar's data in device memory.
* @param is_valid Whether the value held by the scalar is valid.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
fixed_width_scalar(rmm::device_scalar<T>&& data,
bool is_valid = true,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
};
} // namespace detail
/**
* @brief An owning class to represent a numerical value in device memory.
*
* @tparam T the data type of the numerical value.
*/
template <typename T>
class numeric_scalar : public detail::fixed_width_scalar<T> {
static_assert(is_numeric<T>(), "Unexpected non-numeric type.");
public:
numeric_scalar() = delete;
~numeric_scalar() = default;
/**
* @brief Move constructor for numeric_scalar.
* @param other The other numeric_scalar to move from.
*/
numeric_scalar(numeric_scalar&& other) = default;
numeric_scalar& operator=(numeric_scalar const& other) = delete;
numeric_scalar& operator=(numeric_scalar&& other) = delete;
/**
* @brief Construct a new numeric scalar object by deep copying another.
*
* @param other The scalar to copy.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
numeric_scalar(numeric_scalar const& other,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a new numeric scalar object.
*
* @param value The initial value of the scalar.
* @param is_valid Whether the value held by the scalar is valid.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
numeric_scalar(T value,
bool is_valid = true,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a new numeric scalar object from existing device memory.
*
* @param data The scalar's data in device memory.
* @param is_valid Whether the value held by the scalar is valid.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
numeric_scalar(rmm::device_scalar<T>&& data,
bool is_valid = true,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
};
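/*
 * Illustrative sketch (editorial addition, not part of the original header): basic host-side
 * use of numeric_scalar with the default stream and memory resource. The values are
 * arbitrary.
 *
 * ```cpp
 * #include <cudf/scalar/scalar.hpp>
 *
 * void example()
 * {
 *   cudf::numeric_scalar<double> s{3.14};  // valid scalar; the value is stored in device memory
 *   s.set_value(2.71);                     // overwrite the device value
 *   double v   = s.value();                // copy the value back to the host
 *   bool valid = s.is_valid();             // synchronizes on the default stream
 * }
 * ```
 */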
/**
* @brief An owning class to represent a fixed_point number in device memory.
*
* @tparam T the data type of the fixed_point number.
*/
template <typename T>
class fixed_point_scalar : public scalar {
static_assert(is_fixed_point<T>(), "Unexpected non-fixed_point type.");
public:
using rep_type = typename T::rep; ///< The representation type of the fixed_point number.
using value_type = T; ///< The value type of the fixed_point number.
fixed_point_scalar() = delete;
~fixed_point_scalar() override = default;
/**
* @brief Move constructor for fixed_point_scalar.
* @param other The other fixed_point_scalar to move from.
*/
fixed_point_scalar(fixed_point_scalar&& other) = default;
fixed_point_scalar& operator=(fixed_point_scalar const& other) = delete;
fixed_point_scalar& operator=(fixed_point_scalar&& other) = delete;
/**
* @brief Construct a new fixed_point scalar object by deep copying another.
*
* @param other The scalar to copy.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
fixed_point_scalar(fixed_point_scalar const& other,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a new fixed_point scalar object from already shifted value and scale.
*
* @param value The initial shifted value of the fixed_point scalar.
* @param scale The scale of the fixed_point scalar.
* @param is_valid Whether the value held by the scalar is valid.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
fixed_point_scalar(rep_type value,
numeric::scale_type scale,
bool is_valid = true,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a new fixed_point scalar object from a value and default 0-scale.
*
* @param value The initial value of the fixed_point scalar.
* @param is_valid Whether the value held by the scalar is valid.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
fixed_point_scalar(rep_type value,
bool is_valid = true,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a new fixed_point scalar object from a fixed_point number.
*
* @param value The fixed_point number from which the fixed_point scalar will be initialized.
* @param is_valid Whether the value held by the scalar is valid.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
fixed_point_scalar(T value,
bool is_valid = true,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a new fixed_point scalar object from existing device memory.
*
* @param data The scalar's data in device memory.
* @param scale The scale of the fixed_point scalar.
* @param is_valid Whether the value held by the scalar is valid.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
fixed_point_scalar(rmm::device_scalar<rep_type>&& data,
numeric::scale_type scale,
bool is_valid = true,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Get the value of the scalar.
*
* @param stream CUDA stream used for device memory operations.
* @return The value of the scalar
*/
rep_type value(rmm::cuda_stream_view stream = cudf::get_default_stream()) const;
/**
 * @brief Get the value of the scalar as a decimal32, decimal64 or decimal128.
*
* @param stream CUDA stream used for device memory operations.
* @return The decimal32, decimal64 or decimal128 value
*/
T fixed_point_value(rmm::cuda_stream_view stream = cudf::get_default_stream()) const;
/**
* @brief Explicit conversion operator to get the value of the scalar on the host.
*/
explicit operator value_type() const;
/**
* @brief Returns a raw pointer to the value in device memory.
* @return A raw pointer to the value in device memory
*/
rep_type* data();
/**
* @brief Returns a const raw pointer to the value in device memory.
* @return a const raw pointer to the value in device memory
*/
rep_type const* data() const;
protected:
rmm::device_scalar<rep_type> _data; ///< device memory containing the value
};
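/*
 * Illustrative sketch (editorial addition, not part of the original header): the difference
 * between value() (the already-shifted representation) and fixed_point_value() (the decimal
 * value). numeric::decimal64 comes from <cudf/fixed_point/fixed_point.hpp>; the values are
 * arbitrary.
 *
 * ```cpp
 * #include <cudf/fixed_point/fixed_point.hpp>
 * #include <cudf/scalar/scalar.hpp>
 *
 * void example()
 * {
 *   // 12.34 represented as 1234 with scale -2
 *   cudf::fixed_point_scalar<numeric::decimal64> s{1234, numeric::scale_type{-2}};
 *   auto rep = s.value();              // 1234 (rep_type, already shifted)
 *   auto dec = s.fixed_point_value();  // numeric::decimal64 holding 12.34
 * }
 * ```
 */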
/**
* @brief An owning class to represent a string in device memory.
*/
class string_scalar : public scalar {
public:
using value_type = cudf::string_view; ///< The value type of the string scalar.
string_scalar() = delete;
~string_scalar() override = default;
/**
* @brief Move constructor for string_scalar.
* @param other The other string_scalar to move from.
*/
string_scalar(string_scalar&& other) = default;
// string_scalar(string_scalar const& other) = delete;
string_scalar& operator=(string_scalar const& other) = delete;
string_scalar& operator=(string_scalar&& other) = delete;
/**
* @brief Construct a new string scalar object by deep copying another string_scalar.
*
* @param other The other string_scalar to copy.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
string_scalar(string_scalar const& other,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a new string scalar object.
*
 * @throws std::overflow_error If the size of the input string exceeds the maximum value of cudf::size_type
*
* @param string The value of the string.
* @param is_valid Whether the value held by the scalar is valid.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
string_scalar(std::string const& string,
bool is_valid = true,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a new string scalar object from string_view.
*
* Note that this function copies the data pointed by string_view.
*
* @param source The string_view pointing the string value to copy.
* @param is_valid Whether the value held by the scalar is valid.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
string_scalar(value_type const& source,
bool is_valid = true,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a new string scalar object from string_view in device memory.
*
* Note that this function copies the data pointed by string_view.
*
* @param data The device_scalar of string_view pointing to the string value to copy.
* @param is_valid Whether the value held by the scalar is valid.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
string_scalar(rmm::device_scalar<value_type>& data,
bool is_valid = true,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a new string scalar object by moving an existing string data buffer.
*
* Note that this constructor moves the existing buffer into the internal data buffer;
* no copy is performed.
*
* @param data The existing buffer to take over.
* @param is_valid Whether the value held by the scalar is valid.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
string_scalar(rmm::device_buffer&& data,
bool is_valid = true,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Explicit conversion operator to get the value of the scalar in a host std::string.
*/
explicit operator std::string() const;
/**
* @brief Get the value of the scalar in a host std::string.
*
* @param stream CUDA stream used for device memory operations.
* @return The value of the scalar in a host std::string
*/
[[nodiscard]] std::string to_string(
rmm::cuda_stream_view stream = cudf::get_default_stream()) const;
/**
* @brief Get the value of the scalar as a string_view.
*
* @param stream CUDA stream used for device memory operations.
* @return The value of the scalar as a string_view
*/
[[nodiscard]] value_type value(rmm::cuda_stream_view stream = cudf::get_default_stream()) const;
/**
* @brief Returns the size of the string in bytes.
* @return The size of the string in bytes
*/
[[nodiscard]] size_type size() const;
/**
* @brief Returns a raw pointer to the string in device memory.
* @return a raw pointer to the string in device memory
*/
[[nodiscard]] char const* data() const;
protected:
rmm::device_buffer _data{}; ///< device memory containing the string
};
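/*
 * Illustrative sketch (editorial addition, not part of the original header): constructing a
 * string_scalar from a host std::string and copying the value back. The string content is
 * arbitrary.
 *
 * ```cpp
 * #include <cudf/scalar/scalar.hpp>
 *
 * #include <string>
 *
 * void example()
 * {
 *   cudf::string_scalar s{std::string{"hello"}};  // copies the bytes into device memory
 *   auto bytes = s.size();                        // 5
 *   auto host  = s.to_string();                   // copies the bytes back into a host std::string
 * }
 * ```
 */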
/**
* @brief An owning class to represent a timestamp/duration value in device memory.
*
* @tparam T the data type of the timestamp/duration value.
* @see cudf/wrappers/timestamps.hpp, cudf/wrappers/durations.hpp for a list of allowed types.
*/
template <typename T>
class chrono_scalar : public detail::fixed_width_scalar<T> {
static_assert(is_chrono<T>(), "Unexpected non-chrono type");
public:
chrono_scalar() = delete;
~chrono_scalar() = default;
/**
* @brief Move constructor for chrono_scalar.
* @param other The other chrono_scalar to move from.
*/
chrono_scalar(chrono_scalar&& other) = default;
chrono_scalar& operator=(chrono_scalar const& other) = delete;
chrono_scalar& operator=(chrono_scalar&& other) = delete;
/**
* @brief Construct a new chrono scalar object by deep copying another.
*
* @param other The scalar to copy.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
chrono_scalar(chrono_scalar const& other,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a new chrono scalar object.
*
* @param value The initial value of the scalar.
* @param is_valid Whether the value held by the scalar is valid.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
chrono_scalar(T value,
bool is_valid = true,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a new chrono scalar object from existing device memory.
*
* @param data The scalar's data in device memory.
* @param is_valid Whether the value held by the scalar is valid.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
chrono_scalar(rmm::device_scalar<T>&& data,
bool is_valid = true,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
};
/**
* @brief An owning class to represent a timestamp value in device memory.
*
* @tparam T the data type of the timestamp value.
* @see cudf/wrappers/timestamps.hpp for a list of allowed types.
*/
template <typename T>
class timestamp_scalar : public chrono_scalar<T> {
public:
static_assert(is_timestamp<T>(), "Unexpected non-timestamp type");
using chrono_scalar<T>::chrono_scalar;
using rep_type = typename T::rep; ///< The underlying representation type of the timestamp.
timestamp_scalar() = delete;
/**
* @brief Move constructor for timestamp_scalar.
* @param other The other timestamp_scalar to move from.
*/
timestamp_scalar(timestamp_scalar&& other) = default;
/**
* @brief Construct a new timestamp scalar object by deep copying another.
*
* @param other The scalar to copy.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
timestamp_scalar(timestamp_scalar const& other,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a new timestamp scalar object from a duration that is
* convertible to T::duration
*
 * @param value Duration representing the number of ticks since the UNIX epoch, or another
 * duration that is convertible to the timestamp's duration type.
* @param is_valid Whether the value held by the scalar is valid.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
template <typename Duration2>
timestamp_scalar(Duration2 const& value,
bool is_valid,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns the duration in number of ticks since the UNIX epoch.
* @return The duration in number of ticks since the UNIX epoch
*/
rep_type ticks_since_epoch();
};
/**
* @brief An owning class to represent a duration value in device memory.
*
* @tparam T the data type of the duration value.
* @see cudf/wrappers/durations.hpp for a list of allowed types.
*/
template <typename T>
class duration_scalar : public chrono_scalar<T> {
public:
static_assert(is_duration<T>(), "Unexpected non-duration type");
using chrono_scalar<T>::chrono_scalar;
using rep_type = typename T::rep; ///< The duration's underlying representation type.
duration_scalar() = delete;
/**
* @brief Move constructor for duration_scalar.
* @param other The other duration_scalar to move from.
*/
duration_scalar(duration_scalar&& other) = default;
/**
* @brief Construct a new duration scalar object by deep copying another.
*
* @param other The scalar to copy.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
duration_scalar(duration_scalar const& other,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a new duration scalar object from tick counts.
*
 * @param value Integer representing the number of ticks in the duration.
* @param is_valid Whether the value held by the scalar is valid.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
duration_scalar(rep_type value,
bool is_valid,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns the duration in number of ticks.
* @return The duration in number of ticks
*/
rep_type count();
};
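/*
 * Illustrative sketch (editorial addition, not part of the original header): constructing
 * chrono scalars from tick counts. cudf::timestamp_s, cudf::duration_s and cudf::duration_ms
 * come from cudf's chrono wrapper headers (not shown in this excerpt); the tick values are
 * arbitrary.
 *
 * ```cpp
 * #include <cudf/scalar/scalar.hpp>
 * #include <cudf/wrappers/durations.hpp>
 * #include <cudf/wrappers/timestamps.hpp>
 *
 * void example()
 * {
 *   cudf::timestamp_scalar<cudf::timestamp_s> ts{cudf::duration_s{1700000000}, true};
 *   auto epoch_ticks = ts.ticks_since_epoch();  // 1700000000
 *
 *   cudf::duration_scalar<cudf::duration_ms> d{500, true};
 *   auto ms = d.count();  // 500
 * }
 * ```
 */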
/**
* @brief An owning class to represent a list value in device memory.
*/
class list_scalar : public scalar {
public:
list_scalar() = delete;
~list_scalar() override = default;
/**
* @brief Move constructor for list_scalar.
* @param other The other list_scalar to move from.
*/
list_scalar(list_scalar&& other) = default;
list_scalar& operator=(list_scalar const& other) = delete;
list_scalar& operator=(list_scalar&& other) = delete;
/**
* @brief Construct a new list scalar object by deep copying another.
*
* @param other The scalar to copy.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
list_scalar(list_scalar const& other,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a new list scalar object from column_view.
*
* The input column_view is copied.
*
* @param data The column data to copy.
* @param is_valid Whether the value held by the scalar is valid.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
list_scalar(cudf::column_view const& data,
bool is_valid = true,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a new list scalar object from existing column.
*
* @param data The column to take ownership of.
* @param is_valid Whether the value held by the scalar is valid.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
list_scalar(cudf::column&& data,
bool is_valid = true,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a non-owning, immutable view to underlying device data.
* @return A non-owning, immutable view to underlying device data
*/
[[nodiscard]] column_view view() const;
private:
cudf::column _data;
};
/**
* @brief An owning class to represent a struct value in device memory.
*/
class struct_scalar : public scalar {
public:
struct_scalar() = delete;
~struct_scalar() override = default;
/**
* @brief Move constructor for struct_scalar.
* @param other The other struct_scalar to move from.
*/
struct_scalar(struct_scalar&& other) = default;
struct_scalar& operator=(struct_scalar const& other) = delete;
struct_scalar& operator=(struct_scalar&& other) = delete;
/**
* @brief Construct a new struct scalar object by deep copying another.
*
* @param other The scalar to copy.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
struct_scalar(struct_scalar const& other,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a new struct scalar object from table_view.
*
* The input table_view is deep-copied.
*
* @param data The table data to copy.
* @param is_valid Whether the value held by the scalar is valid.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
struct_scalar(table_view const& data,
bool is_valid = true,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a new struct scalar object from a host_span of column_views.
*
* The input column_views are deep-copied.
*
* @param data The column_views to copy.
* @param is_valid Whether the value held by the scalar is valid.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
struct_scalar(host_span<column_view const> data,
bool is_valid = true,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a new struct scalar object from an existing table in device memory.
*
* Note that this constructor moves the existing table data into the internal table data;
* no copies are performed.
*
* @param data The existing table data to take over.
* @param is_valid Whether the value held by the scalar is valid.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
*/
struct_scalar(table&& data,
bool is_valid = true,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a non-owning, immutable view to underlying device data.
* @return A non-owning, immutable view to underlying device data
*/
[[nodiscard]] table_view view() const;
private:
table _data;
/**
* @brief Check if all the input columns constructing this struct scalar have valid size.
*/
void assert_valid_size();
/**
* @brief Initialize the internal table data for struct scalar.
*
* @param data The existing table data to take over.
* @param is_valid Whether the value held by the scalar is valid.
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for device memory allocation.
* @return The table after initialization
*/
static table init_data(table&& data,
bool is_valid,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
};
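/*
 * Illustrative sketch (editorial addition, not part of the original header): wrapping
 * existing columns as a single struct value via the host_span constructor. `child0` and
 * `child1` are assumed to be previously built one-row cudf::column objects; the helper
 * function name is hypothetical.
 *
 * ```cpp
 * #include <cudf/column/column.hpp>
 * #include <cudf/scalar/scalar.hpp>
 * #include <cudf/utilities/span.hpp>
 *
 * #include <vector>
 *
 * cudf::struct_scalar make_struct(cudf::column const& child0, cudf::column const& child1)
 * {
 *   std::vector<cudf::column_view> children{child0.view(), child1.view()};
 *   cudf::struct_scalar s{cudf::host_span<cudf::column_view const>{children}, true};
 *   cudf::table_view v = s.view();  // non-owning view of the deep-copied children
 *   return s;
 * }
 * ```
 */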
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/fixed_point/fixed_point.hpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/detail/utilities/assert.cuh>
#include <cudf/fixed_point/temporary.hpp>
#include <cudf/types.hpp>
#include <cuda/std/limits>
#include <cuda/std/type_traits>
#include <algorithm>
#include <cassert>
#include <cmath>
#include <string>
/// `fixed_point` and supporting types
namespace numeric {
/// The scale type for fixed_point
enum scale_type : int32_t {};
/**
* @brief Scoped enumerator to use when constructing `fixed_point`
*
* Examples:
* ```cpp
* using decimal32 = fixed_point<int32_t, Radix::BASE_10>;
* using binary64 = fixed_point<int64_t, Radix::BASE_2>;
* ```
*/
enum class Radix : int32_t { BASE_2 = 2, BASE_10 = 10 };
/**
* @brief Returns `true` if the representation type is supported by `fixed_point`
*
* @tparam T The representation type
* @return `true` if the type is supported by `fixed_point` implementation
*/
template <typename T>
constexpr inline auto is_supported_representation_type()
{
return cuda::std::is_same_v<T, int32_t> || //
cuda::std::is_same_v<T, int64_t> || //
cuda::std::is_same_v<T, __int128_t>;
}
/**
* @brief Returns `true` if the value type is supported for constructing a `fixed_point`
*
* @tparam T The construction value type
* @return `true` if the value type is supported to construct a `fixed_point` type
*/
template <typename T>
constexpr inline auto is_supported_construction_value_type()
{
return cuda::std::is_integral<T>() || cuda::std::is_floating_point_v<T>;
}
// Helper functions for `fixed_point` type
namespace detail {
/**
* @brief A function for integer exponentiation by squaring
*
* https://simple.wikipedia.org/wiki/Exponentiation_by_squaring <br>
* Note: this is the iterative equivalent of the recursive definition (faster) <br>
* Quick-bench: http://quick-bench.com/Wg7o7HYQC9FW5M0CO0wQAjSwP_Y
*
* @tparam Rep Representation type for return type
* @tparam Base The base to be exponentiated
* @param exponent The exponent to be used for exponentiation
* @return Result of `Base` to the power of `exponent` of type `Rep`
*/
template <typename Rep,
Radix Base,
typename T,
typename cuda::std::enable_if_t<(cuda::std::is_same_v<int32_t, T> &&
is_supported_representation_type<Rep>())>* = nullptr>
CUDF_HOST_DEVICE inline Rep ipow(T exponent)
{
cudf_assert(exponent >= 0 && "integer exponentiation with negative exponent is not possible.");
if (exponent == 0) { return static_cast<Rep>(1); }
auto extra = static_cast<Rep>(1);
auto square = static_cast<Rep>(Base);
while (exponent > 1) {
if (exponent & 1 /* odd */) {
extra *= square;
exponent -= 1;
}
exponent /= 2;
square *= square;
}
return square * extra;
}
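/*
 * Worked example (editorial addition): ipow<int64_t, Radix::BASE_10>(int32_t{3}) starts with
 * extra = 1 and square = 10; the first iteration sees an odd exponent, so extra becomes 10 and
 * the exponent drops to 2, then halves to 1 while square becomes 100. The loop exits and the
 * result is square * extra = 100 * 10 = 1000, i.e. 10^3.
 *
 * ```cpp
 * auto const thousand = numeric::detail::ipow<int64_t, numeric::Radix::BASE_10>(int32_t{3});
 * ```
 */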
/** @brief Function that right-shifts `val` by `scale` digits (i.e. divides it by the radix raised to `scale`)
*
* Note: perform this operation when constructing with positive scale
*
* @tparam Rep Representation type needed for integer exponentiation
* @tparam Rad The radix which will act as the base in the exponentiation
* @tparam T Type for value `val` being shifted and the return type
* @param val The value being shifted
* @param scale The amount to shift the value by
* @return Shifted value of type T
*/
template <typename Rep, Radix Rad, typename T>
CUDF_HOST_DEVICE inline constexpr T right_shift(T const& val, scale_type const& scale)
{
return val / ipow<Rep, Rad>(static_cast<int32_t>(scale));
}
/** @brief Function that left-shifts `val` by `-scale` digits (i.e. multiplies it by the radix raised to `-scale`)
*
* Note: perform this operation when constructing with negative scale
*
* @tparam Rep Representation type needed for integer exponentiation
* @tparam Rad The radix which will act as the base in the exponentiation
* @tparam T Type for value `val` being shifted and the return type
* @param val The value being shifted
* @param scale The amount to shift the value by
* @return Shifted value of type T
*/
template <typename Rep, Radix Rad, typename T>
CUDF_HOST_DEVICE inline constexpr T left_shift(T const& val, scale_type const& scale)
{
return val * ipow<Rep, Rad>(static_cast<int32_t>(-scale));
}
/** @brief Function that shifts `val` right (for positive `scale`) or left (for negative `scale`)
 *
 * Note: Function will call the correct right or left shift based
 * on the sign of `scale`
*
* @tparam Rep Representation type needed for integer exponentiation
* @tparam Rad The radix which will act as the base in the exponentiation
* @tparam T Type for value `val` being shifted and the return type
* @param val The value being shifted
* @param scale The amount to shift the value by
* @return Shifted value of type T
*/
template <typename Rep, Radix Rad, typename T>
CUDF_HOST_DEVICE inline constexpr T shift(T const& val, scale_type const& scale)
{
if (scale == 0) { return val; }
if (scale > 0) { return right_shift<Rep, Rad>(val, scale); }
return left_shift<Rep, Rad>(val, scale);
}
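/*
 * Worked example (editorial addition): with Radix::BASE_10, a positive scale divides and a
 * negative scale multiplies by a power of ten.
 *
 * ```cpp
 * using namespace numeric;
 * auto const a = detail::shift<int64_t, Radix::BASE_10>(int64_t{12345}, scale_type{2});   // 123     (12345 / 10^2)
 * auto const b = detail::shift<int64_t, Radix::BASE_10>(int64_t{12345}, scale_type{-2});  // 1234500 (12345 * 10^2)
 * ```
 */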
} // namespace detail
/**
* @addtogroup fixed_point_classes
* @{
* @file
* @brief Class definition for fixed point data type
*/
/**
* @brief Helper struct for constructing `fixed_point` when value is already shifted
*
* Example:
* ```cpp
* using decimal32 = fixed_point<int32_t, Radix::BASE_10>;
* auto n = decimal32{scaled_integer{1001, 3}}; // n = 1.001
* ```
*
 * @tparam Rep The representation type (`int32_t`, `int64_t`, or `__int128_t`)
*/
template <typename Rep,
typename cuda::std::enable_if_t<is_supported_representation_type<Rep>()>* = nullptr>
struct scaled_integer {
Rep value; ///< The value of the fixed point number
scale_type scale; ///< The scale of the value
/**
* @brief Constructor for `scaled_integer`
*
* @param v The value of the fixed point number
* @param s The scale of the value
*/
CUDF_HOST_DEVICE inline explicit scaled_integer(Rep v, scale_type s) : value{v}, scale{s} {}
};
/**
* @brief A type for representing a number with a fixed amount of precision
*
* Currently, only binary and decimal `fixed_point` numbers are supported.
* Binary operations can only be performed with other `fixed_point` numbers
*
 * @tparam Rep The representation type (`int32_t`, `int64_t`, or `__int128_t`)
* @tparam Rad The radix/base (either `Radix::BASE_2` or `Radix::BASE_10`)
*/
template <typename Rep, Radix Rad>
class fixed_point {
Rep _value{};
scale_type _scale;
public:
using rep = Rep; ///< The representation type
/**
* @brief Constructor that will perform shifting to store value appropriately (from floating point
* types)
*
* @tparam T The floating point type that you are constructing from
* @param value The value that will be constructed from
* @param scale The exponent that is applied to Rad to perform shifting
*/
template <typename T,
typename cuda::std::enable_if_t<cuda::std::is_floating_point<T>() &&
is_supported_representation_type<Rep>()>* = nullptr>
CUDF_HOST_DEVICE inline explicit fixed_point(T const& value, scale_type const& scale)
: _value{static_cast<Rep>(detail::shift<Rep, Rad>(value, scale))}, _scale{scale}
{
}
/**
* @brief Constructor that will perform shifting to store value appropriately (from integral
* types)
*
* @tparam T The integral type that you are constructing from
* @param value The value that will be constructed from
* @param scale The exponent that is applied to Rad to perform shifting
*/
template <typename T,
typename cuda::std::enable_if_t<cuda::std::is_integral<T>() &&
is_supported_representation_type<Rep>()>* = nullptr>
CUDF_HOST_DEVICE inline explicit fixed_point(T const& value, scale_type const& scale)
// `value` is cast to `Rep` before shifting to avoid overflow in cases where
// `Rep` is wider than `T`
: _value{detail::shift<Rep, Rad>(static_cast<Rep>(value), scale)}, _scale{scale}
{
}
/**
* @brief Constructor that will not perform shifting (assumes value already shifted)
*
* @param s scaled_integer that contains scale and already shifted value
*/
CUDF_HOST_DEVICE inline explicit fixed_point(scaled_integer<Rep> s)
: _value{s.value}, _scale{s.scale}
{
}
/**
* @brief "Scale-less" constructor that constructs `fixed_point` number with a specified
* value and scale of zero
*
* @tparam T The value type being constructing from
* @param value The value that will be constructed from
*/
template <typename T,
typename cuda::std::enable_if_t<is_supported_construction_value_type<T>()>* = nullptr>
CUDF_HOST_DEVICE inline fixed_point(T const& value)
: _value{static_cast<Rep>(value)}, _scale{scale_type{0}}
{
}
/**
* @brief Default constructor that constructs `fixed_point` number with a
* value and scale of zero
*/
CUDF_HOST_DEVICE inline fixed_point() : _scale{scale_type{0}} {}
/**
* @brief Explicit conversion operator for casting to floating point types
*
* @tparam U The floating point type that is being explicitly converted to
* @return The `fixed_point` number in base 10 (aka human readable format)
*/
template <typename U,
typename cuda::std::enable_if_t<cuda::std::is_floating_point_v<U>>* = nullptr>
explicit constexpr operator U() const
{
return detail::shift<Rep, Rad>(static_cast<U>(_value), scale_type{-_scale});
}
/**
* @brief Explicit conversion operator for casting to integral types
*
* @tparam U The integral type that is being explicitly converted to
* @return The `fixed_point` number in base 10 (aka human readable format)
*/
template <typename U, typename cuda::std::enable_if_t<cuda::std::is_integral_v<U>>* = nullptr>
explicit constexpr operator U() const
{
// Cast to the larger of the two types (of U and Rep), shift, and only then convert to U,
// because in certain cases casting to U before shifting would result in integer overflow
// (i.e. if U = int32_t, Rep = int64_t and _value > 2 billion)
auto const value = std::common_type_t<U, Rep>(_value);
return static_cast<U>(detail::shift<Rep, Rad>(value, scale_type{-_scale}));
}
/**
* @brief Converts the `fixed_point` number to a `scaled_integer`
*
* @return The `scaled_integer` representation of the `fixed_point` number
*/
CUDF_HOST_DEVICE inline operator scaled_integer<Rep>() const
{
return scaled_integer<Rep>{_value, _scale};
}
/**
* @brief Method that returns the underlying value of the `fixed_point` number
*
* @return The underlying value of the `fixed_point` number
*/
CUDF_HOST_DEVICE inline rep value() const { return _value; }
/**
* @brief Method that returns the scale of the `fixed_point` number
*
* @return The scale of the `fixed_point` number
*/
CUDF_HOST_DEVICE inline scale_type scale() const { return _scale; }
/**
* @brief Explicit conversion operator to `bool`
*
* @return The `fixed_point` value as a boolean (zero is `false`, nonzero is `true`)
*/
CUDF_HOST_DEVICE inline explicit constexpr operator bool() const
{
return static_cast<bool>(_value);
}
/**
* @brief operator +=
*
* @tparam Rep1 Representation type of the operand `rhs`
* @tparam Rad1 Radix (base) type of the operand `rhs`
* @param rhs The number being added to `this`
* @return The sum
*/
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline fixed_point<Rep1, Rad1>& operator+=(fixed_point<Rep1, Rad1> const& rhs)
{
*this = *this + rhs;
return *this;
}
/**
* @brief operator *=
*
* @tparam Rep1 Representation type of the operand `rhs`
* @tparam Rad1 Radix (base) type of the operand `rhs`
 * @param rhs The number being multiplied with `this`
* @return The product
*/
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline fixed_point<Rep1, Rad1>& operator*=(fixed_point<Rep1, Rad1> const& rhs)
{
*this = *this * rhs;
return *this;
}
/**
* @brief operator -=
*
* @tparam Rep1 Representation type of the operand `rhs`
* @tparam Rad1 Radix (base) type of the operand `rhs`
* @param rhs The number being subtracted from `this`
* @return The difference
*/
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline fixed_point<Rep1, Rad1>& operator-=(fixed_point<Rep1, Rad1> const& rhs)
{
*this = *this - rhs;
return *this;
}
/**
* @brief operator /=
*
* @tparam Rep1 Representation type of the operand `rhs`
* @tparam Rad1 Radix (base) type of the operand `rhs`
 * @param rhs The number `this` is divided by
* @return The quotient
*/
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline fixed_point<Rep1, Rad1>& operator/=(fixed_point<Rep1, Rad1> const& rhs)
{
*this = *this / rhs;
return *this;
}
/**
 * @brief operator ++ (pre-increment)
*
* @return The incremented result
*/
CUDF_HOST_DEVICE inline fixed_point<Rep, Rad>& operator++()
{
*this = *this + fixed_point<Rep, Rad>{1, scale_type{_scale}};
return *this;
}
/**
* @brief operator + (for adding two `fixed_point` numbers)
*
* If `_scale`s are equal, `_value`s are added.
* If `_scale`s are not equal, the number with the larger `_scale` is shifted to the
* smaller `_scale`, and then the `_value`s are added.
*
* @tparam Rep1 Representation type of the operand `lhs` and `rhs`
* @tparam Rad1 Radix (base) type of the operand `lhs` and `rhs`
* @param lhs The left hand side operand
* @param rhs The right hand side operand
* @return The resulting `fixed_point` sum
*/
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline friend fixed_point<Rep1, Rad1> operator+(
fixed_point<Rep1, Rad1> const& lhs, fixed_point<Rep1, Rad1> const& rhs);
/**
* @brief operator - (for subtracting two `fixed_point` numbers)
*
* If `_scale`s are equal, `_value`s are subtracted.
* If `_scale`s are not equal, the number with the larger `_scale` is shifted to the
* smaller `_scale`, and then the `_value`s are subtracted.
*
* @tparam Rep1 Representation type of the operand `lhs` and `rhs`
* @tparam Rad1 Radix (base) type of the operand `lhs` and `rhs`
* @param lhs The left hand side operand
* @param rhs The right hand side operand
* @return The resulting `fixed_point` difference
*/
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline friend fixed_point<Rep1, Rad1> operator-(
fixed_point<Rep1, Rad1> const& lhs, fixed_point<Rep1, Rad1> const& rhs);
/**
* @brief operator * (for multiplying two `fixed_point` numbers)
*
* `_scale`s are added and `_value`s are multiplied.
*
* @tparam Rep1 Representation type of the operand `lhs` and `rhs`
* @tparam Rad1 Radix (base) type of the operand `lhs` and `rhs`
* @param lhs The left hand side operand
* @param rhs The right hand side operand
* @return The resulting `fixed_point` product
*/
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline friend fixed_point<Rep1, Rad1> operator*(
fixed_point<Rep1, Rad1> const& lhs, fixed_point<Rep1, Rad1> const& rhs);
/**
* @brief operator / (for dividing two `fixed_point` numbers)
*
* `_scale`s are subtracted and `_value`s are divided.
*
* @tparam Rep1 Representation type of the operand `lhs` and `rhs`
* @tparam Rad1 Radix (base) type of the operand `lhs` and `rhs`
* @param lhs The left hand side operand
* @param rhs The right hand side operand
* @return The resulting `fixed_point` quotient
*/
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline friend fixed_point<Rep1, Rad1> operator/(
fixed_point<Rep1, Rad1> const& lhs, fixed_point<Rep1, Rad1> const& rhs);
/**
* @brief operator % (for computing the modulo operation of two `fixed_point` numbers)
*
* If `_scale`s are equal, the modulus is computed directly.
* If `_scale`s are not equal, the number with larger `_scale` is shifted to the
* smaller `_scale`, and then the modulus is computed.
*
* @tparam Rep1 Representation type of the operand `lhs` and `rhs`
* @tparam Rad1 Radix (base) type of the operand `lhs` and `rhs`
* @param lhs The left hand side operand
* @param rhs The right hand side operand
* @return The resulting `fixed_point` number
*/
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline friend fixed_point<Rep1, Rad1> operator%(
fixed_point<Rep1, Rad1> const& lhs, fixed_point<Rep1, Rad1> const& rhs);
/**
* @brief operator == (for comparing two `fixed_point` numbers)
*
* If `_scale`s are equal, `_value`s are compared.
* If `_scale`s are not equal, the number with the larger `_scale` is shifted to the
* smaller `_scale`, and then the `_value`s are compared.
*
* @tparam Rep1 Representation type of the operand `lhs` and `rhs`
* @tparam Rad1 Radix (base) type of the operand `lhs` and `rhs`
* @param lhs The left hand side operand
* @param rhs The right hand side operand
* @return true if `lhs` and `rhs` are equal, false if not
*/
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline friend bool operator==(fixed_point<Rep1, Rad1> const& lhs,
fixed_point<Rep1, Rad1> const& rhs);
/**
* @brief operator != (for comparing two `fixed_point` numbers)
*
* If `_scale`s are equal, `_value`s are compared.
* If `_scale`s are not equal, the number with the larger `_scale` is shifted to the
* smaller `_scale`, and then the `_value`s are compared.
*
* @tparam Rep1 Representation type of the operand `lhs` and `rhs`
* @tparam Rad1 Radix (base) type of the operand `lhs` and `rhs`
* @param lhs The left hand side operand
* @param rhs The right hand side operand
* @return true if `lhs` and `rhs` are not equal, false if not
*/
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline friend bool operator!=(fixed_point<Rep1, Rad1> const& lhs,
fixed_point<Rep1, Rad1> const& rhs);
/**
* @brief operator <= (for comparing two `fixed_point` numbers)
*
* If `_scale`s are equal, `_value`s are compared.
* If `_scale`s are not equal, the number with the larger `_scale` is shifted to the
* smaller `_scale`, and then the `_value`s are compared.
*
* @tparam Rep1 Representation type of the operand `lhs` and `rhs`
* @tparam Rad1 Radix (base) type of the operand `lhs` and `rhs`
* @param lhs The left hand side operand
* @param rhs The right hand side operand
 * @return true if `lhs` is less than or equal to `rhs`, false if not
*/
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline friend bool operator<=(fixed_point<Rep1, Rad1> const& lhs,
fixed_point<Rep1, Rad1> const& rhs);
/**
* @brief operator >= (for comparing two `fixed_point` numbers)
*
* If `_scale`s are equal, `_value`s are compared.
* If `_scale`s are not equal, the number with the larger `_scale` is shifted to the
* smaller `_scale`, and then the `_value`s are compared.
*
* @tparam Rep1 Representation type of the operand `lhs` and `rhs`
* @tparam Rad1 Radix (base) type of the operand `lhs` and `rhs`
* @param lhs The left hand side operand
* @param rhs The right hand side operand
 * @return true if `lhs` is greater than or equal to `rhs`, false if not
*/
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline friend bool operator>=(fixed_point<Rep1, Rad1> const& lhs,
fixed_point<Rep1, Rad1> const& rhs);
/**
* @brief operator < (for comparing two `fixed_point` numbers)
*
* If `_scale`s are equal, `_value`s are compared.
* If `_scale`s are not equal, the number with the larger `_scale` is shifted to the
* smaller `_scale`, and then the `_value`s are compared.
*
* @tparam Rep1 Representation type of the operand `lhs` and `rhs`
* @tparam Rad1 Radix (base) type of the operand `lhs` and `rhs`
* @param lhs The left hand side operand
* @param rhs The right hand side operand
 * @return true if `lhs` is less than `rhs`, false if not
*/
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline friend bool operator<(fixed_point<Rep1, Rad1> const& lhs,
fixed_point<Rep1, Rad1> const& rhs);
/**
* @brief operator > (for comparing two `fixed_point` numbers)
*
* If `_scale`s are equal, `_value`s are compared.
* If `_scale`s are not equal, the number with the larger `_scale` is shifted to the
* smaller `_scale`, and then the `_value`s are compared.
*
* @tparam Rep1 Representation type of the operand `lhs` and `rhs`
* @tparam Rad1 Radix (base) type of the operand `lhs` and `rhs`
* @param lhs The left hand side operand
* @param rhs The right hand side operand
* @return true if `lhs` is greater than `rhs`, false otherwise
*/
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline friend bool operator>(fixed_point<Rep1, Rad1> const& lhs,
fixed_point<Rep1, Rad1> const& rhs);
/**
* @brief Method for creating a `fixed_point` number with a new `scale`
*
* The `fixed_point` number returned will have the same value, underlying representation and
* radix as `this`, the only thing changed is the scale.
*
* @param scale The `scale` of the returned `fixed_point` number
* @return `fixed_point` number with a new `scale`
*/
CUDF_HOST_DEVICE inline fixed_point<Rep, Rad> rescaled(scale_type scale) const
{
if (scale == _scale) { return *this; }
Rep const value = detail::shift<Rep, Rad>(_value, scale_type{scale - _scale});
return fixed_point<Rep, Rad>{scaled_integer<Rep>{value, scale}};
}
/**
* @brief Returns a string representation of the fixed_point value.
*/
explicit operator std::string() const
{
if (_scale < 0) {
auto const av = detail::abs(_value);
Rep const n = detail::exp10<Rep>(-_scale);
Rep const f = av % n;
auto const num_zeros =
std::max(0, (-_scale - static_cast<int32_t>(detail::to_string(f).size())));
auto const zeros = std::string(num_zeros, '0');
auto const sign = _value < 0 ? std::string("-") : std::string();
return sign + detail::to_string(av / n) + std::string(".") + zeros +
detail::to_string(av % n);
}
auto const zeros = std::string(_scale, '0');
return detail::to_string(_value) + zeros;
}
};
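// Illustrative usage sketch (not part of the original header): constructing a
// base-10 fixed_point value, rescaling it, and converting it to a string.
//
//   using namespace numeric;
//   auto const d = fixed_point<int32_t, Radix::BASE_10>{
//     scaled_integer<int32_t>{123, scale_type{-2}}};    // represents 1.23
//   auto const wider = d.rescaled(scale_type{-4});      // underlying value 12300, scale -4
//   auto const text  = static_cast<std::string>(d);     // "1.23"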
/**
* @brief Function for identifying integer overflow when adding
*
* @tparam Rep Type of integer to check for overflow on
* @tparam T Types of lhs and rhs (ensures they are the same type)
* @param lhs Left hand side of addition
* @param rhs Right hand side of addition
* @return true if addition causes overflow, false otherwise
*/
template <typename Rep, typename T>
CUDF_HOST_DEVICE inline auto addition_overflow(T lhs, T rhs)
{
return rhs > 0 ? lhs > cuda::std::numeric_limits<Rep>::max() - rhs
: lhs < cuda::std::numeric_limits<Rep>::min() - rhs;
}
/**
* @brief Function for identifying integer overflow when subtracting
*
* @tparam Rep Type of integer to check for overflow on
* @tparam T Types of lhs and rhs (ensures they are the same type)
* @param lhs Left hand side of subtraction
* @param rhs Right hand side of subtraction
* @return true if subtraction causes overflow, false otherwise
*/
template <typename Rep, typename T>
CUDF_HOST_DEVICE inline auto subtraction_overflow(T lhs, T rhs)
{
return rhs > 0 ? lhs < cuda::std::numeric_limits<Rep>::min() + rhs
: lhs > cuda::std::numeric_limits<Rep>::max() + rhs;
}
/**
* @brief Function for identifying integer overflow when dividing
*
* @tparam Rep Type of integer to check for overflow on
* @tparam T Types of lhs and rhs (ensures they are the same type)
* @param lhs Left hand side of division
* @param rhs Right hand side of division
* @return true if division causes overflow, false otherwise
*/
template <typename Rep, typename T>
CUDF_HOST_DEVICE inline auto division_overflow(T lhs, T rhs)
{
return lhs == cuda::std::numeric_limits<Rep>::min() && rhs == -1;
}
/**
* @brief Function for identifying integer overflow when multiplying
*
* @tparam Rep Type of integer to check for overflow on
* @tparam T Types of lhs and rhs (ensures they are the same type)
* @param lhs Left hand side of multiplication
* @param rhs Right hand side of multiplication
* @return true if multiplication causes overflow, false otherwise
*/
template <typename Rep, typename T>
CUDF_HOST_DEVICE inline auto multiplication_overflow(T lhs, T rhs)
{
auto const min = cuda::std::numeric_limits<Rep>::min();
auto const max = cuda::std::numeric_limits<Rep>::max();
if (rhs > 0) { return lhs > max / rhs || lhs < min / rhs; }
if (rhs < -1) { return lhs > min / rhs || lhs < max / rhs; }
return rhs == -1 && lhs == min;
}
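// Illustrative sketch of the overflow helpers above (they are only exercised by
// the debug assertions in the operators below):
//
//   using rep = int32_t;
//   rep const hi = cuda::std::numeric_limits<rep>::max();
//   rep const lo = cuda::std::numeric_limits<rep>::min();
//   bool const a = addition_overflow<rep>(hi, rep{1});        // true: hi + 1 wraps
//   bool const b = subtraction_overflow<rep>(lo, rep{1});     // true: lo - 1 wraps
//   bool const c = division_overflow<rep>(lo, rep{-1});       // true: -lo is unrepresentable
//   bool const d = multiplication_overflow<rep>(hi, rep{2});  // true: hi * 2 wraps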
// PLUS Operation
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline fixed_point<Rep1, Rad1> operator+(fixed_point<Rep1, Rad1> const& lhs,
fixed_point<Rep1, Rad1> const& rhs)
{
auto const scale = std::min(lhs._scale, rhs._scale);
auto const sum = lhs.rescaled(scale)._value + rhs.rescaled(scale)._value;
#if defined(__CUDACC_DEBUG__)
assert(!addition_overflow<Rep1>(lhs.rescaled(scale)._value, rhs.rescaled(scale)._value) &&
"fixed_point overflow");
#endif
return fixed_point<Rep1, Rad1>{scaled_integer<Rep1>{sum, scale}};
}
// MINUS Operation
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline fixed_point<Rep1, Rad1> operator-(fixed_point<Rep1, Rad1> const& lhs,
fixed_point<Rep1, Rad1> const& rhs)
{
auto const scale = std::min(lhs._scale, rhs._scale);
auto const diff = lhs.rescaled(scale)._value - rhs.rescaled(scale)._value;
#if defined(__CUDACC_DEBUG__)
assert(!subtraction_overflow<Rep1>(lhs.rescaled(scale)._value, rhs.rescaled(scale)._value) &&
"fixed_point overflow");
#endif
return fixed_point<Rep1, Rad1>{scaled_integer<Rep1>{diff, scale}};
}
// MULTIPLIES Operation
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline fixed_point<Rep1, Rad1> operator*(fixed_point<Rep1, Rad1> const& lhs,
fixed_point<Rep1, Rad1> const& rhs)
{
#if defined(__CUDACC_DEBUG__)
assert(!multiplication_overflow<Rep1>(lhs._value, rhs._value) && "fixed_point overflow");
#endif
return fixed_point<Rep1, Rad1>{
scaled_integer<Rep1>(lhs._value * rhs._value, scale_type{lhs._scale + rhs._scale})};
}
// DIVISION Operation
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline fixed_point<Rep1, Rad1> operator/(fixed_point<Rep1, Rad1> const& lhs,
fixed_point<Rep1, Rad1> const& rhs)
{
#if defined(__CUDACC_DEBUG__)
assert(!division_overflow<Rep1>(lhs._value, rhs._value) && "fixed_point overflow");
#endif
return fixed_point<Rep1, Rad1>{
scaled_integer<Rep1>(lhs._value / rhs._value, scale_type{lhs._scale - rhs._scale})};
}
// EQUALITY COMPARISON Operation
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline bool operator==(fixed_point<Rep1, Rad1> const& lhs,
fixed_point<Rep1, Rad1> const& rhs)
{
auto const scale = std::min(lhs._scale, rhs._scale);
return lhs.rescaled(scale)._value == rhs.rescaled(scale)._value;
}
// EQUALITY NOT COMPARISON Operation
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline bool operator!=(fixed_point<Rep1, Rad1> const& lhs,
fixed_point<Rep1, Rad1> const& rhs)
{
auto const scale = std::min(lhs._scale, rhs._scale);
return lhs.rescaled(scale)._value != rhs.rescaled(scale)._value;
}
// LESS THAN OR EQUAL TO Operation
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline bool operator<=(fixed_point<Rep1, Rad1> const& lhs,
fixed_point<Rep1, Rad1> const& rhs)
{
auto const scale = std::min(lhs._scale, rhs._scale);
return lhs.rescaled(scale)._value <= rhs.rescaled(scale)._value;
}
// GREATER THAN OR EQUAL TO Operation
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline bool operator>=(fixed_point<Rep1, Rad1> const& lhs,
fixed_point<Rep1, Rad1> const& rhs)
{
auto const scale = std::min(lhs._scale, rhs._scale);
return lhs.rescaled(scale)._value >= rhs.rescaled(scale)._value;
}
// LESS THAN Operation
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline bool operator<(fixed_point<Rep1, Rad1> const& lhs,
fixed_point<Rep1, Rad1> const& rhs)
{
auto const scale = std::min(lhs._scale, rhs._scale);
return lhs.rescaled(scale)._value < rhs.rescaled(scale)._value;
}
// GREATER THAN Operation
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline bool operator>(fixed_point<Rep1, Rad1> const& lhs,
fixed_point<Rep1, Rad1> const& rhs)
{
auto const scale = std::min(lhs._scale, rhs._scale);
return lhs.rescaled(scale)._value > rhs.rescaled(scale)._value;
}
// MODULO OPERATION
template <typename Rep1, Radix Rad1>
CUDF_HOST_DEVICE inline fixed_point<Rep1, Rad1> operator%(fixed_point<Rep1, Rad1> const& lhs,
fixed_point<Rep1, Rad1> const& rhs)
{
auto const scale = std::min(lhs._scale, rhs._scale);
auto const remainder = lhs.rescaled(scale)._value % rhs.rescaled(scale)._value;
return fixed_point<Rep1, Rad1>{scaled_integer<Rep1>{remainder, scale}};
}
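// Illustrative sketch of the scale rules implemented above (uses the decimal32
// alias defined just below): addition, subtraction, comparison, and modulus
// rescale to the smaller scale; multiplication adds scales; division subtracts them.
//
//   using namespace numeric;
//   decimal32 const a{scaled_integer<int32_t>{150, scale_type{-2}}};  // 1.50
//   decimal32 const b{scaled_integer<int32_t>{25, scale_type{-1}}};   // 2.5
//   auto const sum  = a + b;  // value 400 at scale -2  -> 4.00
//   auto const prod = a * b;  // value 3750 at scale -3 -> 3.750
//   auto const quot = b / a;  // value 0 at scale 1 (integer division of 25 / 150)
//   bool const lt   = a < b;  // true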
using decimal32 = fixed_point<int32_t, Radix::BASE_10>; ///< 32-bit decimal fixed point
using decimal64 = fixed_point<int64_t, Radix::BASE_10>; ///< 64-bit decimal fixed point
using decimal128 = fixed_point<__int128_t, Radix::BASE_10>; ///< 128-bit decimal fixed point
/** @} */ // end of group
} // namespace numeric
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/fixed_point/temporary.hpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
// To avoid https://github.com/NVIDIA/libcudacxx/issues/460
// in libcudacxx with CTK 12.0/12.1
#include <cuda_runtime.h>
#include <cudf/types.hpp>
#include <cuda/std/limits>
#include <cuda/std/type_traits>
#include <algorithm>
#include <string>
namespace numeric {
namespace detail {
template <typename T>
auto to_string(T value) -> std::string
{
if constexpr (cuda::std::is_same_v<T, __int128_t>) {
auto s = std::string{};
auto const sign = value < 0;
if (sign) {
value += 1; // avoid overflowing if value == __int128_t lowest
value *= -1;
if (value == cuda::std::numeric_limits<__int128_t>::max())
return "-170141183460469231731687303715884105728";
value += 1; // can add back the one, no need to avoid overflow anymore
}
while (value) {
s.push_back("0123456789"[value % 10]);
value /= 10;
}
if (sign) s.push_back('-');
std::reverse(s.begin(), s.end());
return s;
} else {
return std::to_string(value);
}
return std::string{}; // won't ever hit here, need to suppress warning though
}
template <typename T>
constexpr auto abs(T value)
{
return value >= 0 ? value : -value;
}
template <typename T>
CUDF_HOST_DEVICE inline auto min(T lhs, T rhs)
{
return lhs < rhs ? lhs : rhs;
}
template <typename T>
CUDF_HOST_DEVICE inline auto max(T lhs, T rhs)
{
return lhs > rhs ? lhs : rhs;
}
template <typename BaseType>
constexpr auto exp10(int32_t exponent)
{
BaseType value = 1;
while (exponent > 0)
value *= 10, --exponent;
return value;
}
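// Illustrative sketch (host-side) of the helpers above, including the
// __int128_t-aware to_string:
//
//   auto const s = numeric::detail::to_string(__int128_t{-1234});  // "-1234"
//   auto const a = numeric::detail::abs(-7);                       // 7
//   auto const p = numeric::detail::exp10<int64_t>(6);             // 1000000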
} // namespace detail
} // namespace numeric
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/wrappers/durations.hpp
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda/std/chrono>
namespace cudf {
/**
* @addtogroup timestamp_classes Timestamp
* @{
* @file durations.hpp
* @brief Concrete type definitions for int32_t and int64_t durations in varying resolutions.
*/
/**
* @brief Type alias representing an int32_t duration of days.
*/
using duration_D = cuda::std::chrono::duration<int32_t, cuda::std::chrono::days::period>;
/**
* @brief Type alias representing an int32_t duration of hours.
*/
using duration_h = cuda::std::chrono::duration<int32_t, cuda::std::chrono::hours::period>;
/**
* @brief Type alias representing an int32_t duration of minutes.
*/
using duration_m = cuda::std::chrono::duration<int32_t, cuda::std::chrono::minutes::period>;
/**
* @brief Type alias representing an int64_t duration of seconds.
*/
using duration_s = cuda::std::chrono::duration<int64_t, cuda::std::chrono::seconds::period>;
/**
* @brief Type alias representing an int64_t duration of milliseconds.
*/
using duration_ms = cuda::std::chrono::duration<int64_t, cuda::std::chrono::milliseconds::period>;
/**
* @brief Type alias representing an int64_t duration of microseconds.
*/
using duration_us = cuda::std::chrono::duration<int64_t, cuda::std::chrono::microseconds::period>;
/**
* @brief Type alias representing an int64_t duration of nanoseconds.
*/
using duration_ns = cuda::std::chrono::duration<int64_t, cuda::std::chrono::nanoseconds::period>;
static_assert(sizeof(duration_D) == sizeof(typename duration_D::rep), "");
static_assert(sizeof(duration_h) == sizeof(typename duration_h::rep), "");
static_assert(sizeof(duration_m) == sizeof(typename duration_m::rep), "");
static_assert(sizeof(duration_s) == sizeof(typename duration_s::rep), "");
static_assert(sizeof(duration_ms) == sizeof(typename duration_ms::rep), "");
static_assert(sizeof(duration_us) == sizeof(typename duration_us::rep), "");
static_assert(sizeof(duration_ns) == sizeof(typename duration_ns::rep), "");
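// Illustrative sketch: the aliases above interoperate with cuda::std::chrono in
// the same way std::chrono durations do.
//
//   cudf::duration_D const one_day{1};
//   auto const secs = cuda::std::chrono::duration_cast<cudf::duration_s>(one_day);
//   // secs.count() == 86400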
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/wrappers/dictionary.hpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda_runtime.h>
#include <cudf/types.hpp>
#include <limits>
/**
* @file
* @brief Concrete type definition for dictionary columns.
*/
namespace cudf {
/**
* @addtogroup dictionary_classes
* @{
* @file
*/
/**
* @brief A strongly typed wrapper for indices in a DICTIONARY type column.
*
* IndexType is an integer type such as int32_t.
*
* For example, `dictionary32` is a strongly typed wrapper around an `int32_t`
* value that holds the offset into the dictionary keys for a specific element.
*
* This wrapper provides common conversion and comparison operations for
* the IndexType.
*/
template <typename IndexType>
struct dictionary_wrapper {
using value_type = IndexType; ///< The underlying type of the dictionary
dictionary_wrapper() = default;
~dictionary_wrapper() = default;
dictionary_wrapper(dictionary_wrapper&&) = default; ///< Move constructor
dictionary_wrapper(dictionary_wrapper const&) = default; ///< Copy constructor
/**
* @brief Move assignment operator
*
* @return The reference to this dictionary wrapper object
*/
dictionary_wrapper& operator=(dictionary_wrapper&&) = default;
/**
* @brief Copy assignment operator
*
* @return The reference to this dictionary wrapper object
*/
dictionary_wrapper& operator=(dictionary_wrapper const&) = default;
/**
* @brief Construct dictionary_wrapper from a value
*
* @param v The value to construct the dictionary_wrapper from
*/
CUDF_HOST_DEVICE inline constexpr explicit dictionary_wrapper(value_type v) : _value{v} {}
/**
* @brief Conversion operator
*
* @return The value of this dictionary wrapper
*/
CUDF_HOST_DEVICE inline explicit operator value_type() const { return _value; }
/**
* @brief Simple accessor
*
* @return The value of this dictionary wrapper
*/
CUDF_HOST_DEVICE inline value_type value() const { return _value; }
/**
* @brief Returns the maximum value of the value type.
*
* @return The maximum value of the value type
*/
static CUDF_HOST_DEVICE inline constexpr value_type max_value()
{
return std::numeric_limits<value_type>::max();
}
/**
* @brief Returns the minimum value of the value type.
*
* @return The minimum value of the value type
*/
static CUDF_HOST_DEVICE inline constexpr value_type min_value()
{
return std::numeric_limits<value_type>::min();
}
/**
* @brief Returns the lowest value of the value type.
*
* @return The lowest value of the value type
*/
static CUDF_HOST_DEVICE inline constexpr value_type lowest_value()
{
return std::numeric_limits<value_type>::lowest();
}
private:
value_type _value;
};
// comparison operators
/**
* @brief Equal to operator for dictionary_wrapper
*
* @tparam Integer Index type
* @param lhs Left hand side of comparison
* @param rhs Right hand side of comparison
* @return Returns true if lhs is equal to rhs, false otherwise
*/
template <typename Integer>
CUDF_HOST_DEVICE inline bool operator==(dictionary_wrapper<Integer> const& lhs,
dictionary_wrapper<Integer> const& rhs)
{
return lhs.value() == rhs.value();
}
/**
* @brief Not equal to operator for dictionary_wrapper
*
* @tparam Integer Index type
* @param lhs Left hand side of comparison
* @param rhs Right hand side of comparison
* @return Returns true if lhs is not equal to rhs, false otherwise
*/
template <typename Integer>
CUDF_HOST_DEVICE inline bool operator!=(dictionary_wrapper<Integer> const& lhs,
dictionary_wrapper<Integer> const& rhs)
{
return lhs.value() != rhs.value();
}
/**
* @brief Less than or equal to operator for dictionary_wrapper
*
* @tparam Integer Index type
* @param lhs Left hand side of comparison
* @param rhs Right hand side of comparison
* @return Returns true if lhs is less than or equal to rhs, false otherwise
*/
template <typename Integer>
CUDF_HOST_DEVICE inline bool operator<=(dictionary_wrapper<Integer> const& lhs,
dictionary_wrapper<Integer> const& rhs)
{
return lhs.value() <= rhs.value();
}
/**
* @brief Greater than or equal to operator for dictionary_wrapper
*
* @tparam Integer Index type
* @param lhs Left hand side of comparison
* @param rhs Right hand side of comparison
* @return Returns true if lhs is greater than or equal to rhs, false otherwise
*/
template <typename Integer>
CUDF_HOST_DEVICE inline bool operator>=(dictionary_wrapper<Integer> const& lhs,
dictionary_wrapper<Integer> const& rhs)
{
return lhs.value() >= rhs.value();
}
/**
* @brief Less than operator for dictionary_wrapper
*
* @tparam Integer Index type
* @param lhs Left hand side of comparison
* @param rhs Right hand side of comparison
* @return Returns true if lhs is less than rhs, false otherwise
*/
template <typename Integer>
CUDF_HOST_DEVICE inline constexpr bool operator<(dictionary_wrapper<Integer> const& lhs,
dictionary_wrapper<Integer> const& rhs)
{
return lhs.value() < rhs.value();
}
/**
* @brief Greater than operator for dictionary_wrapper
*
* @tparam Integer Index type
* @param lhs Left hand side of comparison
* @param rhs Right hand side of comparison
* @return Returns true if lhs is greater than rhs, false otherwise
*/
template <typename Integer>
CUDF_HOST_DEVICE inline bool operator>(dictionary_wrapper<Integer> const& lhs,
dictionary_wrapper<Integer> const& rhs)
{
return lhs.value() > rhs.value();
}
using dictionary32 = dictionary_wrapper<int32_t>; ///< 32-bit integer indexed dictionary wrapper
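// Illustrative sketch: dictionary32 stores an int32_t offset into a dictionary's
// keys and compares by that offset.
//
//   cudf::dictionary32 const a{int32_t{2}};
//   cudf::dictionary32 const b{int32_t{5}};
//   bool const same = (a == b);     // false: the indices differ
//   int32_t const idx = a.value();  // 2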
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/wrappers/timestamps.hpp
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/wrappers/durations.hpp>
/**
* @file timestamps.hpp
* @brief Concrete type definitions for int32_t and int64_t timestamps in
* varying resolutions as durations since the UNIX epoch.
*/
namespace cudf {
namespace detail {
// TODO: Use chrono::utc_clock when available in libcu++?
template <class Duration>
using time_point = cuda::std::chrono::sys_time<Duration>; ///< Time point type
/**
* @brief A wrapper around a column of time_point in varying resolutions
*
* @tparam Duration The underlying duration type
*/
template <class Duration>
using timestamp = time_point<Duration>;
} // namespace detail
/**
* @addtogroup timestamp_classes
* @{
* @file
*/
/**
* @brief Type alias representing a cudf::duration_D (int32_t) since the unix epoch.
*/
using timestamp_D = detail::timestamp<cudf::duration_D>;
/**
* @brief Type alias representing a cudf::duration_h (int32_t) since the unix epoch.
*/
using timestamp_h = detail::timestamp<cudf::duration_h>;
/**
* @brief Type alias representing a cudf::duration_m (int32_t) since the unix epoch.
*/
using timestamp_m = detail::timestamp<cudf::duration_m>;
/**
* @brief Type alias representing a cudf::duration_s (int64_t) since the unix epoch.
*/
using timestamp_s = detail::timestamp<cudf::duration_s>;
/**
* @brief Type alias representing a cudf::duration_ms (int64_t) since the unix epoch.
*/
using timestamp_ms = detail::timestamp<cudf::duration_ms>;
/**
* @brief Type alias representing a cudf::duration_us (int64_t) since the unix epoch.
*/
using timestamp_us = detail::timestamp<cudf::duration_us>;
/**
* @brief Type alias representing a cudf::duration_ns (int64_t) since the unix epoch.
*/
using timestamp_ns = detail::timestamp<cudf::duration_ns>;
static_assert(sizeof(timestamp_D) == sizeof(typename timestamp_D::rep), "");
static_assert(sizeof(timestamp_h) == sizeof(typename timestamp_h::rep), "");
static_assert(sizeof(timestamp_m) == sizeof(typename timestamp_m::rep), "");
static_assert(sizeof(timestamp_s) == sizeof(typename timestamp_s::rep), "");
static_assert(sizeof(timestamp_ms) == sizeof(typename timestamp_ms::rep), "");
static_assert(sizeof(timestamp_us) == sizeof(typename timestamp_us::rep), "");
static_assert(sizeof(timestamp_ns) == sizeof(typename timestamp_ns::rep), "");
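// Illustrative sketch: a timestamp is a time_point whose value is the duration
// since the UNIX epoch, so it can be constructed directly from the matching duration.
//
//   cudf::timestamp_s const t{cudf::duration_s{1600000000}};        // 2020-09-13T12:26:40Z
//   auto const seconds_since_epoch = t.time_since_epoch().count();  // 1600000000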
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/rolling/range_window_bounds.hpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/scalar/scalar.hpp>
namespace cudf {
/**
* @brief Abstraction for window boundary sizes, to be used with
* `grouped_range_rolling_window()`.
*
* Similar to `window_bounds` in `grouped_rolling_window()`, `range_window_bounds`
* represents window boundaries for use with `grouped_range_rolling_window()`.
* A window may be specified as one of the following:
* 1. A fixed-width numeric scalar value. E.g.
* a) A `DURATION_DAYS` scalar, for use with a `TIMESTAMP_DAYS` orderby column
* b) An `INT32` scalar, for use with an `INT32` orderby column
* 2. "unbounded", indicating that the bounds stretch to the first/last
* row in the group.
* 3. "current row", indicating that the bounds end at the first/last
* row in the group that match the value of the current row.
*/
struct range_window_bounds {
public:
/**
* @brief The type of range_window_bounds.
*/
enum class extent_type : int32_t {
CURRENT_ROW = 0, /// Bounds defined as the first/last row that matches the current row.
BOUNDED, /// Bounds defined as the first/last row that falls within
/// a specified range from the current row.
UNBOUNDED /// Bounds stretching to the first/last row in the entire group.
};
/**
* @brief Factory method to construct a bounded window boundary.
*
* @param boundary Finite window boundary
* @return A bounded window boundary object
*/
static range_window_bounds get(scalar const& boundary);
/**
* @brief Factory method to construct a window boundary
* limited to the value of the current row
*
* @param type The datatype of the window boundary
* @return A "current row" window boundary object
*/
static range_window_bounds current_row(data_type type);
/**
* @brief Whether or not the window is bounded to the current row
*
* @return true If window is bounded to the current row
* @return false If window is not bounded to the current row
*/
[[nodiscard]] bool is_current_row() const { return _extent == extent_type::CURRENT_ROW; }
/**
* @brief Factory method to construct an unbounded window boundary.
*
* @param type The datatype of the window boundary
* @return An unbounded window boundary object
*/
static range_window_bounds unbounded(data_type type);
/**
* @brief Whether or not the window is unbounded
*
* @return true If window is unbounded
* @return false If window is of finite bounds
*/
[[nodiscard]] bool is_unbounded() const { return _extent == extent_type::UNBOUNDED; }
/**
* @brief Returns the underlying scalar value for the bounds
*
* @return The underlying scalar value for the bounds
*/
[[nodiscard]] scalar const& range_scalar() const { return *_range_scalar; }
range_window_bounds(range_window_bounds const&) = default; ///< Copy constructor
range_window_bounds() = default; // Required for use as return types from dispatch functors.
private:
const extent_type _extent{extent_type::UNBOUNDED};
std::shared_ptr<scalar> _range_scalar{nullptr}; // To enable copy construction/assignment.
range_window_bounds(extent_type extent_, std::unique_ptr<scalar> range_scalar_);
};
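// Illustrative sketch (the orderby column setup is omitted): the three ways to
// build a window boundary for grouped_range_rolling_window().
//
//   cudf::numeric_scalar<int32_t> const two{2};
//   auto const preceding = cudf::range_window_bounds::get(two);  // bounded range of 2
//   auto const following =
//     cudf::range_window_bounds::current_row(cudf::data_type{cudf::type_id::INT32});
//   auto const everything =
//     cudf::range_window_bounds::unbounded(cudf::data_type{cudf::type_id::INT32});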
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/column/column_factories.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <thrust/pair.h>
namespace cudf {
/**
* @addtogroup column_factories
* @{
* @file
* @brief Column factory APIs
*/
/**
* @brief Creates an empty column of the specified @p type
*
* An empty column contains zero elements and no validity mask.
*
* @param[in] type The column data type
* @return Empty column with desired type
*/
std::unique_ptr<column> make_empty_column(data_type type);
/**
* @brief Creates an empty column of the specified type.
*
* An empty column contains zero elements and no validity mask.
*
* @param[in] id The column type id
* @return Empty column with specified type
*/
std::unique_ptr<column> make_empty_column(type_id id);
/**
* @brief Construct column with sufficient uninitialized storage to hold `size` elements of the
* specified numeric `data_type` with an optional null mask.
*
* @note `null_count()` is determined by the requested null mask `state`
*
* @throws std::bad_alloc if device memory allocation fails
* @throws cudf::logic_error if `type` is not a numeric type
* @throws cudf::logic_error if `size < 0`
*
* @param[in] type The desired numeric element type
* @param[in] size The number of elements in the column
* @param[in] state Optional, controls allocation/initialization of the
* column's null mask. By default, no null mask is allocated.
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @param[in] mr Device memory resource used to allocate the returned column's device memory
* @return Constructed numeric column
*/
std::unique_ptr<column> make_numeric_column(
data_type type,
size_type size,
mask_state state = mask_state::UNALLOCATED,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct column with sufficient uninitialized storage to hold `size` elements of the
* specified numeric `data_type` with a null mask.
*
* @note null_count is optional and will be computed if not provided.
*
* @throws std::bad_alloc if device memory allocation fails
* @throws cudf::logic_error if `type` is not a numeric type
*
* @param[in] type The desired numeric element type
* @param[in] size The number of elements in the column
* @param[in] null_mask Null mask to use for this column.
* @param[in] null_count Optional number of nulls in the null_mask.
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @param[in] mr Device memory resource used to allocate the returned column's device memory
* @return Constructed numeric column
*/
template <typename B>
std::unique_ptr<column> make_numeric_column(
data_type type,
size_type size,
B&& null_mask,
size_type null_count,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
CUDF_EXPECTS(is_numeric(type), "Invalid, non-numeric type.");
return std::make_unique<column>(type,
size,
rmm::device_buffer{size * cudf::size_of(type), stream, mr},
std::forward<B>(null_mask),
null_count);
}
/**
* @brief Construct column with sufficient uninitialized storage to hold `size` elements of the
* specified `fixed_point` `data_type` with an optional null mask.
*
* @note The column's null count is determined by the requested null mask `state`.
*
* @throws cudf::logic_error if `type` is not a `fixed_point` type.
* @throws cudf::logic_error if `size < 0`
*
* @param[in] type The desired `fixed_point` element type.
* @param[in] size The number of elements in the column.
* @param[in] state Optional, controls allocation/initialization of the
* column's null mask. By default, no null mask is allocated.
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @param[in] mr Device memory resource used to allocate the returned column's device memory.
* @return Constructed fixed-point type column
*/
std::unique_ptr<column> make_fixed_point_column(
data_type type,
size_type size,
mask_state state = mask_state::UNALLOCATED,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct column with sufficient uninitialized storage to hold `size` elements of the
* specified `fixed_point` `data_type` with a null mask.
*
* @note null_count is optional and will be computed if not provided.
*
* @throws cudf::logic_error if `type` is not a `fixed_point` type.
*
* @param[in] type The desired `fixed_point` element type.
* @param[in] size The number of elements in the column.
* @param[in] null_mask Null mask to use for this column.
* @param[in] null_count Optional number of nulls in the null_mask.
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @param[in] mr Device memory resource used to allocate the returned column's device memory.
* @return Constructed fixed-point type column
*/
template <typename B>
std::unique_ptr<column> make_fixed_point_column(
data_type type,
size_type size,
B&& null_mask,
size_type null_count,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
CUDF_EXPECTS(is_fixed_point(type), "Invalid, non-fixed_point type.");
return std::make_unique<column>(type,
size,
rmm::device_buffer{size * cudf::size_of(type), stream, mr},
std::forward<B>(null_mask),
null_count);
}
/**
* @brief Construct column with sufficient uninitialized storage to hold `size` elements of the
* specified timestamp `data_type` with an optional null mask.
*
* @note `null_count()` is determined by the requested null mask `state`
*
* @throws std::bad_alloc if device memory allocation fails
* @throws cudf::logic_error if `type` is not a timestamp type
* @throws cudf::logic_error if `size < 0`
*
* @param[in] type The desired timestamp element type
* @param[in] size The number of elements in the column
* @param[in] state Optional, controls allocation/initialization of the
* column's null mask. By default, no null mask is allocated.
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @param[in] mr Device memory resource used to allocate the returned column's device memory
* @return Constructed timestamp type column
*/
std::unique_ptr<column> make_timestamp_column(
data_type type,
size_type size,
mask_state state = mask_state::UNALLOCATED,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct column with sufficient uninitialized storage to hold `size` elements of the
* specified timestamp `data_type` with a null mask.
*
* @note null_count is optional and will be computed if not provided.
*
* @throws std::bad_alloc if device memory allocation fails
* @throws cudf::logic_error if `type` is not a timestamp type
*
* @param[in] type The desired timestamp element type
* @param[in] size The number of elements in the column
* @param[in] null_mask Null mask to use for this column.
* @param[in] null_count Optional number of nulls in the null_mask.
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @param[in] mr Device memory resource used to allocate the returned column's device memory
* @return Constructed timestamp type column
*/
template <typename B>
std::unique_ptr<column> make_timestamp_column(
data_type type,
size_type size,
B&& null_mask,
size_type null_count,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
CUDF_EXPECTS(is_timestamp(type), "Invalid, non-timestamp type.");
return std::make_unique<column>(type,
size,
rmm::device_buffer{size * cudf::size_of(type), stream, mr},
std::forward<B>(null_mask),
null_count);
}
/**
* @brief Construct column with sufficient uninitialized storage to hold `size` elements of the
* specified duration `data_type` with an optional null mask.
*
* @note `null_count()` is determined by the requested null mask `state`
*
* @throws std::bad_alloc if device memory allocation fails
* @throws cudf::logic_error if `type` is not a duration type
* @throws cudf::logic_error if `size < 0`
*
* @param[in] type The desired duration element type
* @param[in] size The number of elements in the column
* @param[in] state Optional, controls allocation/initialization of the
* column's null mask. By default, no null mask is allocated.
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @param[in] mr Device memory resource used to allocate the returned column's device memory
* @return Constructed duration type column
*/
std::unique_ptr<column> make_duration_column(
data_type type,
size_type size,
mask_state state = mask_state::UNALLOCATED,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct column with sufficient uninitialized storage to hold `size` elements of the
* specified duration `data_type` with a null mask.
*
* @note null_count is optional and will be computed if not provided.
*
* @throws std::bad_alloc if device memory allocation fails
* @throws cudf::logic_error if `type` is not a duration type
*
* @param[in] type The desired duration element type
* @param[in] size The number of elements in the column
* @param[in] null_mask Null mask to use for this column.
* @param[in] null_count Optional number of nulls in the null_mask.
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @param[in] mr Device memory resource used to allocate the returned column's device memory
* @return Constructed duration type column
*/
template <typename B>
std::unique_ptr<column> make_duration_column(
data_type type,
size_type size,
B&& null_mask,
size_type null_count,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
CUDF_EXPECTS(is_duration(type), "Invalid, non-duration type.");
return std::make_unique<column>(type,
size,
rmm::device_buffer{size * cudf::size_of(type), stream, mr},
std::forward<B>(null_mask),
null_count);
}
/**
* @brief Construct column with sufficient uninitialized storage to hold `size` elements of the
* specified fixed width `data_type` with an optional null mask.
*
* @note `null_count()` is determined by the requested null mask `state`
*
* @throws std::bad_alloc if device memory allocation fails
* @throws cudf::logic_error if `type` is not a fixed width type
* @throws cudf::logic_error if `size < 0`
*
* @param[in] type The desired fixed width type
* @param[in] size The number of elements in the column
* @param[in] state Optional, controls allocation/initialization of the
* column's null mask. By default, no null mask is allocated.
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @param[in] mr Device memory resource used to allocate the returned column's device memory
* @return Constructed fixed-width type column
*/
std::unique_ptr<column> make_fixed_width_column(
data_type type,
size_type size,
mask_state state = mask_state::UNALLOCATED,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct column with sufficient uninitialized storage to hold `size` elements of the
* specified fixed width `data_type` with a null mask.
*
* @note null_count is optional and will be computed if not provided.
*
* @throws std::bad_alloc if device memory allocation fails
* @throws cudf::logic_error if `type` is not a fixed width type
*
* @param[in] type The desired fixed width element type
* @param[in] size The number of elements in the column
* @param[in] null_mask Null mask to use for this column.
* @param[in] null_count Optional number of nulls in the null_mask.
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @param[in] mr Device memory resource used to allocate the returned column's device memory
* @return Constructed fixed-width type column
*/
template <typename B>
std::unique_ptr<column> make_fixed_width_column(
data_type type,
size_type size,
B&& null_mask,
size_type null_count,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
CUDF_EXPECTS(is_fixed_width(type), "Invalid, non-fixed-width type.");
if (is_timestamp(type)) {
return make_timestamp_column(type, size, std::forward<B>(null_mask), null_count, stream, mr);
} else if (is_duration(type)) {
return make_duration_column(type, size, std::forward<B>(null_mask), null_count, stream, mr);
} else if (is_fixed_point(type)) {
return make_fixed_point_column(type, size, std::forward<B>(null_mask), null_count, stream, mr);
}
return make_numeric_column(type, size, std::forward<B>(null_mask), null_count, stream, mr);
}
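// Illustrative sketch: creating uninitialized fixed-width columns with the
// factories above, using the default stream and memory resource.
//
//   auto ints = cudf::make_numeric_column(
//     cudf::data_type{cudf::type_id::INT32}, 1000, cudf::mask_state::UNALLOCATED);
//   auto times = cudf::make_timestamp_column(
//     cudf::data_type{cudf::type_id::TIMESTAMP_SECONDS}, 1000, cudf::mask_state::ALL_VALID);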
/**
* @brief Construct a STRING type column given a device span of pointer/size pairs.
*
* The total number of char bytes must not exceed the maximum size of size_type.
* The string characters are expected to be UTF-8 encoded sequence of char
* bytes. Use the strings_column_view class to perform strings operations on
* this type of column.
*
* @note `null_count()` and `null_bitmask` are determined by whether a pair contains
* a null string. That is, for each pair, if `.first` is null, that string
* is considered null. Likewise, a string is considered empty (not null)
* if `.first` is not null and `.second` is 0. Otherwise the `.first` member
* must be a valid device address pointing to `.second` consecutive bytes.
*
* @throws std::bad_alloc if device memory allocation fails
*
* @param[in] strings The device span of pointer/size pairs. Each pointer must be a device memory
* address or `nullptr` (indicating a null string). The size must be the number of bytes.
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @param[in] mr Device memory resource used for allocation of the column's `null_mask` and children
* columns' device memory.
* @return Constructed strings column
*/
std::unique_ptr<column> make_strings_column(
cudf::device_span<thrust::pair<char const*, size_type> const> strings,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a STRING type column given a device span of string_view.
*
* The total number of char bytes must not exceed the maximum size of size_type.
* The string characters are expected to be UTF-8 encoded sequence of char
* bytes. Use the strings_column_view class to perform strings operations on
* this type of column.
*
* @note For each string_view, if `.data()` is `null_placeholder.data()`, that
* string is considered null. Likewise, a string is considered empty (not null)
* if `.data()` is not `null_placeholder.data()` and `.size_bytes()` is 0.
* Otherwise the `.data()` must be a valid device address pointing to
* `.size_bytes()` consecutive bytes. The `null_count()` for the output column
* will be equal to the number of input `string_view`s that are null.
*
* @throws std::bad_alloc if device memory allocation fails
*
* @param[in] string_views The span of string_view. Each string_view must point to a device memory
* address or `null_placeholder` (indicating a null string). The size must be the number of bytes.
* @param[in] null_placeholder string_view indicating null string in given list of
* string_views.
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @param[in] mr Device memory resource used for allocation of the column's `null_mask` and children
* columns' device memory.
* @return Constructed strings column
*/
std::unique_ptr<column> make_strings_column(
cudf::device_span<string_view const> string_views,
string_view const null_placeholder,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a STRING type column given a device span of chars encoded as UTF-8, a device
* span of byte offsets identifying individual strings within the char vector, and an optional
* null bitmask.
*
* `offsets.front()` must always be zero.
*
* The total number of char bytes must not exceed the maximum size of size_type. Use the
* strings_column_view class to perform strings operations on this type of column.
*
* This function makes a deep copy of the strings, offsets, null_mask to create a new column.
*
* @param strings The device span of chars in device memory. This char vector is expected to be
* UTF-8 encoded characters.
* @param offsets The device span of byte offsets in device memory. The number of elements is
* one more than the total number of strings so the `offsets.back()` is the total number of bytes
* in the strings array. `offsets.front()` must always be 0 to point to the beginning of `strings`.
* @param null_mask Device span containing the null element indicator bitmask. Arrow format for
* nulls is used for interpreting this bitmask.
* @param null_count The number of null string entries
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used for allocation of the column's `null_mask` and children
* columns' device memory
* @return Constructed strings column
*/
std::unique_ptr<column> make_strings_column(
cudf::device_span<char const> strings,
cudf::device_span<size_type const> offsets,
cudf::device_span<bitmask_type const> null_mask,
size_type null_count,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a STRING type column given an offsets column, a chars column, a null mask, and
* a null count.
*
* The columns and mask are moved into the resulting strings column.
*
* @param num_strings The number of strings the column represents.
* @param offsets_column The column of offset values for this column. The number of elements is
* one more than the total number of strings so the `offset[last] - offset[0]` is the total number
* of bytes in the strings vector.
* @param chars_column The column of char bytes for all the strings for this column. Individual
* strings are identified by the offsets and the nullmask.
* @param null_count The number of null string entries.
* @param null_mask The bits specifying the null strings in device memory. Arrow format for
* nulls is used for interpreting this bitmask.
* @return Constructed strings column
*/
std::unique_ptr<column> make_strings_column(size_type num_strings,
std::unique_ptr<column> offsets_column,
std::unique_ptr<column> chars_column,
size_type null_count,
rmm::device_buffer&& null_mask);
/**
* @brief Construct a STRING type column given offsets and chars device vectors, and a null mask
* and null count.
*
* @param[in] num_strings The number of strings the column represents.
* @param[in] offsets The offset values for this column. The number of elements is one more than the
* total number of strings so the `offset[last] - offset[0]` is the total number of bytes in the
* strings vector.
* @param[in] chars The char bytes for all the strings for this column. Individual strings are
* identified by the offsets and the nullmask.
* @param[in] null_mask The bits specifying the null strings in device memory. Arrow format for
* nulls is used for interpreting this bitmask.
* @param[in] null_count The number of null string entries.
* @return Constructed strings column
*/
std::unique_ptr<column> make_strings_column(size_type num_strings,
rmm::device_uvector<size_type>&& offsets,
rmm::device_uvector<char>&& chars,
rmm::device_buffer&& null_mask,
size_type null_count);
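// Illustrative sketch (assumes the host data below is what should end up on the
// device): building the two strings "hi" and "bye" with the overload above.
//
//   auto stream = cudf::get_default_stream();
//   std::string const flat = "hibye";
//   std::vector<cudf::size_type> const host_offsets{0, 2, 5};
//   rmm::device_uvector<char> chars(flat.size(), stream);
//   rmm::device_uvector<cudf::size_type> offsets(host_offsets.size(), stream);
//   cudaMemcpyAsync(chars.data(), flat.data(), flat.size(), cudaMemcpyDefault, stream.value());
//   cudaMemcpyAsync(offsets.data(), host_offsets.data(),
//                   host_offsets.size() * sizeof(cudf::size_type),
//                   cudaMemcpyDefault, stream.value());
//   auto strings = cudf::make_strings_column(
//     2, std::move(offsets), std::move(chars), rmm::device_buffer{}, 0);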
/**
* @brief Construct a LIST type column given offsets column, child column, null mask and null
* count.
*
* The columns and mask are moved into the resulting lists column.
*
* List columns are structured similarly to strings columns. They contain
* a set of offsets which represents the lengths of the lists in each row, and
* a "child" column of data that is referenced by the offsets. Since lists
* are a nested type, the child column may itself be further nested.
*
* When child column at depth N+1 is itself a list, the offsets column at
* depth N references the offsets column for depth N+1. When the child column at depth
* N+1 is a leaf type (int, float, etc), the offsets column at depth N references
* the data for depth N+1.
*
* @code{.pseudo}
* Example:
* List<int>
* input: {{1, 2}, {3, 4, 5}}
* offsets (depth 0) {0, 2, 5}
* data (depth 0)
* offsets (depth 1)
* data (depth 1) {1, 2, 3, 4, 5}
* @endcode
*
* @code{.pseudo}
* Example:
* List<List<int>>
* input: { {{1, 2}}, {{3, 4, 5}, {6, 7}} }
* offsets (depth 0) {0, 1, 3}
* data (depth 0)
* offsets (depth 1) {0, 2, 5, 7}
* data (depth 1)
* offsets (depth 2)
* data (depth 2) {1, 2, 3, 4, 5, 6, 7}
* @endcode
*
* @param[in] num_rows The number of lists the column represents.
* @param[in] offsets_column The column of offset values for this column. Each value should
* represent the starting offset into the child elements that corresponds to the beginning of the
* row, with the first row starting at 0. The length of row N can be determined by subtracting
* offsets[N+1] - offsets[N]. The total number of offsets should be 1 longer than the # of rows in
* the column.
* @param[in] child_column The column of nested data referenced by the lists represented by the
* offsets_column. Note: the child column may itself be
* further nested.
* @param[in] null_count The number of null list entries.
* @param[in] null_mask The bits specifying the null lists in device memory.
* Arrow format for nulls is used for interpreting this bitmask.
* @param[in] stream Optional stream for use with all memory allocation
* and device kernels
* @param[in] mr Optional resource to use for device memory
* allocation of the column's `null_mask` and children.
* @return Constructed lists column
*/
std::unique_ptr<cudf::column> make_lists_column(
size_type num_rows,
std::unique_ptr<column> offsets_column,
std::unique_ptr<column> child_column,
size_type null_count,
rmm::device_buffer&& null_mask,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a STRUCT column using specified child columns as members.
*
* Specified child/member columns and null_mask are adopted by resultant
* struct column.
*
* A struct column requires that all specified child columns have the same
* number of rows. A struct column's row count equals that of any/all
* of its child columns. A single struct row at any index is comprised of
* all the individual child column values at the same index, in the order
* specified in the list of child columns.
*
* The specified null mask governs which struct row has a null value. This
* is orthogonal to the null values of individual child columns.
*
* @param[in] num_rows The number of struct values in the struct column.
* @param[in] child_columns The list of child/members that the struct is comprised of.
* @param[in] null_count The number of null values in the struct column.
* @param[in] null_mask The bits specifying the null struct values in the column.
* @param[in] stream Optional stream for use with all memory allocation and device kernels.
* @param[in] mr Optional resource to use for device memory allocation.
* @return Constructed structs column
*/
std::unique_ptr<cudf::column> make_structs_column(
size_type num_rows,
std::vector<std::unique_ptr<column>>&& child_columns,
size_type null_count,
rmm::device_buffer&& null_mask,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Construct a column with size elements that are all equal to the given scalar.
*
* The output column will have the same type as `s.type()`
* The output column will contain all null rows if `s.is_valid() == false`
* The output column will be empty if `size==0`. For LIST scalars, the column hierarchy
* from @p s is preserved.
*
* @param[in] s The scalar to use for values in the column.
* @param[in] size The number of rows for the output column.
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @param[in] mr Device memory resource used to allocate the returned column's device memory.
* @return Constructed column whose rows all contain the scalar value
*/
std::unique_ptr<column> make_column_from_scalar(
scalar const& s,
size_type size,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
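// Illustrative sketch: a ten-row INT32 column in which every row holds 42.
//
//   cudf::numeric_scalar<int32_t> const value{42};
//   auto col = cudf::make_column_from_scalar(value, 10);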
/**
* @brief Construct a dictionary column with size elements that are all equal to the given scalar.
*
* The output column will have keys of type `s.type()`
* The output column will be empty if `size==0`.
*
* @throw cudf::logic_error if `s.is_valid()==false`
*
* @param[in] s The scalar to use for values in the column.
* @param[in] size The number of rows for the output column.
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
* @param[in] mr Device memory resource used to allocate the returned column's device memory.
* @return Constructed dictionary column
*/
std::unique_ptr<column> make_dictionary_from_scalar(
scalar const& s,
size_type size,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/column/column_view.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/types.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <limits>
#include <type_traits>
#include <vector>
/**
* @file column_view.hpp
* @brief column view class definitions
*/
namespace cudf {
namespace detail {
/**
* @brief A non-owning, immutable view of device data as a column of elements,
* some of which may be null as indicated by a bitmask.
*
* A `column_view_base` can be constructed implicitly from a `cudf::column`, or
* may be constructed explicitly from a pointer to pre-existing device memory.
*
* Unless otherwise noted, the memory layout of the `column_view_base`'s data
* and bitmask is expected to adhere to the Arrow Physical Memory Layout
* Specification: https://arrow.apache.org/docs/memory_layout.html
*
* Because `column_view_base` is non-owning, no device memory is allocated nor
* freed when `column_view_base` objects are created or destroyed.
*
* To enable zero-copy slicing, a `column_view_base` has an `offset` that
* indicates the index of the first element in the column relative to the base
* device memory allocation. By default, `offset()` is zero.
*/
class column_view_base {
public:
/**
* @brief Returns pointer to the base device memory allocation casted to
* the specified type.
*
* @note If `offset() == 0`, then `head<T>() == data<T>()`
*
* @note It should be rare to need to access the `head<T>()` allocation of
* a column, and instead, accessing the elements should be done via
* `data<T>()`.
*
* This function will only participate in overload resolution if `is_rep_layout_compatible<T>()`
* or `std::is_same_v<T,void>` are true.
*
* @tparam T The type to cast to
* @return Typed pointer to underlying data
*/
template <typename T = void,
CUDF_ENABLE_IF(std::is_same_v<T, void> or is_rep_layout_compatible<T>())>
T const* head() const noexcept
{
return static_cast<T const*>(_data);
}
/**
* @brief Returns the underlying data casted to the specified type, plus the
* offset.
*
* @note If `offset() == 0`, then `head<T>() == data<T>()`
*
* This function does not participate in overload resolution if `is_rep_layout_compatible<T>` is
* false.
*
* @tparam T The type to cast to
* @return Typed pointer to underlying data, including the offset
*/
template <typename T, CUDF_ENABLE_IF(is_rep_layout_compatible<T>())>
T const* data() const noexcept
{
return head<T>() + _offset;
}
/**
* @brief Return first element (accounting for offset) after underlying data
* is casted to the specified type.
*
* This function does not participate in overload resolution if `is_rep_layout_compatible<T>` is
* false.
*
* @tparam T The desired type
* @return Pointer to the first element after casting
*/
template <typename T, CUDF_ENABLE_IF(is_rep_layout_compatible<T>())>
T const* begin() const noexcept
{
return data<T>();
}
/**
* @brief Return one past the last element after underlying data is casted to
* the specified type.
*
* This function does not participate in overload resolution if `is_rep_layout_compatible<T>` is
* false.
*
* @tparam T The desired type
* @return Pointer to one past the last element after casting
*/
template <typename T, CUDF_ENABLE_IF(is_rep_layout_compatible<T>())>
T const* end() const noexcept
{
return begin<T>() + size();
}
/**
* @brief Returns the number of elements in the column
*
* @return The number of elements in the column
*/
[[nodiscard]] size_type size() const noexcept { return _size; }
/**
* @brief Returns true if `size()` returns zero, or false otherwise
*
* @return True if `size()` returns zero, or false otherwise
*/
[[nodiscard]] bool is_empty() const noexcept { return size() == 0; }
/**
* @brief Returns the element `data_type`
*
* @return The `data_type` of the elements in the column
*/
[[nodiscard]] data_type type() const noexcept { return _type; }
/**
* @brief Indicates if the column can contain null elements, i.e., if it has
* an allocated bitmask.
*
* @note If `null_count() > 0`, this function must always return `true`.
*
* @return true The bitmask is allocated
* @return false The bitmask is not allocated
*/
[[nodiscard]] bool nullable() const noexcept { return nullptr != _null_mask; }
/**
* @brief Returns the count of null elements
*
* @return The count of null elements
*/
[[nodiscard]] size_type null_count() const { return _null_count; }
/**
* @brief Returns the count of null elements in the range [begin, end)
*
* @note If `null_count() != 0`, every invocation of `null_count(begin, end)`
* will recompute the count of null elements indicated by the `null_mask` in
* the range [begin, end).
*
* @throws cudf::logic_error for invalid range (if `begin < 0`,
* `begin > end`, `begin >= size()`, or `end > size()`).
*
* @param[in] begin The starting index of the range (inclusive).
 * @param[in] end The index one past the last element in the range (exclusive).
* @return The count of null elements in the given range
*/
[[nodiscard]] size_type null_count(size_type begin, size_type end) const;
/**
* @brief Indicates if the column contains null elements,
* i.e., `null_count() > 0`
*
* @return true One or more elements are null
* @return false All elements are valid
*/
[[nodiscard]] bool has_nulls() const { return null_count() > 0; }
/**
* @brief Indicates if the column contains null elements in the range
* [begin, end), i.e., `null_count(begin, end) > 0`
*
* @throws cudf::logic_error for invalid range (if `begin < 0`,
* `begin > end`, `begin >= size()`, or `end > size()`).
*
* @param begin The starting index of the range (inclusive).
 * @param end The index one past the last element in the range (exclusive).
* @return true One or more elements are null in the range [begin, end)
* @return false All elements are valid in the range [begin, end)
*/
[[nodiscard]] bool has_nulls(size_type begin, size_type end) const
{
return null_count(begin, end) > 0;
}
/**
* @brief Returns raw pointer to the underlying bitmask allocation.
*
* @note This function does *not* account for the `offset()`.
*
* @note If `null_count() == 0`, this may return `nullptr`.
* @return Raw pointer to the bitmask
*/
[[nodiscard]] bitmask_type const* null_mask() const noexcept { return _null_mask; }
/**
* @brief Returns the index of the first element relative to the base memory
* allocation, i.e., what is returned from `head<T>()`.
*
* @return The index of the first element relative to `head<T>()`
*/
[[nodiscard]] size_type offset() const noexcept { return _offset; }
protected:
data_type _type{type_id::EMPTY}; ///< Element type
size_type _size{}; ///< Number of elements
void const* _data{}; ///< Pointer to device memory containing elements
bitmask_type const* _null_mask{}; ///< Pointer to device memory containing
///< bitmask representing null elements.
///< Optional if `null_count() == 0`
mutable size_type _null_count{}; ///< The number of null elements
size_type _offset{}; ///< Index position of the first element.
///< Enables zero-copy slicing
column_view_base() = default;
~column_view_base() = default;
column_view_base(column_view_base const&) = default; ///< Copy constructor
column_view_base(column_view_base&&) = default; ///< Move constructor
/**
* @brief Copy assignment operator
*
* @return Reference to this object
*/
column_view_base& operator=(column_view_base const&) = default;
/**
* @brief Move assignment operator
*
* @return Reference to this object (after transferring ownership)
*/
column_view_base& operator=(column_view_base&&) = default;
/**
* @brief Construct a `column_view_base` from pointers to device memory for
 * the elements and bitmask of the column.
*
* If `null_count()` is zero, `null_mask` is optional.
*
* If `type` is `EMPTY`, the specified `null_count` will be ignored and
* `null_count()` will always return the same value as `size()`
*
* @throws cudf::logic_error if `size < 0`
* @throws cudf::logic_error if `size > 0` but `data == nullptr`
* @throws cudf::logic_error if `type.id() == EMPTY` but `data != nullptr`
 * or `null_mask != nullptr`
* @throws cudf::logic_error if `null_count > 0`, but `null_mask == nullptr`
* @throws cudf::logic_error if `offset < 0`
*
* @param type The element type
* @param size The number of elements
* @param data Pointer to device memory containing the column elements
* @param null_mask Pointer to device memory containing the null
* indicator bitmask
* @param null_count The number of null elements.
* @param offset Optional, index of the first element
*/
column_view_base(data_type type,
size_type size,
void const* data,
bitmask_type const* null_mask,
size_type null_count,
size_type offset = 0);
};
class mutable_column_view_base : public column_view_base {
public:
protected:
};
} // namespace detail
/**
* @brief A non-owning, immutable view of device data as a column of elements,
* some of which may be null as indicated by a bitmask.
*
* @ingroup column_classes
*
* A `column_view` can be constructed implicitly from a `cudf::column`, or may
* be constructed explicitly from a pointer to pre-existing device memory.
*
* Unless otherwise noted, the memory layout of the `column_view`'s data and
* bitmask is expected to adhere to the Arrow Physical Memory Layout
* Specification: https://arrow.apache.org/docs/memory_layout.html
*
* Because `column_view` is non-owning, no device memory is allocated nor freed
* when `column_view` objects are created or destroyed.
*
* To enable zero-copy slicing, a `column_view` has an `offset` that indicates
* the index of the first element in the column relative to the base device
* memory allocation. By default, `offset()` is zero.
*/
class column_view : public detail::column_view_base {
public:
column_view() = default;
// these pragmas work around the nvcc issue where if a column_view is used
// inside of a __device__ code path, these functions will end up being created
// as __host__ __device__ because they are explicitly defaulted. However, if
// they then end up being called by a simple __host__ function
// (eg std::vector destructor) you get a compile error because you're trying to
// call a __host__ __device__ function from a __host__ function.
#ifdef __CUDACC__
#pragma nv_exec_check_disable
#endif
~column_view() = default;
#ifdef __CUDACC__
#pragma nv_exec_check_disable
#endif
column_view(column_view const&) = default; ///< Copy constructor
column_view(column_view&&) = default; ///< Move constructor
/**
* @brief Copy assignment operator
*
* @return Reference to this object
*/
column_view& operator=(column_view const&) = default;
/**
* @brief Move assignment operator
*
* @return Reference to this object
*/
column_view& operator=(column_view&&) = default;
/**
* @brief Construct a `column_view` from pointers to device memory for the
* elements and bitmask of the column.
*
* If `null_count()` is zero, `null_mask` is optional.
*
* If `type` is `EMPTY`, the specified `null_count` will be ignored and
* `null_count()` will always return the same value as `size()`
*
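 * A minimal usage sketch (illustrative; `d_data` and `n` are assumed to be a
 * caller-owned device allocation of INT32 values and its element count):
 * @code{.cpp}
 * cudf::column_view cv{cudf::data_type{cudf::type_id::INT32},
 *                      n,
 *                      d_data,   // device pointer (assumed)
 *                      nullptr,  // no null mask
 *                      0};       // null count
 * @endcode
 *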
* @throws cudf::logic_error if `size < 0`
* @throws cudf::logic_error if `size > 0` but `data == nullptr`
* @throws cudf::logic_error if `type.id() == EMPTY` but `data != nullptr`
 * or `null_mask != nullptr`
* @throws cudf::logic_error if `null_count > 0`, but `null_mask == nullptr`
* @throws cudf::logic_error if `offset < 0`
*
* @param type The element type
* @param size The number of elements
* @param data Pointer to device memory containing the column elements
* @param null_mask Pointer to device memory containing the null
* indicator bitmask
* @param null_count The number of null elements.
* @param offset Optional, index of the first element
* @param children Optional, depending on the element type, child columns may
* contain additional data
*/
column_view(data_type type,
size_type size,
void const* data,
bitmask_type const* null_mask,
size_type null_count,
size_type offset = 0,
std::vector<column_view> const& children = {});
/**
* @brief Returns the specified child
*
* @param child_index The index of the desired child
* @return The requested child `column_view`
*/
[[nodiscard]] column_view child(size_type child_index) const noexcept
{
return _children[child_index];
}
/**
* @brief Returns the number of child columns.
*
* @return The number of child columns
*/
[[nodiscard]] size_type num_children() const noexcept { return _children.size(); }
/**
* @brief Returns iterator to the beginning of the ordered sequence of child column-views.
*
* @return An iterator to a `column_view` referencing the first child column
*/
auto child_begin() const noexcept { return _children.cbegin(); }
/**
* @brief Returns iterator to the end of the ordered sequence of child column-views.
*
* @return An iterator to a `column_view` one past the end of the child columns
*/
auto child_end() const noexcept { return _children.cend(); }
/**
* @brief Construct a column view from a device_span<T>.
*
* Only numeric and chrono types are supported.
*
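 * A minimal usage sketch (illustrative; `d_vec` is assumed to be a caller-owned
 * `rmm::device_uvector<float>`):
 * @code{.cpp}
 * auto span = cudf::device_span<float const>{d_vec.data(), d_vec.size()};
 * cudf::column_view cv{span};  // FLOAT32 view over the same device memory
 * @endcode
 *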
* @tparam T The device span type. Must be const and match the column view's type.
* @param data A typed device span containing the column view's data.
*/
template <typename T, CUDF_ENABLE_IF(cudf::is_numeric<T>() or cudf::is_chrono<T>())>
column_view(device_span<T const> data)
: column_view(
cudf::data_type{cudf::type_to_id<T>()}, data.size(), data.data(), nullptr, 0, 0, {})
{
CUDF_EXPECTS(
data.size() <= static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max()),
"Data exceeds the column size limit",
std::overflow_error);
}
/**
* @brief Converts a column view into a device span.
*
* Only numeric and chrono data types are supported. The column view must not
* be nullable.
*
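 * A minimal usage sketch (illustrative; `cv` is assumed to be a non-nullable
 * INT32 `column_view`):
 * @code{.cpp}
 * auto span = static_cast<cudf::device_span<int32_t const>>(cv);
 * // span.data() and span.size() now reference the view's device memory
 * @endcode
 *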
* @tparam T The device span type. Must be const and match the column view's type.
* @throws cudf::logic_error if the column view type does not match the span type.
* @throws cudf::logic_error if the column view is nullable.
* @return A typed device span of the column view's data.
*/
template <typename T, CUDF_ENABLE_IF(cudf::is_numeric<T>() or cudf::is_chrono<T>())>
[[nodiscard]] operator device_span<T const>() const
{
CUDF_EXPECTS(type() == cudf::data_type{cudf::type_to_id<T>()},
"Device span type must match column view type.");
CUDF_EXPECTS(!nullable(), "A nullable column view cannot be converted to a device span.");
return device_span<T const>(data<T>(), size());
}
private:
friend column_view bit_cast(column_view const& input, data_type type);
std::vector<column_view> _children{}; ///< Based on element type, children
///< may contain additional data
};
/**
* @brief A non-owning, mutable view of device data as a column of elements,
* some of which may be null as indicated by a bitmask.
*
* @ingroup column_classes
*
* A `mutable_column_view` can be constructed implicitly from a `cudf::column`,
* or may be constructed explicitly from a pointer to pre-existing device memory.
*
* Unless otherwise noted, the memory layout of the `mutable_column_view`'s data
* and bitmask is expected to adhere to the Arrow Physical Memory Layout
* Specification: https://arrow.apache.org/docs/memory_layout.html
*
* Because `mutable_column_view` is non-owning, no device memory is allocated
* nor freed when `mutable_column_view` objects are created or destroyed.
*
* To enable zero-copy slicing, a `mutable_column_view` has an `offset` that
* indicates the index of the first element in the column relative to the base
* device memory allocation. By default, `offset()` is zero.
*/
class mutable_column_view : public detail::column_view_base {
public:
mutable_column_view() = default;
~mutable_column_view() = default;
mutable_column_view(mutable_column_view const&) = default; ///< Copy constructor
mutable_column_view(mutable_column_view&&) = default; ///< Move constructor
/**
* @brief Copy assignment operator
*
* @return Reference to this object
*/
mutable_column_view& operator=(mutable_column_view const&) = default;
/**
* @brief Move assignment operator
*
* @return Reference to this object (after transferring ownership)
*/
mutable_column_view& operator=(mutable_column_view&&) = default;
/**
* @brief Construct a `mutable_column_view` from pointers to device memory for
* the elements and bitmask of the column.
* If `type` is `EMPTY`, the specified `null_count` will be ignored and
* `null_count()` will always return the same value as `size()`
*
* @throws cudf::logic_error if `size < 0`
* @throws cudf::logic_error if `size > 0` but `data == nullptr`
* @throws cudf::logic_error if `type.id() == EMPTY` but `data != nullptr`
 * or `null_mask != nullptr`
* @throws cudf::logic_error if `null_count > 0`, but `null_mask == nullptr`
* @throws cudf::logic_error if `offset < 0`
*
* @param type The element type
* @param size The number of elements
* @param data Pointer to device memory containing the column elements
 * @param null_mask Pointer to device memory containing the null indicator
 * bitmask
* @param null_count The number of null elements.
* @param offset Optional, index of the first element
* @param children Optional, depending on the element type, child columns may
* contain additional data
*/
mutable_column_view(data_type type,
size_type size,
void* data,
bitmask_type* null_mask,
size_type null_count,
size_type offset = 0,
std::vector<mutable_column_view> const& children = {});
/**
* @brief Returns pointer to the base device memory allocation casted to
* the specified type.
*
* This function will only participate in overload resolution if `is_rep_layout_compatible<T>()`
* or `std::is_same_v<T,void>` are true.
*
* @note If `offset() == 0`, then `head<T>() == data<T>()`
*
* @note It should be rare to need to access the `head<T>()` allocation of a
* column, and instead, accessing the elements should be done via `data<T>()`.
*
 * @tparam T The type to cast to
* @return Typed pointer to underlying data
*/
template <typename T = void,
CUDF_ENABLE_IF(std::is_same_v<T, void> or is_rep_layout_compatible<T>())>
T* head() const noexcept
{
return const_cast<T*>(detail::column_view_base::head<T>());
}
/**
* @brief Returns the underlying data casted to the specified type, plus the
* offset.
*
* This function does not participate in overload resolution if `is_rep_layout_compatible<T>` is
* false.
*
* @note If `offset() == 0`, then `head<T>() == data<T>()`
*
* @tparam T The type to cast to
* @return Typed pointer to underlying data, including the offset
*/
template <typename T, CUDF_ENABLE_IF(is_rep_layout_compatible<T>())>
T* data() const noexcept
{
return const_cast<T*>(detail::column_view_base::data<T>());
}
/**
* @brief Return first element (accounting for offset) when underlying data is
* casted to the specified type.
*
* This function does not participate in overload resolution if `is_rep_layout_compatible<T>` is
* false.
*
* @tparam T The desired type
* @return Pointer to the first element after casting
*/
template <typename T, CUDF_ENABLE_IF(is_rep_layout_compatible<T>())>
T* begin() const noexcept
{
return const_cast<T*>(detail::column_view_base::begin<T>());
}
/**
* @brief Return one past the last element after underlying data is casted to
* the specified type.
*
* This function does not participate in overload resolution if `is_rep_layout_compatible<T>` is
* false.
*
* @tparam T The desired type
* @return Pointer to one past the last element after casting
*/
template <typename T, CUDF_ENABLE_IF(is_rep_layout_compatible<T>())>
T* end() const noexcept
{
return const_cast<T*>(detail::column_view_base::end<T>());
}
/**
* @brief Returns raw pointer to the underlying bitmask allocation.
*
* @note This function does *not* account for the `offset()`.
*
* @note If `null_count() == 0`, this may return `nullptr`.
*
* @return Raw pointer to the underlying bitmask allocation
*/
[[nodiscard]] bitmask_type* null_mask() const noexcept
{
return const_cast<bitmask_type*>(detail::column_view_base::null_mask());
}
/**
* @brief Set the null count
*
* @throws cudf::logic_error if `new_null_count > 0` and `nullable() == false`
*
* @param new_null_count The new null count
*/
void set_null_count(size_type new_null_count);
/**
* @brief Returns a reference to the specified child
*
* @param child_index The index of the desired child
* @return The requested child `mutable_column_view`
*/
[[nodiscard]] mutable_column_view child(size_type child_index) const noexcept
{
return mutable_children[child_index];
}
/**
* @brief Returns the number of child columns.
*
* @return The number of child columns
*/
[[nodiscard]] size_type num_children() const noexcept { return mutable_children.size(); }
/**
* @brief Returns iterator to the beginning of the ordered sequence of child column-views.
*
* @return An iterator to a `mutable_column_view` referencing the first child column
*/
auto child_begin() const noexcept { return mutable_children.begin(); }
/**
* @brief Returns iterator to the end of the ordered sequence of child column-views.
*
* @return An iterator to a `mutable_column_view` to the element following the last child column
*/
auto child_end() const noexcept { return mutable_children.end(); }
/**
* @brief Converts a mutable view into an immutable view
*
* @return An immutable view of the mutable view's elements
*/
operator column_view() const;
private:
friend mutable_column_view bit_cast(mutable_column_view const& input, data_type type);
std::vector<mutable_column_view> mutable_children;
};
/**
* @brief Counts the number of descendants of the specified parent.
*
* @param parent The parent whose descendants will be counted
* @return The number of descendants of the parent
*/
size_type count_descendants(column_view parent);
/**
* @brief Zero-copy cast between types with the same size and compatible underlying representations.
*
* This is similar to `reinterpret_cast` or `bit_cast` in that it gives a view of the same raw bits
* as a different type. Unlike `reinterpret_cast` however, this cast is only allowed on types that
* have the same width and compatible representations. For example, the way timestamp types are laid
* out in memory is equivalent to an integer representing a duration since a fixed epoch;
* bit-casting to the same integer type (INT32 for days, INT64 for others) results in a raw view of
* the duration count. A FLOAT32 can also be bit-casted into INT32 and treated as an integer value.
* However, an INT32 column cannot be bit-casted to INT64 as the sizes differ, nor can a string_view
* column be casted into a numeric type column as their data representations are not compatible.
*
* The validity of the conversion can be checked with `cudf::is_bit_castable()`.
*
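 * A minimal usage sketch (illustrative; `ts` is assumed to be a `column_view`
 * of type TIMESTAMP_DAYS):
 * @code{.cpp}
 * auto days = cudf::bit_cast(ts, cudf::data_type{cudf::type_id::INT32});
 * // `days` views the same device memory, reinterpreted as INT32 day counts
 * @endcode
 *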
* @throws cudf::logic_error if the specified cast is not possible, i.e.,
* `is_bit_castable(input.type(), type)` is false.
*
* @param input The `column_view` to cast from
* @param type The `data_type` to cast to
* @return New `column_view` wrapping the same data as `input` but cast to `type`
*/
column_view bit_cast(column_view const& input, data_type type);
/**
* @brief Zero-copy cast between types with the same size and compatible underlying representations.
*
* This is similar to `reinterpret_cast` or `bit_cast` in that it gives a view of the same raw bits
* as a different type. Unlike `reinterpret_cast` however, this cast is only allowed on types that
* have the same width and compatible representations. For example, the way timestamp types are laid
* out in memory is equivalent to an integer representing a duration since a fixed epoch;
* bit-casting to the same integer type (INT32 for days, INT64 for others) results in a raw view of
* the duration count. A FLOAT32 can also be bit-casted into INT32 and treated as an integer value.
* However, an INT32 column cannot be bit-casted to INT64 as the sizes differ, nor can a string_view
* column be casted into a numeric type column as their data representations are not compatible.
*
* The validity of the conversion can be checked with `cudf::is_bit_castable()`.
*
* @throws cudf::logic_error if the specified cast is not possible, i.e.,
* `is_bit_castable(input.type(), type)` is false.
*
* @param input The `mutable_column_view` to cast from
* @param type The `data_type` to cast to
* @return New `mutable_column_view` wrapping the same data as `input` but cast to `type`
*/
mutable_column_view bit_cast(mutable_column_view const& input, data_type type);
namespace detail {
/**
* @brief Computes a hash value from the shallow state of the specified column
*
* For any two columns, if `is_shallow_equivalent(c0,c1)` then `shallow_hash(c0) ==
* shallow_hash(c1)`.
*
* The complexity of computing the hash value of `input` is `O( count_descendants(input) )`, i.e.,
* it is independent of the number of elements in the column.
*
* This function does _not_ inspect the elements of `input` nor access any device memory or launch
* any kernels.
*
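 * A minimal usage sketch (illustrative; `col` is assumed to be any
 * `column_view`):
 * @code{.cpp}
 * auto copy = col;  // copies only the shallow state
 * bool same = cudf::detail::shallow_hash(col) ==
 *             cudf::detail::shallow_hash(copy);  // true
 * @endcode
 *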
* @param input The `column_view` to compute hash
* @return The hash value derived from the shallow state of `input`.
*/
std::size_t shallow_hash(column_view const& input);
/**
* @brief Uses only shallow state to determine if two `column_view`s view equivalent columns
*
* Two columns are equivalent if for any operation `F` then:
* ```
* is_shallow_equivalent(c0, c1) ==> The results of F(c0) and F(c1) are equivalent
* ```
* For any two non-empty columns, `is_shallow_equivalent(c0,c1)` is true only if they view the exact
* same physical column. In other words, two physically independent columns may have exactly
* equivalent elements but their shallow state would not be equivalent.
*
* The complexity of this function is `O( min(count_descendants(lhs), count_descendants(rhs)) )`,
* i.e., it is independent of the number of elements in either column.
*
* This function does _not_ inspect the elements of `lhs` or `rhs` nor access any device memory nor
* launch any kernels.
*
* @param lhs The left `column_view` to compare
* @param rhs The right `column_view` to compare
* @return If `lhs` and `rhs` have equivalent shallow state
*/
bool is_shallow_equivalent(column_view const& lhs, column_view const& rhs);
} // namespace detail
} // namespace cudf
rapidsai_public_repos/cudf/cpp/include/cudf/column/column.hpp
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_view.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <memory>
#include <type_traits>
#include <utility>
#include <vector>
/**
* @file
* @brief Class definition for cudf::column
*/
namespace cudf {
/**
* @brief A container of nullable device data as a column of elements.
*
* @ingroup column_classes Column
* @{
*/
class column {
public:
column() = default;
~column() = default;
column& operator=(column const& other) = delete;
column& operator=(column&& other) = delete;
/**
* @brief Construct a new column object by deep copying the contents of
*`other`.
*
* Uses the specified `stream` and device_memory_resource for all allocations
* and copies.
*
* @param other The `column` to copy
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for all device memory allocations
*/
column(column const& other,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Move the contents from `other` to create a new column.
*
 * After the move, `other.size() == 0` and `other.type() == data_type{EMPTY}`
*
* @param other The column whose contents will be moved into the new column
*/
column(column&& other) noexcept;
/**
* @brief Construct a new column by taking ownership of the contents of a device_uvector.
*
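 * A minimal usage sketch (illustrative; `stream` is assumed to be a valid
 * `rmm::cuda_stream_view`):
 * @code{.cpp}
 * rmm::device_uvector<int32_t> v(100, stream);
 * cudf::column col{std::move(v), rmm::device_buffer{}, 0};  // INT32, no nulls
 * @endcode
 *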
* @param other The device_uvector whose contents will be moved into the new column.
* @param null_mask Column's null value indicator bitmask. May be empty if `null_count` is 0.
* @param null_count The count of null elements.
*/
template <typename T, CUDF_ENABLE_IF(cudf::is_numeric<T>() or cudf::is_chrono<T>())>
column(rmm::device_uvector<T>&& other, rmm::device_buffer&& null_mask, size_type null_count)
: _type{cudf::data_type{cudf::type_to_id<T>()}},
_size{[&]() {
CUDF_EXPECTS(
other.size() <= static_cast<std::size_t>(std::numeric_limits<size_type>::max()),
"The device_uvector size exceeds the column size limit",
std::overflow_error);
return static_cast<size_type>(other.size());
}()},
_data{other.release()},
_null_mask{std::move(null_mask)},
_null_count{null_count}
{
}
/**
* @brief Construct a new column from existing device memory.
*
* @note This constructor is primarily intended for use in column factory
* functions.
*
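 * A minimal usage sketch (illustrative; `stream` is assumed to be a valid
 * `rmm::cuda_stream_view`):
 * @code{.cpp}
 * auto data = rmm::device_buffer{100 * sizeof(int32_t), stream};
 * cudf::column col{cudf::data_type{cudf::type_id::INT32},
 *                  100,
 *                  std::move(data),
 *                  rmm::device_buffer{},  // no null mask
 *                  0};                    // null count
 * @endcode
 *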
* @throws cudf::logic_error if `size < 0`
*
* @param dtype The element type
* @param size The number of elements in the column
* @param data The column's data
* @param null_mask Column's null value indicator bitmask. May be empty if `null_count` is 0.
* @param null_count Optional, the count of null elements.
* @param children Optional, vector of child columns
*/
template <typename B1, typename B2 = rmm::device_buffer>
column(data_type dtype,
size_type size,
B1&& data,
B2&& null_mask,
size_type null_count,
std::vector<std::unique_ptr<column>>&& children = {})
: _type{dtype},
_size{size},
_data{std::forward<B1>(data)},
_null_mask{std::forward<B2>(null_mask)},
_null_count{null_count},
_children{std::move(children)}
{
CUDF_EXPECTS(size >= 0, "Column size cannot be negative.");
}
/**
* @brief Construct a new column by deep copying the contents of a
* `column_view`.
*
* This accounts for the `column_view`'s offset.
*
* @param view The view to copy
* @param stream CUDA stream used for device memory operations.
* @param mr Device memory resource to use for all device memory allocations
*/
explicit column(column_view view,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns the column's logical element type
*
* @return The column's logical element type
*/
[[nodiscard]] data_type type() const noexcept { return _type; }
/**
* @brief Returns the number of elements
*
* @return The number of elements
*/
[[nodiscard]] size_type size() const noexcept { return _size; }
/**
* @brief Returns the count of null elements.
*
* @return The number of null elements
*/
[[nodiscard]] size_type null_count() const { return _null_count; }
/**
* @brief Sets the column's null value indicator bitmask to `new_null_mask`.
*
* @throws cudf::logic_error if new_null_count is larger than 0 and the size
* of `new_null_mask` does not match the size of this column.
*
* @param new_null_mask New null value indicator bitmask (rvalue overload &
* moved) to set the column's null value indicator mask. May be empty if
* `new_null_count` is 0.
* @param new_null_count The count of null elements.
*/
void set_null_mask(rmm::device_buffer&& new_null_mask, size_type new_null_count);
/**
* @brief Sets the column's null value indicator bitmask to `new_null_mask`.
*
* @throws cudf::logic_error if new_null_count is larger than 0 and the size of `new_null_mask`
* does not match the size of this column.
*
* @param new_null_mask New null value indicator bitmask (lvalue overload & copied) to set the
* column's null value indicator mask. May be empty if `new_null_count` is 0.
* @param new_null_count The count of null elements
* @param stream The stream on which to perform the allocation and copy. Uses the default CUDF
* stream if none is specified.
*/
void set_null_mask(rmm::device_buffer const& new_null_mask,
size_type new_null_count,
rmm::cuda_stream_view stream = cudf::get_default_stream());
/**
* @brief Updates the count of null elements.
*
* @throws cudf::logic_error if `new_null_count > 0 and nullable() == false`
*
* @param new_null_count The new null count.
*/
void set_null_count(size_type new_null_count);
/**
* @brief Indicates whether it is possible for the column to contain null
* values, i.e., it has an allocated null mask.
*
 * This function returns `false` only if `null_count() == 0`; if
 * `null_count() > 0`, it always returns `true`.
 *
 * It may also return `true` even when `null_count() == 0`, since it simply
 * indicates whether the column has an allocated null mask.
*
* @return true The column can hold null values
* @return false The column cannot hold null values
*/
[[nodiscard]] bool nullable() const noexcept { return (_null_mask.size() > 0); }
/**
* @brief Indicates whether the column contains null elements.
*
* @return true One or more elements are null
* @return false Zero elements are null
*/
[[nodiscard]] bool has_nulls() const noexcept { return (null_count() > 0); }
/**
* @brief Returns the number of child columns
*
* @return The number of child columns
*/
[[nodiscard]] size_type num_children() const noexcept { return _children.size(); }
/**
* @brief Returns a reference to the specified child
*
* @param child_index Index of the desired child
* @return Reference to the desired child
*/
column& child(size_type child_index) noexcept { return *_children[child_index]; };
/**
* @brief Returns a const reference to the specified child
*
* @param child_index Index of the desired child
* @return Const reference to the desired child
*/
[[nodiscard]] column const& child(size_type child_index) const noexcept
{
return *_children[child_index];
};
/**
* @brief Wrapper for the contents of a column.
*
* Returned by `column::release()`.
*/
struct contents {
std::unique_ptr<rmm::device_buffer> data; ///< data device memory buffer
std::unique_ptr<rmm::device_buffer> null_mask; ///< null mask device memory buffer
std::vector<std::unique_ptr<column>> children; ///< child columns
};
/**
* @brief Releases ownership of the column's contents.
*
 * It is the caller's responsibility to query `size()`, `null_count()`, and
 * `type()` before invoking `release()`, as they are reset afterwards.
*
* After calling `release()` on a column it will be empty, i.e.:
* - `type() == data_type{EMPTY}`
* - `size() == 0`
* - `null_count() == 0`
* - `num_children() == 0`
*
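 * A minimal usage sketch (illustrative; `col` is assumed to be an existing
 * `cudf::column`):
 * @code{.cpp}
 * auto const n  = col.size();     // capture metadata before releasing
 * auto contents = col.release();  // `col` is now empty
 * // contents.data, contents.null_mask, and contents.children own the memory
 * @endcode
 *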
* @return A `contents` struct containing the data, null mask, and children of
* the column.
*/
contents release() noexcept;
/**
* @brief Creates an immutable, non-owning view of the column's data and
* children.
*
* @return The immutable, non-owning view
*/
[[nodiscard]] column_view view() const;
/**
* @brief Implicit conversion operator to a `column_view`.
*
* This allows passing a `column` object directly into a function that
* requires a `column_view`. The conversion is automatic.
*
* @return Immutable, non-owning `column_view`
*/
operator column_view() const { return this->view(); };
/**
* @brief Creates a mutable, non-owning view of the column's data, null mask,
* and children
*
* @return The mutable, non-owning view
*/
mutable_column_view mutable_view();
/**
* @brief Implicit conversion operator to a `mutable_column_view`.
*
* This allows passing a `column` object into a function that accepts a
* `mutable_column_view`. The conversion is automatic.
*
* The caller is expected to update the null count appropriately if the null mask
* is modified.
*
* @return Mutable, non-owning `mutable_column_view`
*/
operator mutable_column_view() { return this->mutable_view(); };
private:
cudf::data_type _type{type_id::EMPTY}; ///< Logical type of elements in the column
cudf::size_type _size{}; ///< The number of elements in the column
rmm::device_buffer _data{}; ///< Dense, contiguous, type erased device memory
///< buffer containing the column elements
rmm::device_buffer _null_mask{}; ///< Bitmask used to represent null values.
///< May be empty if `null_count() == 0`
mutable cudf::size_type _null_count{}; ///< The number of null elements
std::vector<std::unique_ptr<column>> _children{}; ///< Depending on element type, child
///< columns may contain additional data
};
/** @} */ // end of group
} // namespace cudf
rapidsai_public_repos/cudf/cpp/include/cudf/column/column_device_view.cuh
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_view.hpp>
#include <cudf/detail/offsets_iterator.cuh>
#include <cudf/detail/utilities/alignment.hpp>
#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/lists/list_view.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/structs/struct_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/optional.h>
#include <thrust/pair.h>
#include <algorithm>
/**
* @file column_device_view.cuh
* @brief Column device view class definitions
*/
namespace cudf {
/**
* @brief Indicates the presence of nulls at compile-time or runtime.
*
* If used at compile-time, this indicator can tell the optimizer
* to include or exclude any null-checking clauses.
*
*/
struct nullate {
struct YES : std::bool_constant<true> {};
struct NO : std::bool_constant<false> {};
/**
* @brief `nullate::DYNAMIC` defers the determination of nullability to run time rather than
* compile time. The calling code is responsible for specifying whether or not nulls are
* present using the constructor parameter at run time.
*/
struct DYNAMIC {
DYNAMIC() = delete;
/**
* @brief Create a runtime nullate object.
*
* @see cudf::column_device_view::optional_begin for example usage
*
* @param b True if nulls are expected in the operation in which this
* object is applied.
*/
constexpr explicit DYNAMIC(bool b) noexcept : value{b} {}
/**
* @brief Returns true if nulls are expected in the operation in which this object is applied.
*
* @return `true` if nulls are expected in the operation in which this object is applied,
* otherwise false
*/
constexpr operator bool() const noexcept { return value; }
bool value; ///< True if nulls are expected
};
};
namespace detail {
/**
* @brief An immutable, non-owning view of device data as a column of elements
* that is trivially copyable and usable in CUDA device code.
*
* column_device_view_base and derived classes do not support has_nulls() or
* null_count(). The primary reason for this is that creation of column_device_views
* from column_views that have UNKNOWN null counts would require an on-the-spot, and
* not-obvious computation of null count, which could lead to undesirable performance issues.
* This information is also generally not needed in device code, and on the host-side
* is easily accessible from the associated column_view.
*/
class alignas(16) column_device_view_base {
public:
column_device_view_base() = delete;
~column_device_view_base() = default;
column_device_view_base(column_device_view_base const&) = default; ///< Copy constructor
column_device_view_base(column_device_view_base&&) = default; ///< Move constructor
/**
* @brief Copy assignment operator
*
* @return Reference to this object
*/
column_device_view_base& operator=(column_device_view_base const&) = default;
/**
* @brief Move assignment operator
*
* @return Reference to this object (after transferring ownership)
*/
column_device_view_base& operator=(column_device_view_base&&) = default;
/**
* @brief Returns pointer to the base device memory allocation casted to
* the specified type.
*
* @note If `offset() == 0`, then `head<T>() == data<T>()`
*
* @note It should be rare to need to access the `head<T>()` allocation of
* a column, and instead, accessing the elements should be done via
 * `data<T>()`.
*
* This function will only participate in overload resolution if `is_rep_layout_compatible<T>()`
* or `std::is_same_v<T,void>` are true.
*
 * @tparam T The type to cast to
* @return Typed pointer to underlying data
*/
template <typename T = void,
CUDF_ENABLE_IF(std::is_same_v<T, void> or is_rep_layout_compatible<T>())>
[[nodiscard]] CUDF_HOST_DEVICE T const* head() const noexcept
{
return static_cast<T const*>(_data);
}
/**
* @brief Returns the underlying data casted to the specified type, plus the
* offset.
*
* @note If `offset() == 0`, then `head<T>() == data<T>()`
*
* For columns with children, the pointer returned is undefined
* and should not be used.
*
* This function does not participate in overload resolution if `is_rep_layout_compatible<T>` is
* false.
*
* @tparam T The type to cast to
* @return Typed pointer to underlying data, including the offset
*/
template <typename T, CUDF_ENABLE_IF(is_rep_layout_compatible<T>())>
[[nodiscard]] CUDF_HOST_DEVICE T const* data() const noexcept
{
return head<T>() + _offset;
}
/**
* @brief Returns the number of elements in the column.
*
* @return The number of elements in the column
*/
[[nodiscard]] CUDF_HOST_DEVICE size_type size() const noexcept { return _size; }
/**
* @brief Returns the element type
*
* @return The element type
*/
[[nodiscard]] CUDF_HOST_DEVICE data_type type() const noexcept { return _type; }
/**
* @brief Indicates whether the column can contain null elements, i.e., if it
 * has an allocated bitmask.
*
* @note If `null_count() > 0`, this function must always return `true`.
*
* @return true The bitmask is allocated
* @return false The bitmask is not allocated
*/
[[nodiscard]] CUDF_HOST_DEVICE bool nullable() const noexcept { return nullptr != _null_mask; }
/**
* @brief Returns raw pointer to the underlying bitmask allocation.
*
* @note This function does *not* account for the `offset()`.
*
* @note If `null_count() == 0`, this may return `nullptr`.
*
* @return Raw pointer to the underlying bitmask allocation
*/
[[nodiscard]] CUDF_HOST_DEVICE bitmask_type const* null_mask() const noexcept
{
return _null_mask;
}
/**
* @brief Returns the index of the first element relative to the base memory
* allocation, i.e., what is returned from `head<T>()`.
*
* @return The index of the first element relative to the `head<T>()`
*/
[[nodiscard]] CUDF_HOST_DEVICE size_type offset() const noexcept { return _offset; }
/**
* @brief Returns whether the specified element holds a valid value (i.e., not
* null).
*
* Checks first for the existence of the null bitmask. If `nullable() ==
* false`, this function always returns true.
*
* @note If `nullable() == true` can be guaranteed, then it is more performant
* to use `is_valid_nocheck()`.
*
* @param element_index The index of the element to query
* @return true The element is valid
* @return false The element is null
*/
[[nodiscard]] __device__ bool is_valid(size_type element_index) const noexcept
{
return not nullable() or is_valid_nocheck(element_index);
}
/**
* @brief Returns whether the specified element holds a valid value (i.e., not
* null)
*
* This function does *not* verify the existence of the bitmask before
* attempting to read it. Therefore, it is undefined behavior to call this
* function if `nullable() == false`.
*
* @param element_index The index of the element to query
* @return true The element is valid
* @return false The element is null
*/
[[nodiscard]] __device__ bool is_valid_nocheck(size_type element_index) const noexcept
{
return bit_is_set(_null_mask, offset() + element_index);
}
/**
* @brief Returns whether the specified element is null.
*
* Checks first for the existence of the null bitmask. If `nullable() ==
* false`, this function always returns false.
*
* @note If `nullable() == true` can be guaranteed, then it is more performant
* to use `is_null_nocheck()`.
*
* @param element_index The index of the element to query
* @return true The element is null
* @return false The element is valid
*/
[[nodiscard]] __device__ bool is_null(size_type element_index) const noexcept
{
return not is_valid(element_index);
}
/**
* @brief Returns whether the specified element is null
*
* This function does *not* verify the existence of the bitmask before
* attempting to read it. Therefore, it is undefined behavior to call this
* function if `nullable() == false`.
*
* @param element_index The index of the element to query
* @return true The element is null
* @return false The element is valid
*/
[[nodiscard]] __device__ bool is_null_nocheck(size_type element_index) const noexcept
{
return not is_valid_nocheck(element_index);
}
/**
* @brief Returns the specified bitmask word from the `null_mask()`.
*
* @note It is undefined behavior to call this function if `nullable() ==
* false`.
*
* @param word_index The index of the word to get
* @return bitmask word for the given word_index
*/
[[nodiscard]] __device__ bitmask_type get_mask_word(size_type word_index) const noexcept
{
return null_mask()[word_index];
}
protected:
data_type _type{type_id::EMPTY}; ///< Element type
cudf::size_type _size{}; ///< Number of elements
void const* _data{}; ///< Pointer to device memory containing elements
bitmask_type const* _null_mask{}; ///< Pointer to device memory containing
///< bitmask representing null elements.
size_type _offset{}; ///< Index position of the first element.
///< Enables zero-copy slicing
/**
* @brief Constructs a column with the specified type, size, data, nullmask and offset.
*
* @param type The type of the column
* @param size The number of elements in the column
* @param data Pointer to device memory containing elements
* @param null_mask Pointer to device memory containing bitmask representing valid elements
* @param offset Index position of the first element
*/
CUDF_HOST_DEVICE column_device_view_base(data_type type,
size_type size,
void const* data,
bitmask_type const* null_mask,
size_type offset)
: _type{type}, _size{size}, _data{data}, _null_mask{null_mask}, _offset{offset}
{
}
template <typename C, typename T, typename = void>
struct has_element_accessor_impl : std::false_type {};
template <typename C, typename T>
struct has_element_accessor_impl<
C,
T,
void_t<decltype(std::declval<C>().template element<T>(std::declval<size_type>()))>>
: std::true_type {};
};
// @cond
// Forward declaration
template <typename T>
struct value_accessor;
template <typename T, typename Nullate>
struct optional_accessor;
template <typename T, bool has_nulls>
struct pair_accessor;
template <typename T, bool has_nulls>
struct pair_rep_accessor;
template <typename T>
struct mutable_value_accessor;
// @endcond
} // namespace detail
/**
* @brief An immutable, non-owning view of device data as a column of elements
* that is trivially copyable and usable in CUDA device code.
*
* @ingroup column_classes
*/
class alignas(16) column_device_view : public detail::column_device_view_base {
public:
column_device_view() = delete;
~column_device_view() = default;
column_device_view(column_device_view const&) = default; ///< Copy constructor
column_device_view(column_device_view&&) = default; ///< Move constructor
/**
* @brief Copy assignment operator
*
* @return Reference to this object
*/
column_device_view& operator=(column_device_view const&) = default;
/**
* @brief Move assignment operator
*
* @return Reference to this object (after transferring ownership)
*/
column_device_view& operator=(column_device_view&&) = default;
/**
* @brief Creates an instance of this class using the specified host memory
* pointer (h_ptr) to store child objects and the device memory pointer
* (d_ptr) as a base for any child object pointers.
*
* @param column Column view from which to create this instance.
* @param h_ptr Host memory pointer on which to place any child data.
* @param d_ptr Device memory pointer on which to base any child pointers.
*/
column_device_view(column_view column, void* h_ptr, void* d_ptr);
/**
* @brief Get a new column_device_view which is a slice of this column.
*
* Example:
* @code{.cpp}
* // column = column_device_view([1, 2, 3, 4, 5, 6, 7])
* auto c = column.slice(1, 3);
* // c = column_device_view([2, 3, 4])
* auto c1 = column.slice(2, 3);
* // c1 = column_device_view([3, 4, 5])
* @endcode
*
* @param offset The index of the first element in the slice
* @param size The number of elements in the slice
* @return A slice of this column
*/
[[nodiscard]] CUDF_HOST_DEVICE column_device_view slice(size_type offset,
size_type size) const noexcept
{
return column_device_view{this->type(),
size,
this->head(),
this->null_mask(),
this->offset() + offset,
d_children,
this->num_child_columns()};
}
/**
* @brief Returns reference to element at the specified index.
*
* If the element at the specified index is NULL, i.e.,
* `is_null(element_index) == true`, then any attempt to use the result will
* lead to undefined behavior.
*
* This function accounts for the offset.
*
* This function does not participate in overload resolution if `is_rep_layout_compatible<T>` is
* false. Specializations of this function may exist for types `T` where
 * `is_rep_layout_compatible<T>` is false.
*
* @tparam T The element type
* @param element_index Position of the desired element
* @return reference to the element at the specified index
*/
template <typename T, CUDF_ENABLE_IF(is_rep_layout_compatible<T>())>
[[nodiscard]] __device__ T element(size_type element_index) const noexcept
{
return data<T>()[element_index];
}
/**
* @brief Returns `string_view` to the string element at the specified index.
*
* If the element at the specified index is NULL, i.e., `is_null(element_index)
* == true`, then any attempt to use the result will lead to undefined behavior.
*
* This function accounts for the offset.
*
* @param element_index Position of the desired string element
* @return string_view instance representing this element at this index
*/
template <typename T, CUDF_ENABLE_IF(std::is_same_v<T, string_view>)>
__device__ T element(size_type element_index) const noexcept
{
size_type index = element_index + offset(); // account for this view's _offset
char const* d_strings = d_children[strings_column_view::chars_column_index].data<char>();
auto const offsets = d_children[strings_column_view::offsets_column_index];
auto const itr = cudf::detail::input_offsetalator(offsets.head(), offsets.type());
auto const offset = itr[index];
return string_view{d_strings + offset, static_cast<cudf::size_type>(itr[index + 1] - offset)};
}
private:
/**
* @brief Dispatch functor for resolving the index value for a dictionary element.
*
* The basic dictionary elements are the indices which can be any index type.
*/
struct index_element_fn {
template <typename IndexType,
CUDF_ENABLE_IF(is_index_type<IndexType>() and std::is_unsigned_v<IndexType>)>
__device__ size_type operator()(column_device_view const& indices, size_type index)
{
return static_cast<size_type>(indices.element<IndexType>(index));
}
template <typename IndexType,
typename... Args,
CUDF_ENABLE_IF(not(is_index_type<IndexType>() and std::is_unsigned_v<IndexType>))>
__device__ size_type operator()(Args&&... args)
{
CUDF_UNREACHABLE("dictionary indices must be an unsigned integral type");
}
};
public:
/**
* @brief Returns `dictionary32` element at the specified index for a
* dictionary column.
*
* `dictionary32` is a strongly typed wrapper around an `int32_t` value that holds the
* offset into the dictionary keys for the specified element.
*
* For example, given a dictionary column `d` with:
* ```c++
* keys: {"foo", "bar", "baz"}
* indices: {2, 0, 2, 1, 0}
*
* d.element<dictionary32>(0) == dictionary32{2};
* d.element<dictionary32>(1) == dictionary32{0};
* ```
*
* If the element at the specified index is NULL, i.e., `is_null(element_index) == true`,
* then any attempt to use the result will lead to undefined behavior.
*
* This function accounts for the offset.
*
* @param element_index Position of the desired element
* @return dictionary32 instance representing this element at this index
*/
template <typename T, CUDF_ENABLE_IF(std::is_same_v<T, dictionary32>)>
__device__ T element(size_type element_index) const noexcept
{
size_type index = element_index + offset(); // account for this view's _offset
auto const indices = d_children[0];
return dictionary32{type_dispatcher(indices.type(), index_element_fn{}, indices, index)};
}
/**
* @brief Returns a `numeric::fixed_point` element at the specified index for a `fixed_point`
* column.
*
* If the element at the specified index is NULL, i.e., `is_null(element_index) == true`,
* then any attempt to use the result will lead to undefined behavior.
*
* @param element_index Position of the desired element
* @return numeric::fixed_point representing the element at this index
*/
template <typename T, CUDF_ENABLE_IF(cudf::is_fixed_point<T>())>
__device__ T element(size_type element_index) const noexcept
{
using namespace numeric;
using rep = typename T::rep;
auto const scale = scale_type{_type.scale()};
return T{scaled_integer<rep>{data<rep>()[element_index], scale}};
}
/**
* @brief For a given `T`, indicates if `column_device_view::element<T>()` has a valid overload.
*
* @tparam T The element type
* @return `true` if `column_device_view::element<T>()` has a valid overload, `false` otherwise
*/
template <typename T>
static constexpr bool has_element_accessor()
{
return has_element_accessor_impl<column_device_view, T>::value;
}
/// Counting iterator
using count_it = thrust::counting_iterator<size_type>;
/**
* @brief Iterator for navigating this column
*/
template <typename T>
using const_iterator = thrust::transform_iterator<detail::value_accessor<T>, count_it>;
/**
* @brief Return an iterator to the first element of the column.
*
* This iterator only supports columns where `has_nulls() == false`. Using it
* with columns where `has_nulls() == true` will result in undefined behavior
* when accessing null elements.
*
* This function does not participate in overload resolution if
* `column_device_view::has_element_accessor<T>()` is false.
*
* For columns with null elements, use `make_null_replacement_iterator`.
*
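 * A minimal usage sketch (illustrative; `view` is assumed to be a non-nullable
 * INT32 `column_view` and `stream` a valid `rmm::cuda_stream_view`):
 * @code{.cpp}
 * auto d_col = cudf::column_device_view::create(view, stream);
 * auto sum   = thrust::reduce(rmm::exec_policy(stream),
 *                             d_col->begin<int32_t>(),
 *                             d_col->end<int32_t>(),
 *                             int32_t{0});
 * @endcode
 *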
* @tparam T Type of the elements in the column
* @return An iterator to the first element of the column
*/
template <typename T, CUDF_ENABLE_IF(column_device_view::has_element_accessor<T>())>
[[nodiscard]] const_iterator<T> begin() const
{
return const_iterator<T>{count_it{0}, detail::value_accessor<T>{*this}};
}
/**
* @brief Returns an iterator to the element following the last element of the column.
*
* This iterator only supports columns where `has_nulls() == false`. Using it
* with columns where `has_nulls() == true` will result in undefined behavior
* when accessing null elements.
*
* This function does not participate in overload resolution if
* `column_device_view::has_element_accessor<T>()` is false.
*
* For columns with null elements, use `make_null_replacement_iterator`.
*
* @return An iterator to the element following the last element of the column
*/
template <typename T, CUDF_ENABLE_IF(column_device_view::has_element_accessor<T>())>
[[nodiscard]] const_iterator<T> end() const
{
return const_iterator<T>{count_it{size()}, detail::value_accessor<T>{*this}};
}
/**
* @brief Optional iterator for navigating this column
*/
template <typename T, typename Nullate>
using const_optional_iterator =
thrust::transform_iterator<detail::optional_accessor<T, Nullate>, count_it>;
/**
* @brief Pair iterator for navigating this column
*/
template <typename T, bool has_nulls>
using const_pair_iterator =
thrust::transform_iterator<detail::pair_accessor<T, has_nulls>, count_it>;
/**
* @brief Pair rep iterator for navigating this column
*
* Each row value is accessed in its representative form.
*/
template <typename T, bool has_nulls>
using const_pair_rep_iterator =
thrust::transform_iterator<detail::pair_rep_accessor<T, has_nulls>, count_it>;
/**
* @brief Return an optional iterator to the first element of the column.
*
* Dereferencing the returned iterator returns a `thrust::optional<T>`.
*
* The element of this iterator contextually converts to bool. The conversion returns true
* if the object contains a value and false if it does not contain a value.
*
* Calling this method with `nullate::DYNAMIC` defers the assumption of nullability to
* runtime with the caller indicating if the column has nulls. The `nullate::DYNAMIC` is
* useful when an algorithm is going to execute on multiple iterators and all the combinations of
* iterator types are not required at compile time.
*
* @code{.cpp}
* template<typename T>
* void some_function(cudf::column_view<T> const& col_view){
* auto d_col = cudf::column_device_view::create(col_view);
* // Create a `DYNAMIC` optional iterator
* auto optional_iterator =
* d_col->optional_begin<T>(cudf::nullate::DYNAMIC{col_view.has_nulls()});
* }
* @endcode
*
* Calling this method with `nullate::YES` means that the column supports nulls and
* the optional returned might not contain a value.
*
* Calling this method with `nullate::NO` means that the column has no null values
* and the optional returned will always contain a value.
*
* @code{.cpp}
* template<typename T, bool has_nulls>
* void some_function(cudf::column_view<T> const& col_view){
* auto d_col = cudf::column_device_view::create(col_view);
* if constexpr(has_nulls) {
* auto optional_iterator = d_col->optional_begin<T>(cudf::nullate::YES{});
* //use optional_iterator
* } else {
* auto optional_iterator = d_col->optional_begin<T>(cudf::nullate::NO{});
* //use optional_iterator
* }
* }
* @endcode
*
* This function does not participate in overload resolution if
* `column_device_view::has_element_accessor<T>()` is false.
*
* @throws cudf::logic_error if the column is not nullable and `has_nulls` evaluates to true.
* @throws cudf::logic_error if column datatype and Element type mismatch.
*
* @tparam T The type of elements in the column
* @tparam Nullate A cudf::nullate type describing how to check for nulls
* @param has_nulls A cudf::nullate type describing how to check for nulls
* @return An optional iterator to the first element of the column
*/
template <typename T,
typename Nullate,
CUDF_ENABLE_IF(column_device_view::has_element_accessor<T>())>
auto optional_begin(Nullate has_nulls) const
{
return const_optional_iterator<T, Nullate>{
count_it{0}, detail::optional_accessor<T, Nullate>{*this, has_nulls}};
}
/**
* @brief Return a pair iterator to the first element of the column.
*
* Dereferencing the returned iterator returns a `thrust::pair<T, bool>`.
*
* If an element at position `i` is valid (or `has_nulls == false`), then
* for `p = *(iter + i)`, `p.first` contains the value of the element at `i`
* and `p.second == true`.
*
* Else, if the element at `i` is null, then the value of `p.first` is
* undefined and `p.second == false`.
*
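 * A minimal usage sketch (illustrative; `view` is assumed to be a nullable
 * FLOAT64 `column_view` and `stream` a valid `rmm::cuda_stream_view`):
 * @code{.cpp}
 * auto d_col = cudf::column_device_view::create(view, stream);
 * auto it    = d_col->pair_begin<double, true>();
 * // *(it + i) yields thrust::pair<double, bool>{value, is_valid}
 * @endcode
 *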
* This function does not participate in overload resolution if
* `column_device_view::has_element_accessor<T>()` is false.
*
* @throws cudf::logic_error if tparam `has_nulls == true` and
* `nullable() == false`
* @throws cudf::logic_error if column datatype and Element type mismatch.
*
* @return A pair iterator to the first element of the column
*/
template <typename T,
bool has_nulls,
CUDF_ENABLE_IF(column_device_view::has_element_accessor<T>())>
[[nodiscard]] const_pair_iterator<T, has_nulls> pair_begin() const
{
return const_pair_iterator<T, has_nulls>{count_it{0},
detail::pair_accessor<T, has_nulls>{*this}};
}
/**
* @brief Return a pair iterator to the first element of the column.
*
* Dereferencing the returned iterator returns a `thrust::pair<rep_type, bool>`,
* where `rep_type` is `device_storage_type<T>`, the type used to store
* the value on the device.
*
* If an element at position `i` is valid (or `has_nulls == false`), then
* for `p = *(iter + i)`, `p.first` contains the value of the element at `i`
* and `p.second == true`.
*
* Else, if the element at `i` is null, then the value of `p.first` is
* undefined and `p.second == false`.
*
* This function does not participate in overload resolution if
* `column_device_view::has_element_accessor<T>()` is false.
*
* @throws cudf::logic_error if tparam `has_nulls == true` and
* `nullable() == false`
* @throws cudf::logic_error if column datatype and Element type mismatch.
*
* @return A pair iterator to the first element of the column
*/
template <typename T,
bool has_nulls,
CUDF_ENABLE_IF(column_device_view::has_element_accessor<T>())>
[[nodiscard]] const_pair_rep_iterator<T, has_nulls> pair_rep_begin() const
{
return const_pair_rep_iterator<T, has_nulls>{count_it{0},
detail::pair_rep_accessor<T, has_nulls>{*this}};
}
/**
* @brief Return an optional iterator to the element following the last element of the column.
*
* The returned iterator represents a `thrust::optional<T>` element.
*
* This function does not participate in overload resolution if
* `column_device_view::has_element_accessor<T>()` is false.
*
* @throws cudf::logic_error if the column is not nullable and `has_nulls` is true
* @throws cudf::logic_error if column datatype and Element type mismatch.
*
* @tparam T The type of elements in the column
* @tparam Nullate A cudf::nullate type describing how to check for nulls
* @param has_nulls A cudf::nullate type describing how to check for nulls
* @return An optional iterator to the element following the last element of the column
*/
template <typename T,
typename Nullate,
CUDF_ENABLE_IF(column_device_view::has_element_accessor<T>())>
auto optional_end(Nullate has_nulls) const
{
return const_optional_iterator<T, Nullate>{
count_it{size()}, detail::optional_accessor<T, Nullate>{*this, has_nulls}};
}
/**
* @brief Return a pair iterator to the element following the last element of the column.
*
* This function does not participate in overload resolution if
* `column_device_view::has_element_accessor<T>()` is false.
*
* @throws cudf::logic_error if tparam `has_nulls == true` and
* `nullable() == false`
* @throws cudf::logic_error if column datatype and Element type mismatch.
* @return A pair iterator to the element following the last element of the column
*/
template <typename T,
bool has_nulls,
CUDF_ENABLE_IF(column_device_view::has_element_accessor<T>())>
[[nodiscard]] const_pair_iterator<T, has_nulls> pair_end() const
{
return const_pair_iterator<T, has_nulls>{count_it{size()},
detail::pair_accessor<T, has_nulls>{*this}};
}
/**
* @brief Return a pair iterator to the element following the last element of the column.
*
* This function does not participate in overload resolution if
* `column_device_view::has_element_accessor<T>()` is false.
*
* @throws cudf::logic_error if tparam `has_nulls == true` and
* `nullable() == false`
* @throws cudf::logic_error if column datatype and Element type mismatch.
*
* @return A pair iterator to the element following the last element of the column
*/
template <typename T,
bool has_nulls,
CUDF_ENABLE_IF(column_device_view::has_element_accessor<T>())>
[[nodiscard]] const_pair_rep_iterator<T, has_nulls> pair_rep_end() const
{
return const_pair_rep_iterator<T, has_nulls>{count_it{size()},
detail::pair_rep_accessor<T, has_nulls>{*this}};
}
/**
* @brief Factory to construct a column view that is usable in device memory.
*
* Allocates and copies views of `source_view`'s children to device memory to
* make them accessible in device code.
*
* If `source_view.num_children() == 0`, then no device memory is allocated.
*
* Returns a `std::unique_ptr<column_device_view>` with a custom deleter to
* free the device memory allocated for the children.
*
* A `column_device_view` should be passed by value into GPU kernels.
*
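* A minimal sketch (illustrative; `col`, `stream`, `grid`, and `block` are assumptions,
* and `my_kernel` is a hypothetical kernel taking the view by value):
*
* @code{.cpp}
* auto d_view = cudf::column_device_view::create(col, stream);
* my_kernel<<<grid, block, 0, stream.value()>>>(*d_view);
* @endcode
*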
* @param source_view The `column_view` to make usable in device code
* @param stream CUDA stream used for device memory operations for children columns.
* @return A `unique_ptr` to a `column_device_view` that makes the data from
*`source_view` available in device memory.
*/
static std::unique_ptr<column_device_view, std::function<void(column_device_view*)>> create(
column_view source_view, rmm::cuda_stream_view stream = cudf::get_default_stream());
/**
* @brief Destroy the `column_device_view` object.
*
* @note Does not free the column data, simply frees the device memory
* allocated to hold the child views.
*/
void destroy();
/**
* @brief Return the size in bytes of the amount of memory needed to hold a
* device view of the specified column and its children.
*
* @param source_view The `column_view` to use for this calculation.
* @return number of bytes to store device view in GPU memory
*/
static std::size_t extent(column_view const& source_view);
/**
* @brief Returns the specified child
*
* @param child_index The index of the desired child
* @return The requested child `column_device_view`
*/
[[nodiscard]] __device__ column_device_view child(size_type child_index) const noexcept
{
return d_children[child_index];
}
/**
* @brief Returns a span containing the children of this column
*
* @return A span containing the children of this column
*/
[[nodiscard]] __device__ device_span<column_device_view const> children() const noexcept
{
return device_span<column_device_view const>(d_children, _num_children);
}
/**
* @brief Returns the number of child columns
*
* @return The number of child columns
*/
[[nodiscard]] CUDF_HOST_DEVICE size_type num_child_columns() const noexcept
{
return _num_children;
}
private:
/**
* @brief Creates an instance of this class using pre-existing device memory pointers to data,
* nullmask, and offset.
*
* @param type The type of the column
* @param size The number of elements in the column
* @param data Pointer to the device memory containing the data
* @param null_mask Pointer to the device memory containing the null bitmask
* @param offset The index of the first element in the column
* @param children Pointer to the device memory containing child data
* @param num_children The number of child columns
*/
CUDF_HOST_DEVICE column_device_view(data_type type,
size_type size,
void const* data,
bitmask_type const* null_mask,
size_type offset,
column_device_view* children,
size_type num_children)
: column_device_view_base(type, size, data, null_mask, offset),
d_children(children),
_num_children(num_children)
{
}
protected:
column_device_view* d_children{}; ///< Array of `column_device_view`
///< objects in device memory.
///< Based on element type, children
///< may contain additional data
size_type _num_children{}; ///< The number of child columns
/**
* @brief Constructs a `column_device_view` from a `column_view`, populating
* all but the children.
*
* @note This constructor is for internal use only. To create a
*`column_device_view` from a `column_view`, the
*`column_device_view::create()` function should be used.
*
* @param source The `column_view` to use for this construction
*/
column_device_view(column_view source);
};
/**
* @brief A mutable, non-owning view of device data as a column of elements
* that is trivially copyable and usable in CUDA device code.
*
* @ingroup column_classes
*/
class alignas(16) mutable_column_device_view : public detail::column_device_view_base {
public:
mutable_column_device_view() = delete;
~mutable_column_device_view() = default;
mutable_column_device_view(mutable_column_device_view const&) = default; ///< Copy constructor
mutable_column_device_view(mutable_column_device_view&&) = default; ///< Move constructor
/**
* @brief Copy assignment operator
*
* @return Reference to this object
*/
mutable_column_device_view& operator=(mutable_column_device_view const&) = default;
/**
* @brief Move assignment operator
*
* @return Reference to this object (after transferring ownership)
*/
mutable_column_device_view& operator=(mutable_column_device_view&&) = default;
/**
* @brief Creates an instance of this class using the specified host memory
* pointer (h_ptr) to store child objects and the device memory pointer
* (d_ptr) as a base for any child object pointers.
*
* @param column Column view from which to create this instance.
* @param h_ptr Host memory pointer on which to place any child data.
* @param d_ptr Device memory pointer on which to base any child pointers.
*/
mutable_column_device_view(mutable_column_view column, void* h_ptr, void* d_ptr);
/**
* @brief Factory to construct a column view that is usable in device memory.
*
* Allocates and copies views of `source_view`'s children to device memory to
* make them accessible in device code.
*
* If `source_view.num_children() == 0`, then no device memory is allocated.
*
* Returns a `std::unique_ptr<mutable_column_device_view>` with a custom
* deleter to free the device memory allocated for the children.
*
* A `mutable_column_device_view` should be passed by value into GPU kernels.
*
* @param source_view The `column_view` to make usable in device code
* @param stream CUDA stream used for device memory operations for children columns.
* @return A `unique_ptr` to a `mutable_column_device_view` that makes the
* data from `source_view` available in device memory.
*/
static std::unique_ptr<mutable_column_device_view,
std::function<void(mutable_column_device_view*)>>
create(mutable_column_view source_view,
rmm::cuda_stream_view stream = cudf::get_default_stream());
/**
* @brief Returns pointer to the base device memory allocation casted to
* the specified type.
*
* This function will only participate in overload resolution if `is_rep_layout_compatible<T>()`
* or `std::is_same_v<T,void>` are true.
*
* @note If `offset() == 0`, then `head<T>() == data<T>()`
*
* @note It should be rare to need to access the `head<T>()` allocation of
* a column, and instead, accessing the elements should be done via
* `data<T>()`.
*
* @tparam T The type to cast to
* @return Typed pointer to underlying data
*/
template <typename T = void,
CUDF_ENABLE_IF(std::is_same_v<T, void> or is_rep_layout_compatible<T>())>
CUDF_HOST_DEVICE T* head() const noexcept
{
return const_cast<T*>(detail::column_device_view_base::head<T>());
}
/**
* @brief Returns the underlying data casted to the specified type, plus the
* offset.
*
* This function does not participate in overload resolution if `is_rep_layout_compatible<T>` is
* false.
*
* @note If `offset() == 0`, then `head<T>() == data<T>()`
*
* @tparam T The type to cast to
* @return Typed pointer to underlying data, including the offset
*/
template <typename T, CUDF_ENABLE_IF(is_rep_layout_compatible<T>())>
CUDF_HOST_DEVICE T* data() const noexcept
{
return const_cast<T*>(detail::column_device_view_base::data<T>());
}
/**
* @brief Returns reference to element at the specified index.
*
* This function accounts for the offset.
*
* This function does not participate in overload resolution if `is_rep_layout_compatible<T>` is
* false. Specializations of this function may exist for types `T` where
*`is_rep_layout_compatible<T>` is false.
*
* @tparam T The element type
* @param element_index Position of the desired element
* @return Reference to the element at the specified index
*/
template <typename T, CUDF_ENABLE_IF(is_rep_layout_compatible<T>())>
__device__ T& element(size_type element_index) const noexcept
{
return data<T>()[element_index];
}
/**
* @brief For a given `T`, indicates if `mutable_column_device_view::element<T>()` has a valid
* overload.
*
* @return `true` if `mutable_column_device_view::element<T>()` has a valid overload,
* `false` otherwise
*/
template <typename T>
static constexpr bool has_element_accessor()
{
return has_element_accessor_impl<mutable_column_device_view, T>::value;
}
/**
* @brief Returns raw pointer to the underlying bitmask allocation.
*
* @note This function does *not* account for the `offset()`.
*
* @note If `null_count() == 0`, this may return `nullptr`.
* @return Raw pointer to the underlying bitmask allocation
*/
[[nodiscard]] CUDF_HOST_DEVICE bitmask_type* null_mask() const noexcept
{
return const_cast<bitmask_type*>(detail::column_device_view_base::null_mask());
}
/// Counting iterator
using count_it = thrust::counting_iterator<size_type>;
/**
* @brief Iterator for navigating this column
*/
template <typename T>
using iterator = thrust::transform_iterator<detail::mutable_value_accessor<T>, count_it>;
/**
* @brief Return an iterator to the first element (accounting for offset) after the
* underlying data is cast to the specified type.
*
* This function does not participate in overload resolution if
* `mutable_column_device_view::has_element_accessor<T>()` is false.
*
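* A minimal sketch (illustrative; `mcol` and `stream` are assumptions) that zero-fills
* the column through this iterator:
*
* @code{.cpp}
* auto d_col = cudf::mutable_column_device_view::create(mcol, stream);
* thrust::fill(rmm::exec_policy(stream), d_col->begin<int32_t>(), d_col->end<int32_t>(), 0);
* @endcode
*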
* @tparam T The desired type
* @return Iterator to the first element after casting
*/
template <typename T, CUDF_ENABLE_IF(mutable_column_device_view::has_element_accessor<T>())>
iterator<T> begin()
{
return iterator<T>{count_it{0}, detail::mutable_value_accessor<T>{*this}};
}
/**
* @brief Return an iterator to one past the last element after the underlying data is
* cast to the specified type.
*
* This function does not participate in overload resolution if
* `mutable_column_device_view::has_element_accessor<T>()` is false.
*
* @tparam T The desired type
* @return Iterator to one past the last element after casting
*/
template <typename T, CUDF_ENABLE_IF(mutable_column_device_view::has_element_accessor<T>())>
iterator<T> end()
{
return iterator<T>{count_it{size()}, detail::mutable_value_accessor<T>{*this}};
}
/**
* @brief Returns the specified child
*
* @param child_index The index of the desired child
* @return The requested child `mutable_column_device_view`
*/
[[nodiscard]] __device__ mutable_column_device_view child(size_type child_index) const noexcept
{
return d_children[child_index];
}
#ifdef __CUDACC__ // because set_bit in bit.hpp is wrapped with __CUDACC__
/**
* @brief Updates the null mask to indicate that the specified element is
* valid
*
* @note This operation requires a global atomic operation. Therefore, it is
* not recommended to use this function in performance critical regions. When
* possible, it is more efficient to compute and update an entire word at
* once using `set_mask_word`.
*
* @note It is undefined behavior to call this function if `nullable() ==
* false`.
*
* @param element_index The index of the element to update
*/
__device__ void set_valid(size_type element_index) const noexcept
{
return set_bit(null_mask(), element_index);
}
/**
* @brief Updates the null mask to indicate that the specified element is null
*
* @note This operation requires a global atomic operation. Therefore, it is
* not recommended to use this function in performance critical regions. When
* possible, it is more efficient to compute and update an entire word at
* once using `set_mask_word`.
*
* @note It is undefined behavior to call this function if `nullable() ==
* false`.
*
* @param element_index The index of the element to update
*/
__device__ void set_null(size_type element_index) const noexcept
{
return clear_bit(null_mask(), element_index);
}
#endif
/**
* @brief Updates the specified bitmask word in the `null_mask()` with a
* new word.
*
* @note It is undefined behavior to call this function if `nullable() ==
* false`.
*
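* A minimal device-code sketch (illustrative; `element_is_valid` is a hypothetical
* per-element predicate) that builds a full word and writes it once instead of
* issuing per-bit atomics:
*
* @code{.cpp}
* bitmask_type word = 0;
* for (cudf::size_type b = 0; b < 32; ++b) {
*   if (element_is_valid(b)) { word |= (bitmask_type{1} << b); }
* }
* d_col.set_mask_word(0, word);
* @endcode
*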
* @param word_index The index of the word to update
* @param new_word The new bitmask word
*/
__device__ void set_mask_word(size_type word_index, bitmask_type new_word) const noexcept
{
null_mask()[word_index] = new_word;
}
/**
* @brief Return the size in bytes of the amount of memory needed to hold a
* device view of the specified column and its children.
*
* @param source_view The `column_view` to use for this calculation.
* @return The size in bytes of the amount of memory needed to hold a
* device view of the specified column and its children
*/
static std::size_t extent(mutable_column_view source_view);
/**
* @brief Destroy the `mutable_column_device_view` object.
*
* @note Does not free the column data, simply frees the device memory
* allocated to hold the child views.
*/
void destroy();
private:
mutable_column_device_view* d_children{}; ///< Array of `mutable_column_device_view`
///< objects in device memory.
///< Based on element type, children
///< may contain additional data
size_type _num_children{}; ///< The number of child columns
/**
* @brief Constructs a `mutable_column_device_view` from a
*`mutable_column_view`, populating all but the children.
*
* @note This constructor is for internal use only. To create a
*`mutable_column_device_view` from a `mutable_column_view`, the
*`mutable_column_device_view::create()` function should be used.
*/
mutable_column_device_view(mutable_column_view source);
};
namespace detail {
#ifdef __CUDACC__ // because set_bit in bit.hpp is wrapped with __CUDACC__
/**
* @brief Convenience function to get offset word from a bitmask
*
* @see copy_offset_bitmask
* @see offset_bitmask_binop
*/
__device__ inline bitmask_type get_mask_offset_word(bitmask_type const* __restrict__ source,
size_type destination_word_index,
size_type source_begin_bit,
size_type source_end_bit)
{
size_type source_word_index = destination_word_index + word_index(source_begin_bit);
bitmask_type curr_word = source[source_word_index];
bitmask_type next_word = 0;
if (word_index(source_end_bit - 1) >
word_index(source_begin_bit +
destination_word_index * detail::size_in_bits<bitmask_type>())) {
next_word = source[source_word_index + 1];
}
return __funnelshift_r(curr_word, next_word, source_begin_bit);
}
#endif
/**
* @brief value accessor of column without null bitmask
*
* A unary functor that returns the scalar value at index `id`:
* `operator()(cudf::size_type id)` computes the element at `id` and returns it,
* i.e. the return value for element `i` is `column[i]`.
* This functor is only allowed for non-nullable columns.
*
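* A typical (illustrative) use wraps the accessor in a transform iterator; `d_col`
* is assumed to be a `column_device_view` created on the host:
*
* @code{.cpp}
* auto it = thrust::make_transform_iterator(
*   thrust::counting_iterator<cudf::size_type>{0}, value_accessor<int32_t>{*d_col});
* @endcode
*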
* @throws cudf::logic_error if the column is nullable.
* @throws cudf::logic_error if column datatype and template T type mismatch.
*
* @tparam T The type of elements in the column
*/
template <typename T>
struct value_accessor {
column_device_view const col; ///< column view of column in device
/**
* @brief constructor
*
* @param[in] _col column device view of cudf column
*/
value_accessor(column_device_view const& _col) : col{_col}
{
CUDF_EXPECTS(type_id_matches_device_storage_type<T>(col.type().id()), "the data type mismatch");
}
/**
* @brief Returns the value of element at index `i`
* @param[in] i index of element
* @return value of element at index `i`
*/
__device__ T operator()(cudf::size_type i) const { return col.element<T>(i); }
};
/**
* @brief optional accessor of a column
*
*
* The optional_accessor always returns a `thrust::optional` of `column[i]`. The validity
* of the optional is determined by the `Nullate` parameter which may be one of the following:
*
* - `nullate::YES` means that the column supports nulls and the optional returned
* might be valid or invalid.
*
* - `nullate::NO` means the caller attests that the column has no null values,
* no checks will occur and `thrust::optional{column[i]}` will be
* returned for each `i`.
*
* - `nullate::DYNAMIC` defers the assumption of nullability to runtime and the caller
* specifies if the column has nulls at runtime.
* For `DYNAMIC{true}` the return value will be `thrust::optional{column[i]}` if
* element `i` is not null and `thrust::optional{}` if element `i` is null.
* For `DYNAMIC{false}` the return value will always be `thrust::optional{column[i]}`.
*
* @throws cudf::logic_error if column datatype and template T type mismatch.
* @throws cudf::logic_error if the column is not nullable and `with_nulls` evaluates to true
*
* @tparam T The type of elements in the column
* @tparam Nullate A cudf::nullate type describing how to check for nulls.
*/
template <typename T, typename Nullate>
struct optional_accessor {
column_device_view const col; ///< column view of column in device
/**
* @brief Constructor
*
* @param _col Column over which to iterate.
* @param with_nulls Indicates if the `col` should be checked for nulls.
*/
optional_accessor(column_device_view const& _col, Nullate with_nulls)
: col{_col}, has_nulls{with_nulls}
{
CUDF_EXPECTS(type_id_matches_device_storage_type<T>(col.type().id()), "the data type mismatch");
if (with_nulls) { CUDF_EXPECTS(_col.nullable(), "Unexpected non-nullable column."); }
}
/**
* @brief Returns a `thrust::optional` of `column[i]`.
*
* @param i The index of the element to return
* @return A `thrust::optional` that contains the value of `column[i]` if that element is not
* null. If the element is null, the resulting optional will not contain a value.
*/
__device__ inline thrust::optional<T> operator()(cudf::size_type i) const
{
if (has_nulls) {
return (col.is_valid_nocheck(i)) ? thrust::optional<T>{col.element<T>(i)}
: thrust::optional<T>{thrust::nullopt};
}
return thrust::optional<T>{col.element<T>(i)};
}
Nullate has_nulls{}; ///< Indicates if the `col` should be checked for nulls.
};
/**
* @brief pair accessor of column with/without null bitmask
*
* A unary functor that returns a pair of the scalar value at `id` and a boolean validity:
* `operator()(cudf::size_type id)` computes the element and
* returns `pair(element, validity)`.
*
* The return value for element `i` is `pair(column[i], validity)`, where
* `validity` is `true` if `has_nulls=false`, and is the validity of the
* element at `i` if `has_nulls=true` and the column is nullable.
*
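* A minimal sketch (illustrative; `d_col` is an assumption) wrapping the accessor in a
* transform iterator, which is essentially how `column_device_view::pair_begin` builds
* its iterator:
*
* @code{.cpp}
* auto it = thrust::make_transform_iterator(
*   thrust::counting_iterator<cudf::size_type>{0}, pair_accessor<int32_t, true>{*d_col});
* @endcode
*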
* @throws cudf::logic_error if `has_nulls==true` and the column is not
* nullable.
* @throws cudf::logic_error if column datatype and template T type mismatch.
*
* @tparam T The type of elements in the column
* @tparam has_nulls Boolean indicating whether to treat the column as nullable
*/
template <typename T, bool has_nulls = false>
struct pair_accessor {
column_device_view const col; ///< column view of column in device
/**
* @brief constructor
*
* @param[in] _col column device view of cudf column
*/
pair_accessor(column_device_view const& _col) : col{_col}
{
CUDF_EXPECTS(type_id_matches_device_storage_type<T>(col.type().id()), "the data type mismatch");
if (has_nulls) { CUDF_EXPECTS(_col.nullable(), "Unexpected non-nullable column."); }
}
/**
* @brief Pair accessor
*
* @param[in] i index of the element
* @return pair(element, validity)
*/
__device__ inline thrust::pair<T, bool> operator()(cudf::size_type i) const
{
return {col.element<T>(i), (has_nulls ? col.is_valid_nocheck(i) : true)};
}
};
/**
* @brief pair accessor of column with/without null bitmask
*
* A unary functor that returns a pair of the representative scalar value at `id` and a
* boolean validity: `operator()(cudf::size_type id)` computes the element and
* returns `pair(element, validity)`.
*
* The return value for element `i` is `pair(column[i], validity)`, where
* `validity` is `true` if `has_nulls=false`, and is the validity of the
* element at `i` if `has_nulls=true` and the column is nullable.
*
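* For example (illustrative; `d_col` is an assumption), with `T = numeric::decimal32` the
* accessor yields the underlying integer representation:
*
* @code{.cpp}
* auto acc = pair_rep_accessor<numeric::decimal32, true>{*d_col};
* // acc(i) is a thrust::pair<int32_t, bool>, since int32_t is decimal32's storage type
* @endcode
*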
* @throws cudf::logic_error if `has_nulls==true` and the column is not
* nullable.
* @throws cudf::logic_error if column datatype and template T type mismatch.
*
* @tparam T The type of elements in the column
* @tparam has_nulls Boolean indicating whether to treat the column as nullable
*/
template <typename T, bool has_nulls = false>
struct pair_rep_accessor {
column_device_view const col; ///< column view of column in device
using rep_type = device_storage_type_t<T>; ///< representation type
/**
* @brief constructor
*
* @param[in] _col column device view of cudf column
*/
pair_rep_accessor(column_device_view const& _col) : col{_col}
{
CUDF_EXPECTS(type_id_matches_device_storage_type<T>(col.type().id()), "the data type mismatch");
if (has_nulls) { CUDF_EXPECTS(_col.nullable(), "Unexpected non-nullable column."); }
}
/**
* @brief Pair accessor
*
* @param[in] i index of element to access
* @return pair of element and validity
*/
__device__ inline thrust::pair<rep_type, bool> operator()(cudf::size_type i) const
{
return {get_rep<T>(i), (has_nulls ? col.is_valid_nocheck(i) : true)};
}
private:
template <typename R, std::enable_if_t<std::is_same_v<R, rep_type>, void>* = nullptr>
__device__ inline auto get_rep(cudf::size_type i) const
{
return col.element<R>(i);
}
template <typename R, std::enable_if_t<not std::is_same_v<R, rep_type>, void>* = nullptr>
__device__ inline auto get_rep(cudf::size_type i) const
{
return col.element<R>(i).value();
}
};
/**
* @brief Mutable value accessor of column without null bitmask
*
* A unary functor that accepts an index and returns a reference to the element at that index in the
* column.
*
* @throws cudf::logic_error if the column is nullable
* @throws cudf::logic_error if column datatype and template T type mismatch
*
* @tparam T The type of elements in the column
*/
template <typename T>
struct mutable_value_accessor {
mutable_column_device_view col; ///< mutable column view of column in device
/**
* @brief Constructor
*
* @param[in] _col mutable column device view of cudf column
*/
mutable_value_accessor(mutable_column_device_view& _col) : col{_col}
{
CUDF_EXPECTS(type_id_matches_device_storage_type<T>(col.type().id()), "the data type mismatch");
}
/**
* @brief Accessor
*
* @param i index of element to access
* @return reference to element at `i`
*/
__device__ T& operator()(cudf::size_type i) { return col.element<T>(i); }
};
/**
* @brief Helper function for use by column_device_view and mutable_column_device_view
* constructors to build device_views from views.
*
* It is used to build the array of child columns in device memory. Since child columns can
* also have child columns, this uses recursion to build up the flat device buffer to contain
* all the children and set the member pointers appropriately.
*
* This is accomplished by laying out all the children and grand-children into a flat host
* buffer first, while keeping a running device pointer to use when setting the
* d_children array result.
*
* This function is provided both the host pointer in which to insert its children (and
* by recursion its grand-children) and the device pointer to be used when calculating
* ultimate device pointer for the d_children member.
*
* @tparam ColumnDeviceView is either column_device_view or mutable_column_device_view
* @tparam ColumnViewIterator Iterator over column_view or mutable_column_view objects
*
* @param child_begin Iterator pointing to begin of child columns to make into a device view
* @param child_end Iterator pointing to end of child columns to make into a device view
* @param h_ptr The host memory where to place any child data
* @param d_ptr The device pointer for calculating the d_children member of any child data
* @return The device pointer to be used for the d_children member of the given column
*/
template <typename ColumnDeviceView, typename ColumnViewIterator>
ColumnDeviceView* child_columns_to_device_array(ColumnViewIterator child_begin,
ColumnViewIterator child_end,
void* h_ptr,
void* d_ptr)
{
ColumnDeviceView* d_children = detail::align_ptr_for_type<ColumnDeviceView>(d_ptr);
auto num_children = std::distance(child_begin, child_end);
if (num_children > 0) {
// The beginning of the memory must be the fixed-sized ColumnDeviceView
// struct objects in order for d_children to be used as an array.
auto h_column = detail::align_ptr_for_type<ColumnDeviceView>(h_ptr);
auto d_column = d_children;
// Any child data is assigned past the end of this array: h_end and d_end.
auto h_end = reinterpret_cast<int8_t*>(h_column + num_children);
auto d_end = reinterpret_cast<int8_t*>(d_column + num_children);
std::for_each(child_begin, child_end, [&](auto const& col) {
// inplace-new each child into host memory
new (h_column) ColumnDeviceView(col, h_end, d_end);
h_column++; // advance to next child
// update the pointers for holding this child column's child data
auto col_child_data_size = ColumnDeviceView::extent(col) - sizeof(ColumnDeviceView);
h_end += col_child_data_size;
d_end += col_child_data_size;
});
}
return d_children;
}
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/structs/struct_view.hpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
/**
* @file
* @brief Class definition for cudf::struct_view.
*/
namespace cudf {
/**
* @brief A non-owning, immutable view of device data that represents
* a struct with fields of arbitrary types (including primitives, lists,
* and other structs)
*/
class struct_view {};
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/structs/structs_column_view.hpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
/**
* @file
* @brief Class definition for cudf::structs_column_view.
*/
namespace cudf {
/**
* @addtogroup structs_classes
* @{
*/
/**
* @brief Given a column view of struct type, an instance of this class
* provides a wrapper on this compound column for struct operations.
*/
class structs_column_view : public column_view {
public:
// Foundation members:
structs_column_view(structs_column_view const&) = default; ///< Copy constructor
structs_column_view(structs_column_view&&) = default; ///< Move constructor
~structs_column_view() = default;
/**
* @brief Copy assignment operator
*
* @return The reference to this structs column
*/
structs_column_view& operator=(structs_column_view const&) = default;
/**
* @brief Move assignment operator
*
* @return The reference to this structs column
*/
structs_column_view& operator=(structs_column_view&&) = default;
/**
* @brief Construct a new structs column view object from a column view.
*
* @param col The column view to wrap
*/
explicit structs_column_view(column_view const& col);
/**
* @brief Returns the parent column.
*
* @return The parent column
*/
[[nodiscard]] column_view parent() const;
using column_view::child_begin;
using column_view::child_end;
using column_view::has_nulls;
using column_view::null_count;
using column_view::null_mask;
using column_view::num_children;
using column_view::offset;
using column_view::size;
/**
* @brief Returns the internal child column, applying any offset from the root.
*
* Slice/split offset values are only stored at the root level of a struct column.
* So when doing computations on them, we need to apply that offset to
* the child columns when recursing. Most functions operating in a recursive manner
* on struct columns should be using `get_sliced_child()` instead of `child()`.
*
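* A minimal sketch (illustrative; `sliced_struct_col` is an assumption):
*
* @code{.cpp}
* cudf::structs_column_view scv{sliced_struct_col};
* auto field0 = scv.get_sliced_child(0);  // already adjusted for the parent's offset and size
* @endcode
*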
* @throw cudf::logic_error if this is an empty column
*
* @param index The index of the child column to return
* @param stream The stream on which to perform the operation. Uses the default CUDF
* stream if none is specified.
* @return The child column sliced relative to the parent's offset and size
*/
[[nodiscard]] column_view get_sliced_child(
int index, rmm::cuda_stream_view stream = cudf::get_default_stream()) const;
}; // class structs_column_view;
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/structs/structs_column_device_view.cuh
|
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_device_view.cuh>
#include <cudf/types.hpp>
namespace cudf {
namespace detail {
/**
* @brief Given a column_device_view, an instance of this class provides a
* wrapper on this compound column for struct operations.
* Analogous to structs_column_view.
*/
class structs_column_device_view : private column_device_view {
public:
structs_column_device_view() = delete;
~structs_column_device_view() = default;
structs_column_device_view(structs_column_device_view const&) = default; ///< Copy constructor
structs_column_device_view(structs_column_device_view&&) = default; ///< Move constructor
/**
* @brief Copy assignment operator
*
* @return The reference to this structs column
*/
structs_column_device_view& operator=(structs_column_device_view const&) = default;
/**
* @brief Move assignment operator
*
* @return The reference to this structs column
*/
structs_column_device_view& operator=(structs_column_device_view&&) = default;
/**
* @brief Construct a new structs column device view object from a column device view.
*
* @param underlying_ The column device view to wrap
*/
CUDF_HOST_DEVICE structs_column_device_view(column_device_view const& underlying_)
: column_device_view(underlying_)
{
#ifdef __CUDA_ARCH__
cudf_assert(underlying_.type().id() == type_id::STRUCT and
"structs_column_device_view only supports structs");
#else
CUDF_EXPECTS(underlying_.type().id() == type_id::STRUCT,
"structs_column_device_view only supports structs");
#endif
}
using column_device_view::child;
using column_device_view::is_null;
using column_device_view::nullable;
using column_device_view::offset;
using column_device_view::size;
/**
* @brief Fetches the child column of the underlying struct column.
*
* @param idx The index of the child column to fetch
* @return The child column sliced relative to the parent's offset and size
*/
[[nodiscard]] __device__ inline column_device_view get_sliced_child(size_type idx) const
{
return child(idx).slice(offset(), size());
}
};
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/structs
|
rapidsai_public_repos/cudf/cpp/include/cudf/structs/detail/concatenate.hpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/structs/structs_column_view.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/span.hpp>
namespace cudf {
namespace structs {
namespace detail {
/**
* @brief Returns a single column by concatenating the given vector of structs columns.
*
* @code{.pseudo}
* s1 = [ col0 : {0, 1}
* col1 : {2, 3, 4, 5, 6}
* col2 : {"abc", "def", "ghi"} ]
*
* s2 = [ col0 : {7, 8}
* col1 : {-4, -5, -6}
* col2 : {"uvw", "xyz"} ]
*
* r = concatenate({s1, s2})
*
* r is now [ col0: {0, 1, 7, 8}
* col1: {2, 3, 4, 5, 6, -4, -5, -6}
* col2: {"abc", "def", "ghi", "uvw", "xyz"} ]
* @endcode
*
* @param columns Vector of structs columns to concatenate.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory.
* @return New column with concatenated results.
*/
std::unique_ptr<column> concatenate(host_span<column_view const> columns,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace structs
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/structs
|
rapidsai_public_repos/cudf/cpp/include/cudf/structs/detail/scan.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace structs {
namespace detail {
/**
* @brief Scan function for struct column type
*
* Called by cudf::scan() with only min and max aggregates.
*
* @tparam Op Either DeviceMin or DeviceMax operations
*
* @param input Input column
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New struct column
*/
template <typename Op>
std::unique_ptr<column> scan_inclusive(column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace detail
} // namespace structs
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/ast/expressions.hpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/scalar/scalar.hpp>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/error.hpp>
#include <cstdint>
namespace cudf {
namespace ast {
// Forward declaration.
namespace detail {
class expression_parser;
class expression_transformer;
} // namespace detail
/**
* @brief A generic expression that can be evaluated to return a value.
*
* This class is a part of a "visitor" pattern with the `expression_parser` class.
* Expressions inheriting from this class can accept parsers as visitors.
*/
struct expression {
/**
* @brief Accepts a visitor class.
*
* @param visitor The `expression_parser` parsing this expression tree
* @return Index of device data reference for this instance
*/
virtual cudf::size_type accept(detail::expression_parser& visitor) const = 0;
/**
* @brief Accepts a visitor class.
*
* @param visitor The `expression_transformer` transforming this expression tree
* @return Reference wrapper of transformed expression
*/
virtual std::reference_wrapper<expression const> accept(
detail::expression_transformer& visitor) const = 0;
/**
* @brief Returns true if the expression may evaluate to null.
*
* @param left The left operand of the expression (The same is used as right operand)
* @param stream CUDA stream used for device memory operations and kernel launches
* @return `true` if the expression may evaluate to null, otherwise false
*/
[[nodiscard]] bool may_evaluate_null(table_view const& left, rmm::cuda_stream_view stream) const
{
return may_evaluate_null(left, left, stream);
}
/**
* @brief Returns true if the expression may evaluate to null.
*
* @param left The left operand of the expression
* @param right The right operand of the expression
* @param stream CUDA stream used for device memory operations and kernel launches
* @return `true` if the expression may evaluate to null, otherwise false
*/
[[nodiscard]] virtual bool may_evaluate_null(table_view const& left,
table_view const& right,
rmm::cuda_stream_view stream) const = 0;
virtual ~expression() {}
};
/**
* @brief Enum of supported operators.
*/
enum class ast_operator : int32_t {
// Binary operators
ADD, ///< operator +
SUB, ///< operator -
MUL, ///< operator *
DIV, ///< operator / using common type of lhs and rhs
TRUE_DIV, ///< operator / after promoting type to floating point
FLOOR_DIV, ///< operator / after promoting to 64 bit floating point and then
///< flooring the result
MOD, ///< operator %
PYMOD, ///< operator % using Python's sign rules for negatives
POW, ///< lhs ^ rhs
EQUAL, ///< operator ==
NULL_EQUAL, ///< operator == with Spark rules: NULL_EQUAL(null, null) is true, NULL_EQUAL(null,
///< valid) is false, and
///< NULL_EQUAL(valid, valid) == EQUAL(valid, valid)
NOT_EQUAL, ///< operator !=
LESS, ///< operator <
GREATER, ///< operator >
LESS_EQUAL, ///< operator <=
GREATER_EQUAL, ///< operator >=
BITWISE_AND, ///< operator &
BITWISE_OR, ///< operator |
BITWISE_XOR, ///< operator ^
LOGICAL_AND, ///< operator &&
NULL_LOGICAL_AND, ///< operator && with Spark rules: NULL_LOGICAL_AND(null, null) is null,
///< NULL_LOGICAL_AND(null, true) is
///< null, NULL_LOGICAL_AND(null, false) is false, and NULL_LOGICAL_AND(valid,
///< valid) == LOGICAL_AND(valid, valid)
LOGICAL_OR, ///< operator ||
NULL_LOGICAL_OR, ///< operator || with Spark rules: NULL_LOGICAL_OR(null, null) is null,
///< NULL_LOGICAL_OR(null, true) is true,
///< NULL_LOGICAL_OR(null, false) is null, and NULL_LOGICAL_OR(valid, valid) ==
///< LOGICAL_OR(valid, valid)
// Unary operators
IDENTITY, ///< Identity function
IS_NULL, ///< Check if operand is null
SIN, ///< Trigonometric sine
COS, ///< Trigonometric cosine
TAN, ///< Trigonometric tangent
ARCSIN, ///< Trigonometric sine inverse
ARCCOS, ///< Trigonometric cosine inverse
ARCTAN, ///< Trigonometric tangent inverse
SINH, ///< Hyperbolic sine
COSH, ///< Hyperbolic cosine
TANH, ///< Hyperbolic tangent
ARCSINH, ///< Hyperbolic sine inverse
ARCCOSH, ///< Hyperbolic cosine inverse
ARCTANH, ///< Hyperbolic tangent inverse
EXP, ///< Exponential (base e, Euler number)
LOG, ///< Natural Logarithm (base e)
SQRT, ///< Square-root (x^0.5)
CBRT, ///< Cube-root (x^(1.0/3))
CEIL, ///< Smallest integer value not less than arg
FLOOR, ///< largest integer value not greater than arg
ABS, ///< Absolute value
RINT, ///< Rounds the floating-point argument arg to an integer value
BIT_INVERT, ///< Bitwise Not (~)
NOT, ///< Logical Not (!)
CAST_TO_INT64, ///< Cast value to int64_t
CAST_TO_UINT64, ///< Cast value to uint64_t
CAST_TO_FLOAT64 ///< Cast value to double
};
/**
* @brief Enum of table references.
*
* This determines which table to use in cases with two tables (e.g. joins).
*/
enum class table_reference {
LEFT, ///< Column index in the left table
RIGHT, ///< Column index in the right table
OUTPUT ///< Column index in the output table
};
/**
* @brief A type-erased scalar_device_view where the value is a fixed width type or a string
*/
class generic_scalar_device_view : public cudf::detail::scalar_device_view_base {
public:
/**
* @brief Returns the stored value.
*
* @tparam T The desired type
* @returns The stored value
*/
template <typename T>
__device__ T const value() const noexcept
{
if constexpr (std::is_same_v<T, cudf::string_view>) {
return string_view(static_cast<char const*>(_data), _size);
}
return *static_cast<T const*>(_data);
}
/** @brief Construct a new generic scalar device view object from a numeric scalar
*
* @param s The numeric scalar to construct from
*/
template <typename T>
generic_scalar_device_view(numeric_scalar<T>& s)
: generic_scalar_device_view(s.type(), s.data(), s.validity_data())
{
}
/** @brief Construct a new generic scalar device view object from a timestamp scalar
*
* @param s The timestamp scalar to construct from
*/
template <typename T>
generic_scalar_device_view(timestamp_scalar<T>& s)
: generic_scalar_device_view(s.type(), s.data(), s.validity_data())
{
}
/** @brief Construct a new generic scalar device view object from a duration scalar
*
* @param s The duration scalar to construct from
*/
template <typename T>
generic_scalar_device_view(duration_scalar<T>& s)
: generic_scalar_device_view(s.type(), s.data(), s.validity_data())
{
}
/** @brief Construct a new generic scalar device view object from a string scalar
*
* @param s The string scalar to construct from
*/
generic_scalar_device_view(string_scalar& s)
: generic_scalar_device_view(s.type(), s.data(), s.validity_data(), s.size())
{
}
protected:
void const* _data{}; ///< Pointer to device memory containing the value
size_type const _size{}; ///< Size of the string in bytes for string scalar
/**
* @brief Construct a new fixed width scalar device view object
*
* @param type The data type of the value
* @param data The pointer to the data in device memory
* @param is_valid The pointer to the bool in device memory that indicates the
* validity of the stored value
*/
generic_scalar_device_view(data_type type, void const* data, bool* is_valid)
: cudf::detail::scalar_device_view_base(type, is_valid), _data(data)
{
}
/** @brief Construct a new string scalar device view object
*
* @param type The data type of the value
* @param data The pointer to the data in device memory
* @param is_valid The pointer to the bool in device memory that indicates the
* validity of the stored value
* @param size The size of the string in bytes
*/
generic_scalar_device_view(data_type type, void const* data, bool* is_valid, size_type size)
: cudf::detail::scalar_device_view_base(type, is_valid), _data(data), _size(size)
{
}
};
/**
* @brief A literal value used in an abstract syntax tree.
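*
* A minimal sketch (illustrative); the scalar must outlive the literal, which only
* stores a reference to it:
*
* @code{.cpp}
* cudf::numeric_scalar<int32_t> forty_two{42};
* cudf::ast::literal lit{forty_two};
* @endcode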
*/
class literal : public expression {
public:
/**
* @brief Construct a new literal object.
*
* @tparam T Numeric scalar template type
* @param value A numeric scalar value
*/
template <typename T>
literal(cudf::numeric_scalar<T>& value) : scalar(value), value(value)
{
}
/**
* @brief Construct a new literal object.
*
* @tparam T Timestamp scalar template type
* @param value A timestamp scalar value
*/
template <typename T>
literal(cudf::timestamp_scalar<T>& value) : scalar(value), value(value)
{
}
/**
* @brief Construct a new literal object.
*
* @tparam T Duration scalar template type
* @param value A duration scalar value
*/
template <typename T>
literal(cudf::duration_scalar<T>& value) : scalar(value), value(value)
{
}
/**
* @brief Construct a new literal object.
*
* @param value A string scalar value
*/
literal(cudf::string_scalar& value) : scalar(value), value(value) {}
/**
* @brief Get the data type.
*
* @return The data type of the literal
*/
[[nodiscard]] cudf::data_type get_data_type() const { return get_value().type(); }
/**
* @brief Get the value object.
*
* @return The device scalar object
*/
[[nodiscard]] generic_scalar_device_view get_value() const { return value; }
/**
* @copydoc expression::accept
*/
cudf::size_type accept(detail::expression_parser& visitor) const override;
/**
* @copydoc expression::accept
*/
std::reference_wrapper<expression const> accept(
detail::expression_transformer& visitor) const override;
[[nodiscard]] bool may_evaluate_null(table_view const& left,
table_view const& right,
rmm::cuda_stream_view stream) const override
{
return !is_valid(stream);
}
/**
* @brief Check if the underlying scalar is valid.
*
* @param stream CUDA stream used for device memory operations and kernel launches
* @return true if the underlying scalar is valid
*/
[[nodiscard]] bool is_valid(rmm::cuda_stream_view stream) const
{
return scalar.is_valid(stream);
}
private:
cudf::scalar const& scalar;
generic_scalar_device_view const value;
};
/**
* @brief An expression referring to data from a column in a table.
*/
class column_reference : public expression {
public:
/**
* @brief Construct a new column reference object
*
* @param column_index Index of this column in the table (provided when the expression is
* evaluated).
* @param table_source Which table to use in cases with two tables (e.g. joins)
*/
column_reference(cudf::size_type column_index,
table_reference table_source = table_reference::LEFT)
: column_index(column_index), table_source(table_source)
{
}
/**
* @brief Get the column index.
*
* @return The column index of the column reference
*/
[[nodiscard]] cudf::size_type get_column_index() const { return column_index; }
/**
* @brief Get the table source.
*
* @return table_reference The reference to the table containing this column
*/
[[nodiscard]] table_reference get_table_source() const { return table_source; }
/**
* @brief Get the data type.
*
* @param table Table used to determine types
* @return The data type of the column
*/
[[nodiscard]] cudf::data_type get_data_type(table_view const& table) const
{
return table.column(get_column_index()).type();
}
/**
* @brief Get the data type.
*
* @param left_table Left table used to determine types
* @param right_table Right table used to determine types
* @return The data type of the column
*/
[[nodiscard]] cudf::data_type get_data_type(table_view const& left_table,
table_view const& right_table) const
{
auto const table = [&] {
if (get_table_source() == table_reference::LEFT) {
return left_table;
} else if (get_table_source() == table_reference::RIGHT) {
return right_table;
} else {
CUDF_FAIL("Column reference data type cannot be determined from unknown table.");
}
}();
return table.column(get_column_index()).type();
}
/**
* @copydoc expression::accept
*/
cudf::size_type accept(detail::expression_parser& visitor) const override;
/**
* @copydoc expression::accept
*/
std::reference_wrapper<expression const> accept(
detail::expression_transformer& visitor) const override;
[[nodiscard]] bool may_evaluate_null(table_view const& left,
table_view const& right,
rmm::cuda_stream_view stream) const override
{
return (table_source == table_reference::LEFT ? left : right).column(column_index).has_nulls();
}
private:
cudf::size_type column_index;
table_reference table_source;
};
/**
* @brief An operation expression holds an operator and one or more operands.
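*
* A minimal sketch (illustrative) composing `column 0 + column 1`; the operands are
* named lvalues because `operation` only stores references to its inputs:
*
* @code{.cpp}
* auto c0  = cudf::ast::column_reference(0);
* auto c1  = cudf::ast::column_reference(1);
* auto sum = cudf::ast::operation(cudf::ast::ast_operator::ADD, c0, c1);
* @endcode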
*/
class operation : public expression {
public:
/**
* @brief Construct a new unary operation object.
*
* @param op Operator
* @param input Input expression (operand)
*/
operation(ast_operator op, expression const& input);
/**
* @brief Construct a new binary operation object.
*
* @param op Operator
* @param left Left input expression (left operand)
* @param right Right input expression (right operand)
*/
operation(ast_operator op, expression const& left, expression const& right);
// operation only stores references to expressions, so it does not accept r-value
// references: the calling code must own the expressions.
operation(ast_operator op, expression&& input) = delete;
operation(ast_operator op, expression&& left, expression&& right) = delete;
operation(ast_operator op, expression&& left, expression const& right) = delete;
operation(ast_operator op, expression const& left, expression&& right) = delete;
/**
* @brief Get the operator.
*
* @return The operator
*/
[[nodiscard]] ast_operator get_operator() const { return op; }
/**
* @brief Get the operands.
*
* @return Vector of operands
*/
std::vector<std::reference_wrapper<expression const>> get_operands() const { return operands; }
/**
* @copydoc expression::accept
*/
cudf::size_type accept(detail::expression_parser& visitor) const override;
/**
* @copydoc expression::accept
*/
std::reference_wrapper<expression const> accept(
detail::expression_transformer& visitor) const override;
[[nodiscard]] bool may_evaluate_null(table_view const& left,
table_view const& right,
rmm::cuda_stream_view stream) const override
{
return std::any_of(operands.cbegin(),
operands.cend(),
[&left, &right, &stream](std::reference_wrapper<expression const> subexpr) {
return subexpr.get().may_evaluate_null(left, right, stream);
});
};
private:
ast_operator const op;
std::vector<std::reference_wrapper<expression const>> const operands;
};
/**
* @brief An expression referring to data from a column in a table by name.
*/
class column_name_reference : public expression {
public:
/**
* @brief Construct a new column name reference object
*
* @param column_name Name of this column in the table metadata (provided when the expression is
* evaluated).
*/
column_name_reference(std::string column_name) : column_name(std::move(column_name)) {}
/**
* @brief Get the column name.
*
* @return The name of this column reference
*/
[[nodiscard]] std::string get_column_name() const { return column_name; }
/**
* @copydoc expression::accept
*/
cudf::size_type accept(detail::expression_parser& visitor) const override;
/**
* @copydoc expression::accept
*/
std::reference_wrapper<expression const> accept(
detail::expression_transformer& visitor) const override;
[[nodiscard]] bool may_evaluate_null(table_view const& left,
table_view const& right,
rmm::cuda_stream_view stream) const override
{
return true;
}
private:
std::string column_name;
};
} // namespace ast
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/ast
|
rapidsai_public_repos/cudf/cpp/include/cudf/ast/detail/expression_parser.hpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/ast/detail/operators.hpp>
#include <cudf/ast/expressions.hpp>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <thrust/scan.h>
#include <functional>
#include <numeric>
#include <optional>
namespace cudf {
namespace ast {
namespace detail {
/**
* @brief Node data reference types.
*
* This enum is device-specific. For instance, intermediate data references are generated by the
* linearization process but cannot be explicitly created by the user.
*/
enum class device_data_reference_type {
COLUMN, ///< A value in a table column
LITERAL, ///< A literal value
INTERMEDIATE ///< An internal temporary value
};
/**
* @brief A device data reference describes a source of data used by a expression.
*
* This is a POD class used to create references describing data type and locations for consumption
* by the `row_evaluator`.
*/
struct alignas(8) device_data_reference {
device_data_reference(device_data_reference_type reference_type,
cudf::data_type data_type,
cudf::size_type data_index,
table_reference table_source);
device_data_reference(device_data_reference_type reference_type,
cudf::data_type data_type,
cudf::size_type data_index);
device_data_reference_type const reference_type; // Source of data
cudf::data_type const data_type; // Type of data
cudf::size_type const data_index; // The column index of a table, index of a
// literal, or index of an intermediate
table_reference const table_source;
bool operator==(device_data_reference const& rhs) const
{
return std::tie(data_index, data_type, reference_type, table_source) ==
std::tie(rhs.data_index, rhs.data_type, rhs.reference_type, rhs.table_source);
}
};
// Type used for intermediate storage in expression evaluation.
template <bool has_nulls>
using IntermediateDataType = possibly_null_value_t<std::int64_t, has_nulls>;
/**
* @brief A container of all device data required to evaluate an expression on tables.
*
* This struct should never be instantiated directly. It is created by the
* `expression_parser` on construction, and the resulting member is publicly accessible
* for passing to kernels for constructing an `expression_evaluator`.
*
*/
struct expression_device_view {
device_span<detail::device_data_reference const> data_references;
device_span<generic_scalar_device_view const> literals;
device_span<ast_operator const> operators;
device_span<cudf::size_type const> operator_source_indices;
cudf::size_type num_intermediates;
};
/**
* @brief The expression_parser traverses an expression and converts it into a form suitable for
* execution on the device.
*
* This class is part of a "visitor" pattern with the `expression` class.
*
* This class does pre-processing work on the host, validating operators and operand data types. It
* traverses downward from a root expression in a depth-first fashion, capturing information about
* the expressions and constructing vectors of information that are later used by the device for
* evaluating the abstract syntax tree as a "linear" list of operators whose input dependencies are
* resolved into intermediate data storage in shared memory.
*/
class expression_parser {
public:
/**
* @brief Construct a new expression_parser object
*
* @param expr The expression to create an evaluable expression_parser for.
* @param left The left table used for evaluating the abstract syntax tree.
 * @param right The right table used for evaluating the abstract syntax tree.
 * @param has_nulls Whether or not any of the input columns may contain nulls.
 * @param stream CUDA stream used for device memory operations and kernel launches.
 * @param mr Device memory resource used to allocate the device data buffer.
 */
expression_parser(expression const& expr,
cudf::table_view const& left,
std::optional<std::reference_wrapper<cudf::table_view const>> right,
bool has_nulls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: _left{left},
_right{right},
_expression_count{0},
_intermediate_counter{},
_has_nulls(has_nulls)
{
expr.accept(*this);
move_to_device(stream, mr);
}
/**
* @brief Construct a new expression_parser object
*
* @param expr The expression to create an evaluable expression_parser for.
 * @param table The table used for evaluating the abstract syntax tree.
 * @param has_nulls Whether or not any of the input columns may contain nulls.
 * @param stream CUDA stream used for device memory operations and kernel launches.
 * @param mr Device memory resource used to allocate the device data buffer.
 */
expression_parser(expression const& expr,
cudf::table_view const& table,
bool has_nulls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: expression_parser(expr, table, {}, has_nulls, stream, mr)
{
}
/**
* @brief Get the root data type of the abstract syntax tree.
*
* @return cudf::data_type
*/
[[nodiscard]] cudf::data_type output_type() const;
/**
* @brief Visit a literal expression.
*
* @param expr Literal expression.
* @return cudf::size_type Index of device data reference for the expression.
*/
cudf::size_type visit(literal const& expr);
/**
* @brief Visit a column reference expression.
*
* @param expr Column reference expression.
* @return cudf::size_type Index of device data reference for the expression.
*/
cudf::size_type visit(column_reference const& expr);
/**
 * @brief Visit an operation expression.
 *
 * @param expr Operation expression.
* @return cudf::size_type Index of device data reference for the expression.
*/
cudf::size_type visit(operation const& expr);
/**
* @brief Visit a column name reference expression.
*
* @param expr Column name reference expression.
* @return cudf::size_type Index of device data reference for the expression.
*/
cudf::size_type visit(column_name_reference const& expr);
/**
* @brief Internal class used to track the utilization of intermediate storage locations.
*
* As expressions are being evaluated, they may generate "intermediate" data that is immediately
* consumed. Rather than manifesting this data in global memory, we can store intermediates of any
* fixed width type (up to 8 bytes) by placing them in shared memory. This class helps to track
* the number and indices of intermediate data in shared memory using a give-take model. Locations
* in shared memory can be "taken" and used for storage, "given back," and then later re-used.
* This aims to minimize the maximum amount of shared memory needed at any point during the
* evaluation.
*
*/
class intermediate_counter {
public:
intermediate_counter() : used_values() {}
cudf::size_type take();
void give(cudf::size_type value);
[[nodiscard]] cudf::size_type get_max_used() const { return max_used; }
private:
/**
* @brief Find the first missing value in a contiguous sequence of integers.
*
* From a sorted container of integers, find the first "missing" value.
* For example, {0, 1, 2, 4, 5} is missing 3, and {1, 2, 3} is missing 0.
* If there are no missing values, return the size of the container.
*
* @return cudf::size_type Smallest value not already in the container.
*/
[[nodiscard]] cudf::size_type find_first_missing() const;
std::vector<cudf::size_type> used_values;
cudf::size_type max_used{0};
};
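/*
 * Give-take sketch (illustrative): take() hands out the smallest free slot and
 * give() returns a slot for reuse, so get_max_used() reports the high-water
 * mark of simultaneously live intermediates.
 *
 *   intermediate_counter counter;
 *   auto const a = counter.take();  // 0
 *   auto const b = counter.take();  // 1
 *   counter.give(a);                // slot 0 becomes free
 *   auto const c = counter.take();  // 0, reused
 *   // counter.get_max_used() == 2
 */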
expression_device_view device_expression_data; ///< The collection of data required to evaluate
///< the expression on the device.
int shmem_per_thread;
private:
/**
* @brief Helper function for adding components (operators, literals, etc) to AST plan
*
* @tparam T The underlying type of the input `std::vector`
* @param[in] v The `std::vector` containing components (operators, literals, etc).
* @param[in,out] sizes The `std::vector` containing the size of each data buffer.
* @param[in,out] data_pointers The `std::vector` containing pointers to each data buffer.
*/
template <typename T>
void extract_size_and_pointer(std::vector<T> const& v,
std::vector<cudf::size_type>& sizes,
std::vector<void const*>& data_pointers)
{
auto const data_size = sizeof(T) * v.size();
sizes.push_back(data_size);
data_pointers.push_back(v.data());
}
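/*
 * Packing sketch (illustrative sizes): if the data references, literals,
 * operators and operator source indices occupy 96, 32, 16 and 8 bytes, the
 * exclusive scan in move_to_device() below produces offsets {0, 96, 128, 144};
 * each component is then memcpy'd into one 152-byte host staging buffer and
 * copied to the device in a single allocation.
 */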
void move_to_device(rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
{
std::vector<cudf::size_type> sizes;
std::vector<void const*> data_pointers;
extract_size_and_pointer(_data_references, sizes, data_pointers);
extract_size_and_pointer(_literals, sizes, data_pointers);
extract_size_and_pointer(_operators, sizes, data_pointers);
extract_size_and_pointer(_operator_source_indices, sizes, data_pointers);
// Create device buffer
auto const buffer_size = std::accumulate(sizes.cbegin(), sizes.cend(), 0);
auto buffer_offsets = std::vector<int>(sizes.size());
thrust::exclusive_scan(sizes.cbegin(), sizes.cend(), buffer_offsets.begin(), 0);
auto h_data_buffer = std::vector<char>(buffer_size);
for (unsigned int i = 0; i < data_pointers.size(); ++i) {
std::memcpy(h_data_buffer.data() + buffer_offsets[i], data_pointers[i], sizes[i]);
}
_device_data_buffer = rmm::device_buffer(h_data_buffer.data(), buffer_size, stream, mr);
stream.synchronize();
// Create device pointers to components of plan
auto device_data_buffer_ptr = static_cast<char const*>(_device_data_buffer.data());
device_expression_data.data_references = device_span<detail::device_data_reference const>(
reinterpret_cast<detail::device_data_reference const*>(device_data_buffer_ptr +
buffer_offsets[0]),
_data_references.size());
device_expression_data.literals = device_span<generic_scalar_device_view const>(
reinterpret_cast<generic_scalar_device_view const*>(device_data_buffer_ptr +
buffer_offsets[1]),
_literals.size());
device_expression_data.operators = device_span<ast_operator const>(
reinterpret_cast<ast_operator const*>(device_data_buffer_ptr + buffer_offsets[2]),
_operators.size());
device_expression_data.operator_source_indices = device_span<cudf::size_type const>(
reinterpret_cast<cudf::size_type const*>(device_data_buffer_ptr + buffer_offsets[3]),
_operator_source_indices.size());
device_expression_data.num_intermediates = _intermediate_counter.get_max_used();
shmem_per_thread = static_cast<int>(
(_has_nulls ? sizeof(IntermediateDataType<true>) : sizeof(IntermediateDataType<false>)) *
device_expression_data.num_intermediates);
}
/**
* @brief Helper function for recursive traversal of expressions.
*
* When parsing an expression composed of subexpressions, all subexpressions
* must be evaluated before an operator can be applied to them. This method
* performs that recursive traversal (in conjunction with the
* `expression_parser.visit` and `expression.accept` methods if necessary to
* descend deeper into an expression tree).
*
* @param operands The operands to visit.
*
* @return The indices of the operands stored in the data references.
*/
std::vector<cudf::size_type> visit_operands(
std::vector<std::reference_wrapper<expression const>> operands);
/**
* @brief Add a data reference to the internal list.
*
* @param data_ref The data reference to add.
*
* @return The index of the added data reference in the internal data references list.
*/
cudf::size_type add_data_reference(detail::device_data_reference data_ref);
rmm::device_buffer
_device_data_buffer; ///< The device-side data buffer containing the plan information, which is
///< owned by this class and persists until it is destroyed.
cudf::table_view const& _left;
std::optional<std::reference_wrapper<cudf::table_view const>> _right;
cudf::size_type _expression_count;
intermediate_counter _intermediate_counter;
bool _has_nulls;
std::vector<detail::device_data_reference> _data_references;
std::vector<ast_operator> _operators;
std::vector<cudf::size_type> _operator_source_indices;
std::vector<generic_scalar_device_view> _literals;
};
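/*
 * Host-side usage sketch (illustrative; `expr` and `tbl` are assumed to exist):
 *
 *   auto const stream    = cudf::get_default_stream();
 *   bool const has_nulls = cudf::has_nulls(tbl);
 *   auto const parser    = cudf::ast::detail::expression_parser(
 *     expr, tbl, has_nulls, stream, rmm::mr::get_current_device_resource());
 *   // parser.device_expression_data is passed to the evaluating kernel, which
 *   // reserves parser.shmem_per_thread bytes of shared memory per thread.
 */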
} // namespace detail
} // namespace ast
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/ast
|
rapidsai_public_repos/cudf/cpp/include/cudf/ast/detail/operators.hpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/ast/expressions.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <thrust/optional.h>
#include <cuda/std/type_traits>
#include <cmath>
#include <type_traits>
#include <utility>
#include <vector>
namespace cudf {
namespace ast {
namespace detail {
// Type trait for wrapping nullable types in a thrust::optional. Non-nullable
// types are returned as is.
template <typename T, bool has_nulls>
struct possibly_null_value;
template <typename T>
struct possibly_null_value<T, true> {
using type = thrust::optional<T>;
};
template <typename T>
struct possibly_null_value<T, false> {
using type = T;
};
template <typename T, bool has_nulls>
using possibly_null_value_t = typename possibly_null_value<T, has_nulls>::type;
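// For example (illustrative): possibly_null_value_t<int32_t, true> is
// thrust::optional<int32_t>, while possibly_null_value_t<int32_t, false> is plain
// int32_t, letting nullable and non-nullable code paths share one template.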
// Traits for valid operator / type combinations
template <typename Op, typename LHS, typename RHS>
constexpr bool is_valid_binary_op = cuda::std::is_invocable_v<Op, LHS, RHS>;
template <typename Op, typename T>
constexpr bool is_valid_unary_op = cuda::std::is_invocable_v<Op, T>;
/**
* @brief Operator dispatcher
*
* @tparam F Type of forwarded functor.
* @tparam Ts Parameter pack of forwarded arguments.
* @param f Forwarded functor to be called.
* @param args Forwarded arguments to `operator()` of `f`.
*/
template <typename F, typename... Ts>
CUDF_HOST_DEVICE inline constexpr void ast_operator_dispatcher(ast_operator op, F&& f, Ts&&... args)
{
switch (op) {
case ast_operator::ADD:
f.template operator()<ast_operator::ADD>(std::forward<Ts>(args)...);
break;
case ast_operator::SUB:
f.template operator()<ast_operator::SUB>(std::forward<Ts>(args)...);
break;
case ast_operator::MUL:
f.template operator()<ast_operator::MUL>(std::forward<Ts>(args)...);
break;
case ast_operator::DIV:
f.template operator()<ast_operator::DIV>(std::forward<Ts>(args)...);
break;
case ast_operator::TRUE_DIV:
f.template operator()<ast_operator::TRUE_DIV>(std::forward<Ts>(args)...);
break;
case ast_operator::FLOOR_DIV:
f.template operator()<ast_operator::FLOOR_DIV>(std::forward<Ts>(args)...);
break;
case ast_operator::MOD:
f.template operator()<ast_operator::MOD>(std::forward<Ts>(args)...);
break;
case ast_operator::PYMOD:
f.template operator()<ast_operator::PYMOD>(std::forward<Ts>(args)...);
break;
case ast_operator::POW:
f.template operator()<ast_operator::POW>(std::forward<Ts>(args)...);
break;
case ast_operator::EQUAL:
f.template operator()<ast_operator::EQUAL>(std::forward<Ts>(args)...);
break;
case ast_operator::NULL_EQUAL:
f.template operator()<ast_operator::NULL_EQUAL>(std::forward<Ts>(args)...);
break;
case ast_operator::NOT_EQUAL:
f.template operator()<ast_operator::NOT_EQUAL>(std::forward<Ts>(args)...);
break;
case ast_operator::LESS:
f.template operator()<ast_operator::LESS>(std::forward<Ts>(args)...);
break;
case ast_operator::GREATER:
f.template operator()<ast_operator::GREATER>(std::forward<Ts>(args)...);
break;
case ast_operator::LESS_EQUAL:
f.template operator()<ast_operator::LESS_EQUAL>(std::forward<Ts>(args)...);
break;
case ast_operator::GREATER_EQUAL:
f.template operator()<ast_operator::GREATER_EQUAL>(std::forward<Ts>(args)...);
break;
case ast_operator::BITWISE_AND:
f.template operator()<ast_operator::BITWISE_AND>(std::forward<Ts>(args)...);
break;
case ast_operator::BITWISE_OR:
f.template operator()<ast_operator::BITWISE_OR>(std::forward<Ts>(args)...);
break;
case ast_operator::BITWISE_XOR:
f.template operator()<ast_operator::BITWISE_XOR>(std::forward<Ts>(args)...);
break;
case ast_operator::LOGICAL_AND:
f.template operator()<ast_operator::LOGICAL_AND>(std::forward<Ts>(args)...);
break;
case ast_operator::NULL_LOGICAL_AND:
f.template operator()<ast_operator::NULL_LOGICAL_AND>(std::forward<Ts>(args)...);
break;
case ast_operator::LOGICAL_OR:
f.template operator()<ast_operator::LOGICAL_OR>(std::forward<Ts>(args)...);
break;
case ast_operator::NULL_LOGICAL_OR:
f.template operator()<ast_operator::NULL_LOGICAL_OR>(std::forward<Ts>(args)...);
break;
case ast_operator::IDENTITY:
f.template operator()<ast_operator::IDENTITY>(std::forward<Ts>(args)...);
break;
case ast_operator::IS_NULL:
f.template operator()<ast_operator::IS_NULL>(std::forward<Ts>(args)...);
break;
case ast_operator::SIN:
f.template operator()<ast_operator::SIN>(std::forward<Ts>(args)...);
break;
case ast_operator::COS:
f.template operator()<ast_operator::COS>(std::forward<Ts>(args)...);
break;
case ast_operator::TAN:
f.template operator()<ast_operator::TAN>(std::forward<Ts>(args)...);
break;
case ast_operator::ARCSIN:
f.template operator()<ast_operator::ARCSIN>(std::forward<Ts>(args)...);
break;
case ast_operator::ARCCOS:
f.template operator()<ast_operator::ARCCOS>(std::forward<Ts>(args)...);
break;
case ast_operator::ARCTAN:
f.template operator()<ast_operator::ARCTAN>(std::forward<Ts>(args)...);
break;
case ast_operator::SINH:
f.template operator()<ast_operator::SINH>(std::forward<Ts>(args)...);
break;
case ast_operator::COSH:
f.template operator()<ast_operator::COSH>(std::forward<Ts>(args)...);
break;
case ast_operator::TANH:
f.template operator()<ast_operator::TANH>(std::forward<Ts>(args)...);
break;
case ast_operator::ARCSINH:
f.template operator()<ast_operator::ARCSINH>(std::forward<Ts>(args)...);
break;
case ast_operator::ARCCOSH:
f.template operator()<ast_operator::ARCCOSH>(std::forward<Ts>(args)...);
break;
case ast_operator::ARCTANH:
f.template operator()<ast_operator::ARCTANH>(std::forward<Ts>(args)...);
break;
case ast_operator::EXP:
f.template operator()<ast_operator::EXP>(std::forward<Ts>(args)...);
break;
case ast_operator::LOG:
f.template operator()<ast_operator::LOG>(std::forward<Ts>(args)...);
break;
case ast_operator::SQRT:
f.template operator()<ast_operator::SQRT>(std::forward<Ts>(args)...);
break;
case ast_operator::CBRT:
f.template operator()<ast_operator::CBRT>(std::forward<Ts>(args)...);
break;
case ast_operator::CEIL:
f.template operator()<ast_operator::CEIL>(std::forward<Ts>(args)...);
break;
case ast_operator::FLOOR:
f.template operator()<ast_operator::FLOOR>(std::forward<Ts>(args)...);
break;
case ast_operator::ABS:
f.template operator()<ast_operator::ABS>(std::forward<Ts>(args)...);
break;
case ast_operator::RINT:
f.template operator()<ast_operator::RINT>(std::forward<Ts>(args)...);
break;
case ast_operator::BIT_INVERT:
f.template operator()<ast_operator::BIT_INVERT>(std::forward<Ts>(args)...);
break;
case ast_operator::NOT:
f.template operator()<ast_operator::NOT>(std::forward<Ts>(args)...);
break;
case ast_operator::CAST_TO_INT64:
f.template operator()<ast_operator::CAST_TO_INT64>(std::forward<Ts>(args)...);
break;
case ast_operator::CAST_TO_UINT64:
f.template operator()<ast_operator::CAST_TO_UINT64>(std::forward<Ts>(args)...);
break;
case ast_operator::CAST_TO_FLOAT64:
f.template operator()<ast_operator::CAST_TO_FLOAT64>(std::forward<Ts>(args)...);
break;
default: {
#ifndef __CUDA_ARCH__
CUDF_FAIL("Invalid operator.");
#else
CUDF_UNREACHABLE("Invalid operator.");
#endif
}
}
}
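/*
 * Dispatch sketch (illustrative functor, not part of the library): the runtime
 * operator value becomes a compile-time template argument of the supplied functor.
 *
 *   struct arity_of {
 *     template <ast_operator op>
 *     CUDF_HOST_DEVICE void operator()(cudf::size_type& out) const
 *     {
 *       out = operator_functor<op, false>::arity;
 *     }
 *   };
 *   cudf::size_type arity{};
 *   ast_operator_dispatcher(ast_operator::ADD, arity_of{}, arity);  // arity == 2
 */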
/**
* @brief Operator functor.
*
* This functor is templated on an `ast_operator`, with each template specialization defining a
* callable `operator()` that executes the operation. The functor specialization also has a member
* `arity` defining the number of operands that are accepted by the call to `operator()`. The
* `operator()` is templated on the types of its inputs (e.g. `typename LHS` and `typename RHS` for
* a binary operator). Trailing return types are defined as `decltype(result)` where `result` is
* the returned value. The trailing return types allow SFINAE to only consider template
* instantiations for valid combinations of types. This, in turn, allows the operator functors to be
* used with traits like `is_valid_binary_op` that rely on `std::is_invocable` and related features.
*
* @tparam op AST operator.
*/
template <ast_operator op, bool has_nulls>
struct operator_functor {};
template <>
struct operator_functor<ast_operator::ADD, false> {
static constexpr auto arity{2};
template <typename LHS, typename RHS>
__device__ inline auto operator()(LHS lhs, RHS rhs) -> decltype(lhs + rhs)
{
return lhs + rhs;
}
};
template <>
struct operator_functor<ast_operator::SUB, false> {
static constexpr auto arity{2};
template <typename LHS, typename RHS>
__device__ inline auto operator()(LHS lhs, RHS rhs) -> decltype(lhs - rhs)
{
return lhs - rhs;
}
};
template <>
struct operator_functor<ast_operator::MUL, false> {
static constexpr auto arity{2};
template <typename LHS, typename RHS>
__device__ inline auto operator()(LHS lhs, RHS rhs) -> decltype(lhs * rhs)
{
return lhs * rhs;
}
};
template <>
struct operator_functor<ast_operator::DIV, false> {
static constexpr auto arity{2};
template <typename LHS, typename RHS>
__device__ inline auto operator()(LHS lhs, RHS rhs) -> decltype(lhs / rhs)
{
return lhs / rhs;
}
};
template <>
struct operator_functor<ast_operator::TRUE_DIV, false> {
static constexpr auto arity{2};
template <typename LHS, typename RHS>
__device__ inline auto operator()(LHS lhs, RHS rhs)
-> decltype(static_cast<double>(lhs) / static_cast<double>(rhs))
{
return static_cast<double>(lhs) / static_cast<double>(rhs);
}
};
template <>
struct operator_functor<ast_operator::FLOOR_DIV, false> {
static constexpr auto arity{2};
template <typename LHS, typename RHS>
__device__ inline auto operator()(LHS lhs, RHS rhs)
-> decltype(floor(static_cast<double>(lhs) / static_cast<double>(rhs)))
{
return floor(static_cast<double>(lhs) / static_cast<double>(rhs));
}
};
template <>
struct operator_functor<ast_operator::MOD, false> {
static constexpr auto arity{2};
template <typename LHS,
typename RHS,
typename CommonType = std::common_type_t<LHS, RHS>,
std::enable_if_t<std::is_integral_v<CommonType>>* = nullptr>
__device__ inline auto operator()(LHS lhs, RHS rhs)
-> decltype(static_cast<CommonType>(lhs) % static_cast<CommonType>(rhs))
{
return static_cast<CommonType>(lhs) % static_cast<CommonType>(rhs);
}
template <typename LHS,
typename RHS,
typename CommonType = std::common_type_t<LHS, RHS>,
std::enable_if_t<std::is_same_v<CommonType, float>>* = nullptr>
__device__ inline auto operator()(LHS lhs, RHS rhs)
-> decltype(fmodf(static_cast<CommonType>(lhs), static_cast<CommonType>(rhs)))
{
return fmodf(static_cast<CommonType>(lhs), static_cast<CommonType>(rhs));
}
template <typename LHS,
typename RHS,
typename CommonType = std::common_type_t<LHS, RHS>,
std::enable_if_t<std::is_same_v<CommonType, double>>* = nullptr>
__device__ inline auto operator()(LHS lhs, RHS rhs)
-> decltype(fmod(static_cast<CommonType>(lhs), static_cast<CommonType>(rhs)))
{
return fmod(static_cast<CommonType>(lhs), static_cast<CommonType>(rhs));
}
};
template <>
struct operator_functor<ast_operator::PYMOD, false> {
static constexpr auto arity{2};
template <typename LHS,
typename RHS,
typename CommonType = std::common_type_t<LHS, RHS>,
std::enable_if_t<std::is_integral_v<CommonType>>* = nullptr>
__device__ inline auto operator()(LHS lhs, RHS rhs)
-> decltype(((static_cast<CommonType>(lhs) % static_cast<CommonType>(rhs)) +
static_cast<CommonType>(rhs)) %
static_cast<CommonType>(rhs))
{
return ((static_cast<CommonType>(lhs) % static_cast<CommonType>(rhs)) +
static_cast<CommonType>(rhs)) %
static_cast<CommonType>(rhs);
}
template <typename LHS,
typename RHS,
typename CommonType = std::common_type_t<LHS, RHS>,
std::enable_if_t<std::is_same_v<CommonType, float>>* = nullptr>
__device__ inline auto operator()(LHS lhs, RHS rhs)
-> decltype(fmodf(fmodf(static_cast<CommonType>(lhs), static_cast<CommonType>(rhs)) +
static_cast<CommonType>(rhs),
static_cast<CommonType>(rhs)))
{
return fmodf(fmodf(static_cast<CommonType>(lhs), static_cast<CommonType>(rhs)) +
static_cast<CommonType>(rhs),
static_cast<CommonType>(rhs));
}
template <typename LHS,
typename RHS,
typename CommonType = std::common_type_t<LHS, RHS>,
std::enable_if_t<std::is_same_v<CommonType, double>>* = nullptr>
__device__ inline auto operator()(LHS lhs, RHS rhs)
-> decltype(fmod(fmod(static_cast<CommonType>(lhs), static_cast<CommonType>(rhs)) +
static_cast<CommonType>(rhs),
static_cast<CommonType>(rhs)))
{
return fmod(fmod(static_cast<CommonType>(lhs), static_cast<CommonType>(rhs)) +
static_cast<CommonType>(rhs),
static_cast<CommonType>(rhs));
}
};
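// Worked example (illustrative): for lhs = -3 and rhs = 5, MOD yields -3
// (C++ truncated remainder), while PYMOD computes ((-3 % 5) + 5) % 5 == 2,
// matching Python's sign-follows-divisor convention.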
template <>
struct operator_functor<ast_operator::POW, false> {
static constexpr auto arity{2};
template <typename LHS, typename RHS>
__device__ inline auto operator()(LHS lhs, RHS rhs) -> decltype(std::pow(lhs, rhs))
{
return std::pow(lhs, rhs);
}
};
template <>
struct operator_functor<ast_operator::EQUAL, false> {
static constexpr auto arity{2};
template <typename LHS, typename RHS>
__device__ inline auto operator()(LHS lhs, RHS rhs) -> decltype(lhs == rhs)
{
return lhs == rhs;
}
};
// Alias NULL_EQUAL = EQUAL in the non-nullable case.
template <>
struct operator_functor<ast_operator::NULL_EQUAL, false>
: public operator_functor<ast_operator::EQUAL, false> {};
template <>
struct operator_functor<ast_operator::NOT_EQUAL, false> {
static constexpr auto arity{2};
template <typename LHS, typename RHS>
__device__ inline auto operator()(LHS lhs, RHS rhs) -> decltype(lhs != rhs)
{
return lhs != rhs;
}
};
template <>
struct operator_functor<ast_operator::LESS, false> {
static constexpr auto arity{2};
template <typename LHS, typename RHS>
__device__ inline auto operator()(LHS lhs, RHS rhs) -> decltype(lhs < rhs)
{
return lhs < rhs;
}
};
template <>
struct operator_functor<ast_operator::GREATER, false> {
static constexpr auto arity{2};
template <typename LHS, typename RHS>
__device__ inline auto operator()(LHS lhs, RHS rhs) -> decltype(lhs > rhs)
{
return lhs > rhs;
}
};
template <>
struct operator_functor<ast_operator::LESS_EQUAL, false> {
static constexpr auto arity{2};
template <typename LHS, typename RHS>
__device__ inline auto operator()(LHS lhs, RHS rhs) -> decltype(lhs <= rhs)
{
return lhs <= rhs;
}
};
template <>
struct operator_functor<ast_operator::GREATER_EQUAL, false> {
static constexpr auto arity{2};
template <typename LHS, typename RHS>
__device__ inline auto operator()(LHS lhs, RHS rhs) -> decltype(lhs >= rhs)
{
return lhs >= rhs;
}
};
template <>
struct operator_functor<ast_operator::BITWISE_AND, false> {
static constexpr auto arity{2};
template <typename LHS, typename RHS>
__device__ inline auto operator()(LHS lhs, RHS rhs) -> decltype(lhs & rhs)
{
return lhs & rhs;
}
};
template <>
struct operator_functor<ast_operator::BITWISE_OR, false> {
static constexpr auto arity{2};
template <typename LHS, typename RHS>
__device__ inline auto operator()(LHS lhs, RHS rhs) -> decltype(lhs | rhs)
{
return lhs | rhs;
}
};
template <>
struct operator_functor<ast_operator::BITWISE_XOR, false> {
static constexpr auto arity{2};
template <typename LHS, typename RHS>
__device__ inline auto operator()(LHS lhs, RHS rhs) -> decltype(lhs ^ rhs)
{
return lhs ^ rhs;
}
};
template <>
struct operator_functor<ast_operator::LOGICAL_AND, false> {
static constexpr auto arity{2};
template <typename LHS, typename RHS>
__device__ inline auto operator()(LHS lhs, RHS rhs) -> decltype(lhs && rhs)
{
return lhs && rhs;
}
};
// Alias NULL_LOGICAL_AND = LOGICAL_AND in the non-nullable case.
template <>
struct operator_functor<ast_operator::NULL_LOGICAL_AND, false>
: public operator_functor<ast_operator::LOGICAL_AND, false> {};
template <>
struct operator_functor<ast_operator::LOGICAL_OR, false> {
static constexpr auto arity{2};
template <typename LHS, typename RHS>
__device__ inline auto operator()(LHS lhs, RHS rhs) -> decltype(lhs || rhs)
{
return lhs || rhs;
}
};
// Alias NULL_LOGICAL_OR = LOGICAL_OR in the non-nullable case.
template <>
struct operator_functor<ast_operator::NULL_LOGICAL_OR, false>
: public operator_functor<ast_operator::LOGICAL_OR, false> {};
template <>
struct operator_functor<ast_operator::IDENTITY, false> {
static constexpr auto arity{1};
template <typename InputT>
__device__ inline auto operator()(InputT input) -> decltype(input)
{
return input;
}
};
template <>
struct operator_functor<ast_operator::IS_NULL, false> {
static constexpr auto arity{1};
template <typename InputT>
__device__ inline auto operator()(InputT input) -> bool
{
return false;
}
};
template <>
struct operator_functor<ast_operator::SIN, false> {
static constexpr auto arity{1};
template <typename InputT, std::enable_if_t<std::is_floating_point_v<InputT>>* = nullptr>
__device__ inline auto operator()(InputT input) -> decltype(std::sin(input))
{
return std::sin(input);
}
};
template <>
struct operator_functor<ast_operator::COS, false> {
static constexpr auto arity{1};
template <typename InputT, std::enable_if_t<std::is_floating_point_v<InputT>>* = nullptr>
__device__ inline auto operator()(InputT input) -> decltype(std::cos(input))
{
return std::cos(input);
}
};
template <>
struct operator_functor<ast_operator::TAN, false> {
static constexpr auto arity{1};
template <typename InputT, std::enable_if_t<std::is_floating_point_v<InputT>>* = nullptr>
__device__ inline auto operator()(InputT input) -> decltype(std::tan(input))
{
return std::tan(input);
}
};
template <>
struct operator_functor<ast_operator::ARCSIN, false> {
static constexpr auto arity{1};
template <typename InputT, std::enable_if_t<std::is_floating_point_v<InputT>>* = nullptr>
__device__ inline auto operator()(InputT input) -> decltype(std::asin(input))
{
return std::asin(input);
}
};
template <>
struct operator_functor<ast_operator::ARCCOS, false> {
static constexpr auto arity{1};
template <typename InputT, std::enable_if_t<std::is_floating_point_v<InputT>>* = nullptr>
__device__ inline auto operator()(InputT input) -> decltype(std::acos(input))
{
return std::acos(input);
}
};
template <>
struct operator_functor<ast_operator::ARCTAN, false> {
static constexpr auto arity{1};
template <typename InputT, std::enable_if_t<std::is_floating_point_v<InputT>>* = nullptr>
__device__ inline auto operator()(InputT input) -> decltype(std::atan(input))
{
return std::atan(input);
}
};
template <>
struct operator_functor<ast_operator::SINH, false> {
static constexpr auto arity{1};
template <typename InputT, std::enable_if_t<std::is_floating_point_v<InputT>>* = nullptr>
__device__ inline auto operator()(InputT input) -> decltype(std::sinh(input))
{
return std::sinh(input);
}
};
template <>
struct operator_functor<ast_operator::COSH, false> {
static constexpr auto arity{1};
template <typename InputT, std::enable_if_t<std::is_floating_point_v<InputT>>* = nullptr>
__device__ inline auto operator()(InputT input) -> decltype(std::cosh(input))
{
return std::cosh(input);
}
};
template <>
struct operator_functor<ast_operator::TANH, false> {
static constexpr auto arity{1};
template <typename InputT, std::enable_if_t<std::is_floating_point_v<InputT>>* = nullptr>
__device__ inline auto operator()(InputT input) -> decltype(std::tanh(input))
{
return std::tanh(input);
}
};
template <>
struct operator_functor<ast_operator::ARCSINH, false> {
static constexpr auto arity{1};
template <typename InputT, std::enable_if_t<std::is_floating_point_v<InputT>>* = nullptr>
__device__ inline auto operator()(InputT input) -> decltype(std::asinh(input))
{
return std::asinh(input);
}
};
template <>
struct operator_functor<ast_operator::ARCCOSH, false> {
static constexpr auto arity{1};
template <typename InputT, std::enable_if_t<std::is_floating_point_v<InputT>>* = nullptr>
__device__ inline auto operator()(InputT input) -> decltype(std::acosh(input))
{
return std::acosh(input);
}
};
template <>
struct operator_functor<ast_operator::ARCTANH, false> {
static constexpr auto arity{1};
template <typename InputT, std::enable_if_t<std::is_floating_point_v<InputT>>* = nullptr>
__device__ inline auto operator()(InputT input) -> decltype(std::atanh(input))
{
return std::atanh(input);
}
};
template <>
struct operator_functor<ast_operator::EXP, false> {
static constexpr auto arity{1};
template <typename InputT>
__device__ inline auto operator()(InputT input) -> decltype(std::exp(input))
{
return std::exp(input);
}
};
template <>
struct operator_functor<ast_operator::LOG, false> {
static constexpr auto arity{1};
template <typename InputT>
__device__ inline auto operator()(InputT input) -> decltype(std::log(input))
{
return std::log(input);
}
};
template <>
struct operator_functor<ast_operator::SQRT, false> {
static constexpr auto arity{1};
template <typename InputT>
__device__ inline auto operator()(InputT input) -> decltype(std::sqrt(input))
{
return std::sqrt(input);
}
};
template <>
struct operator_functor<ast_operator::CBRT, false> {
static constexpr auto arity{1};
template <typename InputT>
__device__ inline auto operator()(InputT input) -> decltype(std::cbrt(input))
{
return std::cbrt(input);
}
};
template <>
struct operator_functor<ast_operator::CEIL, false> {
static constexpr auto arity{1};
template <typename InputT>
__device__ inline auto operator()(InputT input) -> decltype(std::ceil(input))
{
return std::ceil(input);
}
};
template <>
struct operator_functor<ast_operator::FLOOR, false> {
static constexpr auto arity{1};
template <typename InputT>
__device__ inline auto operator()(InputT input) -> decltype(std::floor(input))
{
return std::floor(input);
}
};
template <>
struct operator_functor<ast_operator::ABS, false> {
static constexpr auto arity{1};
// Only accept signed or unsigned types (both require is_arithmetic<T> to be true)
template <typename InputT, std::enable_if_t<std::is_signed_v<InputT>>* = nullptr>
__device__ inline auto operator()(InputT input) -> decltype(std::abs(input))
{
return std::abs(input);
}
template <typename InputT, std::enable_if_t<std::is_unsigned_v<InputT>>* = nullptr>
__device__ inline auto operator()(InputT input) -> decltype(input)
{
return input;
}
};
template <>
struct operator_functor<ast_operator::RINT, false> {
static constexpr auto arity{1};
template <typename InputT>
__device__ inline auto operator()(InputT input) -> decltype(std::rint(input))
{
return std::rint(input);
}
};
template <>
struct operator_functor<ast_operator::BIT_INVERT, false> {
static constexpr auto arity{1};
template <typename InputT>
__device__ inline auto operator()(InputT input) -> decltype(~input)
{
return ~input;
}
};
template <>
struct operator_functor<ast_operator::NOT, false> {
static constexpr auto arity{1};
template <typename InputT>
__device__ inline auto operator()(InputT input) -> decltype(!input)
{
return !input;
}
};
template <typename To>
struct cast {
static constexpr auto arity{1};
template <typename From>
__device__ inline auto operator()(From f) -> decltype(static_cast<To>(f))
{
return static_cast<To>(f);
}
};
template <>
struct operator_functor<ast_operator::CAST_TO_INT64, false> : cast<int64_t> {};
template <>
struct operator_functor<ast_operator::CAST_TO_UINT64, false> : cast<uint64_t> {};
template <>
struct operator_functor<ast_operator::CAST_TO_FLOAT64, false> : cast<double> {};
/*
* The default specialization of nullable operators is to fall back to the non-nullable
* implementation
*/
template <ast_operator op>
struct operator_functor<op, true> {
using NonNullOperator = operator_functor<op, false>;
static constexpr auto arity = NonNullOperator::arity;
template <typename LHS,
typename RHS,
std::size_t arity_placeholder = arity,
std::enable_if_t<arity_placeholder == 2>* = nullptr>
__device__ inline auto operator()(LHS const lhs, RHS const rhs)
-> possibly_null_value_t<decltype(NonNullOperator{}(*lhs, *rhs)), true>
{
using Out = possibly_null_value_t<decltype(NonNullOperator{}(*lhs, *rhs)), true>;
return (lhs.has_value() && rhs.has_value()) ? Out{NonNullOperator{}(*lhs, *rhs)} : Out{};
}
template <typename Input,
std::size_t arity_placeholder = arity,
std::enable_if_t<arity_placeholder == 1>* = nullptr>
__device__ inline auto operator()(Input const input)
-> possibly_null_value_t<decltype(NonNullOperator{}(*input)), true>
{
using Out = possibly_null_value_t<decltype(NonNullOperator{}(*input)), true>;
return input.has_value() ? Out{NonNullOperator{}(*input)} : Out{};
}
};
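/*
 * Null-propagation sketch (device-side, illustrative): with has_nulls == true the
 * operands arrive as thrust::optional values and any missing input nulls the result.
 *
 *   using nullable_add = operator_functor<ast_operator::ADD, true>;
 *   thrust::optional<int32_t> a{2}, b{3}, none{};
 *   nullable_add{}(a, b);     // optional containing 5
 *   nullable_add{}(a, none);  // empty optional (null result)
 */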
// IS_NULL(null) is true, IS_NULL(valid) is false
template <>
struct operator_functor<ast_operator::IS_NULL, true> {
using NonNullOperator = operator_functor<ast_operator::IS_NULL, false>;
static constexpr auto arity = NonNullOperator::arity;
template <typename LHS>
__device__ inline auto operator()(LHS const lhs) -> decltype(!lhs.has_value())
{
return !lhs.has_value();
}
};
// NULL_EQUAL(null, null) is true, NULL_EQUAL(null, valid) is false, and NULL_EQUAL(valid, valid) ==
// EQUAL(valid, valid)
template <>
struct operator_functor<ast_operator::NULL_EQUAL, true> {
using NonNullOperator = operator_functor<ast_operator::NULL_EQUAL, false>;
static constexpr auto arity = NonNullOperator::arity;
template <typename LHS, typename RHS>
__device__ inline auto operator()(LHS const lhs, RHS const rhs)
-> possibly_null_value_t<decltype(NonNullOperator{}(*lhs, *rhs)), true>
{
// Case 1: Neither is null, so the output is given by the operation.
if (lhs.has_value() && rhs.has_value()) { return {NonNullOperator{}(*lhs, *rhs)}; }
// Case 2: Two nulls compare equal.
if (!lhs.has_value() && !rhs.has_value()) { return {true}; }
// Case 3: One value is null, while the other is not, so we return false.
return {false};
}
};
///< NULL_LOGICAL_AND(null, null) is null, NULL_LOGICAL_AND(null, true) is null,
///< NULL_LOGICAL_AND(null, false) is false, and NULL_LOGICAL_AND(valid, valid) ==
///< LOGICAL_AND(valid, valid)
template <>
struct operator_functor<ast_operator::NULL_LOGICAL_AND, true> {
using NonNullOperator = operator_functor<ast_operator::NULL_LOGICAL_AND, false>;
static constexpr auto arity = NonNullOperator::arity;
template <typename LHS, typename RHS>
__device__ inline auto operator()(LHS const lhs, RHS const rhs)
-> possibly_null_value_t<decltype(NonNullOperator{}(*lhs, *rhs)), true>
{
// Case 1: Neither is null, so the output is given by the operation.
if (lhs.has_value() && rhs.has_value()) { return {NonNullOperator{}(*lhs, *rhs)}; }
// Case 2: Two nulls return null.
if (!lhs.has_value() && !rhs.has_value()) { return {}; }
// Case 3: One value is null, while the other is not. If it's true we return null, otherwise we
// return false.
auto const& valid_element = lhs.has_value() ? lhs : rhs;
if (*valid_element) { return {}; }
return {false};
}
};
///< NULL_LOGICAL_OR(null, null) is null, NULL_LOGICAL_OR(null, true) is true, NULL_LOGICAL_OR(null,
///< false) is null, and NULL_LOGICAL_OR(valid, valid) == LOGICAL_OR(valid, valid)
template <>
struct operator_functor<ast_operator::NULL_LOGICAL_OR, true> {
using NonNullOperator = operator_functor<ast_operator::NULL_LOGICAL_OR, false>;
static constexpr auto arity = NonNullOperator::arity;
template <typename LHS, typename RHS>
__device__ inline auto operator()(LHS const lhs, RHS const rhs)
-> possibly_null_value_t<decltype(NonNullOperator{}(*lhs, *rhs)), true>
{
// Case 1: Neither is null, so the output is given by the operation.
if (lhs.has_value() && rhs.has_value()) { return {NonNullOperator{}(*lhs, *rhs)}; }
// Case 2: Two nulls return null.
if (!lhs.has_value() && !rhs.has_value()) { return {}; }
// Case 3: One value is null, while the other is not. If it's true we return true, otherwise we
// return null.
auto const& valid_element = lhs.has_value() ? lhs : rhs;
if (*valid_element) { return {true}; }
return {};
}
};
/**
* @brief Functor used to single-type-dispatch binary operators.
*
* This functor's `operator()` is templated to validate calls to its operators based on the input
* type, as determined by the `is_valid_binary_op` trait. This function assumes that both inputs are
* the same type, and dispatches based on the type of the left input.
*
* @tparam OperatorFunctor Binary operator functor.
*/
template <typename OperatorFunctor>
struct single_dispatch_binary_operator_types {
template <typename LHS,
typename F,
typename... Ts,
std::enable_if_t<is_valid_binary_op<OperatorFunctor, LHS, LHS>>* = nullptr>
CUDF_HOST_DEVICE inline void operator()(F&& f, Ts&&... args)
{
f.template operator()<OperatorFunctor, LHS, LHS>(std::forward<Ts>(args)...);
}
template <typename LHS,
typename F,
typename... Ts,
std::enable_if_t<!is_valid_binary_op<OperatorFunctor, LHS, LHS>>* = nullptr>
CUDF_HOST_DEVICE inline void operator()(F&& f, Ts&&... args)
{
#ifndef __CUDA_ARCH__
CUDF_FAIL("Invalid binary operation.");
#else
CUDF_UNREACHABLE("Invalid binary operation.");
#endif
}
};
/**
* @brief Functor performing a type dispatch for a binary operator.
*
* This functor performs single dispatch, which assumes lhs_type == rhs_type. This may not be true
* for all binary operators but holds for all currently implemented operators.
*/
struct type_dispatch_binary_op {
/**
* @brief Performs type dispatch for a binary operator.
*
* @tparam op AST operator.
* @tparam F Type of forwarded functor.
* @tparam Ts Parameter pack of forwarded arguments.
* @param lhs_type Type of left input data.
* @param rhs_type Type of right input data.
* @param f Forwarded functor to be called.
* @param args Forwarded arguments to `operator()` of `f`.
*/
template <ast_operator op, typename F, typename... Ts>
CUDF_HOST_DEVICE inline void operator()(cudf::data_type lhs_type,
cudf::data_type rhs_type,
F&& f,
Ts&&... args)
{
// Single dispatch (assume lhs_type == rhs_type)
type_dispatcher(
lhs_type,
// Always dispatch to the non-null operator for the purpose of type determination.
detail::single_dispatch_binary_operator_types<operator_functor<op, false>>{},
std::forward<F>(f),
std::forward<Ts>(args)...);
}
};
/**
* @brief Dispatches a runtime binary operator to a templated type dispatcher.
*
* @tparam F Type of forwarded functor.
* @tparam Ts Parameter pack of forwarded arguments.
* @param lhs_type Type of left input data.
* @param rhs_type Type of right input data.
* @param f Forwarded functor to be called.
* @param args Forwarded arguments to `operator()` of `f`.
*/
template <typename F, typename... Ts>
CUDF_HOST_DEVICE inline constexpr void binary_operator_dispatcher(
ast_operator op, cudf::data_type lhs_type, cudf::data_type rhs_type, F&& f, Ts&&... args)
{
ast_operator_dispatcher(op,
detail::type_dispatch_binary_op{},
lhs_type,
rhs_type,
std::forward<F>(f),
std::forward<Ts>(args)...);
}
/**
* @brief Functor used to type-dispatch unary operators.
*
* This functor's `operator()` is templated to validate calls to its operators based on the input
* type, as determined by the `is_valid_unary_op` trait.
*
* @tparam OperatorFunctor Unary operator functor.
*/
template <typename OperatorFunctor>
struct dispatch_unary_operator_types {
template <typename InputT,
typename F,
typename... Ts,
std::enable_if_t<is_valid_unary_op<OperatorFunctor, InputT>>* = nullptr>
CUDF_HOST_DEVICE inline void operator()(F&& f, Ts&&... args)
{
f.template operator()<OperatorFunctor, InputT>(std::forward<Ts>(args)...);
}
template <typename InputT,
typename F,
typename... Ts,
std::enable_if_t<!is_valid_unary_op<OperatorFunctor, InputT>>* = nullptr>
CUDF_HOST_DEVICE inline void operator()(F&& f, Ts&&... args)
{
#ifndef __CUDA_ARCH__
CUDF_FAIL("Invalid unary operation.");
#else
CUDF_UNREACHABLE("Invalid unary operation.");
#endif
}
};
/**
* @brief Functor performing a type dispatch for a unary operator.
*/
struct type_dispatch_unary_op {
template <ast_operator op, typename F, typename... Ts>
CUDF_HOST_DEVICE inline void operator()(cudf::data_type input_type, F&& f, Ts&&... args)
{
type_dispatcher(
input_type,
// Always dispatch to the non-null operator for the purpose of type determination.
detail::dispatch_unary_operator_types<operator_functor<op, false>>{},
std::forward<F>(f),
std::forward<Ts>(args)...);
}
};
/**
* @brief Dispatches a runtime unary operator to a templated type dispatcher.
*
* @tparam F Type of forwarded functor.
* @tparam Ts Parameter pack of forwarded arguments.
* @param input_type Type of input data.
* @param f Forwarded functor to be called.
* @param args Forwarded arguments to `operator()` of `f`.
*/
template <typename F, typename... Ts>
CUDF_HOST_DEVICE inline constexpr void unary_operator_dispatcher(ast_operator op,
cudf::data_type input_type,
F&& f,
Ts&&... args)
{
ast_operator_dispatcher(op,
detail::type_dispatch_unary_op{},
input_type,
std::forward<F>(f),
std::forward<Ts>(args)...);
}
/**
* @brief Functor to determine the return type of an operator from its input types.
*/
struct return_type_functor {
/**
* @brief Callable for binary operators to determine return type.
*
* @tparam OperatorFunctor Operator functor to perform.
* @tparam LHS Left input type.
* @tparam RHS Right input type.
* @param result Reference whose value is assigned to the result data type.
*/
template <typename OperatorFunctor,
typename LHS,
typename RHS,
std::enable_if_t<is_valid_binary_op<OperatorFunctor, LHS, RHS>>* = nullptr>
CUDF_HOST_DEVICE inline void operator()(cudf::data_type& result)
{
using Out = cuda::std::invoke_result_t<OperatorFunctor, LHS, RHS>;
result = cudf::data_type(cudf::type_to_id<Out>());
}
template <typename OperatorFunctor,
typename LHS,
typename RHS,
std::enable_if_t<!is_valid_binary_op<OperatorFunctor, LHS, RHS>>* = nullptr>
CUDF_HOST_DEVICE inline void operator()(cudf::data_type& result)
{
#ifndef __CUDA_ARCH__
CUDF_FAIL("Invalid binary operation. Return type cannot be determined.");
#else
CUDF_UNREACHABLE("Invalid binary operation. Return type cannot be determined.");
#endif
}
/**
* @brief Callable for unary operators to determine return type.
*
* @tparam OperatorFunctor Operator functor to perform.
* @tparam T Input type.
* @param result Pointer whose value is assigned to the result data type.
*/
template <typename OperatorFunctor,
typename T,
std::enable_if_t<is_valid_unary_op<OperatorFunctor, T>>* = nullptr>
CUDF_HOST_DEVICE inline void operator()(cudf::data_type& result)
{
using Out = cuda::std::invoke_result_t<OperatorFunctor, T>;
result = cudf::data_type(cudf::type_to_id<Out>());
}
template <typename OperatorFunctor,
typename T,
std::enable_if_t<!is_valid_unary_op<OperatorFunctor, T>>* = nullptr>
CUDF_HOST_DEVICE inline void operator()(cudf::data_type& result)
{
#ifndef __CUDA_ARCH__
CUDF_FAIL("Invalid unary operation. Return type cannot be determined.");
#else
CUDF_UNREACHABLE("Invalid unary operation. Return type cannot be determined.");
#endif
}
};
/**
* @brief Gets the return type of an AST operator.
*
* @param op Operator used to evaluate return type.
* @param operand_types Vector of input types to the operator.
* @return cudf::data_type Return type of the operator.
*/
inline cudf::data_type ast_operator_return_type(ast_operator op,
std::vector<cudf::data_type> const& operand_types)
{
auto result = cudf::data_type(cudf::type_id::EMPTY);
switch (operand_types.size()) {
case 1:
unary_operator_dispatcher(op, operand_types[0], detail::return_type_functor{}, result);
break;
case 2:
binary_operator_dispatcher(
op, operand_types[0], operand_types[1], detail::return_type_functor{}, result);
break;
default: CUDF_FAIL("Unsupported operator return type."); break;
}
return result;
}
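/*
 * Return-type sketch (illustrative): callers can query the operator's output type
 * on the host before allocating results.
 *
 *   auto const out_type = ast_operator_return_type(
 *     ast_operator::LESS,
 *     {cudf::data_type{cudf::type_id::INT32}, cudf::data_type{cudf::type_id::INT32}});
 *   // out_type.id() == cudf::type_id::BOOL8
 */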
/**
* @brief Functor to determine the arity (number of operands) of an operator.
*/
struct arity_functor {
template <ast_operator op>
CUDF_HOST_DEVICE inline void operator()(cudf::size_type& result)
{
// Arity is not dependent on null handling, so just use the false implementation here.
result = operator_functor<op, false>::arity;
}
};
/**
* @brief Gets the arity (number of operands) of an AST operator.
*
* @param op Operator used to determine arity.
* @return Arity of the operator.
*/
CUDF_HOST_DEVICE inline cudf::size_type ast_operator_arity(ast_operator op)
{
auto result = cudf::size_type(0);
ast_operator_dispatcher(op, detail::arity_functor{}, result);
return result;
}
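// For example (illustrative): ast_operator_arity(ast_operator::ADD) == 2 and
// ast_operator_arity(ast_operator::SIN) == 1.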
} // namespace detail
} // namespace ast
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/ast
|
rapidsai_public_repos/cudf/cpp/include/cudf/ast/detail/expression_transformer.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/ast/expressions.hpp>
namespace cudf::ast::detail {
/**
* @brief Base "visitor" pattern class with the `expression` class for expression transformer.
*
* This class can be used to implement recursive traversal of AST tree, and used to validate or
* translate an AST expression.
*/
class expression_transformer {
public:
/**
* @brief Visit a literal expression.
*
* @param expr Literal expression
* @return Reference wrapper of transformed expression
*/
virtual std::reference_wrapper<expression const> visit(literal const& expr) = 0;
/**
* @brief Visit a column reference expression.
*
* @param expr Column reference expression
* @return Reference wrapper of transformed expression
*/
virtual std::reference_wrapper<expression const> visit(column_reference const& expr) = 0;
/**
 * @brief Visit an operation expression.
 *
 * @param expr Operation expression
* @return Reference wrapper of transformed expression
*/
virtual std::reference_wrapper<expression const> visit(operation const& expr) = 0;
/**
* @brief Visit a column name reference expression.
*
* @param expr Column name reference expression
* @return Reference wrapper of transformed expression
*/
virtual std::reference_wrapper<expression const> visit(column_name_reference const& expr) = 0;
virtual ~expression_transformer() {}
};
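/*
 * Transformer sketch (illustrative subclass, not part of the library): an identity
 * transformer returns every node unchanged; a real implementation would rewrite
 * nodes, e.g. resolving column_name_reference nodes to column_reference nodes once
 * a schema is known.
 *
 *   class identity_transformer : public expression_transformer {
 *    public:
 *     std::reference_wrapper<expression const> visit(literal const& e) override { return e; }
 *     std::reference_wrapper<expression const> visit(column_reference const& e) override
 *     {
 *       return e;
 *     }
 *     std::reference_wrapper<expression const> visit(operation const& e) override { return e; }
 *     std::reference_wrapper<expression const> visit(column_name_reference const& e) override
 *     {
 *       return e;
 *     }
 *   };
 */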
} // namespace cudf::ast::detail
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/ast
|
rapidsai_public_repos/cudf/cpp/include/cudf/ast/detail/expression_evaluator.cuh
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/ast/detail/expression_parser.hpp>
#include <cudf/ast/detail/operators.hpp>
#include <cudf/ast/expressions.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/utilities/assert.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/optional.h>
namespace cudf {
namespace ast {
namespace detail {
/**
* @brief A container for capturing the output of an evaluated expression.
*
* This class is designed to be passed by reference as the first argument to
* expression_evaluator::evaluate. The API is designed such that template
* specializations for specific output types will be able to customize setting
* behavior if necessary. The class leverages CRTP to define a suitable interface
* for the `expression_evaluator` at compile-time and enforce this API on its
* subclasses to get around the lack of device-side polymorphism.
*
* @tparam Subclass The subclass to dispatch methods to.
* @tparam T The underlying data type.
* @tparam has_nulls Whether or not the result data is nullable.
*/
template <typename Subclass, typename T, bool has_nulls>
struct expression_result {
/**
* Helper function to get the subclass type to dispatch methods to.
*/
__device__ inline Subclass& subclass() { return static_cast<Subclass&>(*this); }
__device__ inline Subclass const& subclass() const { return static_cast<Subclass const&>(*this); }
// TODO: The index is ignored by the value subclass, but is included in this
// signature because it is required by the implementation in the template
// specialization for column views. It would be nice to clean this up, see
// the related TODO below. Note that storing the index in the class on
// construction (which would result in a cleaner delineation of the API for
// the derived types) results in a significant performance penalty because
// the index is pushed down the memory hierarchy by the time it needs to be
// used, whereas passing it as a parameter keeps it in registers for fast
// access at the point where indexing occurs.
template <typename Element>
__device__ inline void set_value(cudf::size_type index,
possibly_null_value_t<Element, has_nulls> const& result)
{
subclass().template set_value<Element>(index, result);
}
[[nodiscard]] __device__ inline bool is_valid() const { return subclass().is_valid(); }
__device__ inline T value() const { return subclass().value(); }
};
/**
* @brief A container for capturing the output of an evaluated expression in a scalar.
*
* This subclass of `expression_result` functions as an owning container of a
* (possibly nullable) scalar type that can be written to by the
* expression_evaluator. The data (and its validity) can then be accessed.
*
* @tparam T The underlying data type.
* @tparam has_nulls Whether or not the result data is nullable.
*/
template <typename T, bool has_nulls>
struct value_expression_result
: public expression_result<value_expression_result<T, has_nulls>, T, has_nulls> {
__device__ inline value_expression_result() {}
template <typename Element>
__device__ inline void set_value(cudf::size_type index,
possibly_null_value_t<Element, has_nulls> const& result)
{
if constexpr (std::is_same_v<Element, T>) {
_obj = result;
} else {
CUDF_UNREACHABLE("Output type does not match container type.");
}
}
/**
* @brief Returns true if the underlying data is valid and false otherwise.
*/
[[nodiscard]] __device__ inline bool is_valid() const
{
if constexpr (has_nulls) { return _obj.has_value(); }
return true;
}
/**
* @brief Returns the underlying data.
*
* If the underlying data is not valid, behavior is undefined. Callers should
* use is_valid to check for validity before accessing the value.
*/
__device__ inline T value() const
{
// Using two separate constexprs silences compiler warnings, whereas an
// if/else does not. An unconditional return is not ignored by the compiler
// when has_nulls is true and therefore raises a compiler error.
if constexpr (has_nulls) { return *_obj; }
if constexpr (!has_nulls) { return _obj; }
}
possibly_null_value_t<T, has_nulls>
_obj; ///< The underlying data value, or a nullable version of it.
};
// TODO: The below implementation significantly differs from the default
// implementation above due to the non-owning nature of the container and the
// usage of the index. It would be ideal to unify these further if possible.
/**
* @brief A container for capturing the output of an evaluated expression in a column.
*
* This subclass of `expression_result` functions as a non-owning container
 * that transparently passes calls through to an underlying mutable view of a
 * column. Not all methods are implemented.
*
* @tparam has_nulls Whether or not the result data is nullable.
*/
template <bool has_nulls>
struct mutable_column_expression_result
: public expression_result<mutable_column_expression_result<has_nulls>,
mutable_column_device_view,
has_nulls> {
__device__ inline mutable_column_expression_result(mutable_column_device_view& obj) : _obj(obj) {}
template <typename Element>
__device__ inline void set_value(cudf::size_type index,
possibly_null_value_t<Element, has_nulls> const& result)
{
if constexpr (has_nulls) {
if (result.has_value()) {
_obj.template element<Element>(index) = *result;
_obj.set_valid(index);
} else {
_obj.set_null(index);
}
} else {
_obj.template element<Element>(index) = result;
}
}
/**
* @brief Not implemented for this specialization.
*/
[[nodiscard]] __device__ inline bool is_valid() const
{
// Not implemented since it would require modifying the API in the parent class to accept an
// index.
CUDF_UNREACHABLE("This method is not implemented.");
}
/**
* @brief Not implemented for this specialization.
*/
[[nodiscard]] __device__ inline mutable_column_device_view value() const
{
// Not implemented since it would require modifying the API in the parent class to accept an
// index.
CUDF_UNREACHABLE("This method is not implemented.");
}
mutable_column_device_view& _obj; ///< The column to which the data is written.
};
/**
* @brief Dispatch to a binary operator based on a single data type.
*
* This functor is a dispatcher for binary operations that assumes that both
* operands are of the same type. This assumption is encoded in the
* non-deducible template parameter LHS, the type of the left-hand operand,
* which is then used as the template parameter for both the left and right
* operands to the binary operator f.
*/
struct single_dispatch_binary_operator {
/**
* @brief Single-type dispatch to a binary operation.
*
* @tparam LHS Left input type.
* @tparam F Type of forwarded binary operator functor.
* @tparam Ts Parameter pack of forwarded arguments.
*
* @param f Binary operator functor.
* @param args Forwarded arguments to `operator()` of `f`.
*/
template <typename LHS, typename F, typename... Ts>
__device__ inline auto operator()(F&& f, Ts&&... args)
{
f.template operator()<LHS, LHS>(std::forward<Ts>(args)...);
}
};
/**
* @brief The principal object for evaluating AST expressions on device.
*
* This class is designed for n-ary transform evaluation. It operates on two
* tables.
*/
template <bool has_nulls>
struct expression_evaluator {
public:
/**
* @brief Construct an expression evaluator acting on two tables.
*
* @param left View of the left table view used for evaluation.
* @param right View of the right table view used for evaluation.
* @param plan The collection of device references representing the expression to evaluate.
*/
__device__ inline expression_evaluator(table_device_view const& left,
table_device_view const& right,
expression_device_view const& plan)
: left(left), right(right), plan(plan)
{
}
/**
* @brief Construct an expression evaluator acting on one table.
*
* @param table View of the table view used for evaluation.
* @param plan The collection of device references representing the expression to evaluate.
*/
__device__ inline expression_evaluator(table_device_view const& table,
expression_device_view const& plan)
: expression_evaluator(table, table, plan)
{
}
/**
* @brief Resolves an input data reference into a value.
*
* Only input columns (COLUMN), literal values (LITERAL), and intermediates (INTERMEDIATE) are
* supported as input data references. Intermediates must be of fixed width less than or equal to
* sizeof(std::int64_t). This requirement on intermediates is enforced by the linearizer.
*
   * @tparam Element Type of element to return.
   * @param input_reference Data reference to resolve.
   * @param thread_intermediate_storage Pointer to this thread's portion of shared memory for
   * storing intermediates.
   * @param left_row_index Row index to use when resolving data from the left table.
   * @param right_row_index Row index to use when resolving data from the right table. Must be
   * provided whenever the resolved reference points to the right table.
   * @return Element The type- and null-resolved data.
*/
template <typename Element, CUDF_ENABLE_IF(column_device_view::has_element_accessor<Element>())>
__device__ inline possibly_null_value_t<Element, has_nulls> resolve_input(
detail::device_data_reference const& input_reference,
IntermediateDataType<has_nulls>* thread_intermediate_storage,
cudf::size_type left_row_index,
thrust::optional<cudf::size_type> right_row_index = {}) const
{
// TODO: Everywhere in the code assumes that the table reference is either
// left or right. Should we error-check somewhere to prevent
// table_reference::OUTPUT from being specified?
using ReturnType = possibly_null_value_t<Element, has_nulls>;
if (input_reference.reference_type == detail::device_data_reference_type::COLUMN) {
// If we have nullable data, return an empty nullable type with no value if the data is null.
auto const& table = (input_reference.table_source == table_reference::LEFT) ? left : right;
// Note that the code below assumes that a right index has been passed in
// any case where input_reference.table_source == table_reference::RIGHT.
// Otherwise, behavior is undefined.
auto const row_index =
(input_reference.table_source == table_reference::LEFT) ? left_row_index : *right_row_index;
if constexpr (has_nulls) {
return table.column(input_reference.data_index).is_valid(row_index)
? ReturnType(table.column(input_reference.data_index).element<Element>(row_index))
: ReturnType();
} else {
return ReturnType(table.column(input_reference.data_index).element<Element>(row_index));
}
} else if (input_reference.reference_type == detail::device_data_reference_type::LITERAL) {
if constexpr (has_nulls) {
return plan.literals[input_reference.data_index].is_valid()
? ReturnType(plan.literals[input_reference.data_index].value<Element>())
: ReturnType();
} else {
return ReturnType(plan.literals[input_reference.data_index].value<Element>());
}
} else { // Assumes input_reference.reference_type ==
// detail::device_data_reference_type::INTERMEDIATE
// Using memcpy instead of reinterpret_cast<Element*> for safe type aliasing
// Using a temporary variable ensures that the compiler knows the result is aligned
IntermediateDataType<has_nulls> intermediate =
thread_intermediate_storage[input_reference.data_index];
ReturnType tmp;
memcpy(&tmp, &intermediate, sizeof(ReturnType));
return tmp;
}
// Unreachable return used to silence compiler warnings.
return {};
}
template <typename Element,
CUDF_ENABLE_IF(not column_device_view::has_element_accessor<Element>())>
__device__ inline possibly_null_value_t<Element, has_nulls> resolve_input(
detail::device_data_reference const& device_data_reference,
IntermediateDataType<has_nulls>* thread_intermediate_storage,
cudf::size_type left_row_index,
thrust::optional<cudf::size_type> right_row_index = {}) const
{
CUDF_UNREACHABLE("Unsupported type in resolve_input.");
}
/**
* @brief Callable to perform a unary operation.
*
* @tparam Input Type of input value.
* @tparam OutputType The container type that data will be inserted into.
*
* @param output_object The container that data will be inserted into.
* @param input_row_index The row to pull the data from the input table.
* @param input Input data reference.
* @param output Output data reference.
* @param output_row_index The row in the output to insert the result.
* @param op The operator to act with.
*/
template <typename Input, typename ResultSubclass, typename T, bool result_has_nulls>
__device__ inline void operator()(
expression_result<ResultSubclass, T, result_has_nulls>& output_object,
cudf::size_type const input_row_index,
detail::device_data_reference const& input,
detail::device_data_reference const& output,
cudf::size_type const output_row_index,
ast_operator const op,
IntermediateDataType<has_nulls>* thread_intermediate_storage) const
{
auto const typed_input =
resolve_input<Input>(input, thread_intermediate_storage, input_row_index);
ast_operator_dispatcher(op,
unary_expression_output_handler<Input>{},
output_object,
output_row_index,
typed_input,
output,
thread_intermediate_storage);
}
/**
* @brief Callable to perform a binary operation.
*
* @tparam LHS Type of the left input value.
* @tparam RHS Type of the right input value.
* @tparam OutputType The container type that data will be inserted into.
*
* @param output_object The container that data will be inserted into.
* @param left_row_index The row to pull the data from the left table.
* @param right_row_index The row to pull the data from the right table.
* @param lhs Left input data reference.
* @param rhs Right input data reference.
* @param output Output data reference.
* @param output_row_index The row in the output to insert the result.
* @param op The operator to act with.
*/
template <typename LHS, typename RHS, typename ResultSubclass, typename T, bool result_has_nulls>
__device__ inline void operator()(
expression_result<ResultSubclass, T, result_has_nulls>& output_object,
cudf::size_type const left_row_index,
cudf::size_type const right_row_index,
detail::device_data_reference const& lhs,
detail::device_data_reference const& rhs,
detail::device_data_reference const& output,
cudf::size_type const output_row_index,
ast_operator const op,
IntermediateDataType<has_nulls>* thread_intermediate_storage) const
{
auto const typed_lhs =
resolve_input<LHS>(lhs, thread_intermediate_storage, left_row_index, right_row_index);
auto const typed_rhs =
resolve_input<RHS>(rhs, thread_intermediate_storage, left_row_index, right_row_index);
ast_operator_dispatcher(op,
binary_expression_output_handler<LHS, RHS>{},
output_object,
output_row_index,
typed_lhs,
typed_rhs,
output,
thread_intermediate_storage);
}
/**
* @brief Evaluate an expression applied to a row.
*
* This function performs an n-ary transform for one row on one thread.
*
* @tparam OutputType The container type that data will be inserted into.
*
* @param output_object The container that data will be inserted into.
* @param row_index Row index of all input and output data column(s).
*/
template <typename ResultSubclass, typename T, bool result_has_nulls>
__device__ __forceinline__ void evaluate(
expression_result<ResultSubclass, T, result_has_nulls>& output_object,
cudf::size_type const row_index,
IntermediateDataType<has_nulls>* thread_intermediate_storage) const
{
evaluate(output_object, row_index, row_index, row_index, thread_intermediate_storage);
}
/**
* @brief Evaluate an expression applied to a row.
*
* This function performs an n-ary transform for one row on one thread.
*
* @tparam OutputType The container type that data will be inserted into.
*
* @param output_object The container that data will be inserted into.
* @param left_row_index The row to pull the data from the left table.
* @param right_row_index The row to pull the data from the right table.
* @param output_row_index The row in the output to insert the result.
*/
template <typename ResultSubclass, typename T, bool result_has_nulls>
__device__ __forceinline__ void evaluate(
expression_result<ResultSubclass, T, result_has_nulls>& output_object,
cudf::size_type const left_row_index,
cudf::size_type const right_row_index,
cudf::size_type const output_row_index,
IntermediateDataType<has_nulls>* thread_intermediate_storage) const
{
cudf::size_type operator_source_index{0};
for (cudf::size_type operator_index = 0; operator_index < plan.operators.size();
++operator_index) {
// Execute operator
auto const op = plan.operators[operator_index];
auto const arity = ast_operator_arity(op);
if (arity == 1) {
// Unary operator
auto const& input =
plan.data_references[plan.operator_source_indices[operator_source_index++]];
auto const& output =
plan.data_references[plan.operator_source_indices[operator_source_index++]];
auto input_row_index =
input.table_source == table_reference::LEFT ? left_row_index : right_row_index;
type_dispatcher(input.data_type,
*this,
output_object,
input_row_index,
input,
output,
output_row_index,
op,
thread_intermediate_storage);
} else if (arity == 2) {
// Binary operator
auto const& lhs =
plan.data_references[plan.operator_source_indices[operator_source_index++]];
auto const& rhs =
plan.data_references[plan.operator_source_indices[operator_source_index++]];
auto const& output =
plan.data_references[plan.operator_source_indices[operator_source_index++]];
type_dispatcher(lhs.data_type,
detail::single_dispatch_binary_operator{},
*this,
output_object,
left_row_index,
right_row_index,
lhs,
rhs,
output,
output_row_index,
op,
thread_intermediate_storage);
} else {
CUDF_UNREACHABLE("Invalid operator arity.");
}
}
}
private:
/**
* @brief Helper struct for type dispatch on the result of an expression.
*
* Evaluating an expression requires multiple levels of type dispatch to
* determine the input types, the operation type, and the output type. This
* helper class is a functor that handles the operator dispatch, invokes the
* operator, and dispatches output writing based on the resulting data type.
*/
struct expression_output_handler {
public:
__device__ inline expression_output_handler() {}
/**
* @brief Resolves an output data reference and assigns result value.
*
* Only output columns (COLUMN) and intermediates (INTERMEDIATE) are supported as output
* reference types. Intermediates must be of fixed width less than or equal to
* sizeof(std::int64_t). This requirement on intermediates is enforced by the linearizer.
*
* @tparam Element Type of result element.
* @tparam OutputType The container type that data will be inserted into.
*
* @param output_object The container that data will be inserted into.
* @param device_data_reference Data reference to resolve.
* @param row_index Row index of data column.
* @param result Value to assign to output.
*/
template <typename Element,
typename ResultSubclass,
typename T,
bool result_has_nulls,
CUDF_ENABLE_IF(is_rep_layout_compatible<Element>())>
__device__ inline void resolve_output(
expression_result<ResultSubclass, T, result_has_nulls>& output_object,
detail::device_data_reference const& device_data_reference,
cudf::size_type const row_index,
IntermediateDataType<has_nulls>* thread_intermediate_storage,
possibly_null_value_t<Element, has_nulls> const& result) const
{
if (device_data_reference.reference_type == detail::device_data_reference_type::COLUMN) {
output_object.template set_value<Element>(row_index, result);
} else { // Assumes device_data_reference.reference_type ==
// detail::device_data_reference_type::INTERMEDIATE
// Using memcpy instead of reinterpret_cast<Element*> for safe type aliasing.
// Using a temporary variable ensures that the compiler knows the result is aligned.
IntermediateDataType<has_nulls> tmp;
memcpy(&tmp, &result, sizeof(possibly_null_value_t<Element, has_nulls>));
thread_intermediate_storage[device_data_reference.data_index] = tmp;
}
}
template <typename Element,
typename ResultSubclass,
typename T,
bool result_has_nulls,
CUDF_ENABLE_IF(!is_rep_layout_compatible<Element>())>
__device__ inline void resolve_output(
expression_result<ResultSubclass, T, result_has_nulls>& output_object,
detail::device_data_reference const& device_data_reference,
cudf::size_type const row_index,
IntermediateDataType<has_nulls>* thread_intermediate_storage,
possibly_null_value_t<Element, has_nulls> const& result) const
{
CUDF_UNREACHABLE("Invalid type in resolve_output.");
}
};
/**
* @brief Subclass of the expression output handler for unary operations.
*
* This functor's call operator is specialized to handle unary operations,
* which only require a single operand.
*/
template <typename Input>
struct unary_expression_output_handler : public expression_output_handler {
__device__ inline unary_expression_output_handler() {}
/**
* @brief Callable to perform a unary operation.
*
* @tparam op The operation to perform.
* @tparam OutputType The container type that data will be inserted into.
*
* @param output_object The container that data will be inserted into.
     * @param output_row_index The row in the output object to insert the data.
* @param input Input to the operation.
* @param output Output data reference.
*/
template <ast_operator op,
typename ResultSubclass,
typename T,
bool result_has_nulls,
std::enable_if_t<
detail::is_valid_unary_op<detail::operator_functor<op, has_nulls>,
possibly_null_value_t<Input, has_nulls>>>* = nullptr>
__device__ inline void operator()(
expression_result<ResultSubclass, T, result_has_nulls>& output_object,
cudf::size_type const output_row_index,
possibly_null_value_t<Input, has_nulls> const& input,
detail::device_data_reference const& output,
IntermediateDataType<has_nulls>* thread_intermediate_storage) const
{
// The output data type is the same whether or not nulls are present, so
// pull from the non-nullable operator.
using Out = cuda::std::invoke_result_t<detail::operator_functor<op, false>, Input>;
this->template resolve_output<Out>(output_object,
output,
output_row_index,
thread_intermediate_storage,
detail::operator_functor<op, has_nulls>{}(input));
}
template <ast_operator op,
typename ResultSubclass,
typename T,
bool result_has_nulls,
std::enable_if_t<
!detail::is_valid_unary_op<detail::operator_functor<op, has_nulls>,
possibly_null_value_t<Input, has_nulls>>>* = nullptr>
__device__ inline void operator()(
expression_result<ResultSubclass, T, result_has_nulls>& output_object,
cudf::size_type const output_row_index,
possibly_null_value_t<Input, has_nulls> const& input,
detail::device_data_reference const& output,
IntermediateDataType<has_nulls>* thread_intermediate_storage) const
{
CUDF_UNREACHABLE("Invalid unary dispatch operator for the provided input.");
}
};
/**
* @brief Subclass of the expression output handler for binary operations.
*
* This functor's call operator is specialized to handle binary operations,
* which require two operands.
*/
template <typename LHS, typename RHS>
struct binary_expression_output_handler : public expression_output_handler {
__device__ inline binary_expression_output_handler() {}
/**
* @brief Callable to perform a binary operation.
*
* @tparam op The operation to perform.
* @tparam OutputType The container type that data will be inserted into.
*
* @param output_object The container that data will be inserted into.
* @param output_row_index The row in the output to insert the result.
* @param lhs Left input to the operation.
* @param rhs Right input to the operation.
* @param output Output data reference.
*/
template <ast_operator op,
typename ResultSubclass,
typename T,
bool result_has_nulls,
std::enable_if_t<detail::is_valid_binary_op<detail::operator_functor<op, has_nulls>,
possibly_null_value_t<LHS, has_nulls>,
possibly_null_value_t<RHS, has_nulls>>>* =
nullptr>
__device__ inline void operator()(
expression_result<ResultSubclass, T, result_has_nulls>& output_object,
cudf::size_type const output_row_index,
possibly_null_value_t<LHS, has_nulls> const& lhs,
possibly_null_value_t<RHS, has_nulls> const& rhs,
detail::device_data_reference const& output,
IntermediateDataType<has_nulls>* thread_intermediate_storage) const
{
// The output data type is the same whether or not nulls are present, so
// pull from the non-nullable operator.
using Out = cuda::std::invoke_result_t<detail::operator_functor<op, false>, LHS, RHS>;
this->template resolve_output<Out>(output_object,
output,
output_row_index,
thread_intermediate_storage,
detail::operator_functor<op, has_nulls>{}(lhs, rhs));
}
template <ast_operator op,
typename ResultSubclass,
typename T,
bool result_has_nulls,
std::enable_if_t<
!detail::is_valid_binary_op<detail::operator_functor<op, has_nulls>,
possibly_null_value_t<LHS, has_nulls>,
possibly_null_value_t<RHS, has_nulls>>>* = nullptr>
__device__ inline void operator()(
expression_result<ResultSubclass, T, result_has_nulls>& output_object,
cudf::size_type const output_row_index,
possibly_null_value_t<LHS, has_nulls> const& lhs,
possibly_null_value_t<RHS, has_nulls> const& rhs,
detail::device_data_reference const& output,
IntermediateDataType<has_nulls>* thread_intermediate_storage) const
{
CUDF_UNREACHABLE("Invalid binary dispatch operator for the provided input.");
}
};
table_device_view const& left; ///< The left table to operate on.
table_device_view const& right; ///< The right table to operate on.
expression_device_view const&
plan; ///< The container of device data representing the expression to evaluate.
};
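// Rough usage sketch (illustrative only; the loop bounds, the `result` object, and the
// shared-memory partitioning are assumptions, not part of this header). A transform kernel
// typically constructs one evaluator per thread and calls evaluate() once per row, writing
// results through an expression_result subclass and using a per-thread slice of shared
// memory for intermediates:
//
//   auto evaluator = cudf::ast::detail::expression_evaluator<has_nulls>(table, plan);
//   for (auto row_index = first_row; row_index < table.num_rows(); row_index += stride) {
//     evaluator.evaluate(result, row_index, thread_intermediate_storage);
//   }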
} // namespace detail
} // namespace ast
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/utilities/traits.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/types.hpp>
#include <cudf/wrappers/dictionary.hpp>
#include <cudf/wrappers/durations.hpp>
#include <cudf/wrappers/timestamps.hpp>
#include <cuda/std/type_traits>
namespace cudf {
/**
* @addtogroup utility_types
* @{
* @file
*/
/// Utility metafunction that maps a sequence of any types to the type void.
template <typename...>
using void_t = void;
/**
* @brief Convenience macro for SFINAE as an unnamed template parameter.
*
* Example:
* \code{cpp}
* // This function will participate in overload resolution only if T is an integral type
* template <typename T, CUDF_ENABLE_IF(std::is_integral_v<T> )>
* void foo();
* \endcode
*
*/
#define CUDF_ENABLE_IF(...) std::enable_if_t<(__VA_ARGS__)>* = nullptr
/// Checks if two types are comparable using less operator (i.e. <).
template <typename L, typename R>
using less_comparable = decltype(std::declval<L>() < std::declval<R>());
/// Checks if two types are comparable using greater operator (i.e. >).
template <typename L, typename R>
using greater_comparable = decltype(std::declval<L>() > std::declval<R>());
/// Checks if two types are comparable using equality operator (i.e. ==).
template <typename L, typename R>
using equality_comparable = decltype(std::declval<L>() == std::declval<R>());
namespace detail {
template <typename L, typename R, typename = void>
struct is_relationally_comparable_impl : std::false_type {};
template <typename L, typename R>
struct is_relationally_comparable_impl<L,
R,
void_t<less_comparable<L, R>, greater_comparable<L, R>>>
: std::true_type {};
template <typename L, typename R, typename = void>
struct is_equality_comparable_impl : std::false_type {};
template <typename L, typename R>
struct is_equality_comparable_impl<L, R, void_t<equality_comparable<L, R>>> : std::true_type {};
// has common type
template <typename AlwaysVoid, typename... Ts>
struct has_common_type_impl : std::false_type {};
template <typename... Ts>
struct has_common_type_impl<void_t<std::common_type_t<Ts...>>, Ts...> : std::true_type {};
} // namespace detail
/// Checks if types have a common type
template <typename... Ts>
using has_common_type = typename detail::has_common_type_impl<void, Ts...>::type;
/// Helper variable template for has_common_type<>::value
template <typename... Ts>
constexpr inline bool has_common_type_v = detail::has_common_type_impl<void, Ts...>::value;
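// Illustrative examples of the common-type check (the results follow from std::common_type):
//   static_assert(cudf::has_common_type_v<int32_t, double>);     // common type is double
//   static_assert(not cudf::has_common_type_v<int32_t, char*>);  // no common type exists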
/// Checks if a type is a timestamp type.
template <typename T>
using is_timestamp_t = cuda::std::disjunction<std::is_same<cudf::timestamp_D, T>,
std::is_same<cudf::timestamp_s, T>,
std::is_same<cudf::timestamp_ms, T>,
std::is_same<cudf::timestamp_us, T>,
std::is_same<cudf::timestamp_ns, T>>;
/// Checks if a type is a duration type.
template <typename T>
using is_duration_t = cuda::std::disjunction<std::is_same<cudf::duration_D, T>,
std::is_same<cudf::duration_s, T>,
std::is_same<cudf::duration_ms, T>,
std::is_same<cudf::duration_us, T>,
std::is_same<cudf::duration_ns, T>>;
/**
* @brief Indicates whether objects of types `L` and `R` can be relationally
*compared.
*
* Given two objects `L l`, and `R r`, returns true if `l < r` and `l > r` are
* well-formed expressions.
*
* @tparam L Type of the first object
* @tparam R Type of the second object
 * @return true Objects of types `L` and `R` can be relationally compared
* @return false Objects of types `L` and `R` cannot be compared
*/
template <typename L, typename R>
constexpr inline bool is_relationally_comparable()
{
return detail::is_relationally_comparable_impl<L, R>::value;
}
/**
* @brief Checks whether `data_type` `type` supports relational comparisons.
*
* @param type Data_type for comparison.
* @return true If `type` supports relational comparisons.
* @return false If `type` does not support relational comparisons.
*/
bool is_relationally_comparable(data_type type);
/**
* @brief Indicates whether objects of types `L` and `R` can be compared
* for equality.
*
* Given two objects `L l`, and `R r`, returns true if `l == r` is a
* well-formed expression.
*
* @tparam L Type of the first object
* @tparam R Type of the second object
* @return true Objects of types `L` and `R` can be compared for equality
* @return false Objects of types `L` and `R` cannot be compared
*/
template <typename L, typename R>
constexpr inline bool is_equality_comparable()
{
return detail::is_equality_comparable_impl<L, R>::value;
}
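// Illustrative compile-time checks for the two comparability traits above:
//   static_assert(cudf::is_relationally_comparable<int32_t, double>());     // l < r, l > r well-formed
//   static_assert(not cudf::is_relationally_comparable<int32_t, char*>());  // mixed compare is ill-formed
//   static_assert(cudf::is_equality_comparable<int32_t, double>());
//   static_assert(not cudf::is_equality_comparable<int32_t, char*>());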
/**
* @brief Checks whether `data_type` `type` supports equality comparisons.
*
* @param type Data_type for comparison.
* @return true If `type` supports equality comparisons.
* @return false If `type` does not support equality comparisons.
*/
bool is_equality_comparable(data_type type);
/**
* @brief Indicates whether the type `T` is a numeric type.
*
* @tparam T The type to verify
* @return true `T` is numeric
* @return false `T` is not numeric
*/
template <typename T>
constexpr inline bool is_numeric()
{
return cuda::std::is_arithmetic<T>();
}
/**
* @brief Indicates whether `type` is a numeric `data_type`.
*
* "Numeric" types are fundamental integral/floating point types such as `INT*`
* or `FLOAT*`. Types that wrap a numeric type are not considered numeric, e.g.,
*`TIMESTAMP`.
*
* @param type The `data_type` to verify
* @return true `type` is numeric
* @return false `type` is not numeric
*/
bool is_numeric(data_type type);
/**
 * @brief Indicates whether the type `T` is an index type.
*
* A type `T` is considered an index type if it is valid to use
* elements of type `T` to index into a column. I.e.,
* index types are integral types such as 'INT*' apart from 'bool'.
*
* @tparam T The type to verify
* @return true `T` is index type
* @return false `T` is not index type
*/
template <typename T>
constexpr inline bool is_index_type()
{
return std::is_integral_v<T> and not std::is_same_v<T, bool>;
}
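// Illustrative examples: integral types other than bool can be used to index into a column.
//   static_assert(cudf::is_index_type<int32_t>());
//   static_assert(cudf::is_index_type<uint64_t>());
//   static_assert(not cudf::is_index_type<bool>());
//   static_assert(not cudf::is_index_type<float>());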
/**
 * @brief Indicates whether the type `type` is an index type.
*
* A type `T` is considered an index type if it is valid to use
* elements of type `T` to index into a column. I.e.,
* index types are integral types such as 'INT*' apart from 'bool'.
*
* @param type The `data_type` to verify
* @return true `type` is index type
* @return false `type` is not index type
*/
bool is_index_type(data_type type);
/**
 * @brief Indicates whether the type `T` is an unsigned numeric type.
*
* @tparam T The type to verify
* @return true `T` is unsigned numeric
* @return false `T` is signed numeric
*/
template <typename T>
constexpr inline bool is_unsigned()
{
return std::is_unsigned_v<T>;
}
/**
 * @brief Indicates whether `type` is an unsigned numeric `data_type`.
*
* "Unsigned Numeric" types are fundamental integral types such as `UINT*`.
*
* @param type The `data_type` to verify
* @return true `type` is unsigned numeric
* @return false `type` is signed numeric
*/
bool is_unsigned(data_type type);
/**
 * @brief Indicates whether the `Iterator` value type is signed.
 *
 * @tparam Iterator The type to verify
 * @return true if the iterator's value type is signed
*/
template <typename Iterator>
constexpr inline bool is_signed_iterator()
{
return std::is_signed_v<typename std::iterator_traits<Iterator>::value_type>;
}
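// Illustrative examples: the signedness check applies to the iterator's value type.
//   static_assert(cudf::is_signed_iterator<int32_t const*>());
//   static_assert(not cudf::is_signed_iterator<uint32_t const*>());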
/**
* @brief Indicates whether the type `T` is an integral type.
*
* @tparam T The type to verify
* @return true `T` is integral
* @return false `T` is not integral
*/
template <typename T>
constexpr inline bool is_integral()
{
return cuda::std::is_integral_v<T>;
}
/**
 * @brief Indicates whether `type` is an integral `data_type`.
*
* "Integral" types are fundamental integer types such as `INT*` and `UINT*`.
*
* @param type The `data_type` to verify
* @return true `type` is integral
 * @return false `type` is not integral
*/
bool is_integral(data_type type);
/**
* @brief Indicates whether the type `T` is an integral type but not bool type.
*
* @tparam T The type to verify
* @return true `T` is integral but not bool
* @return false `T` is not integral or is bool
*/
template <typename T>
constexpr inline bool is_integral_not_bool()
{
return cuda::std::is_integral_v<T> and not std::is_same_v<T, bool>;
}
/**
 * @brief Indicates whether `type` is an integral `data_type` and not BOOL8
*
* "Integral" types are fundamental integer types such as `INT*` and `UINT*`.
*
* @param type The `data_type` to verify
* @return true `type` is integral but not bool
 * @return false `type` is not integral or is bool
*/
bool is_integral_not_bool(data_type type);
/**
* @brief Indicates whether the type `T` is a floating point type.
*
* @tparam T The type to verify
* @return true `T` is floating point
* @return false `T` is not floating point
*/
template <typename T>
constexpr inline bool is_floating_point()
{
return std::is_floating_point_v<T>;
}
/**
* @brief Indicates whether `type` is a floating point `data_type`.
*
* "Floating point" types are fundamental floating point types such as `FLOAT*`.
*
* @param type The `data_type` to verify
* @return true `type` is floating point
* @return false `type` is not floating point
*/
bool is_floating_point(data_type type);
/**
* @brief Indicates whether `T` is a std::byte type.
*
* @tparam T The type to verify
* @return true `type` is std::byte
* @return false `type` is not std::byte
*/
template <typename T>
constexpr inline bool is_byte()
{
return std::is_same_v<std::remove_cv_t<T>, std::byte>;
}
/**
* @brief Indicates whether `T` is a Boolean type.
*
 * @tparam T The type to verify
 * @return true `T` is Boolean
 * @return false `T` is not Boolean
*/
template <typename T>
constexpr inline bool is_boolean()
{
return std::is_same_v<T, bool>;
}
/**
* @brief Indicates whether `type` is a Boolean `data_type`.
*
* @param type The `data_type` to verify
* @return true `type` is a Boolean
* @return false `type` is not a Boolean
*/
bool is_boolean(data_type type);
/**
* @brief Indicates whether the type `T` is a timestamp type.
*
* @tparam T The type to verify
* @return true `T` is a timestamp
* @return false `T` is not a timestamp
*/
template <typename T>
constexpr inline bool is_timestamp()
{
return is_timestamp_t<T>::value;
}
/**
* @brief Indicates whether `type` is a timestamp `data_type`.
*
* "Timestamp" types are int32_t or int64_t durations since the unix epoch.
*
* @param type The `data_type` to verify
* @return true `type` is a timestamp
* @return false `type` is not a timestamp
*/
bool is_timestamp(data_type type);
/**
* @brief Indicates whether the type `T` is a fixed-point type.
*
* @tparam T The type to verify
* @return true `T` is a fixed-point type
* @return false `T` is not a fixed-point type
*/
template <typename T>
constexpr inline bool is_fixed_point()
{
return std::is_same_v<numeric::decimal32, T> || std::is_same_v<numeric::decimal64, T> ||
std::is_same_v<numeric::decimal128, T>;
}
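// Illustrative examples: only the decimal wrapper types are fixed-point; their underlying
// integer representation types are not.
//   static_assert(cudf::is_fixed_point<numeric::decimal32>());
//   static_assert(not cudf::is_fixed_point<int32_t>());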
/**
* @brief Indicates whether `type` is a fixed point `data_type`.
*
* @param type The `data_type` to verify
* @return true `type` is a fixed point type
* @return false `type` is not a fixed point type
*/
bool is_fixed_point(data_type type);
/**
* @brief Indicates whether the type `T` is a duration type.
*
* @tparam T The type to verify
* @return true `T` is a duration
* @return false `T` is not a duration
*/
template <typename T>
constexpr inline bool is_duration()
{
return is_duration_t<T>::value;
}
/**
* @brief Indicates whether `type` is a duration `data_type`.
*
* "Duration" types are int32_t or int64_t tick counts representing a time interval.
*
* @param type The `data_type` to verify
* @return true `type` is a duration
* @return false `type` is not a duration
*/
bool is_duration(data_type type);
/**
* @brief Indicates whether the type `T` is a chrono type.
*
* @tparam T The type to verify
* @return true `T` is a duration or a timestamp type
* @return false `T` is neither a duration nor a timestamp type
*/
template <typename T>
constexpr inline bool is_chrono()
{
return is_duration<T>() || is_timestamp<T>();
}
/**
* @brief Indicates whether `type` is a chrono `data_type`.
*
* Chrono types include cudf timestamp types, which represent a point in time, and cudf
* duration types that represent a time interval.
*
* @param type The `data_type` to verify
* @return true `type` is a chrono type
* @return false `type` is not a chrono type
*/
bool is_chrono(data_type type);
/**
* @brief Indicates whether `T` is layout compatible with its "representation" type.
*
* For example, in a column, a `decimal32` is concretely represented by a single `int32_t`, but the
* `decimal32` type itself contains both the integer representation and the scale. Therefore,
* `decimal32` is _not_ layout compatible with `int32_t`.
*
 * As a further example, `duration_ns` is distinct from its concrete `int64_t` representation type,
* but they are layout compatible.
*
* @return true if `T` is layout compatible with its "representation" type
*/
template <typename T>
constexpr bool is_rep_layout_compatible()
{
return cudf::is_numeric<T>() or cudf::is_chrono<T>() or cudf::is_boolean<T>() or
cudf::is_byte<T>();
}
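// Illustrative examples following the discussion above:
//   static_assert(cudf::is_rep_layout_compatible<int32_t>());
//   static_assert(cudf::is_rep_layout_compatible<cudf::duration_ns>());
//   static_assert(not cudf::is_rep_layout_compatible<numeric::decimal32>());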
/**
* @brief Indicates whether the type `T` is a dictionary type.
*
* @tparam T The type to verify
* @return true `T` is a dictionary-type
* @return false `T` is not dictionary-type
*/
template <typename T>
constexpr inline bool is_dictionary()
{
return std::is_same_v<dictionary32, T>;
}
/**
* @brief Indicates whether `type` is a dictionary `data_type`.
*
* @param type The `data_type` to verify
* @return true `type` is a dictionary type
* @return false `type` is not a dictionary type
*/
bool is_dictionary(data_type type);
/**
* @brief Indicates whether elements of type `T` are fixed-width.
*
* Elements of a fixed-width type all have the same size in bytes.
*
* @tparam T The C++ type to verify
* @return true `T` corresponds to a fixed-width element type
* @return false `T` corresponds to a variable-width element type
*/
template <typename T>
constexpr inline bool is_fixed_width()
{
// TODO Add fixed width wrapper types
// Is a category fixed width?
return cudf::is_numeric<T>() || cudf::is_chrono<T>() || cudf::is_fixed_point<T>();
}
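// Illustrative examples: numeric, chrono, and fixed-point types are fixed-width; strings,
// lists, and structs are not.
//   static_assert(cudf::is_fixed_width<int64_t>());
//   static_assert(cudf::is_fixed_width<numeric::decimal64>());
//   static_assert(not cudf::is_fixed_width<cudf::string_view>());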
/**
* @brief Indicates whether elements of `type` are fixed-width.
*
* Elements of a fixed-width type all have the same size in bytes.
*
* @param type The `data_type` to verify
* @return true `type` is fixed-width
* @return false `type` is variable-width
*/
bool is_fixed_width(data_type type);
class string_view;
/**
* @brief Indicates whether the type `T` is a compound type.
*
* `column`s with "compound" elements are logically a single column of elements,
* but may be concretely implemented with two or more `column`s. For example, a
* `STRING` column could contain a `column` of offsets and a child `column` of
* characters.
*
* @tparam T The type to verify
* @return true `T` corresponds to a "compound" type
* @return false `T` corresponds to a "simple" type
*/
template <typename T>
constexpr inline bool is_compound()
{
return std::is_same_v<T, cudf::string_view> or std::is_same_v<T, cudf::dictionary32> or
std::is_same_v<T, cudf::list_view> or std::is_same_v<T, cudf::struct_view>;
}
/**
* @brief Indicates whether elements of `type` are compound.
*
* `column`s with "compound" elements are logically a single column of elements,
* but may be concretely implemented with two or more `column`s. For example, a
* `STRING` column could contain a `column` of offsets and a child `column` of
* characters.
*
* @param type The `data_type` to verify
* @return true `type` is a compound type
* @return false `type` is a simple type
*/
bool is_compound(data_type type);
/**
* @brief Indicates whether `T` is a nested type.
*
* "Nested" types are distinct from compound types in that they
* can have an arbitrarily deep list of descendants of the same
* type. Strings are not a nested type, but lists are.
*
 * @tparam T The type to verify
* @return true T is a nested type
* @return false T is not a nested type
*/
template <typename T>
constexpr inline bool is_nested()
{
return std::is_same_v<T, cudf::list_view> || std::is_same_v<T, cudf::struct_view>;
}
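// Illustrative examples of the compound/nested distinction: strings are compound but not
// nested, while lists and structs are both.
//   static_assert(cudf::is_compound<cudf::string_view>() and not cudf::is_nested<cudf::string_view>());
//   static_assert(cudf::is_compound<cudf::list_view>() and cudf::is_nested<cudf::list_view>());
//   static_assert(not cudf::is_compound<int32_t>());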
/**
* @brief Indicates whether `type` is a nested type
*
* "Nested" types are distinct from compound types in that they
* can have an arbitrarily deep list of descendants of the same
* type. Strings are not a nested type, but lists are.
*
* @param type The `data_type` to verify
* @return true `type` is a nested type
* @return false `type` is not a nested type
*/
bool is_nested(data_type type);
/**
* @brief Indicates whether `from` is bit-castable to `to`.
*
* This casting is based on std::bit_cast. Data types that have the same size and are trivially
* copyable are eligible for this casting.
*
* See `cudf::bit_cast()` which returns a zero-copy `column_view` when casting between
* bit-castable types.
*
* @param from The `data_type` to convert from
* @param to The `data_type` to convert to
* @return `true` if the types are castable
*/
bool is_bit_castable(data_type from, data_type to);
template <typename From, typename To>
struct is_convertible : std::is_convertible<From, To> {};
// This will ensure that timestamps can be promoted to a higher precision. Presently, they can't
// do that due to nvcc/gcc compiler issues
template <typename Duration1, typename Duration2>
struct is_convertible<cudf::detail::timestamp<Duration1>, cudf::detail::timestamp<Duration2>>
: std::is_convertible<typename cudf::detail::time_point<Duration1>::duration,
typename cudf::detail::time_point<Duration2>::duration> {};
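// Given this specialization, one would expect promotion to a finer timestamp precision to be
// allowed (illustrative; relies on the usual chrono duration conversion rules):
//   static_assert(cudf::is_convertible<cudf::timestamp_s, cudf::timestamp_ms>::value);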
/** @} */
} // namespace cudf
| 0 |