repo_id (string, 21–96 chars) | file_path (string, 31–155 chars) | content (string, 1–92.9M chars) | __index_level_0__ (int64, all 0)
---|---|---|---|
rapidsai_public_repos/cudf/cpp/src/binaryop
|
rapidsai_public_repos/cudf/cpp/src/binaryop/compiled/LogicalAnd.cu
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "binary_ops.cuh"
namespace cudf::binops::compiled {
template void apply_binary_op<ops::LogicalAnd>(mutable_column_view&,
column_view const&,
column_view const&,
bool is_lhs_scalar,
bool is_rhs_scalar,
rmm::cuda_stream_view);
}  // namespace cudf::binops::compiled
| 0 |
rapidsai_public_repos/cudf/cpp/src/binaryop
|
rapidsai_public_repos/cudf/cpp/src/binaryop/compiled/BitwiseAnd.cu
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "binary_ops.cuh"
namespace cudf::binops::compiled {
template void apply_binary_op<ops::BitwiseAnd>(mutable_column_view&,
column_view const&,
column_view const&,
bool is_lhs_scalar,
bool is_rhs_scalar,
rmm::cuda_stream_view);
}  // namespace cudf::binops::compiled
| 0 |
rapidsai_public_repos/cudf/cpp/src/binaryop
|
rapidsai_public_repos/cudf/cpp/src/binaryop/compiled/binary_ops.cuh
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "binary_ops.hpp"
#include "operation.cuh"
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
namespace cudf {
namespace binops {
namespace compiled {
template <typename BinaryOperator, typename TypeLhs, typename TypeRhs>
constexpr bool is_bool_result()
{
using ReturnType = std::invoke_result_t<BinaryOperator, TypeLhs, TypeRhs>;
return std::is_same_v<bool, ReturnType>;
}
/**
* @brief Type casts each element of the column to `CastType`
*
*/
template <typename CastType>
struct type_casted_accessor {
template <typename Element>
__device__ inline CastType operator()(cudf::size_type i,
column_device_view const& col,
bool is_scalar) const
{
if constexpr (column_device_view::has_element_accessor<Element>() and
std::is_convertible_v<Element, CastType>)
return static_cast<CastType>(col.element<Element>(is_scalar ? 0 : i));
return {};
}
};
/**
 * @brief Casts the value to the column's type and stores it in the `i`th row of the column
*
*/
template <typename FromType>
struct typed_casted_writer {
template <typename Element>
__device__ inline void operator()(cudf::size_type i,
mutable_column_device_view const& col,
FromType val) const
{
if constexpr (mutable_column_device_view::has_element_accessor<Element>() and
std::is_constructible_v<Element, FromType>) {
col.element<Element>(i) = static_cast<Element>(val);
} else if constexpr (is_fixed_point<Element>() and
(is_fixed_point<FromType>() or
std::is_constructible_v<Element, FromType>)) {
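      // Fixed-point outputs store the underlying integer representation: rescale a fixed-point
      // source to the output column's scale, or construct a new fixed-point value at that scale.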
if constexpr (is_fixed_point<FromType>())
col.data<Element::rep>()[i] = val.rescaled(numeric::scale_type{col.type().scale()}).value();
else
col.data<Element::rep>()[i] = Element{val, numeric::scale_type{col.type().scale()}}.value();
}
}
};
// Functors that invoke the operation only when it is defined for the dispatched types.
/**
 * @brief Functor that invokes the operation only when it is defined for the common type.
*
* @tparam BinaryOperator binary operator functor
*/
template <typename BinaryOperator>
struct ops_wrapper {
mutable_column_device_view& out;
column_device_view const& lhs;
column_device_view const& rhs;
bool const& is_lhs_scalar;
bool const& is_rhs_scalar;
template <typename TypeCommon>
__device__ void operator()(size_type i)
{
if constexpr (std::is_invocable_v<BinaryOperator, TypeCommon, TypeCommon>) {
TypeCommon x =
type_dispatcher(lhs.type(), type_casted_accessor<TypeCommon>{}, i, lhs, is_lhs_scalar);
TypeCommon y =
type_dispatcher(rhs.type(), type_casted_accessor<TypeCommon>{}, i, rhs, is_rhs_scalar);
auto result = [&]() {
if constexpr (std::is_same_v<BinaryOperator, ops::NullEquals> or
std::is_same_v<BinaryOperator, ops::NullLogicalAnd> or
std::is_same_v<BinaryOperator, ops::NullLogicalOr> or
std::is_same_v<BinaryOperator, ops::NullMax> or
std::is_same_v<BinaryOperator, ops::NullMin>) {
bool output_valid = false;
auto result = BinaryOperator{}.template operator()<TypeCommon, TypeCommon>(
x,
y,
lhs.is_valid(is_lhs_scalar ? 0 : i),
rhs.is_valid(is_rhs_scalar ? 0 : i),
output_valid);
if (out.nullable() && !output_valid) out.set_null(i);
return result;
} else {
return BinaryOperator{}.template operator()<TypeCommon, TypeCommon>(x, y);
}
// To suppress nvcc warning
return std::invoke_result_t<BinaryOperator, TypeCommon, TypeCommon>{};
}();
if constexpr (is_bool_result<BinaryOperator, TypeCommon, TypeCommon>())
out.element<decltype(result)>(i) = result;
else
type_dispatcher(out.type(), typed_casted_writer<decltype(result)>{}, i, out, result);
}
    (void)i;  // suppress unused-parameter warning when the operation is not invocable for TypeCommon
}
};
/**
 * @brief Functor that invokes the operation only when it is defined for the lhs and rhs types (no common type).
*
* @tparam BinaryOperator binary operator functor
*/
template <typename BinaryOperator>
struct ops2_wrapper {
mutable_column_device_view& out;
column_device_view const& lhs;
column_device_view const& rhs;
bool const& is_lhs_scalar;
bool const& is_rhs_scalar;
template <typename TypeLhs, typename TypeRhs>
__device__ void operator()(size_type i)
{
if constexpr (!has_common_type_v<TypeLhs, TypeRhs> and
std::is_invocable_v<BinaryOperator, TypeLhs, TypeRhs>) {
TypeLhs x = lhs.element<TypeLhs>(is_lhs_scalar ? 0 : i);
TypeRhs y = rhs.element<TypeRhs>(is_rhs_scalar ? 0 : i);
auto result = [&]() {
if constexpr (std::is_same_v<BinaryOperator, ops::NullEquals> or
std::is_same_v<BinaryOperator, ops::NullLogicalAnd> or
std::is_same_v<BinaryOperator, ops::NullLogicalOr> or
std::is_same_v<BinaryOperator, ops::NullMax> or
std::is_same_v<BinaryOperator, ops::NullMin>) {
bool output_valid = false;
auto result = BinaryOperator{}.template operator()<TypeLhs, TypeRhs>(
x,
y,
lhs.is_valid(is_lhs_scalar ? 0 : i),
rhs.is_valid(is_rhs_scalar ? 0 : i),
output_valid);
if (out.nullable() && !output_valid) out.set_null(i);
return result;
} else {
return BinaryOperator{}.template operator()<TypeLhs, TypeRhs>(x, y);
}
// To suppress nvcc warning
return std::invoke_result_t<BinaryOperator, TypeLhs, TypeRhs>{};
}();
if constexpr (is_bool_result<BinaryOperator, TypeLhs, TypeRhs>())
out.element<decltype(result)>(i) = result;
else
type_dispatcher(out.type(), typed_casted_writer<decltype(result)>{}, i, out, result);
}
    (void)i;  // suppress unused-parameter warning when the operation is not invocable for TypeLhs and TypeRhs
}
};
/**
 * @brief Functor that performs a single type dispatch in device code
 *
 * Dispatches once on the common type shared by lhs and rhs.
*
* @tparam BinaryOperator binary operator functor
*/
template <class BinaryOperator>
struct binary_op_device_dispatcher {
data_type common_data_type;
mutable_column_device_view out;
column_device_view lhs;
column_device_view rhs;
bool is_lhs_scalar;
bool is_rhs_scalar;
__forceinline__ __device__ void operator()(size_type i)
{
type_dispatcher(common_data_type,
ops_wrapper<BinaryOperator>{out, lhs, rhs, is_lhs_scalar, is_rhs_scalar},
i);
}
};
/**
 * @brief Functor that performs a double type dispatch in device code
 *
 * Dispatches separately on the lhs and rhs types when they have no common type.
*
* @tparam BinaryOperator binary operator functor
*/
template <class BinaryOperator>
struct binary_op_double_device_dispatcher {
mutable_column_device_view out;
column_device_view lhs;
column_device_view rhs;
bool is_lhs_scalar;
bool is_rhs_scalar;
__forceinline__ __device__ void operator()(size_type i)
{
double_type_dispatcher(
lhs.type(),
rhs.type(),
ops2_wrapper<BinaryOperator>{out, lhs, rhs, is_lhs_scalar, is_rhs_scalar},
i);
}
};
/**
* @brief Simplified for_each kernel
*
* @param size number of elements to process.
* @param f Functor object to call for each element.
*/
template <typename Functor>
__global__ void for_each_kernel(cudf::size_type size, Functor f)
{
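  // Grid-stride loop: each thread starts at its global index and advances by the total
  // number of threads in the grid, so any launch configuration covers all `size` elements.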
int tid = threadIdx.x;
int blkid = blockIdx.x;
int blksz = blockDim.x;
int gridsz = gridDim.x;
int start = tid + blkid * blksz;
int step = blksz * gridsz;
#pragma unroll
for (cudf::size_type i = start; i < size; i += step) {
f(i);
}
}
/**
 * @brief Launches the simplified for_each kernel with grid dimensions chosen for maximum occupancy.
*
* @tparam Functor
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param size number of elements to process.
* @param f Functor object to call for each element.
*/
template <typename Functor>
void for_each(rmm::cuda_stream_view stream, cudf::size_type size, Functor f)
{
int block_size;
int min_grid_size;
CUDF_CUDA_TRY(
cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, for_each_kernel<decltype(f)>));
  // Size the grid for roughly two elements per thread.
int const grid_size = util::div_rounding_up_safe(size, 2 * block_size);
for_each_kernel<<<grid_size, block_size, 0, stream.value()>>>(size, std::forward<Functor&&>(f));
}
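// apply_binary_op materializes device views for the output and both inputs, then runs the
// elementwise kernel through either the single- or double-type-dispatch functor above. The
// per-operator .cu files in this directory instantiate it explicitly, for example:
//   template void apply_binary_op<ops::LogicalAnd>(mutable_column_view&,
//                                                  column_view const&,
//                                                  column_view const&,
//                                                  bool is_lhs_scalar,
//                                                  bool is_rhs_scalar,
//                                                  rmm::cuda_stream_view);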
template <class BinaryOperator>
void apply_binary_op(mutable_column_view& out,
column_view const& lhs,
column_view const& rhs,
bool is_lhs_scalar,
bool is_rhs_scalar,
rmm::cuda_stream_view stream)
{
auto common_dtype = get_common_type(out.type(), lhs.type(), rhs.type());
auto lhsd = column_device_view::create(lhs, stream);
auto rhsd = column_device_view::create(rhs, stream);
auto outd = mutable_column_device_view::create(out, stream);
// Create binop functor instance
if (common_dtype) {
// Execute it on every element
for_each(stream,
out.size(),
binary_op_device_dispatcher<BinaryOperator>{
*common_dtype, *outd, *lhsd, *rhsd, is_lhs_scalar, is_rhs_scalar});
} else {
// Execute it on every element
for_each(stream,
out.size(),
binary_op_double_device_dispatcher<BinaryOperator>{
*outd, *lhsd, *rhsd, is_lhs_scalar, is_rhs_scalar});
}
}
} // namespace compiled
} // namespace binops
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/binaryop
|
rapidsai_public_repos/cudf/cpp/src/binaryop/compiled/BitwiseXor.cu
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "binary_ops.cuh"
namespace cudf::binops::compiled {
template void apply_binary_op<ops::BitwiseXor>(mutable_column_view&,
column_view const&,
column_view const&,
bool is_lhs_scalar,
bool is_rhs_scalar,
rmm::cuda_stream_view);
}  // namespace cudf::binops::compiled
| 0 |
rapidsai_public_repos/cudf/cpp/src/binaryop
|
rapidsai_public_repos/cudf/cpp/src/binaryop/compiled/NullMin.cu
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "binary_ops.cuh"
namespace cudf::binops::compiled {
template void apply_binary_op<ops::NullMin>(mutable_column_view&,
column_view const&,
column_view const&,
bool is_lhs_scalar,
bool is_rhs_scalar,
rmm::cuda_stream_view);
} // namespace cudf::binops::compiled
| 0 |
rapidsai_public_repos/cudf/cpp/src/binaryop
|
rapidsai_public_repos/cudf/cpp/src/binaryop/compiled/LogBase.cu
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "binary_ops.cuh"
namespace cudf::binops::compiled {
template void apply_binary_op<ops::LogBase>(mutable_column_view&,
column_view const&,
column_view const&,
bool is_lhs_scalar,
bool is_rhs_scalar,
rmm::cuda_stream_view);
}  // namespace cudf::binops::compiled
| 0 |
rapidsai_public_repos/cudf/cpp/src/binaryop
|
rapidsai_public_repos/cudf/cpp/src/binaryop/compiled/util.cpp
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "operation.cuh"
#include <cudf/binaryop.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <optional>
namespace cudf::binops::compiled {
namespace {
struct common_type_functor {
template <typename TypeLhs, typename TypeRhs>
std::optional<data_type> operator()() const
{
if constexpr (cudf::has_common_type_v<TypeLhs, TypeRhs>) {
using TypeCommon = std::common_type_t<TypeLhs, TypeRhs>;
return data_type{type_to_id<TypeCommon>()};
}
// A compiler bug may cause a compilation error when using empty
// initializer list to construct an std::optional object containing no
// `data_type` value. Therefore, we explicitly return `std::nullopt`
// instead.
return std::nullopt;
}
};
struct has_mutable_element_accessor_functor {
template <typename T>
bool operator()() const
{
return mutable_column_device_view::has_element_accessor<T>();
}
};
bool has_mutable_element_accessor(data_type t)
{
return type_dispatcher(t, has_mutable_element_accessor_functor{});
}
template <typename InputType>
struct is_constructible_functor {
template <typename TargetType>
bool operator()() const
{
return std::is_constructible_v<TargetType, InputType>;
}
};
template <typename InputType>
bool is_constructible(data_type target_type)
{
return type_dispatcher(target_type, is_constructible_functor<InputType>{});
}
/**
 * @brief Functor that returns true if BinaryOperator supports the given input and output types.
*
* @tparam BinaryOperator binary operator functor
*/
template <typename BinaryOperator>
struct is_binary_operation_supported {
  // For operations whose output type is fixed (e.g. comparison operators).
template <typename TypeLhs, typename TypeRhs>
inline constexpr bool operator()() const
{
if constexpr (column_device_view::has_element_accessor<TypeLhs>() and
column_device_view::has_element_accessor<TypeRhs>()) {
if constexpr (has_common_type_v<TypeLhs, TypeRhs>) {
using common_t = std::common_type_t<TypeLhs, TypeRhs>;
return std::is_invocable_v<BinaryOperator, common_t, common_t>;
} else {
return std::is_invocable_v<BinaryOperator, TypeLhs, TypeRhs>;
}
} else {
return false;
}
}
template <typename TypeLhs, typename TypeRhs>
inline constexpr bool operator()(data_type out_type) const
{
if constexpr (column_device_view::has_element_accessor<TypeLhs>() and
column_device_view::has_element_accessor<TypeRhs>()) {
if (has_mutable_element_accessor(out_type) or is_fixed_point(out_type)) {
if constexpr (has_common_type_v<TypeLhs, TypeRhs>) {
using common_t = std::common_type_t<TypeLhs, TypeRhs>;
if constexpr (std::is_invocable_v<BinaryOperator, common_t, common_t>) {
using ReturnType = std::invoke_result_t<BinaryOperator, common_t, common_t>;
return is_constructible<ReturnType>(out_type) or
(is_fixed_point<ReturnType>() and is_fixed_point(out_type));
}
} else if constexpr (std::is_invocable_v<BinaryOperator, TypeLhs, TypeRhs>) {
using ReturnType = std::invoke_result_t<BinaryOperator, TypeLhs, TypeRhs>;
return is_constructible<ReturnType>(out_type);
}
}
}
return false;
}
};
struct is_supported_operation_functor {
template <typename TypeLhs, typename TypeRhs>
struct nested_support_functor {
template <typename BinaryOperator>
inline constexpr bool call(data_type out_type) const
{
return is_binary_operation_supported<BinaryOperator>{}.template operator()<TypeLhs, TypeRhs>(
out_type);
}
inline constexpr bool operator()(binary_operator op, data_type out_type) const
{
switch (op) {
// clang-format off
case binary_operator::ADD: return call<ops::Add>(out_type);
case binary_operator::SUB: return call<ops::Sub>(out_type);
case binary_operator::MUL: return call<ops::Mul>(out_type);
case binary_operator::DIV: return call<ops::Div>(out_type);
case binary_operator::TRUE_DIV: return call<ops::TrueDiv>(out_type);
case binary_operator::FLOOR_DIV: return call<ops::FloorDiv>(out_type);
case binary_operator::MOD: return call<ops::Mod>(out_type);
case binary_operator::PYMOD: return call<ops::PyMod>(out_type);
case binary_operator::POW: return call<ops::Pow>(out_type);
case binary_operator::INT_POW: return call<ops::IntPow>(out_type);
case binary_operator::BITWISE_AND: return call<ops::BitwiseAnd>(out_type);
case binary_operator::BITWISE_OR: return call<ops::BitwiseOr>(out_type);
case binary_operator::BITWISE_XOR: return call<ops::BitwiseXor>(out_type);
case binary_operator::SHIFT_LEFT: return call<ops::ShiftLeft>(out_type);
case binary_operator::SHIFT_RIGHT: return call<ops::ShiftRight>(out_type);
case binary_operator::SHIFT_RIGHT_UNSIGNED: return call<ops::ShiftRightUnsigned>(out_type);
case binary_operator::LOG_BASE: return call<ops::LogBase>(out_type);
case binary_operator::ATAN2: return call<ops::ATan2>(out_type);
case binary_operator::PMOD: return call<ops::PMod>(out_type);
case binary_operator::NULL_MAX: return call<ops::NullMax>(out_type);
case binary_operator::NULL_MIN: return call<ops::NullMin>(out_type);
/*
case binary_operator::GENERIC_BINARY: // defined in jit only.
*/
default: return false;
// clang-format on
}
}
};
template <typename BinaryOperator, typename TypeLhs, typename TypeRhs>
inline constexpr bool bool_op(data_type out) const
{
return out.id() == type_id::BOOL8 and
is_binary_operation_supported<BinaryOperator>{}.template operator()<TypeLhs, TypeRhs>();
}
template <typename TypeLhs, typename TypeRhs>
inline constexpr bool operator()(data_type out, binary_operator op) const
{
switch (op) {
// output type should be bool type.
case binary_operator::LOGICAL_AND: return bool_op<ops::LogicalAnd, TypeLhs, TypeRhs>(out);
case binary_operator::LOGICAL_OR: return bool_op<ops::LogicalOr, TypeLhs, TypeRhs>(out);
case binary_operator::EQUAL: return bool_op<ops::Equal, TypeLhs, TypeRhs>(out);
case binary_operator::NOT_EQUAL: return bool_op<ops::NotEqual, TypeLhs, TypeRhs>(out);
case binary_operator::LESS: return bool_op<ops::Less, TypeLhs, TypeRhs>(out);
case binary_operator::GREATER: return bool_op<ops::Greater, TypeLhs, TypeRhs>(out);
case binary_operator::LESS_EQUAL: return bool_op<ops::LessEqual, TypeLhs, TypeRhs>(out);
case binary_operator::GREATER_EQUAL: return bool_op<ops::GreaterEqual, TypeLhs, TypeRhs>(out);
case binary_operator::NULL_EQUALS: return bool_op<ops::NullEquals, TypeLhs, TypeRhs>(out);
case binary_operator::NULL_LOGICAL_AND:
return bool_op<ops::NullLogicalAnd, TypeLhs, TypeRhs>(out);
case binary_operator::NULL_LOGICAL_OR:
return bool_op<ops::NullLogicalOr, TypeLhs, TypeRhs>(out);
default: return nested_support_functor<TypeLhs, TypeRhs>{}(op, out);
}
return false;
}
};
} // namespace
std::optional<data_type> get_common_type(data_type out, data_type lhs, data_type rhs)
{
// Compute the common type of (out, lhs, rhs) if it exists, or the common
// type of (lhs, rhs) if it exists, else return a null optional.
// We can avoid a triple type dispatch by using the definition of
// std::common_type to compute this with double type dispatches.
// Specifically, std::common_type_t<TypeOut, TypeLhs, TypeRhs> is the same as
// std::common_type_t<std::common_type_t<TypeOut, TypeLhs>, TypeRhs>.
auto common_type = double_type_dispatcher(out, lhs, common_type_functor{});
if (common_type.has_value()) {
common_type = double_type_dispatcher(common_type.value(), rhs, common_type_functor{});
}
// If no common type of (out, lhs, rhs) exists, fall back to the common type
// of (lhs, rhs).
if (!common_type.has_value()) {
common_type = double_type_dispatcher(lhs, rhs, common_type_functor{});
}
return common_type;
}
bool is_supported_operation(data_type out, data_type lhs, data_type rhs, binary_operator op)
{
return double_type_dispatcher(lhs, rhs, is_supported_operation_functor{}, out, op);
}
} // namespace cudf::binops::compiled
| 0 |
rapidsai_public_repos/cudf/cpp/src/binaryop
|
rapidsai_public_repos/cudf/cpp/src/binaryop/compiled/operation.cuh
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/utilities/traits.hpp>
#include <cmath>
namespace cudf {
namespace binops {
namespace compiled {
// All binary operations
namespace ops {
struct Add {
template <typename T1, typename T2>
__device__ inline auto operator()(T1 const& lhs, T2 const& rhs) -> decltype(lhs + rhs)
{
return lhs + rhs;
}
};
struct Sub {
template <typename T1, typename T2>
__device__ inline auto operator()(T1 const& lhs, T2 const& rhs) -> decltype(lhs - rhs)
{
return lhs - rhs;
}
};
struct Mul {
template <typename TypeLhs, typename TypeRhs>
static constexpr inline bool is_supported()
{
return has_common_type_v<TypeLhs, TypeRhs> or
// FIXME: without the following line, compilation error
// _deps/libcudacxx-src/include/cuda/std/detail/libcxx/include/chrono(917): error:
// identifier "cuda::std::__3::ratio<(long)86400000000l, (long)1l> ::num" is undefined in
// device code
(is_duration<TypeLhs>() and std::is_integral<TypeRhs>()) or
(std::is_integral<TypeLhs>() and is_duration<TypeRhs>()) or
(is_fixed_point<TypeLhs>() and is_numeric<TypeRhs>()) or
(is_numeric<TypeLhs>() and is_fixed_point<TypeRhs>());
}
template <typename T1, typename T2, std::enable_if_t<is_supported<T1, T2>()>* = nullptr>
__device__ inline auto operator()(T1 const& lhs, T2 const& rhs) -> decltype(lhs * rhs)
{
return lhs * rhs;
}
};
struct Div {
template <typename TypeLhs, typename TypeRhs>
static constexpr inline bool is_supported()
{
return has_common_type_v<TypeLhs, TypeRhs> or
// FIXME: without this, compilation error on chrono:917
(is_duration<TypeLhs>() and (std::is_integral<TypeRhs>() or is_duration<TypeRhs>())) or
(is_fixed_point<TypeLhs>() and is_numeric<TypeRhs>()) or
(is_numeric<TypeLhs>() and is_fixed_point<TypeRhs>());
}
template <typename T1, typename T2, std::enable_if_t<is_supported<T1, T2>()>* = nullptr>
__device__ inline auto operator()(T1 const& lhs, T2 const& rhs) -> decltype(lhs / rhs)
{
return lhs / rhs;
}
};
struct TrueDiv {
template <typename T1, typename T2>
__device__ inline auto operator()(T1 const& lhs, T2 const& rhs)
-> decltype((static_cast<double>(lhs) / static_cast<double>(rhs)))
{
return (static_cast<double>(lhs) / static_cast<double>(rhs));
}
};
struct FloorDiv {
template <typename TypeLhs,
typename TypeRhs,
std::enable_if_t<(std::is_integral_v<std::common_type_t<TypeLhs, TypeRhs>> and
std::is_signed_v<std::common_type_t<TypeLhs, TypeRhs>>)>* = nullptr>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> decltype(x / y)
{
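    // Integer division truncates toward zero; to floor instead, subtract one from the quotient
    // when the operands have opposite signs and the division leaves a nonzero remainder.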
auto const quotient = x / y;
auto const nonzero_remainder = (x % y) != 0;
auto const mixed_sign = (x ^ y) < 0;
return quotient - mixed_sign * nonzero_remainder;
}
template <typename TypeLhs,
typename TypeRhs,
std::enable_if_t<(std::is_integral_v<std::common_type_t<TypeLhs, TypeRhs>> and
!std::is_signed_v<std::common_type_t<TypeLhs, TypeRhs>>)>* = nullptr>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> decltype(x / y)
{
return x / y;
}
template <
typename TypeLhs,
typename TypeRhs,
std::enable_if_t<(std::is_same_v<std::common_type_t<TypeLhs, TypeRhs>, float>)>* = nullptr>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> float
{
return floorf(x / y);
}
template <
typename TypeLhs,
typename TypeRhs,
std::enable_if_t<(std::is_same_v<std::common_type_t<TypeLhs, TypeRhs>, double>)>* = nullptr>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> double
{
return floor(x / y);
}
};
struct Mod {
template <typename TypeLhs, typename TypeRhs>
static constexpr inline bool is_supported()
{
return has_common_type_v<TypeLhs, TypeRhs> or
// FIXME: without this, compilation error
//_deps/libcudacxx-src/include/cuda/std/detail/libcxx/include/chrono(1337):
// error : expression must have integral or unscoped enum type
(is_duration<TypeLhs>() and (std::is_integral<TypeRhs>() or is_duration<TypeRhs>()));
}
template <typename T1, typename T2, std::enable_if_t<is_supported<T1, T2>()>* = nullptr>
__device__ inline auto operator()(T1 const& lhs, T2 const& rhs) -> decltype(lhs % rhs)
{
return lhs % rhs;
}
template <typename T1,
typename T2,
std::enable_if_t<(std::is_same_v<float, std::common_type_t<T1, T2>>)>* = nullptr>
__device__ inline auto operator()(T1 const& lhs, T2 const& rhs) -> float
{
return fmodf(static_cast<float>(lhs), static_cast<float>(rhs));
}
template <typename T1,
typename T2,
std::enable_if_t<(std::is_same_v<double, std::common_type_t<T1, T2>>)>* = nullptr>
__device__ inline auto operator()(T1 const& lhs, T2 const& rhs) -> double
{
return fmod(static_cast<double>(lhs), static_cast<double>(rhs));
}
};
struct PMod {
  // Ideally these two specializations (one for integral types, one for floating-point types)
  // should not be required: std::fmod should promote integral arguments to double and call the
  // double overload. Sadly, this does not work in jitified code, where the compiler has trouble
  // choosing between the float and double overloads.
template <typename TypeLhs,
typename TypeRhs,
std::enable_if_t<(std::is_integral_v<std::common_type_t<TypeLhs, TypeRhs>>)>* = nullptr>
__device__ inline auto operator()(TypeLhs x, TypeRhs y)
{
using common_t = std::common_type_t<TypeLhs, TypeRhs>;
common_t xconv = static_cast<common_t>(x);
common_t yconv = static_cast<common_t>(y);
auto rem = xconv % yconv;
if constexpr (std::is_signed_v<decltype(rem)>)
if (rem < 0) rem = (rem + yconv) % yconv;
return rem;
}
template <
typename TypeLhs,
typename TypeRhs,
std::enable_if_t<(std::is_floating_point_v<std::common_type_t<TypeLhs, TypeRhs>>)>* = nullptr>
__device__ inline auto operator()(TypeLhs x, TypeRhs y)
{
using common_t = std::common_type_t<TypeLhs, TypeRhs>;
common_t xconv = static_cast<common_t>(x);
common_t yconv = static_cast<common_t>(y);
auto rem = std::fmod(xconv, yconv);
if (rem < 0) rem = std::fmod(rem + yconv, yconv);
return rem;
}
template <typename TypeLhs,
typename TypeRhs,
std::enable_if_t<cudf::is_fixed_point<TypeLhs>() and
std::is_same_v<TypeLhs, TypeRhs>>* = nullptr>
__device__ inline auto operator()(TypeLhs x, TypeRhs y)
{
auto const remainder = x % y;
return remainder.value() < 0 ? (remainder + y) % y : remainder;
}
};
struct PyMod {
template <typename TypeLhs,
typename TypeRhs,
std::enable_if_t<(std::is_integral_v<std::common_type_t<TypeLhs, TypeRhs>> or
(cudf::is_fixed_point<TypeLhs>() and
std::is_same_v<TypeLhs, TypeRhs>))>* = nullptr>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> decltype(((x % y) + y) % y)
{
return ((x % y) + y) % y;
}
template <
typename TypeLhs,
typename TypeRhs,
std::enable_if_t<(std::is_floating_point_v<std::common_type_t<TypeLhs, TypeRhs>>)>* = nullptr>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> double
{
auto x1 = static_cast<double>(x);
auto y1 = static_cast<double>(y);
return fmod(fmod(x1, y1) + y1, y1);
}
template <typename TypeLhs,
typename TypeRhs,
std::enable_if_t<(is_duration<TypeLhs>())>* = nullptr>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> decltype(((x % y) + y) % y)
{
return ((x % y) + y) % y;
}
};
struct Pow {
template <typename TypeLhs,
typename TypeRhs,
std::enable_if_t<(std::is_convertible_v<TypeLhs, double> and
std::is_convertible_v<TypeRhs, double>)>* = nullptr>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> double
{
return pow(static_cast<double>(x), static_cast<double>(y));
}
};
struct IntPow {
template <
typename TypeLhs,
typename TypeRhs,
std::enable_if_t<(std::is_integral_v<TypeLhs> and std::is_integral_v<TypeRhs>)>* = nullptr>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> TypeLhs
{
if constexpr (std::is_signed_v<TypeRhs>) {
if (y < 0) {
// Integer exponentiation with negative exponent is not possible.
return 0;
}
}
if (y == 0) { return 1; }
if (x == 0) { return 0; }
TypeLhs extra = 1;
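    // Exponentiation by squaring: O(log y) multiplications, accumulating odd factors in `extra`.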
while (y > 1) {
if (y & 1) {
// The exponent is odd, so multiply by one factor of x.
extra *= x;
y -= 1;
}
// The exponent is even, so square x and divide the exponent y by 2.
y /= 2;
x *= x;
}
return x * extra;
}
};
struct LogBase {
template <typename TypeLhs,
typename TypeRhs,
std::enable_if_t<(std::is_convertible_v<TypeLhs, double> and
std::is_convertible_v<TypeRhs, double>)>* = nullptr>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> double
{
return (std::log(static_cast<double>(x)) / std::log(static_cast<double>(y)));
}
};
struct ATan2 {
template <typename TypeLhs,
typename TypeRhs,
std::enable_if_t<(std::is_convertible_v<TypeLhs, double> and
std::is_convertible_v<TypeRhs, double>)>* = nullptr>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> double
{
return std::atan2(static_cast<double>(x), static_cast<double>(y));
}
};
struct ShiftLeft {
template <typename TypeLhs, typename TypeRhs>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> decltype(x << y)
{
return (x << y);
}
};
struct ShiftRight {
template <typename TypeLhs, typename TypeRhs>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> decltype(x >> y)
{
return (x >> y);
}
};
struct ShiftRightUnsigned {
template <
typename TypeLhs,
typename TypeRhs,
std::enable_if_t<(std::is_integral_v<TypeLhs> and not is_boolean<TypeLhs>())>* = nullptr>
__device__ inline auto operator()(TypeLhs x, TypeRhs y)
-> decltype(static_cast<std::make_unsigned_t<TypeLhs>>(x) >> y)
{
return (static_cast<std::make_unsigned_t<TypeLhs>>(x) >> y);
}
};
struct BitwiseAnd {
template <typename TypeLhs, typename TypeRhs>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> decltype(x & y)
{
return (x & y);
}
};
struct BitwiseOr {
template <typename TypeLhs, typename TypeRhs>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> decltype(x | y)
{
return (x | y);
}
};
struct BitwiseXor {
template <typename TypeLhs, typename TypeRhs>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> decltype(x ^ y)
{
return (x ^ y);
}
};
struct LogicalAnd {
template <typename TypeLhs, typename TypeRhs>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> decltype(x && y)
{
return (x && y);
}
};
struct LogicalOr {
template <typename TypeLhs, typename TypeRhs>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> decltype(x || y)
{
return (x || y);
}
};
struct Equal {
template <typename TypeLhs, typename TypeRhs>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> decltype(x == y)
{
return (x == y);
}
};
struct NotEqual {
template <typename TypeLhs, typename TypeRhs>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> decltype(x != y)
{
return (x != y);
}
};
struct Less {
template <typename TypeLhs, typename TypeRhs>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> decltype(x < y)
{
return (x < y);
}
};
struct Greater {
template <typename TypeLhs, typename TypeRhs>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> decltype(x > y)
{
return (x > y);
}
};
struct LessEqual {
template <typename TypeLhs, typename TypeRhs>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> decltype(x <= y)
{
return (x <= y);
}
};
struct GreaterEqual {
template <typename TypeLhs, typename TypeRhs>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> decltype(x >= y)
{
return (x >= y);
}
};
struct NullEquals {
template <typename TypeLhs, typename TypeRhs>
__device__ inline auto operator()(
TypeLhs x, TypeRhs y, bool lhs_valid, bool rhs_valid, bool& output_valid) -> decltype(x == y)
{
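    // Null-aware equality: two nulls compare equal, a null never equals a non-null value,
    // and the result itself is always valid.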
output_valid = true;
if (!lhs_valid && !rhs_valid) return true;
if (lhs_valid && rhs_valid) return x == y;
return false;
}
// To allow std::is_invocable_v = true
template <typename TypeLhs, typename TypeRhs>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> decltype(x == y);
};
struct NullMax {
template <typename TypeLhs,
typename TypeRhs,
typename common_t = std::common_type_t<TypeLhs, TypeRhs>>
__device__ inline auto operator()(
TypeLhs x, TypeRhs y, bool lhs_valid, bool rhs_valid, bool& output_valid)
-> decltype(static_cast<common_t>(static_cast<common_t>(x) > static_cast<common_t>(y) ? x : y))
{
output_valid = true;
auto const x_conv = static_cast<common_t>(x);
auto const y_conv = static_cast<common_t>(y);
if (!lhs_valid && !rhs_valid) {
output_valid = false;
return common_t{};
} else if (lhs_valid && rhs_valid) {
return (x_conv > y_conv) ? x_conv : y_conv;
} else if (lhs_valid)
return x_conv;
else
return y_conv;
}
// To allow std::is_invocable_v = true
template <typename TypeLhs,
typename TypeRhs,
typename common_t = std::common_type_t<TypeLhs, TypeRhs>>
__device__ inline auto operator()(TypeLhs x, TypeRhs y)
-> decltype(static_cast<common_t>(static_cast<common_t>(x) > static_cast<common_t>(y) ? x : y));
};
struct NullMin {
template <typename TypeLhs,
typename TypeRhs,
typename common_t = std::common_type_t<TypeLhs, TypeRhs>>
__device__ inline auto operator()(
TypeLhs x, TypeRhs y, bool lhs_valid, bool rhs_valid, bool& output_valid)
-> decltype(static_cast<common_t>(static_cast<common_t>(x) < static_cast<common_t>(y) ? x : y))
{
output_valid = true;
auto const x_conv = static_cast<common_t>(x);
auto const y_conv = static_cast<common_t>(y);
if (!lhs_valid && !rhs_valid) {
output_valid = false;
return common_t{};
} else if (lhs_valid && rhs_valid) {
return (x_conv < y_conv) ? x_conv : y_conv;
} else if (lhs_valid)
return x_conv;
else
return y_conv;
}
// To allow std::is_invocable_v = true
template <typename TypeLhs,
typename TypeRhs,
typename common_t = std::common_type_t<TypeLhs, TypeRhs>>
__device__ inline auto operator()(TypeLhs x, TypeRhs y)
-> decltype(static_cast<common_t>(static_cast<common_t>(x) < static_cast<common_t>(y) ? x : y));
};
struct NullLogicalAnd {
template <typename TypeLhs, typename TypeRhs>
__device__ inline auto operator()(
TypeLhs x, TypeRhs y, bool lhs_valid, bool rhs_valid, bool& output_valid) -> decltype(x && y)
{
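    // Kleene (null-aware) AND: a valid false operand forces a valid false result;
    // otherwise the result is valid only when both operands are valid.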
bool lhs_false = lhs_valid && !x;
bool rhs_false = rhs_valid && !y;
bool both_valid = lhs_valid && rhs_valid;
output_valid = lhs_false || rhs_false || both_valid;
return both_valid && !lhs_false && !rhs_false;
}
// To allow std::is_invocable_v = true
template <typename TypeLhs, typename TypeRhs>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> decltype(x && y);
};
struct NullLogicalOr {
template <typename TypeLhs, typename TypeRhs>
__device__ inline auto operator()(
TypeLhs x, TypeRhs y, bool lhs_valid, bool rhs_valid, bool& output_valid) -> decltype(x || y)
{
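    // Kleene (null-aware) OR: a valid true operand forces a valid true result;
    // otherwise the result is valid only when both operands are valid.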
bool lhs_true = lhs_valid && x;
bool rhs_true = rhs_valid && y;
bool both_valid = lhs_valid && rhs_valid;
output_valid = lhs_true || rhs_true || both_valid;
return lhs_true || rhs_true;
}
// To allow std::is_invocable_v = true
template <typename TypeLhs, typename TypeRhs>
__device__ inline auto operator()(TypeLhs x, TypeRhs y) -> decltype(x || y);
};
} // namespace ops
} // namespace compiled
} // namespace binops
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/binaryop
|
rapidsai_public_repos/cudf/cpp/src/binaryop/compiled/PyMod.cu
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "binary_ops.cuh"
namespace cudf::binops::compiled {
template void apply_binary_op<ops::PyMod>(mutable_column_view&,
column_view const&,
column_view const&,
bool is_lhs_scalar,
bool is_rhs_scalar,
rmm::cuda_stream_view);
}  // namespace cudf::binops::compiled
| 0 |
rapidsai_public_repos/cudf/cpp/src/binaryop
|
rapidsai_public_repos/cudf/cpp/src/binaryop/compiled/binary_ops.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "binary_ops.hpp"
#include "operation.cuh"
#include "struct_binary_ops.cuh"
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/structs/utilities.hpp>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/strings/detail/strings_children.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
namespace cudf {
namespace binops {
namespace compiled {
namespace {
/**
 * @brief Converts a scalar to a single-element column_view.
 *
 * @return pair of the column_view and a column owning any auxiliary data needed to back the view
 * created from the scalar
*/
struct scalar_as_column_view {
using return_type = typename std::pair<column_view, std::unique_ptr<column>>;
template <typename T, CUDF_ENABLE_IF(is_fixed_width<T>())>
return_type operator()(scalar const& s,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource*)
{
auto& h_scalar_type_view = static_cast<cudf::scalar_type_t<T>&>(const_cast<scalar&>(s));
auto col_v = column_view(s.type(),
1,
h_scalar_type_view.data(),
reinterpret_cast<bitmask_type const*>(s.validity_data()),
!s.is_valid(stream));
return std::pair{col_v, std::unique_ptr<column>(nullptr)};
}
template <typename T, CUDF_ENABLE_IF(!is_fixed_width<T>())>
return_type operator()(scalar const&, rmm::cuda_stream_view, rmm::mr::device_memory_resource*)
{
CUDF_FAIL("Unsupported type");
}
};
// specialization for cudf::string_view
template <>
scalar_as_column_view::return_type scalar_as_column_view::operator()<cudf::string_view>(
scalar const& s, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
{
using T = cudf::string_view;
auto& h_scalar_type_view = static_cast<cudf::scalar_type_t<T>&>(const_cast<scalar&>(s));
// build offsets column from the string size
auto offsets_transformer_itr =
thrust::make_constant_iterator<size_type>(h_scalar_type_view.size());
auto offsets_column = std::get<0>(cudf::detail::make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + 1, stream, mr));
auto chars_column_v = column_view(
data_type{type_id::INT8}, h_scalar_type_view.size(), h_scalar_type_view.data(), nullptr, 0);
// Construct string column_view
auto col_v = column_view(s.type(),
1,
nullptr,
reinterpret_cast<bitmask_type const*>(s.validity_data()),
static_cast<size_type>(!s.is_valid(stream)),
0,
{offsets_column->view(), chars_column_v});
return std::pair{col_v, std::move(offsets_column)};
}
// specializing for struct column
template <>
scalar_as_column_view::return_type scalar_as_column_view::operator()<cudf::struct_view>(
scalar const& s, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
{
auto col = make_column_from_scalar(s, 1, stream, mr);
return std::pair{col->view(), std::move(col)};
}
/**
 * @brief Converts a scalar to a single-element column_view.
*
* @param scal scalar to convert
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory
 * @return pair of the column_view and a column owning any auxiliary data needed to back the view
 * created from the scalar
*/
auto scalar_to_column_view(
scalar const& scal,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
return type_dispatcher(scal.type(), scalar_as_column_view{}, scal, stream, mr);
}
// This functor does the actual comparison between string column value and a scalar string
// or between two string column values using a comparator
template <typename LhsDeviceViewT, typename RhsDeviceViewT, typename OutT, typename CompareFunc>
struct compare_functor {
LhsDeviceViewT const lhs_dev_view_; // Scalar or a column device view - lhs
RhsDeviceViewT const rhs_dev_view_; // Scalar or a column device view - rhs
CompareFunc const cfunc_; // Comparison function
compare_functor(LhsDeviceViewT const& lhs_dev_view,
RhsDeviceViewT const& rhs_dev_view,
CompareFunc cf)
: lhs_dev_view_(lhs_dev_view), rhs_dev_view_(rhs_dev_view), cfunc_(cf)
{
}
  // This is used to compare a column element (lhs) with a scalar (rhs)
template <typename LhsViewT = LhsDeviceViewT, typename RhsViewT = RhsDeviceViewT>
__device__ inline std::enable_if_t<std::is_same_v<LhsViewT, column_device_view> &&
!std::is_same_v<RhsViewT, column_device_view>,
OutT>
operator()(cudf::size_type i) const
{
return cfunc_(lhs_dev_view_.is_valid(i),
rhs_dev_view_.is_valid(),
lhs_dev_view_.is_valid(i) ? lhs_dev_view_.template element<cudf::string_view>(i)
: cudf::string_view{},
rhs_dev_view_.is_valid() ? rhs_dev_view_.value() : cudf::string_view{});
}
  // This is used to compare a scalar (lhs) with a column element (rhs)
template <typename LhsViewT = LhsDeviceViewT, typename RhsViewT = RhsDeviceViewT>
__device__ inline std::enable_if_t<!std::is_same_v<LhsViewT, column_device_view> &&
std::is_same_v<RhsViewT, column_device_view>,
OutT>
operator()(cudf::size_type i) const
{
return cfunc_(lhs_dev_view_.is_valid(),
rhs_dev_view_.is_valid(i),
lhs_dev_view_.is_valid() ? lhs_dev_view_.value() : cudf::string_view{},
rhs_dev_view_.is_valid(i) ? rhs_dev_view_.template element<cudf::string_view>(i)
: cudf::string_view{});
}
  // This is used to compare elements of two columns
template <typename LhsViewT = LhsDeviceViewT, typename RhsViewT = RhsDeviceViewT>
__device__ inline std::enable_if_t<std::is_same_v<LhsViewT, column_device_view> &&
std::is_same_v<RhsViewT, column_device_view>,
OutT>
operator()(cudf::size_type i) const
{
return cfunc_(lhs_dev_view_.is_valid(i),
rhs_dev_view_.is_valid(i),
lhs_dev_view_.is_valid(i) ? lhs_dev_view_.template element<cudf::string_view>(i)
: cudf::string_view{},
rhs_dev_view_.is_valid(i) ? rhs_dev_view_.template element<cudf::string_view>(i)
: cudf::string_view{});
}
};
// This functor performs a null-aware binary operation between two columns, or between a column
// and a scalar, by iterating over the elements on the device
struct null_considering_binop {
[[nodiscard]] auto get_device_view(cudf::scalar const& scalar_item) const
{
return get_scalar_device_view(
static_cast<cudf::scalar_type_t<cudf::string_view>&>(const_cast<scalar&>(scalar_item)));
}
[[nodiscard]] auto get_device_view(column_device_view const& col_item) const { return col_item; }
template <typename LhsViewT, typename RhsViewT, typename OutT, typename CompareFunc>
void populate_out_col(LhsViewT const& lhsv,
RhsViewT const& rhsv,
cudf::size_type col_size,
rmm::cuda_stream_view stream,
CompareFunc cfunc,
OutT* out_col) const
{
// Create binop functor instance
compare_functor<LhsViewT, RhsViewT, OutT, CompareFunc> binop_func{lhsv, rhsv, cfunc};
// Execute it on every element
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(col_size),
out_col,
binop_func);
}
// This is invoked to perform comparison between cudf string types
template <typename LhsT, typename RhsT>
std::unique_ptr<column> operator()(LhsT const& lhs,
RhsT const& rhs,
binary_operator op,
data_type output_type,
cudf::size_type col_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
// Create device views for inputs
auto const lhs_dev_view = get_device_view(lhs);
auto const rhs_dev_view = get_device_view(rhs);
// Validate input
CUDF_EXPECTS(output_type.id() == lhs.type().id(),
"Output column type should match input column type");
// Shallow copy of the resultant strings
rmm::device_uvector<cudf::string_view> out_col_strings(col_size, stream);
  // Sentinel string view used for invalid (null) output rows
cudf::string_view const invalid_str{nullptr, 0};
// Create a compare function lambda
auto minmax_func =
[op, invalid_str] __device__(
bool lhs_valid, bool rhs_valid, cudf::string_view lhs_value, cudf::string_view rhs_value) {
if (!lhs_valid && !rhs_valid)
return invalid_str;
else if (lhs_valid && rhs_valid) {
return (op == binary_operator::NULL_MAX)
? thrust::maximum<cudf::string_view>()(lhs_value, rhs_value)
: thrust::minimum<cudf::string_view>()(lhs_value, rhs_value);
} else if (lhs_valid)
return lhs_value;
else
return rhs_value;
};
// Populate output column
populate_out_col(
lhs_dev_view, rhs_dev_view, col_size, stream, minmax_func, out_col_strings.data());
// Create an output column with the resultant strings
return cudf::make_strings_column(out_col_strings, invalid_str, stream, mr);
}
};
} // namespace
std::unique_ptr<column> string_null_min_max(scalar const& lhs,
column_view const& rhs,
binary_operator op,
data_type output_type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// hard-coded to only work with cudf::string_view so we don't explode compile times
CUDF_EXPECTS(lhs.type().id() == cudf::type_id::STRING, "Invalid/Unsupported lhs datatype");
CUDF_EXPECTS(rhs.type().id() == cudf::type_id::STRING, "Invalid/Unsupported rhs datatype");
CUDF_EXPECTS(op == binary_operator::NULL_MAX or op == binary_operator::NULL_MIN,
"Unsupported binary operation");
if (rhs.is_empty()) return cudf::make_empty_column(output_type);
auto rhs_device_view = cudf::column_device_view::create(rhs, stream);
return null_considering_binop{}(lhs, *rhs_device_view, op, output_type, rhs.size(), stream, mr);
}
std::unique_ptr<column> string_null_min_max(column_view const& lhs,
scalar const& rhs,
binary_operator op,
data_type output_type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// hard-coded to only work with cudf::string_view so we don't explode compile times
CUDF_EXPECTS(lhs.type().id() == cudf::type_id::STRING, "Invalid/Unsupported lhs datatype");
CUDF_EXPECTS(rhs.type().id() == cudf::type_id::STRING, "Invalid/Unsupported rhs datatype");
CUDF_EXPECTS(op == binary_operator::NULL_MAX or op == binary_operator::NULL_MIN,
"Unsupported binary operation");
if (lhs.is_empty()) return cudf::make_empty_column(output_type);
auto lhs_device_view = cudf::column_device_view::create(lhs, stream);
return null_considering_binop{}(*lhs_device_view, rhs, op, output_type, lhs.size(), stream, mr);
}
std::unique_ptr<column> string_null_min_max(column_view const& lhs,
column_view const& rhs,
binary_operator op,
data_type output_type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// hard-coded to only work with cudf::string_view so we don't explode compile times
CUDF_EXPECTS(lhs.type().id() == cudf::type_id::STRING, "Invalid/Unsupported lhs datatype");
CUDF_EXPECTS(rhs.type().id() == cudf::type_id::STRING, "Invalid/Unsupported rhs datatype");
CUDF_EXPECTS(op == binary_operator::NULL_MAX or op == binary_operator::NULL_MIN,
"Unsupported binary operation");
CUDF_EXPECTS(lhs.size() == rhs.size(), "Column sizes do not match");
if (lhs.is_empty()) return cudf::make_empty_column(output_type);
auto lhs_device_view = cudf::column_device_view::create(lhs, stream);
auto rhs_device_view = cudf::column_device_view::create(rhs, stream);
return null_considering_binop{}(
*lhs_device_view, *rhs_device_view, op, output_type, lhs.size(), stream, mr);
}
void operator_dispatcher(mutable_column_view& out,
column_view const& lhs,
column_view const& rhs,
bool is_lhs_scalar,
bool is_rhs_scalar,
binary_operator op,
rmm::cuda_stream_view stream)
{
// clang-format off
switch (op) {
case binary_operator::ADD: apply_binary_op<ops::Add>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::SUB: apply_binary_op<ops::Sub>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::MUL: apply_binary_op<ops::Mul>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::DIV: apply_binary_op<ops::Div>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::TRUE_DIV: apply_binary_op<ops::TrueDiv>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::FLOOR_DIV: apply_binary_op<ops::FloorDiv>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::MOD: apply_binary_op<ops::Mod>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::PYMOD: apply_binary_op<ops::PyMod>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::POW: apply_binary_op<ops::Pow>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::INT_POW: apply_binary_op<ops::IntPow>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::EQUAL:
case binary_operator::NOT_EQUAL:
if(out.type().id() != type_id::BOOL8) CUDF_FAIL("Output type of Comparison operator should be bool type");
dispatch_equality_op(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, op, stream); break;
case binary_operator::LESS: apply_binary_op<ops::Less>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::GREATER: apply_binary_op<ops::Greater>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::LESS_EQUAL: apply_binary_op<ops::LessEqual>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::GREATER_EQUAL: apply_binary_op<ops::GreaterEqual>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::BITWISE_AND: apply_binary_op<ops::BitwiseAnd>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::BITWISE_OR: apply_binary_op<ops::BitwiseOr>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::BITWISE_XOR: apply_binary_op<ops::BitwiseXor>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::LOGICAL_AND: apply_binary_op<ops::LogicalAnd>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::LOGICAL_OR: apply_binary_op<ops::LogicalOr>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
/*
case binary_operator::GENERIC_BINARY: // Cannot be compiled, should be called by jit::binary_operation
*/
case binary_operator::SHIFT_LEFT: apply_binary_op<ops::ShiftLeft>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::SHIFT_RIGHT: apply_binary_op<ops::ShiftRight>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::SHIFT_RIGHT_UNSIGNED: apply_binary_op<ops::ShiftRightUnsigned>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::LOG_BASE: apply_binary_op<ops::LogBase>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::ATAN2: apply_binary_op<ops::ATan2>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::PMOD: apply_binary_op<ops::PMod>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::NULL_EQUALS: apply_binary_op<ops::NullEquals>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::NULL_MAX: apply_binary_op<ops::NullMax>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::NULL_MIN: apply_binary_op<ops::NullMin>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::NULL_LOGICAL_AND: apply_binary_op<ops::NullLogicalAnd>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
case binary_operator::NULL_LOGICAL_OR: apply_binary_op<ops::NullLogicalOr>(out, lhs, rhs, is_lhs_scalar, is_rhs_scalar, stream); break;
default:;
}
// clang-format on
}
// vector_vector
void binary_operation(mutable_column_view& out,
column_view const& lhs,
column_view const& rhs,
binary_operator op,
rmm::cuda_stream_view stream)
{
operator_dispatcher(out, lhs, rhs, false, false, op, stream);
}
// scalar_vector
void binary_operation(mutable_column_view& out,
scalar const& lhs,
column_view const& rhs,
binary_operator op,
rmm::cuda_stream_view stream)
{
auto [lhsv, aux] = scalar_to_column_view(lhs, stream);
operator_dispatcher(out, lhsv, rhs, true, false, op, stream);
}
// vector_scalar
void binary_operation(mutable_column_view& out,
column_view const& lhs,
scalar const& rhs,
binary_operator op,
rmm::cuda_stream_view stream)
{
auto [rhsv, aux] = scalar_to_column_view(rhs, stream);
operator_dispatcher(out, lhs, rhsv, false, true, op, stream);
}
namespace detail {
void apply_sorting_struct_binary_op(mutable_column_view& out,
column_view const& lhs,
column_view const& rhs,
bool is_lhs_scalar,
bool is_rhs_scalar,
binary_operator op,
rmm::cuda_stream_view stream)
{
CUDF_EXPECTS(lhs.type().id() == type_id::STRUCT && rhs.type().id() == type_id::STRUCT,
"Both columns must be struct columns");
CUDF_EXPECTS(!cudf::structs::detail::is_or_has_nested_lists(lhs) and
!cudf::structs::detail::is_or_has_nested_lists(rhs),
"List type is not supported");
// Struct child column type and structure mismatches are caught within the two_table_comparator
switch (op) {
case binary_operator::EQUAL: [[fallthrough]];
case binary_operator::NULL_EQUALS: [[fallthrough]];
case binary_operator::NOT_EQUAL:
detail::apply_struct_equality_op(
out,
lhs,
rhs,
is_lhs_scalar,
is_rhs_scalar,
op,
cudf::experimental::row::equality::nan_equal_physical_equality_comparator{},
stream);
break;
case binary_operator::LESS:
detail::apply_struct_binary_op<ops::Less>(
out,
lhs,
rhs,
is_lhs_scalar,
is_rhs_scalar,
cudf::experimental::row::lexicographic::sorting_physical_element_comparator{},
stream);
break;
case binary_operator::GREATER:
detail::apply_struct_binary_op<ops::Greater>(
out,
lhs,
rhs,
is_lhs_scalar,
is_rhs_scalar,
cudf::experimental::row::lexicographic::sorting_physical_element_comparator{},
stream);
break;
case binary_operator::LESS_EQUAL:
detail::apply_struct_binary_op<ops::LessEqual>(
out,
lhs,
rhs,
is_lhs_scalar,
is_rhs_scalar,
cudf::experimental::row::lexicographic::sorting_physical_element_comparator{},
stream);
break;
case binary_operator::GREATER_EQUAL:
detail::apply_struct_binary_op<ops::GreaterEqual>(
out,
lhs,
rhs,
is_lhs_scalar,
is_rhs_scalar,
cudf::experimental::row::lexicographic::sorting_physical_element_comparator{},
stream);
break;
default: CUDF_FAIL("Unsupported operator for structs");
}
}
} // namespace detail
} // namespace compiled
} // namespace binops
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/binaryop
|
rapidsai_public_repos/cudf/cpp/src/binaryop/compiled/ShiftLeft.cu
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "binary_ops.cuh"
namespace cudf::binops::compiled {
template void apply_binary_op<ops::ShiftLeft>(mutable_column_view&,
column_view const&,
column_view const&,
bool is_lhs_scalar,
bool is_rhs_scalar,
rmm::cuda_stream_view);
}
| 0 |
rapidsai_public_repos/cudf/cpp/src/binaryop
|
rapidsai_public_repos/cudf/cpp/src/binaryop/compiled/NullLogicalOr.cu
|
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "binary_ops.cuh"
namespace cudf::binops::compiled {
template void apply_binary_op<ops::NullLogicalOr>(mutable_column_view&,
column_view const&,
column_view const&,
bool is_lhs_scalar,
bool is_rhs_scalar,
rmm::cuda_stream_view);
} // namespace cudf::binops::compiled
| 0 |
rapidsai_public_repos/cudf/cpp/src/binaryop
|
rapidsai_public_repos/cudf/cpp/src/binaryop/compiled/equality_ops.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "binary_ops.cuh"
namespace cudf::binops::compiled {
void dispatch_equality_op(mutable_column_view& out,
column_view const& lhs,
column_view const& rhs,
bool is_lhs_scalar,
bool is_rhs_scalar,
binary_operator op,
rmm::cuda_stream_view stream)
{
CUDF_EXPECTS(op == binary_operator::EQUAL || op == binary_operator::NOT_EQUAL,
"Unsupported operator for these types",
cudf::data_type_error);
auto common_dtype = get_common_type(out.type(), lhs.type(), rhs.type());
auto outd = mutable_column_device_view::create(out, stream);
auto lhsd = column_device_view::create(lhs, stream);
auto rhsd = column_device_view::create(rhs, stream);
if (common_dtype) {
if (op == binary_operator::EQUAL) {
for_each(stream,
out.size(),
binary_op_device_dispatcher<ops::Equal>{
*common_dtype, *outd, *lhsd, *rhsd, is_lhs_scalar, is_rhs_scalar});
} else if (op == binary_operator::NOT_EQUAL) {
for_each(stream,
out.size(),
binary_op_device_dispatcher<ops::NotEqual>{
*common_dtype, *outd, *lhsd, *rhsd, is_lhs_scalar, is_rhs_scalar});
}
} else {
if (op == binary_operator::EQUAL) {
for_each(stream,
out.size(),
binary_op_double_device_dispatcher<ops::Equal>{
*outd, *lhsd, *rhsd, is_lhs_scalar, is_rhs_scalar});
} else if (op == binary_operator::NOT_EQUAL) {
for_each(stream,
out.size(),
binary_op_double_device_dispatcher<ops::NotEqual>{
*outd, *lhsd, *rhsd, is_lhs_scalar, is_rhs_scalar});
}
}
}
} // namespace cudf::binops::compiled
| 0 |
rapidsai_public_repos/cudf/cpp/src/binaryop
|
rapidsai_public_repos/cudf/cpp/src/binaryop/compiled/Greater.cu
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "binary_ops.cuh"
namespace cudf::binops::compiled {
template void apply_binary_op<ops::Greater>(mutable_column_view&,
column_view const&,
column_view const&,
bool is_lhs_scalar,
bool is_rhs_scalar,
rmm::cuda_stream_view);
}
| 0 |
rapidsai_public_repos/cudf/cpp/src/binaryop
|
rapidsai_public_repos/cudf/cpp/src/binaryop/compiled/Mod.cu
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "binary_ops.cuh"
namespace cudf::binops::compiled {
template void apply_binary_op<ops::Mod>(mutable_column_view&,
column_view const&,
column_view const&,
bool is_lhs_scalar,
bool is_rhs_scalar,
rmm::cuda_stream_view);
}
| 0 |
rapidsai_public_repos/cudf/cpp/src/binaryop
|
rapidsai_public_repos/cudf/cpp/src/binaryop/compiled/PMod.cu
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "binary_ops.cuh"
namespace cudf::binops::compiled {
template void apply_binary_op<ops::PMod>(mutable_column_view&,
column_view const&,
column_view const&,
bool is_lhs_scalar,
bool is_rhs_scalar,
rmm::cuda_stream_view);
}
| 0 |
rapidsai_public_repos/cudf/cpp/src/binaryop
|
rapidsai_public_repos/cudf/cpp/src/binaryop/compiled/TrueDiv.cu
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "binary_ops.cuh"
namespace cudf::binops::compiled {
template void apply_binary_op<ops::TrueDiv>(mutable_column_view&,
column_view const&,
column_view const&,
bool is_lhs_scalar,
bool is_rhs_scalar,
rmm::cuda_stream_view);
}
| 0 |
rapidsai_public_repos/cudf/cpp/src/binaryop
|
rapidsai_public_repos/cudf/cpp/src/binaryop/compiled/struct_binary_ops.cuh
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "binary_ops.hpp"
#include "operation.cuh"
#include <cudf/binaryop.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/table/experimental/row_operators.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/tabulate.h>
namespace cudf::binops::compiled::detail {
template <class T, class... Ts>
inline constexpr bool is_any_v = std::disjunction<std::is_same<T, Ts>...>::value;
template <typename OptionalIterator, typename DeviceComparator>
struct device_comparison_functor {
// Explicit constructor definition required to avoid a "no instance of constructor" compilation
// error
device_comparison_functor(OptionalIterator const optional_iter,
bool const is_lhs_scalar,
bool const is_rhs_scalar,
DeviceComparator const& comparator)
: _optional_iter(optional_iter),
_is_lhs_scalar(is_lhs_scalar),
_is_rhs_scalar(is_rhs_scalar),
_comparator(comparator)
{
}
bool __device__ operator()(size_type i)
{
return _optional_iter[i].has_value() &&
_comparator(cudf::experimental::row::lhs_index_type{_is_lhs_scalar ? 0 : i},
cudf::experimental::row::rhs_index_type{_is_rhs_scalar ? 0 : i});
}
OptionalIterator const _optional_iter;
bool const _is_lhs_scalar;
bool const _is_rhs_scalar;
DeviceComparator const _comparator;
};
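// Row-wise ordering comparison of two struct columns (either side may be a broadcast
// scalar row). A lexicographic two-table comparator performs the comparison;
// Greater/GreaterEqual are realized by setting the compare order to DESCENDING so only
// "less" / "less-equivalent" device comparators are needed. Rows that are null in `out`
// evaluate to false through the optional iterator.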
template <class BinaryOperator,
typename PhysicalElementComparator =
cudf::experimental::row::lexicographic::sorting_physical_element_comparator>
void apply_struct_binary_op(mutable_column_view& out,
column_view const& lhs,
column_view const& rhs,
bool is_lhs_scalar,
bool is_rhs_scalar,
PhysicalElementComparator comparator,
rmm::cuda_stream_view stream)
{
auto const compare_orders = std::vector<order>(
lhs.size(),
is_any_v<BinaryOperator, ops::Greater, ops::GreaterEqual> ? order::DESCENDING
: order::ASCENDING);
auto const tlhs = table_view{{lhs}};
auto const trhs = table_view{{rhs}};
auto const table_comparator = cudf::experimental::row::lexicographic::two_table_comparator{
tlhs, trhs, compare_orders, {}, stream};
auto outd = column_device_view::create(out, stream);
auto optional_iter =
cudf::detail::make_optional_iterator<bool>(*outd, nullate::DYNAMIC{out.has_nulls()});
auto const comparator_nulls = nullate::DYNAMIC{has_nested_nulls(tlhs) || has_nested_nulls(trhs)};
auto tabulate_device_operator = [&](auto device_comparator) {
thrust::tabulate(
rmm::exec_policy(stream),
out.begin<bool>(),
out.end<bool>(),
device_comparison_functor{optional_iter, is_lhs_scalar, is_rhs_scalar, device_comparator});
};
if (cudf::detail::has_nested_columns(tlhs) || cudf::detail::has_nested_columns(trhs)) {
is_any_v<BinaryOperator, ops::LessEqual, ops::GreaterEqual>
? tabulate_device_operator(
table_comparator.less_equivalent<true>(comparator_nulls, comparator))
: tabulate_device_operator(table_comparator.less<true>(comparator_nulls, comparator));
} else {
is_any_v<BinaryOperator, ops::LessEqual, ops::GreaterEqual>
? tabulate_device_operator(
table_comparator.less_equivalent<false>(comparator_nulls, comparator))
: tabulate_device_operator(table_comparator.less<false>(comparator_nulls, comparator));
}
}
template <typename OptionalIteratorType, typename DeviceComparatorType>
struct struct_equality_functor {
struct_equality_functor(OptionalIteratorType optional_iter,
DeviceComparatorType device_comparator,
bool is_lhs_scalar,
bool is_rhs_scalar,
bool preserve_output)
: _optional_iter(optional_iter),
_device_comparator(device_comparator),
_is_lhs_scalar(is_lhs_scalar),
_is_rhs_scalar(is_rhs_scalar),
_preserve_output(preserve_output)
{
}
auto __device__ operator()(size_type i) const noexcept
{
auto const lhs = cudf::experimental::row::lhs_index_type{_is_lhs_scalar ? 0 : i};
auto const rhs = cudf::experimental::row::rhs_index_type{_is_rhs_scalar ? 0 : i};
return _optional_iter[i].has_value() and (_device_comparator(lhs, rhs) == _preserve_output);
}
private:
OptionalIteratorType _optional_iter;
DeviceComparatorType _device_comparator;
bool _is_lhs_scalar;
bool _is_rhs_scalar;
bool _preserve_output;
};
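// Row-wise (in)equality of two struct columns. One functor serves EQUAL, NULL_EQUALS and
// NOT_EQUAL: `preserve_output` is true for the equality flavors and false for NOT_EQUAL,
// so the device comparator's result is simply compared against it.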
template <typename PhysicalEqualityComparator =
cudf::experimental::row::equality::physical_equality_comparator>
void apply_struct_equality_op(mutable_column_view& out,
column_view const& lhs,
column_view const& rhs,
bool is_lhs_scalar,
bool is_rhs_scalar,
binary_operator op,
PhysicalEqualityComparator comparator,
rmm::cuda_stream_view stream)
{
CUDF_EXPECTS(op == binary_operator::EQUAL || op == binary_operator::NOT_EQUAL ||
op == binary_operator::NULL_EQUALS,
"Unsupported operator for these types",
cudf::data_type_error);
auto tlhs = table_view{{lhs}};
auto trhs = table_view{{rhs}};
auto table_comparator =
cudf::experimental::row::equality::two_table_comparator{tlhs, trhs, stream};
auto outd = column_device_view::create(out, stream);
auto optional_iter =
cudf::detail::make_optional_iterator<bool>(*outd, nullate::DYNAMIC{out.has_nulls()});
auto const comparator_helper = [&](auto const device_comparator) {
thrust::tabulate(rmm::exec_policy(stream),
out.begin<bool>(),
out.end<bool>(),
struct_equality_functor<decltype(optional_iter), decltype(device_comparator)>(
optional_iter,
device_comparator,
is_lhs_scalar,
is_rhs_scalar,
op != binary_operator::NOT_EQUAL));
};
if (cudf::detail::has_nested_columns(tlhs) or cudf::detail::has_nested_columns(trhs)) {
auto device_comparator = table_comparator.equal_to<true>(
nullate::DYNAMIC{has_nested_nulls(tlhs) || has_nested_nulls(trhs)},
null_equality::EQUAL,
comparator);
comparator_helper(device_comparator);
} else {
auto device_comparator = table_comparator.equal_to<false>(
nullate::DYNAMIC{has_nested_nulls(tlhs) || has_nested_nulls(trhs)},
null_equality::EQUAL,
comparator);
comparator_helper(device_comparator);
}
}
} // namespace cudf::binops::compiled::detail
| 0 |
rapidsai_public_repos/cudf/cpp/src/binaryop
|
rapidsai_public_repos/cudf/cpp/src/binaryop/compiled/Add.cu
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "binary_ops.cuh"
namespace cudf::binops::compiled {
template void apply_binary_op<ops::Add>(mutable_column_view&,
column_view const&,
column_view const&,
bool is_lhs_scalar,
bool is_rhs_scalar,
rmm::cuda_stream_view);
}
| 0 |
rapidsai_public_repos/cudf/cpp/src/binaryop
|
rapidsai_public_repos/cudf/cpp/src/binaryop/compiled/NullLogicalAnd.cu
|
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "binary_ops.cuh"
namespace cudf::binops::compiled {
template void apply_binary_op<ops::NullLogicalAnd>(mutable_column_view&,
column_view const&,
column_view const&,
bool is_lhs_scalar,
bool is_rhs_scalar,
rmm::cuda_stream_view);
} // namespace cudf::binops::compiled
| 0 |
rapidsai_public_repos/cudf/cpp/src/binaryop
|
rapidsai_public_repos/cudf/cpp/src/binaryop/jit/kernel.cu
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Copyright 2018-2019 BlazingDB, Inc.
* Copyright 2018 Christian Noboa Mardini <[email protected]>
* Copyright 2018 Rommel Quintanilla <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/wrappers/durations.hpp>
#include <cudf/wrappers/timestamps.hpp>
#include <binaryop/jit/operation-udf.hpp>
#include <cuda/std/type_traits>
namespace cudf {
namespace binops {
namespace jit {
struct UserDefinedOp {
template <typename TypeOut, typename TypeLhs, typename TypeRhs>
static TypeOut operate(TypeLhs x, TypeRhs y)
{
TypeOut output;
using TypeCommon = typename cuda::std::common_type<TypeOut, TypeLhs, TypeRhs>::type;
GENERIC_BINARY_OP(&output, static_cast<TypeCommon>(x), static_cast<TypeCommon>(y));
return output;
}
};
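// Grid-stride element-wise kernel: each thread starts at its global index and advances by
// the total number of threads in the grid, so any launch configuration covers all `size`
// elements.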
template <typename TypeOut, typename TypeLhs, typename TypeRhs, typename TypeOpe>
__global__ void kernel_v_v(cudf::size_type size,
TypeOut* out_data,
TypeLhs* lhs_data,
TypeRhs* rhs_data)
{
int tid = threadIdx.x;
int blkid = blockIdx.x;
int blksz = blockDim.x;
int gridsz = gridDim.x;
int start = tid + blkid * blksz;
int step = blksz * gridsz;
for (cudf::size_type i = start; i < size; i += step) {
out_data[i] = TypeOpe::template operate<TypeOut, TypeLhs, TypeRhs>(lhs_data[i], rhs_data[i]);
}
}
template <typename TypeOut, typename TypeLhs, typename TypeRhs, typename TypeOpe>
__global__ void kernel_v_v_with_validity(cudf::size_type size,
TypeOut* out_data,
TypeLhs* lhs_data,
TypeRhs* rhs_data,
cudf::bitmask_type* output_mask,
cudf::bitmask_type const* lhs_mask,
cudf::size_type lhs_offset,
cudf::bitmask_type const* rhs_mask,
cudf::size_type rhs_offset)
{
int tid = threadIdx.x;
int blkid = blockIdx.x;
int blksz = blockDim.x;
int gridsz = gridDim.x;
int start = tid + blkid * blksz;
int step = blksz * gridsz;
for (cudf::size_type i = start; i < size; i += step) {
bool output_valid = false;
out_data[i] = TypeOpe::template operate<TypeOut, TypeLhs, TypeRhs>(
lhs_data[i],
rhs_data[i],
lhs_mask ? cudf::bit_is_set(lhs_mask, lhs_offset + i) : true,
rhs_mask ? cudf::bit_is_set(rhs_mask, rhs_offset + i) : true,
output_valid);
if (output_mask && !output_valid) cudf::clear_bit(output_mask, i);
}
}
} // namespace jit
} // namespace binops
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/binaryop
|
rapidsai_public_repos/cudf/cpp/src/binaryop/jit/operation-udf.hpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
// This file serves as a placeholder for user defined functions, so jitify can choose to override it
// at runtime.
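// For illustration only (the real definition is generated from the user's CUDA/PTX source
// and injected when the kernel is JIT-compiled at runtime): kernel.cu invokes the UDF as
// GENERIC_BINARY_OP(&output, x, y), so a hypothetical injected definition could look like
//
//   __device__ inline void GENERIC_BINARY_OP(double* out, double x, double y)
//   {
//     *out = x * y + 1.0;
//   }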
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/dictionary/encode.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/transform.hpp>
#include <cudf/detail/unary.hpp>
#include <cudf/dictionary/detail/encode.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <cudf/dictionary/encode.hpp>
#include <cudf/stream_compaction.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace dictionary {
namespace detail {
/**
* @copydoc cudf::dictionary::encode
*
* @param stream CUDA stream used for device memory operations and kernel launches.
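*
* A minimal usage sketch (illustrative only; the data shown is hypothetical):
* @code
* // Encoding the INT32 column [10, 20, 10, null, 30] yields keys [10, 20, 30] and
* // indices [0, 1, 0, x, 2], where x is masked out by the parent null mask.
* auto dict = cudf::dictionary::detail::encode(
*   input, cudf::data_type{cudf::type_id::UINT32}, stream, mr);
* @endcode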
*/
std::unique_ptr<column> encode(column_view const& input_column,
data_type indices_type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(is_unsigned(indices_type), "indices must be type unsigned integer");
CUDF_EXPECTS(input_column.type().id() != type_id::DICTIONARY32,
"cannot encode a dictionary from a dictionary");
auto codified = cudf::detail::encode(cudf::table_view({input_column}), stream, mr);
auto keys_table = std::move(codified.first);
auto indices_column = std::move(codified.second);
auto keys_column = std::move(keys_table->release().front());
if (keys_column->has_nulls()) {
keys_column = std::make_unique<column>(
cudf::detail::slice(
keys_column->view(), std::vector<size_type>{0, keys_column->size() - 1}, stream)
.front(),
stream,
mr);
keys_column->set_null_mask(rmm::device_buffer{0, stream, mr}, 0); // remove the null-mask
}
// the encode() returns INT32 for indices
if (indices_column->type().id() != indices_type.id())
indices_column = cudf::detail::cast(indices_column->view(), indices_type, stream, mr);
// create column with keys_column and indices_column
return make_dictionary_column(std::move(keys_column),
std::move(indices_column),
cudf::detail::copy_bitmask(input_column, stream, mr),
input_column.null_count());
}
/**
* @copydoc cudf::dictionary::detail::get_indices_type_for_size
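*
* For example, 200 keys fit in UINT8 (max 255), 60,000 keys need UINT16, and anything
* larger uses UINT32.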
*/
data_type get_indices_type_for_size(size_type keys_size)
{
if (keys_size <= std::numeric_limits<uint8_t>::max()) return data_type{type_id::UINT8};
if (keys_size <= std::numeric_limits<uint16_t>::max()) return data_type{type_id::UINT16};
return data_type{type_id::UINT32};
}
} // namespace detail
// external API
std::unique_ptr<column> encode(column_view const& input_column,
data_type indices_type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::encode(input_column, indices_type, stream, mr);
}
} // namespace dictionary
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/dictionary/replace.cu
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/copy.hpp>
#include <cudf/detail/copy_if_else.cuh>
#include <cudf/detail/indexalator.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/unary.hpp>
#include <cudf/dictionary/detail/encode.hpp>
#include <cudf/dictionary/detail/replace.hpp>
#include <cudf/dictionary/detail/search.hpp>
#include <cudf/dictionary/detail/update_keys.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace dictionary {
namespace detail {
namespace {
/**
* @brief This utility uses `copy_if_else` to replace null entries using the input bitmask as a
* predicate.
*
* The predicate identifies which column row to copy from and the bitmask specifies which rows
* are null. Since `copy_if_else` accepts iterators, we also supply it with pair-iterators
* created from indexalators and the validity masks.
*
* @tparam ReplacementIter must be a pair iterator of (index,valid).
*
* @param input lhs for `copy_if_else`
* @param replacement_iter rhs for `copy_if_else`
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory.
* @return Always returns column of type INT32 (size_type)
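*
* For illustration (hypothetical values): with input indices [2, null, 0] and a
* replacement iterator that yields a valid 7 at every position, the result is [2, 7, 0].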
*/
template <typename ReplacementIter>
std::unique_ptr<column> replace_indices(column_view const& input,
ReplacementIter replacement_iter,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const input_view = column_device_view::create(input, stream);
auto const d_input = *input_view;
auto predicate = [d_input] __device__(auto i) { return d_input.is_valid(i); };
auto input_iterator = cudf::detail::indexalator_factory::make_input_optional_iterator(input);
return cudf::detail::copy_if_else(true,
input_iterator,
input_iterator + input.size(),
replacement_iter,
predicate,
data_type{type_to_id<size_type>()},
stream,
mr);
}
} // namespace
/**
* @copydoc cudf::dictionary::detail::replace_nulls(cudf::column_view const&,cudf::column_view
* const&, rmm::cuda_stream_view, rmm::mr::device_memory_resource*)
*/
std::unique_ptr<column> replace_nulls(dictionary_column_view const& input,
dictionary_column_view const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) { return cudf::empty_like(input.parent()); }
if (!input.has_nulls()) { return std::make_unique<cudf::column>(input.parent(), stream, mr); }
CUDF_EXPECTS(input.keys().type() == replacement.keys().type(), "keys must match");
CUDF_EXPECTS(replacement.size() == input.size(), "column sizes must match");
// first combine the keys so both input dictionaries have the same set
auto matched =
match_dictionaries(std::vector<dictionary_column_view>({input, replacement}), stream, mr);
// now build the new indices by doing replace-null using the updated input indices
auto const input_indices =
dictionary_column_view(matched.front()->view()).get_indices_annotated();
auto const repl_indices = dictionary_column_view(matched.back()->view()).get_indices_annotated();
auto new_indices =
replace_indices(input_indices,
cudf::detail::indexalator_factory::make_input_optional_iterator(repl_indices),
stream,
mr);
return make_dictionary_column(
std::move(matched.front()->release().children.back()), std::move(new_indices), stream, mr);
}
/**
* @copydoc cudf::dictionary::detail::replace_nulls(cudf::column_view const&,cudf::scalar
* const&, rmm::cuda_stream_view, rmm::mr::device_memory_resource*)
*/
std::unique_ptr<column> replace_nulls(dictionary_column_view const& input,
scalar const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) { return cudf::empty_like(input.parent()); }
if (!input.has_nulls() || !replacement.is_valid(stream)) {
return std::make_unique<cudf::column>(input.parent(), stream, mr);
}
CUDF_EXPECTS(input.keys().type() == replacement.type(), "keys must match scalar type");
// first add the replacement to the keys so only the indices need to be processed
auto input_matched = dictionary::detail::add_keys(
input, make_column_from_scalar(replacement, 1, stream)->view(), stream, mr);
auto const input_view = dictionary_column_view(input_matched->view());
auto const scalar_index =
get_index(input_view, replacement, stream, rmm::mr::get_current_device_resource());
// now build the new indices by doing replace-null on the updated indices
auto const input_indices = input_view.get_indices_annotated();
auto new_indices =
replace_indices(input_indices,
cudf::detail::indexalator_factory::make_input_optional_iterator(*scalar_index),
stream,
mr);
new_indices->set_null_mask(rmm::device_buffer{0, stream, mr}, 0);
return make_dictionary_column(
std::move(input_matched->release().children.back()), std::move(new_indices), stream, mr);
}
} // namespace detail
} // namespace dictionary
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/dictionary/dictionary_column_view.cpp
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/utilities/error.hpp>
namespace cudf {
//
dictionary_column_view::dictionary_column_view(column_view const& dictionary_column)
: column_view(dictionary_column)
{
CUDF_EXPECTS(type().id() == type_id::DICTIONARY32,
"dictionary_column_view only supports DICTIONARY type");
if (size() > 0) CUDF_EXPECTS(num_children() == 2, "dictionary column must have exactly 2 children");
}
column_view dictionary_column_view::parent() const noexcept
{
return static_cast<column_view>(*this);
}
column_view dictionary_column_view::indices() const noexcept { return child(0); }
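// The "annotated" indices view re-applies the parent dictionary's offset, null mask and
// null count to the bare indices child so callers observe row validity through it.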
column_view dictionary_column_view::get_indices_annotated() const noexcept
{
return column_view(
indices().type(), size(), indices().head(), null_mask(), null_count(), offset());
}
column_view dictionary_column_view::keys() const noexcept { return child(1); }
size_type dictionary_column_view::keys_size() const noexcept
{
return (size() == 0) ? 0 : keys().size();
}
data_type dictionary_column_view::keys_type() const noexcept
{
return (size() == 0) ? data_type{type_id::EMPTY} : keys().type();
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/dictionary/dictionary_factories.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/unary.hpp>
#include <cudf/dictionary/detail/encode.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace {
struct dispatch_create_indices {
template <typename IndexType, std::enable_if_t<is_index_type<IndexType>()>* = nullptr>
std::unique_ptr<column> operator()(column_view const& indices,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(std::is_unsigned<IndexType>(), "indices must be an unsigned type");
column_view indices_view{
indices.type(), indices.size(), indices.data<IndexType>(), nullptr, 0, indices.offset()};
return std::make_unique<column>(indices_view, stream, mr);
}
template <typename IndexType, std::enable_if_t<!is_index_type<IndexType>()>* = nullptr>
std::unique_ptr<column> operator()(column_view const&,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("indices must be an integer type.");
}
};
} // namespace
std::unique_ptr<column> make_dictionary_column(column_view const& keys_column,
column_view const& indices_column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(!keys_column.has_nulls(), "keys column must not have nulls");
if (keys_column.is_empty()) return make_empty_column(type_id::DICTIONARY32);
auto keys_copy = std::make_unique<column>(keys_column, stream, mr);
auto indices_copy =
type_dispatcher(indices_column.type(), dispatch_create_indices{}, indices_column, stream, mr);
rmm::device_buffer null_mask{0, stream, mr};
auto null_count = indices_column.null_count();
if (null_count) null_mask = detail::copy_bitmask(indices_column, stream, mr);
std::vector<std::unique_ptr<column>> children;
children.emplace_back(std::move(indices_copy));
children.emplace_back(std::move(keys_copy));
return std::make_unique<column>(data_type{type_id::DICTIONARY32},
indices_column.size(),
rmm::device_buffer{0, stream, mr},
std::move(null_mask),
null_count,
std::move(children));
}
std::unique_ptr<column> make_dictionary_column(std::unique_ptr<column> keys_column,
std::unique_ptr<column> indices_column,
rmm::device_buffer&& null_mask,
size_type null_count)
{
CUDF_EXPECTS(!keys_column->has_nulls(), "keys column must not have nulls");
CUDF_EXPECTS(!indices_column->has_nulls(), "indices column must not have nulls");
CUDF_EXPECTS(is_unsigned(indices_column->type()), "indices must be type unsigned integer");
auto count = indices_column->size();
std::vector<std::unique_ptr<column>> children;
children.emplace_back(std::move(indices_column));
children.emplace_back(std::move(keys_column));
return std::make_unique<column>(data_type{type_id::DICTIONARY32},
count,
rmm::device_buffer{},
std::move(null_mask),
null_count,
std::move(children));
}
namespace {
/**
* @brief This functor maps signed type_ids to unsigned counterparts.
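*
* For example INT16 maps to UINT16; non-index types are returned unchanged.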
*/
struct make_unsigned_fn {
template <typename T, std::enable_if_t<is_index_type<T>()>* = nullptr>
constexpr cudf::type_id operator()()
{
return cudf::type_to_id<std::make_unsigned_t<T>>();
}
template <typename T, std::enable_if_t<not is_index_type<T>()>* = nullptr>
constexpr cudf::type_id operator()()
{
return cudf::type_to_id<T>();
}
};
} // namespace
std::unique_ptr<column> make_dictionary_column(std::unique_ptr<column> keys,
std::unique_ptr<column> indices,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(!keys->has_nulls(), "keys column must not have nulls");
// signed integer data can be used directly in the unsigned indices column
auto const indices_type = cudf::type_dispatcher(indices->type(), make_unsigned_fn{});
auto const indices_size = indices->size(); // these need to be saved
auto const null_count = indices->null_count(); // before calling release()
auto contents = indices->release();
// compute the indices type using the size of the key set
auto const new_type = dictionary::detail::get_indices_type_for_size(keys->size());
// create the dictionary indices: convert to unsigned and remove nulls
auto indices_column = [&] {
// If the types match, then just commandeer the column's data buffer.
if (new_type.id() == indices_type) {
return std::make_unique<column>(
new_type, indices_size, std::move(*(contents.data.release())), rmm::device_buffer{}, 0);
}
// If the new type does not match, then convert the data.
cudf::column_view cast_view{
cudf::data_type{indices_type}, indices_size, contents.data->data(), nullptr, 0};
return cudf::detail::cast(cast_view, new_type, stream, mr);
}();
return make_dictionary_column(std::move(keys),
std::move(indices_column),
std::move(*(contents.null_mask.release())),
null_count);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/dictionary/remove_keys.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/copy_if.cuh>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/indexalator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/search.hpp>
#include <cudf/detail/valid_if.cuh>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <cudf/dictionary/update_keys.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/fill.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/scatter.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
namespace cudf {
namespace dictionary {
namespace detail {
namespace {
/**
* @brief Return a new dictionary by removing identified keys from the provided dictionary.
*
* This is a common utility for `remove_keys` and `remove_unused_keys` detail functions.
* It will create a new dictionary with the remaining keys and create new indices values
* to go with these new keys.
*
* @tparam KeysKeeper Function bool(size_type) that takes keys position index
* and returns true if that key is to be used in the output dictionary.
* @param dictionary_column The column to use for creating the new dictionary.
* @param keys_to_keep_fn Called to determine which keys in `dictionary_column` to keep.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory.
*/
template <typename KeysKeeper>
std::unique_ptr<column> remove_keys_fn(dictionary_column_view const& dictionary_column,
KeysKeeper keys_to_keep_fn,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const keys_view = dictionary_column.keys();
auto const indices_type = dictionary_column.indices().type();
auto const max_size = dictionary_column.size();
// create/init indices map array
auto map_indices =
make_fixed_width_column(indices_type, keys_view.size(), mask_state::UNALLOCATED, stream);
auto map_itr =
cudf::detail::indexalator_factory::make_output_iterator(map_indices->mutable_view());
// init to max to identify new nulls
thrust::fill(rmm::exec_policy(stream),
map_itr,
map_itr + keys_view.size(),
max_size); // all valid indices are less than this value
// build keys column and indices map
std::unique_ptr<column> keys_column = [&] {
// create keys positions column to identify original key positions after removing the keys
auto keys_positions = [&] {
auto positions = make_fixed_width_column(
indices_type, keys_view.size(), cudf::mask_state::UNALLOCATED, stream);
auto itr = cudf::detail::indexalator_factory::make_output_iterator(positions->mutable_view());
thrust::sequence(rmm::exec_policy(stream), itr, itr + keys_view.size());
return positions;
}();
// copy the non-removed keys ( keys_to_keep_fn(idx)==true )
auto table_keys =
cudf::detail::copy_if(
table_view{{keys_view, keys_positions->view()}}, keys_to_keep_fn, stream, mr)
->release();
auto const filtered_view = table_keys[1]->view();
auto filtered_itr = cudf::detail::indexalator_factory::make_input_iterator(filtered_view);
auto positions_itr =
cudf::detail::indexalator_factory::make_input_iterator(keys_positions->view());
// build indices mapper
// Example scatter([0,1,2][0,2,4][max,max,max,max,max]) => [0,max,1,max,2]
thrust::scatter(rmm::exec_policy(stream),
positions_itr,
positions_itr + filtered_view.size(),
filtered_itr,
map_itr);
return std::move(table_keys.front());
}();
// create non-nullable indices view with offset applied -- this is used as a gather map
column_view indices_view(dictionary_column.indices().type(),
dictionary_column.size(),
dictionary_column.indices().head(),
nullptr,
0,
dictionary_column.offset());
// create new indices column
// Example: gather([0,max,1,max,2],[4,0,3,1,2,2,2,4,0]) => [2,0,max,max,1,1,1,2,0]
auto table_indices = cudf::detail::gather(table_view{{map_indices->view()}},
indices_view,
cudf::out_of_bounds_policy::NULLIFY,
cudf::detail::negative_index_policy::NOT_ALLOWED,
stream,
mr)
->release();
std::unique_ptr<column> indices_column(std::move(table_indices.front()));
indices_column->set_null_mask(rmm::device_buffer{}, 0);
// compute new nulls -- merge the existing nulls with the newly created ones (value == max_size)
auto const offset = dictionary_column.offset();
auto d_null_mask = dictionary_column.null_mask();
auto indices_itr = cudf::detail::indexalator_factory::make_input_iterator(indices_column->view());
auto new_nulls = cudf::detail::valid_if(
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(dictionary_column.size()),
[offset, d_null_mask, indices_itr, max_size] __device__(size_type idx) {
if (d_null_mask && !bit_is_set(d_null_mask, idx + offset)) return false;
return (indices_itr[idx] < max_size); // new nulls have max values
},
stream,
mr);
rmm::device_buffer new_null_mask =
(new_nulls.second > 0) ? std::move(new_nulls.first) : rmm::device_buffer{0, stream, mr};
// create column with keys_column and indices_column
return make_dictionary_column(
std::move(keys_column), std::move(indices_column), std::move(new_null_mask), new_nulls.second);
}
} // namespace
std::unique_ptr<column> remove_keys(dictionary_column_view const& dictionary_column,
column_view const& keys_to_remove,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(!keys_to_remove.has_nulls(), "keys_to_remove must not have nulls");
auto const keys_view = dictionary_column.keys();
CUDF_EXPECTS(keys_view.type() == keys_to_remove.type(), "keys types must match");
// locate keys to remove by searching the keys column
auto const matches = cudf::detail::contains(keys_to_remove, keys_view, stream, mr);
auto d_matches = matches->view().data<bool>();
// call common utility method to keep the keys not matched to keys_to_remove
auto key_matcher = [d_matches] __device__(size_type idx) { return !d_matches[idx]; };
return remove_keys_fn(dictionary_column, key_matcher, stream, mr);
}
std::unique_ptr<column> remove_unused_keys(dictionary_column_view const& dictionary_column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// locate the keys to remove
auto const keys_size = dictionary_column.keys_size();
column_view indices_view = dictionary_column.get_indices_annotated();
// search the indices values with key indices to look for any holes
auto const matches = [&] {
// build keys index to verify against indices values
rmm::device_uvector<uint32_t> keys_positions(keys_size, stream);
thrust::sequence(rmm::exec_policy(stream), keys_positions.begin(), keys_positions.end());
// wrap the indices for comparison in contains()
column_view keys_positions_view(
data_type{type_id::UINT32}, keys_size, keys_positions.data(), nullptr, 0);
return cudf::detail::contains(indices_view, keys_positions_view, stream, mr);
}();
auto d_matches = matches->view().data<bool>();
// call common utility method to keep the keys that match
auto key_matcher = [d_matches] __device__(size_type idx) { return d_matches[idx]; };
return remove_keys_fn(dictionary_column, key_matcher, stream, mr);
}
} // namespace detail
// external APIs
std::unique_ptr<column> remove_keys(dictionary_column_view const& dictionary_column,
column_view const& keys_to_remove,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::remove_keys(dictionary_column, keys_to_remove, stream, mr);
}
std::unique_ptr<column> remove_unused_keys(dictionary_column_view const& dictionary_column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::remove_unused_keys(dictionary_column, stream, mr);
}
} // namespace dictionary
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/dictionary/set_keys.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/concatenate.hpp>
#include <cudf/detail/indexalator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/search.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/detail/valid_if.cuh>
#include <cudf/dictionary/detail/encode.hpp>
#include <cudf/dictionary/detail/iterator.cuh>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <cudf/stream_compaction.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/distance.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
#include <algorithm>
#include <iterator>
namespace cudf {
namespace dictionary {
namespace detail {
namespace {
/**
* @brief Type-dispatch functor for remapping the old indices to new values based on the new
* key-set.
*
* The dispatch is based on the key type.
* The output column is the new indices column for the new dictionary column.
*/
struct dispatch_compute_indices {
template <typename Element>
std::enable_if_t<cudf::is_relationally_comparable<Element, Element>(), std::unique_ptr<column>>
operator()(dictionary_column_view const& input,
column_view const& new_keys,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto dictionary_view = column_device_view::create(input.parent(), stream);
auto dictionary_itr = make_dictionary_iterator<Element>(*dictionary_view);
auto new_keys_view = column_device_view::create(new_keys, stream);
auto begin = new_keys_view->begin<Element>();
auto end = new_keys_view->end<Element>();
// create output indices column
auto result = make_numeric_column(get_indices_type_for_size(new_keys.size()),
input.size(),
mask_state::UNALLOCATED,
stream,
mr);
auto result_itr =
cudf::detail::indexalator_factory::make_output_iterator(result->mutable_view());
#ifdef NDEBUG
thrust::lower_bound(rmm::exec_policy(stream),
begin,
end,
dictionary_itr,
dictionary_itr + input.size(),
result_itr,
thrust::less<Element>());
#else
// There is a problem with thrust::lower_bound and the output_indexalator
// https://github.com/NVIDIA/thrust/issues/1452; thrust team created nvbug 3322776
// This is a workaround.
thrust::transform(rmm::exec_policy(stream),
dictionary_itr,
dictionary_itr + input.size(),
result_itr,
[begin, end] __device__(auto key) {
auto itr = thrust::lower_bound(thrust::seq, begin, end, key);
return static_cast<size_type>(thrust::distance(begin, itr));
});
#endif
result->set_null_count(0);
return result;
}
template <typename Element, typename... Args>
std::enable_if_t<!cudf::is_relationally_comparable<Element, Element>(), std::unique_ptr<column>>
operator()(Args&&...)
{
CUDF_FAIL("dictionary set_keys not supported for this column type");
}
};
} // namespace
//
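// set_keys rebuilds a dictionary around exactly the given key set: the new keys are
// de-duplicated and sorted, rows whose current value is absent from the new set become
// null, and the indices are recomputed against the new keys. For illustration
// (hypothetical data): keys [10, 20, 30] with indices [2, 0, 1] (rows 30, 10, 20) and
// new keys [20, 30, 40] yield keys [20, 30, 40] and indices [1, x, 0] with row 1 null.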
std::unique_ptr<column> set_keys(dictionary_column_view const& dictionary_column,
column_view const& new_keys,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(!new_keys.has_nulls(), "keys parameter must not have nulls");
auto keys = dictionary_column.keys();
CUDF_EXPECTS(keys.type() == new_keys.type(), "keys types must match");
// copy the keys -- use cudf::distinct to make sure there are no duplicates,
// then sort the results.
auto distinct_keys = cudf::detail::distinct(table_view{{new_keys}},
std::vector<size_type>{0},
duplicate_keep_option::KEEP_ANY,
null_equality::EQUAL,
nan_equality::ALL_EQUAL,
stream,
mr);
auto sorted_keys = cudf::detail::sort(distinct_keys->view(),
std::vector<order>{order::ASCENDING},
std::vector<null_order>{null_order::BEFORE},
stream,
mr)
->release();
std::unique_ptr<column> keys_column(std::move(sorted_keys.front()));
// compute the new nulls
auto matches = cudf::detail::contains(keys_column->view(), keys, stream, mr);
auto d_matches = matches->view().data<bool>();
auto indices_itr =
cudf::detail::indexalator_factory::make_input_iterator(dictionary_column.indices());
auto d_null_mask = dictionary_column.null_mask();
auto new_nulls = cudf::detail::valid_if(
thrust::make_counting_iterator<size_type>(dictionary_column.offset()),
thrust::make_counting_iterator<size_type>(dictionary_column.offset() +
dictionary_column.size()),
[d_null_mask, indices_itr, d_matches] __device__(size_type idx) {
if (d_null_mask && !bit_is_set(d_null_mask, idx)) return false;
return d_matches[indices_itr[idx]];
},
stream,
mr);
// compute the new indices
auto indices_column = type_dispatcher(keys_column->type(),
dispatch_compute_indices{},
dictionary_column,
keys_column->view(),
stream,
mr);
// create column with keys_column and indices_column
return make_dictionary_column(std::move(keys_column),
std::move(indices_column),
std::move(new_nulls.first),
new_nulls.second);
}
std::vector<std::unique_ptr<column>> match_dictionaries(
cudf::host_span<dictionary_column_view const> input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
std::vector<column_view> keys(input.size());
std::transform(input.begin(), input.end(), keys.begin(), [](auto& col) { return col.keys(); });
auto new_keys = cudf::detail::concatenate(keys, stream, rmm::mr::get_current_device_resource());
auto keys_view = new_keys->view();
std::vector<std::unique_ptr<column>> result(input.size());
std::transform(input.begin(), input.end(), result.begin(), [keys_view, mr, stream](auto& col) {
return set_keys(col, keys_view, stream, mr);
});
return result;
}
std::pair<std::vector<std::unique_ptr<column>>, std::vector<table_view>> match_dictionaries(
std::vector<table_view> tables, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
{
// Make a copy of all the column views from each table_view
std::vector<std::vector<column_view>> updated_columns;
std::transform(tables.begin(), tables.end(), std::back_inserter(updated_columns), [](auto& t) {
return std::vector<column_view>(t.begin(), t.end());
});
// Each column in a table must match in type.
// Once a dictionary column is found, all the corresponding column_views in the
// other table_views are matched. The matched column_views then replace the originals.
std::vector<std::unique_ptr<column>> dictionary_columns;
auto first_table = tables.front();
for (size_type col_idx = 0; col_idx < first_table.num_columns(); ++col_idx) {
auto col = first_table.column(col_idx);
if (col.type().id() == type_id::DICTIONARY32) {
std::vector<dictionary_column_view> dict_views; // hold all column_views at col_idx
std::transform(
tables.begin(), tables.end(), std::back_inserter(dict_views), [col_idx](auto& t) {
return dictionary_column_view(t.column(col_idx));
});
// now match the keys in these dictionary columns
auto dict_cols = dictionary::detail::match_dictionaries(dict_views, stream, mr);
// replace the updated_columns vector entries for the set of columns at col_idx
auto dict_col_idx = 0;
for (auto& v : updated_columns)
v[col_idx] = dict_cols[dict_col_idx++]->view();
// move the updated dictionary columns into the main output vector
std::move(dict_cols.begin(), dict_cols.end(), std::back_inserter(dictionary_columns));
}
}
// All the new column_views are in now updated_columns.
// Rebuild the table_views from the column_views.
std::vector<table_view> updated_tables;
std::transform(updated_columns.begin(),
updated_columns.end(),
std::back_inserter(updated_tables),
[](auto& v) { return table_view{v}; });
// Return the new dictionary columns and table_views
return {std::move(dictionary_columns), std::move(updated_tables)};
}
} // namespace detail
// external API
std::unique_ptr<column> set_keys(dictionary_column_view const& dictionary_column,
column_view const& keys,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::set_keys(dictionary_column, keys, stream, mr);
}
std::vector<std::unique_ptr<column>> match_dictionaries(
cudf::host_span<dictionary_column_view const> input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::match_dictionaries(input, stream, mr);
}
} // namespace dictionary
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/dictionary/search.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/dictionary/detail/search.hpp>
#include <cudf/dictionary/search.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/distance.h>
#include <thrust/execution_policy.h>
namespace cudf {
namespace dictionary {
namespace detail {
namespace {
struct dispatch_scalar_index {
template <typename IndexType, std::enable_if_t<is_index_type<IndexType>()>* = nullptr>
std::unique_ptr<scalar> operator()(size_type index,
bool is_valid,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return std::make_unique<numeric_scalar<IndexType>>(index, is_valid, stream, mr);
}
template <typename IndexType,
typename... Args,
std::enable_if_t<not is_index_type<IndexType>()>* = nullptr>
std::unique_ptr<scalar> operator()(Args&&...)
{
CUDF_FAIL("indices must be an integral type");
}
};
/**
* @brief Find index of a given key within a dictionary's keys column.
*
* The index is the position within the keys column where the given key (scalar) is found.
* The keys column is sorted and unique so only one value is expected.
* The result is an integer scalar identifying the index value.
* If the key is not found, the resulting scalar has `is_valid()=false`.
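*
* For example, with keys [10, 20, 30], looking up 20 returns an index scalar of 1,
* while looking up 25 returns a scalar with is_valid()=false.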
*/
struct find_index_fn {
template <typename Element,
std::enable_if_t<not std::is_same_v<Element, dictionary32> and
not std::is_same_v<Element, list_view> and
not std::is_same_v<Element, struct_view>>* = nullptr>
std::unique_ptr<scalar> operator()(dictionary_column_view const& input,
scalar const& key,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
if (!key.is_valid(stream))
return type_dispatcher(input.indices().type(), dispatch_scalar_index{}, 0, false, stream, mr);
CUDF_EXPECTS(input.keys().type() == key.type(),
"search key type must match dictionary keys type");
using ScalarType = cudf::scalar_type_t<Element>;
auto find_key = static_cast<ScalarType const&>(key).value(stream);
auto keys_view = column_device_view::create(input.keys(), stream);
auto iter = thrust::equal_range(
rmm::exec_policy(stream), keys_view->begin<Element>(), keys_view->end<Element>(), find_key);
return type_dispatcher(input.indices().type(),
dispatch_scalar_index{},
thrust::distance(keys_view->begin<Element>(), iter.first),
(thrust::distance(iter.first, iter.second) > 0),
stream,
mr);
}
template <
typename Element,
std::enable_if_t<std::is_same_v<Element, dictionary32> or std::is_same_v<Element, list_view> or
std::is_same_v<Element, struct_view>>* = nullptr>
std::unique_ptr<scalar> operator()(dictionary_column_view const&,
scalar const&,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*) const
{
CUDF_FAIL(
"dictionary, list_view, and struct_view columns cannot be the keys column of a dictionary");
}
};
struct find_insert_index_fn {
template <typename Element,
std::enable_if_t<not std::is_same_v<Element, dictionary32> and
not std::is_same_v<Element, list_view> and
not std::is_same_v<Element, struct_view>>* = nullptr>
std::unique_ptr<scalar> operator()(dictionary_column_view const& input,
scalar const& key,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
if (!key.is_valid(stream))
return type_dispatcher(input.indices().type(), dispatch_scalar_index{}, 0, false, stream, mr);
CUDF_EXPECTS(input.keys().type() == key.type(),
"search key type must match dictionary keys type");
using ScalarType = cudf::scalar_type_t<Element>;
auto find_key = static_cast<ScalarType const&>(key).value(stream);
auto keys_view = column_device_view::create(input.keys(), stream);
auto iter = thrust::lower_bound(
rmm::exec_policy(stream), keys_view->begin<Element>(), keys_view->end<Element>(), find_key);
return type_dispatcher(input.indices().type(),
dispatch_scalar_index{},
thrust::distance(keys_view->begin<Element>(), iter),
true,
stream,
mr);
}
template <
typename Element,
std::enable_if_t<std::is_same_v<Element, dictionary32> or std::is_same_v<Element, list_view> or
std::is_same_v<Element, struct_view>>* = nullptr>
std::unique_ptr<scalar> operator()(dictionary_column_view const&,
scalar const&,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*) const
{
CUDF_FAIL("dictionary, list_view, and struct_view columns cannot be the keys for a dictionary");
}
};
} // namespace
std::unique_ptr<scalar> get_index(dictionary_column_view const& dictionary,
scalar const& key,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (dictionary.is_empty())
return std::make_unique<numeric_scalar<uint32_t>>(0, false, stream, mr);
return type_dispatcher<dispatch_storage_type>(
dictionary.keys().type(), find_index_fn(), dictionary, key, stream, mr);
}
std::unique_ptr<scalar> get_insert_index(dictionary_column_view const& dictionary,
scalar const& key,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (dictionary.is_empty())
return std::make_unique<numeric_scalar<uint32_t>>(0, false, stream, mr);
return type_dispatcher<dispatch_storage_type>(
dictionary.keys().type(), find_insert_index_fn(), dictionary, key, stream, mr);
}
} // namespace detail
// external API
std::unique_ptr<scalar> get_index(dictionary_column_view const& dictionary,
scalar const& key,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::get_index(dictionary, key, stream, mr);
}
} // namespace dictionary
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/dictionary/add_keys.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/concatenate.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/search.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/detail/unary.hpp>
#include <cudf/dictionary/detail/encode.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <cudf/dictionary/update_keys.hpp>
#include <cudf/stream_compaction.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
namespace cudf {
namespace dictionary {
namespace detail {
/**
* @brief Create a new dictionary column by adding the new keys elements
* to the existing dictionary_column.
*
* ```
* Example:
* d1 = {[a, b, c, d, f], {4, 0, 3, 1, 2, 2, 2, 4, 0}}
* d2 = add_keys( d1, [d, b, e] )
* d2 is now {[a, b, c, d, e, f], [5, 0, 3, 1, 2, 2, 2, 5, 0]}
* ```
*/
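/**
 * A call-level sketch of the example above (illustrative only; assumes `d1` is a
 * `std::unique_ptr<cudf::column>` holding the dictionary, `new_keys` holds
 * [d, b, e], and the public overload defaults the stream and memory resource):
 * ```
 * auto d2 = cudf::dictionary::add_keys(cudf::dictionary_column_view{d1->view()},
 *                                      new_keys->view());
 * ```
 */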
std::unique_ptr<column> add_keys(dictionary_column_view const& dictionary_column,
column_view const& new_keys,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(!new_keys.has_nulls(), "Keys must not have nulls");
auto old_keys = dictionary_column.keys(); // [a,b,c,d,f]
CUDF_EXPECTS(new_keys.type() == old_keys.type(), "Keys must be the same type");
// first, concatenate the keys together
// [a,b,c,d,f] + [d,b,e] = [a,b,c,d,f,d,b,e]
auto combined_keys = cudf::detail::concatenate(
std::vector<column_view>{old_keys, new_keys}, stream, rmm::mr::get_current_device_resource());
// Drop duplicates from the combined keys, then sort the result.
// sort(distinct([a,b,c,d,f,d,b,e])) = [a,b,c,d,e,f]
auto table_keys = cudf::detail::distinct(table_view{{combined_keys->view()}},
std::vector<size_type>{0}, // only one key column
duplicate_keep_option::KEEP_ANY,
null_equality::EQUAL,
nan_equality::ALL_EQUAL,
stream,
mr);
std::vector<order> column_order{order::ASCENDING};
std::vector<null_order> null_precedence{null_order::AFTER}; // should be no nulls here
auto sorted_keys =
cudf::detail::sort(table_keys->view(), column_order, null_precedence, stream, mr)->release();
std::unique_ptr<column> keys_column(std::move(sorted_keys.front()));
// create a map for the indices
// lower_bound([a,b,c,d,e,f],[a,b,c,d,f]) = [0,1,2,3,5]
auto map_indices = cudf::detail::lower_bound(table_view{{keys_column->view()}},
table_view{{old_keys}},
column_order,
null_precedence,
stream,
mr);
// now create the indices column -- map old values to the new ones
// gather([4,0,3,1,2,2,2,4,0],[0,1,2,3,5]) = [5,0,3,1,2,2,2,5,0]
column_view indices_view(dictionary_column.indices().type(),
dictionary_column.size(),
dictionary_column.indices().head(),
nullptr,
0,
dictionary_column.offset());
// the result may contain nulls if the input contains nulls
// and the corresponding index is therefore invalid/undefined
auto table_indices = cudf::detail::gather(table_view{{map_indices->view()}},
indices_view,
cudf::out_of_bounds_policy::NULLIFY,
cudf::detail::negative_index_policy::NOT_ALLOWED,
stream,
mr)
->release();
// The output of lower_bound is INT32 but we need to convert to unsigned indices.
auto const indices_type = get_indices_type_for_size(keys_column->size());
auto indices_column = [&] {
column_view gather_result = table_indices.front()->view();
auto const indices_size = gather_result.size();
// we can just use the lower-bound/gather data directly for UINT32 case
if (indices_type.id() == type_id::UINT32) {
auto contents = table_indices.front()->release();
return std::make_unique<column>(data_type{type_id::UINT32},
indices_size,
std::move(*(contents.data.release())),
rmm::device_buffer{0, stream, mr},
0);
}
// otherwise we need to convert the gather result
column_view cast_view(gather_result.type(), indices_size, gather_result.head(), nullptr, 0);
return cudf::detail::cast(cast_view, indices_type, stream, mr);
}();
// create new dictionary column with keys_column and indices_column
// null mask has not changed
return make_dictionary_column(std::move(keys_column),
std::move(indices_column),
cudf::detail::copy_bitmask(dictionary_column.parent(), stream, mr),
dictionary_column.null_count());
}
} // namespace detail
std::unique_ptr<column> add_keys(dictionary_column_view const& dictionary_column,
column_view const& keys,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::add_keys(dictionary_column, keys, stream, mr);
}
} // namespace dictionary
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/dictionary/decode.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/dictionary/detail/encode.hpp>
#include <cudf/dictionary/encode.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace dictionary {
namespace detail {
/**
* @brief Decode a column from a dictionary.
*/
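/**
 * ```
 * Example (illustrative):
 * d = {[a, b, c], [2, 0, 1, 2]}
 * decode(d) is now [c, a, b, c]
 * ```
 * Any nulls in the input dictionary rows are carried over to the output column.
 */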
std::unique_ptr<column> decode(dictionary_column_view const& source,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (source.is_empty()) return make_empty_column(type_id::EMPTY);
column_view indices{source.indices().type(),
source.size(),
source.indices().head(),
nullptr, // no nulls for gather indices
0,
source.offset()};
  // use gather to create the output column -- out-of-bounds indices are nullified
  // (out_of_bounds_policy::NULLIFY)
auto table_column = cudf::detail::gather(table_view{{source.keys()}},
indices,
cudf::out_of_bounds_policy::NULLIFY,
cudf::detail::negative_index_policy::NOT_ALLOWED,
stream,
mr)
->release();
auto output_column = std::unique_ptr<column>(std::move(table_column.front()));
// apply any nulls to the output column
output_column->set_null_mask(cudf::detail::copy_bitmask(source.parent(), stream, mr),
source.null_count());
return output_column;
}
} // namespace detail
std::unique_ptr<column> decode(dictionary_column_view const& source,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::decode(source, stream, mr);
}
} // namespace dictionary
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/dictionary
|
rapidsai_public_repos/cudf/cpp/src/dictionary/detail/merge.cu
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/indexalator.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/dictionary/detail/encode.hpp>
#include <cudf/dictionary/detail/merge.hpp>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/transform.h>
namespace cudf {
namespace dictionary {
namespace detail {
std::unique_ptr<column> merge(dictionary_column_view const& lcol,
dictionary_column_view const& rcol,
cudf::detail::index_vector const& row_order,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const lcol_iter = cudf::detail::indexalator_factory::make_input_iterator(lcol.indices());
auto const rcol_iter = cudf::detail::indexalator_factory::make_input_iterator(rcol.indices());
// create output indices column
auto const merged_size = lcol.size() + rcol.size();
auto const indices_type = get_indices_type_for_size(merged_size);
auto indices_column =
make_fixed_width_column(indices_type, merged_size, cudf::mask_state::UNALLOCATED, stream, mr);
auto output_iter =
cudf::detail::indexalator_factory::make_output_iterator(indices_column->mutable_view());
// merge the input indices columns into the output column
thrust::transform(rmm::exec_policy(stream),
row_order.begin(),
row_order.end(),
output_iter,
[lcol_iter, rcol_iter] __device__(auto const& index_pair) {
auto const [side, index] = index_pair;
return side == cudf::detail::side::LEFT ? lcol_iter[index] : rcol_iter[index];
});
// build dictionary; the validity mask is updated by the caller
return make_dictionary_column(
std::make_unique<column>(lcol.keys(), stream, mr),
std::move(indices_column),
cudf::detail::create_null_mask(
lcol.has_nulls() || rcol.has_nulls() ? static_cast<size_t>(merged_size) : 0,
mask_state::UNINITIALIZED,
stream,
mr),
lcol.null_count() + rcol.null_count());
}
} // namespace detail
} // namespace dictionary
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/dictionary
|
rapidsai_public_repos/cudf/cpp/src/dictionary/detail/concatenate.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/concatenate.hpp>
#include <cudf/detail/indexalator.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/sorting.hpp>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/dictionary/detail/concatenate.hpp>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <thrust/binary_search.h>
#include <thrust/distance.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/pair.h>
#include <thrust/transform.h>
#include <thrust/transform_scan.h>
#include <algorithm>
#include <vector>
namespace cudf {
namespace dictionary {
namespace detail {
namespace {
/**
* @brief Keys and indices offsets values.
*
 * The first value is the keys offset and the second value is the indices offset.
* These are offsets to the beginning of each input column after concatenating.
*/
using offsets_pair = thrust::pair<size_type, size_type>;
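// For example (illustrative): concatenating two dictionary columns with keys sizes
// {5, 3} and row counts {9, 4} produces the offsets [{0, 0}, {5, 9}] -- i.e. where
// the second column's keys and indices begin within the concatenated children.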
/**
* @brief Utility for calculating the offsets for the concatenated child columns
* of the output dictionary column.
*/
struct compute_children_offsets_fn {
/**
* @brief Create the utility functor.
*
   * The columns vector is converted into a vector of column_view pointers so they
* can be used in thrust::transform_exclusive_scan without causing the
* compiler warning/error: "host/device function calling host function".
*
* @param columns The input dictionary columns.
*/
compute_children_offsets_fn(host_span<column_view const> columns) : columns_ptrs{columns.size()}
{
std::transform(
columns.begin(), columns.end(), columns_ptrs.begin(), [](auto& cv) { return &cv; });
}
/**
   * @brief Return the keys().type() of the first non-empty dictionary column.
*/
data_type get_keys_type()
{
auto const view(*std::find_if(
columns_ptrs.begin(), columns_ptrs.end(), [](auto pcv) { return pcv->size() > 0; }));
return dictionary_column_view(*view).keys().type();
}
/**
* @brief Create the offsets pair for the concatenated columns.
*
* Both vectors have the length of the number of input columns.
* The sizes of each child (keys and indices) of the individual columns
* are used to create the offsets.
*
* @param stream Stream used for allocating the output rmm::device_uvector.
* @return Vector of offsets_pair objects for keys and indices.
*/
rmm::device_uvector<offsets_pair> create_children_offsets(rmm::cuda_stream_view stream)
{
std::vector<offsets_pair> offsets(columns_ptrs.size());
thrust::transform_exclusive_scan(
thrust::host,
columns_ptrs.begin(),
columns_ptrs.end(),
offsets.begin(),
[](auto pcv) {
dictionary_column_view view(*pcv);
return offsets_pair{view.keys_size(), view.size()};
},
offsets_pair{0, 0},
[](auto lhs, auto rhs) {
return offsets_pair{lhs.first + rhs.first, lhs.second + rhs.second};
});
return cudf::detail::make_device_uvector_sync(
offsets, stream, rmm::mr::get_current_device_resource());
}
private:
std::vector<column_view const*> columns_ptrs; ///< pointer version of input column_view vector
};
/**
* @brief Type-dispatch functor for remapping the old indices to new values based
* on the new key-set.
*
* The dispatch is based on the key type.
* The output column is the updated indices child for the new dictionary column.
*/
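/**
 * ```
 * Example (illustrative):
 * input 1 = {[b, c], [0, 1]}  -> values [b, c]
 * input 2 = {[a, b], [1, 0]}  -> values [b, a]
 * new key-set = sort(distinct([b, c, a, b])) = [a, b, c]
 * remapped indices = [1, 2, 1, 0]
 * ```
 */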
struct dispatch_compute_indices {
template <typename Element>
std::enable_if_t<cudf::is_relationally_comparable<Element, Element>(), std::unique_ptr<column>>
operator()(column_view const& all_keys,
column_view const& all_indices,
column_view const& new_keys,
offsets_pair const* d_offsets,
size_type const* d_map_to_keys,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto keys_view = column_device_view::create(all_keys, stream);
auto indices_view = column_device_view::create(all_indices, stream);
auto d_all_indices = *indices_view;
auto indices_itr = cudf::detail::indexalator_factory::make_input_iterator(all_indices);
// map the concatenated indices to the concatenated keys
auto all_itr = thrust::make_permutation_iterator(
keys_view->begin<Element>(),
thrust::make_transform_iterator(
thrust::make_counting_iterator<size_type>(0),
[d_offsets, d_map_to_keys, d_all_indices, indices_itr] __device__(size_type idx) {
if (d_all_indices.is_null(idx)) return 0;
return indices_itr[idx] + d_offsets[d_map_to_keys[idx]].first;
}));
auto new_keys_view = column_device_view::create(new_keys, stream);
auto begin = new_keys_view->begin<Element>();
auto end = new_keys_view->end<Element>();
// create the indices output column
auto result = make_numeric_column(
all_indices.type(), all_indices.size(), mask_state::UNALLOCATED, stream, mr);
auto result_itr =
cudf::detail::indexalator_factory::make_output_iterator(result->mutable_view());
// new indices values are computed by matching the concatenated keys to the new key set
#ifdef NDEBUG
thrust::lower_bound(rmm::exec_policy(stream),
begin,
end,
all_itr,
all_itr + all_indices.size(),
result_itr,
thrust::less<Element>());
#else
// There is a problem with thrust::lower_bound and the output_indexalator.
// https://github.com/NVIDIA/thrust/issues/1452; thrust team created nvbug 3322776
// This is a workaround.
thrust::transform(rmm::exec_policy(stream),
all_itr,
all_itr + all_indices.size(),
result_itr,
[begin, end] __device__(auto key) {
auto itr = thrust::lower_bound(thrust::seq, begin, end, key);
return static_cast<size_type>(thrust::distance(begin, itr));
});
#endif
return result;
}
template <typename Element, typename... Args>
std::enable_if_t<!cudf::is_relationally_comparable<Element, Element>(), std::unique_ptr<column>>
operator()(Args&&...)
{
CUDF_FAIL("dictionary concatenate not supported for this column type");
}
};
} // namespace
std::unique_ptr<column> concatenate(host_span<column_view const> columns,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// exception here is the same behavior as in cudf::concatenate
CUDF_EXPECTS(not columns.empty(), "Unexpected empty list of columns to concatenate.");
// concatenate the keys (and check the keys match)
compute_children_offsets_fn child_offsets_fn{columns};
auto keys_type = child_offsets_fn.get_keys_type();
std::vector<column_view> keys_views(columns.size());
std::transform(columns.begin(), columns.end(), keys_views.begin(), [keys_type](auto cv) {
auto dict_view = dictionary_column_view(cv);
// empty column may not have keys so we create an empty column_view place-holder
if (dict_view.is_empty()) return column_view{keys_type, 0, nullptr, nullptr, 0};
auto keys = dict_view.keys();
CUDF_EXPECTS(keys.type() == keys_type, "key types of all dictionary columns must match");
return keys;
});
auto all_keys =
cudf::detail::concatenate(keys_views, stream, rmm::mr::get_current_device_resource());
// sort keys and remove duplicates;
// this becomes the keys child for the output dictionary column
auto table_keys = cudf::detail::distinct(table_view{{all_keys->view()}},
std::vector<size_type>{0},
duplicate_keep_option::KEEP_ANY,
null_equality::EQUAL,
nan_equality::ALL_EQUAL,
stream,
mr);
auto sorted_keys = cudf::detail::sort(table_keys->view(),
std::vector<order>{order::ASCENDING},
std::vector<null_order>{null_order::BEFORE},
stream,
mr)
->release();
std::unique_ptr<column> keys_column(std::move(sorted_keys.front()));
// next, concatenate the indices
std::vector<column_view> indices_views(columns.size());
std::transform(columns.begin(), columns.end(), indices_views.begin(), [](auto cv) {
auto dict_view = dictionary_column_view(cv);
if (dict_view.is_empty()) {
return column_view{data_type{type_id::UINT32}, 0, nullptr, nullptr, 0};
}
return dict_view.get_indices_annotated(); // nicely includes validity mask and view offset
});
auto all_indices = cudf::detail::concatenate(indices_views, stream, mr);
auto const indices_size = all_indices->size();
// build a vector of values to map the old indices to the concatenated keys
auto children_offsets = child_offsets_fn.create_children_offsets(stream);
rmm::device_uvector<size_type> map_to_keys(indices_size, stream);
auto indices_itr =
cudf::detail::make_counting_transform_iterator(1, [] __device__(size_type idx) {
return offsets_pair{0, idx};
});
// the indices offsets (pair.second) are for building the map
thrust::lower_bound(
rmm::exec_policy(stream),
children_offsets.begin() + 1,
children_offsets.end(),
indices_itr,
indices_itr + indices_size,
map_to_keys.begin(),
[] __device__(auto const& lhs, auto const& rhs) { return lhs.second < rhs.second; });
// now recompute the indices values for the new keys_column;
// the keys offsets (pair.first) are for mapping to the input keys
auto indices_column = type_dispatcher(keys_type,
dispatch_compute_indices{},
all_keys->view(), // old keys
all_indices->view(), // old indices
keys_column->view(), // new keys
children_offsets.data(),
map_to_keys.data(),
stream,
mr);
  // remove the bitmask from all_indices
auto null_count = all_indices->null_count(); // get before release()
auto contents = all_indices->release(); // all_indices will now be empty
// finally, frankenstein that dictionary column together
return make_dictionary_column(std::move(keys_column),
std::move(indices_column),
std::move(*(contents.null_mask.release())),
null_count);
}
} // namespace detail
} // namespace dictionary
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/json/json_path.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/json/json.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <io/utilities/parsing_utils.cuh>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/optional.h>
#include <thrust/pair.h>
#include <thrust/scan.h>
#include <thrust/tuple.h>
namespace cudf {
namespace detail {
namespace {
// change to "\n" and 1 to make output more readable
#define DEBUG_NEWLINE
constexpr int DEBUG_NEWLINE_LEN = 0;
/**
* @brief Result of calling a parse function.
*
* The primary use of this is to distinguish between "success" and
* "success but no data" return cases. For example, if you are reading the
* values of an array you might call a parse function in a while loop. You
* would want to continue doing this until you either encounter an error (parse_result::ERROR)
* or you get nothing back (parse_result::EMPTY)
*/
enum class parse_result {
ERROR, // failure
SUCCESS, // success
MISSING_FIELD, // success, but the field is missing
EMPTY, // success, but no data
};
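// A sketch of the while-loop usage described above (illustrative only; `j_state`
// is a json_state and the wildcard name {"*", 1} matches any element):
//   parse_result result = parse_result::SUCCESS;
//   while ((result = j_state.next_matching_element({"*", 1}, false)) == parse_result::SUCCESS) {
//     // ... process the element ...
//   }
//   if (result == parse_result::ERROR) { /* propagate the failure */ }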
/**
* @brief Base parser class inherited by the (device-side) json_state class and
* (host-side) path_state class.
*
* Contains a number of useful utility functions common to parsing json and
* JSONPath strings.
*/
class parser {
protected:
CUDF_HOST_DEVICE inline parser() {}
CUDF_HOST_DEVICE inline parser(char const* _input, int64_t _input_len)
: input(_input), input_len(_input_len), pos(_input)
{
parse_whitespace();
}
CUDF_HOST_DEVICE inline parser(parser const& p)
: input(p.input), input_len(p.input_len), pos(p.pos)
{
}
CUDF_HOST_DEVICE inline bool eof(char const* p) { return p - input >= input_len; }
CUDF_HOST_DEVICE inline bool eof() { return eof(pos); }
CUDF_HOST_DEVICE inline bool parse_whitespace()
{
while (!eof()) {
if (is_whitespace(*pos)) {
pos++;
} else {
return true;
}
}
return false;
}
CUDF_HOST_DEVICE inline bool is_hex_digit(char c)
{
return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F');
}
CUDF_HOST_DEVICE inline int64_t chars_left() { return input_len - ((pos - input) + 1); }
/**
* @brief Parse an escape sequence.
*
* Must be a valid sequence as specified by the JSON format
* https://www.json.org/json-en.html
*
* @returns True on success or false on fail.
*/
CUDF_HOST_DEVICE inline bool parse_escape_seq()
{
if (*pos != '\\') { return false; }
char c = *++pos;
// simple case
if (c == '\"' || c == '\\' || c == '/' || c == 'b' || c == 'f' || c == 'n' || c == 'r' ||
c == 't') {
pos++;
return true;
}
// hex digits: must be of the form uXXXX where each X is a valid hex digit
if (c == 'u' && chars_left() >= 4 && is_hex_digit(pos[1]) && is_hex_digit(pos[2]) &&
is_hex_digit(pos[3]) && is_hex_digit(pos[4])) {
pos += 5;
return true;
}
// an illegal escape sequence.
return false;
}
/**
* @brief Parse a quote-enclosed JSON string.
*
* @param[out] str The resulting string.
* @param can_be_empty Parameter indicating whether it is valid for the string
* to not be present.
* @param quote Character expected as the surrounding quotes. A value of 0
* indicates allowing either single or double quotes (but not a mixture of both).
* @returns A result code indicating success, failure or other result.
*/
CUDF_HOST_DEVICE inline parse_result parse_string(string_view& str, bool can_be_empty, char quote)
{
str = string_view(nullptr, 0);
if (parse_whitespace()) {
// if the user specifies 0 for quote, allow either ' or ". otherwise
// use the char directly
if ((quote == 0 && (*pos == '\'' || *pos == '\"')) || (quote == *pos)) {
quote = *pos;
char const* start = ++pos;
while (!eof()) {
// handle escaped characters
if (*pos == '\\') {
if (!parse_escape_seq()) { return parse_result::ERROR; }
} else if (*pos == quote) {
str = string_view(start, pos - start);
pos++;
return parse_result::SUCCESS;
} else {
pos++;
}
}
}
}
return can_be_empty ? parse_result::EMPTY : parse_result::ERROR;
}
protected:
char const* input{nullptr};
int64_t input_len{0};
char const* pos{nullptr};
CUDF_HOST_DEVICE inline bool is_whitespace(char c) { return c <= ' '; }
};
/**
* @brief Output buffer object. Used during the preprocess/size-computation step
* and the actual output step.
*
* There is an important distinction between two cases:
*
* - producing no output at all. that is, the query matched nothing in the input.
* - producing empty output. the query matched something in the input, but the
* value of the result is an empty string.
*
 * The `output_len` optional distinguishes the two cases: when it holds no value,
 * the query produced no output at all (a null result); a value of zero means the
 * query matched but the result is an empty string.
 *
*/
struct json_output {
size_t output_max_len;
char* output;
thrust::optional<size_t> output_len;
__device__ void add_output(char const* str, size_t len)
{
if (output != nullptr) { memcpy(output + output_len.value_or(0), str, len); }
output_len = output_len.value_or(0) + len;
}
__device__ void add_output(string_view const& str) { add_output(str.data(), str.size_bytes()); }
};
enum json_element_type { NONE, OBJECT, ARRAY, VALUE };
/**
* @brief Parsing class that holds the current state of the json to be parse and provides
* functions for navigating through it.
*/
class json_state : private parser {
public:
__device__ json_state() : parser() {}
__device__ json_state(char const* _input,
int64_t _input_len,
cudf::get_json_object_options _options)
: parser(_input, _input_len),
options(_options)
{
}
__device__ json_state(json_state const& j)
: parser(j),
cur_el_start(j.cur_el_start),
cur_el_type(j.cur_el_type),
parent_el_type(j.parent_el_type),
options(j.options)
{
}
// retrieve the entire current element into the output
__device__ parse_result extract_element(json_output* output, bool list_element)
{
char const* start = cur_el_start;
char const* end = start;
// if we're a value type, do a simple value parse.
if (cur_el_type == VALUE) {
pos = cur_el_start;
if (parse_value() != parse_result::SUCCESS) { return parse_result::ERROR; }
end = pos;
// potentially strip quotes from individually returned string values.
if (options.get_strip_quotes_from_single_strings() && !list_element && is_quote(*start) &&
*(end - 1) == *start) {
start++;
end--;
}
}
// otherwise, march through everything inside
else {
int obj_count = 0;
int arr_count = 0;
while (!eof(end)) {
// parse strings explicitly so we handle all interesting corner cases (such as strings
        // containing {, }, [ or ])
if (is_quote(*end)) {
string_view str;
pos = end;
if (parse_string(str, false, *end) == parse_result::ERROR) { return parse_result::ERROR; }
end = pos;
} else {
char const c = *end++;
switch (c) {
case '{': obj_count++; break;
case '}': obj_count--; break;
case '[': arr_count++; break;
case ']': arr_count--; break;
default: break;
}
}
if (obj_count == 0 && arr_count == 0) { break; }
}
if (obj_count > 0 || arr_count > 0) { return parse_result::ERROR; }
pos = end;
}
// parse trailing ,
if (parse_whitespace()) {
if (*pos == ',') { pos++; }
}
if (output != nullptr) { output->add_output({start, static_cast<size_type>(end - start)}); }
return parse_result::SUCCESS;
}
// skip the next element
__device__ parse_result skip_element() { return extract_element(nullptr, false); }
// advance to the next element
__device__ parse_result next_element() { return next_element_internal(false); }
// advance inside the current element
__device__ parse_result child_element(json_element_type expected_type)
{
if (expected_type != NONE && cur_el_type != expected_type) { return parse_result::ERROR; }
// if we succeed, record our parent element type.
auto const prev_el_type = cur_el_type;
auto const result = next_element_internal(true);
if (result == parse_result::SUCCESS) { parent_el_type = prev_el_type; }
return result;
}
// return the next element that matches the specified name.
__device__ parse_result next_matching_element(string_view const& name, bool inclusive)
{
// if we're not including the current element, skip it
if (!inclusive) {
parse_result result = next_element_internal(false);
if (result != parse_result::SUCCESS) { return result; }
}
// loop until we find a match or there's nothing left
do {
if (name.size_bytes() == 1 && name.data()[0] == '*') {
return parse_result::SUCCESS;
} else if (cur_el_name == name) {
return parse_result::SUCCESS;
}
// next
parse_result result = next_element_internal(false);
if (result != parse_result::SUCCESS) {
return options.get_missing_fields_as_nulls() && result == parse_result::EMPTY
? parse_result::MISSING_FIELD
: result;
}
} while (true);
return parse_result::ERROR;
}
/**
* @brief Parse a name field for a JSON element.
*
* When parsing JSON objects, it is not always a requirement that the name
* actually exists. For example, the outer object bounded by {} here has
* no name, while the inner element "a" does.
*
* ```
* {
* "a" : "b"
* }
* ```
*
* The user can specify whether or not the name string must be present via
* the `can_be_empty` flag.
*
* When a name is present, it must be followed by a colon `:`
*
* @param[out] name The resulting name.
* @param can_be_empty Parameter indicating whether it is valid for the name
* to not be present.
* @returns A result code indicating success, failure or other result.
*/
CUDF_HOST_DEVICE inline parse_result parse_name(string_view& name, bool can_be_empty)
{
char const quote = options.get_allow_single_quotes() ? 0 : '\"';
if (parse_string(name, can_be_empty, quote) == parse_result::ERROR) {
return parse_result::ERROR;
}
// if we got a real string, the next char must be a :
if (name.size_bytes() > 0) {
if (!parse_whitespace()) { return parse_result::ERROR; }
if (*pos == ':') {
pos++;
return parse_result::SUCCESS;
}
}
return parse_result::EMPTY;
}
private:
/**
* @brief Parse a non-string JSON value.
*
* Non-string values include numbers, true, false, or null. This function does not
* do any validation of the value.
*
* @param val (Output) The string containing the parsed value
* @returns A result code indicating success, failure or other result.
*/
CUDF_HOST_DEVICE inline parse_result parse_non_string_value(string_view& val)
{
if (!parse_whitespace()) { return parse_result::ERROR; }
// parse to the end of the value
char const* start = pos;
char const* end = start;
while (!eof(end)) {
char const c = *end;
if (c == ',' || c == '}' || c == ']' || is_whitespace(c)) { break; }
// illegal chars
if (c == '[' || c == '{' || c == ':' || is_quote(c)) { return parse_result::ERROR; }
end++;
}
pos = end;
val = string_view(start, end - start);
return parse_result::SUCCESS;
}
// parse a value - either a string or a number/null/bool
__device__ parse_result parse_value()
{
if (!parse_whitespace()) { return parse_result::ERROR; }
// string or number?
string_view unused;
return is_quote(*pos) ? parse_string(unused, false, *pos) : parse_non_string_value(unused);
}
__device__ parse_result next_element_internal(bool child)
{
// if we're not getting a child element, skip the current element.
// this will leave pos as the first character -after- the close of
// the current element
if (!child && cur_el_start != nullptr) {
if (skip_element() == parse_result::ERROR) { return parse_result::ERROR; }
cur_el_start = nullptr;
}
// otherwise pos will be at the first character within the current element
// can only get the child of an object or array.
// this could theoretically be handled as an error, but the evaluators I've found
// seem to treat this as "it's nothing"
if (child && (cur_el_type == VALUE || cur_el_type == NONE)) { return parse_result::EMPTY; }
// what's next
if (!parse_whitespace()) { return parse_result::EMPTY; }
// if we're closing off a parent element, we're done
char const c = *pos;
if (c == ']' || c == '}') { return parse_result::EMPTY; }
// if we're not accessing elements of an array, check for name.
bool const array_access =
(cur_el_type == ARRAY && child) || (parent_el_type == ARRAY && !child);
if (!array_access && parse_name(cur_el_name, true) == parse_result::ERROR) {
return parse_result::ERROR;
}
// element type
if (!parse_whitespace()) { return parse_result::EMPTY; }
switch (*pos++) {
case '[': cur_el_type = ARRAY; break;
case '{': cur_el_type = OBJECT; break;
case ',':
case ':': return parse_result::ERROR;
case '\'':
if (!options.get_allow_single_quotes()) { return parse_result::ERROR; }
cur_el_type = VALUE;
break;
// value type
default: cur_el_type = VALUE; break;
}
// the start of the current element is always at the value, not the name
cur_el_start = pos - 1;
return parse_result::SUCCESS;
}
CUDF_HOST_DEVICE inline bool is_quote(char c)
{
return (c == '\"') || (options.get_allow_single_quotes() && (c == '\''));
}
char const* cur_el_start{nullptr}; // pointer to the first character of the -value- of the
// current element - not the name
string_view cur_el_name; // name of the current element (if applicable)
json_element_type cur_el_type{json_element_type::NONE}; // type of the current element
json_element_type parent_el_type{json_element_type::NONE}; // parent element type
get_json_object_options options; // behavior options
};
enum class path_operator_type { ROOT, CHILD, CHILD_WILDCARD, CHILD_INDEX, ERROR, END };
/**
* @brief A "command" operator used to query a json string. A full query is
 * an array of these operators applied to the incoming json string.
*/
struct path_operator {
CUDF_HOST_DEVICE inline path_operator() {}
CUDF_HOST_DEVICE inline path_operator(path_operator_type _type,
json_element_type _expected_type = NONE)
: type(_type), expected_type{_expected_type}
{
}
path_operator_type type{path_operator_type::ERROR}; // operator type
// the expected element type we're applying this operation to.
// for example:
// - you cannot retrieve a subscripted field (eg [5]) from an object.
// - you cannot retrieve a field by name (eg .book) from an array.
// - you -can- use .* for both arrays and objects
// a value of NONE implies any type accepted
json_element_type expected_type{NONE}; // the expected type of the element we're working with
string_view name; // name to match against (if applicable)
int index{-1}; // index for subscript operator
};
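// For example (illustrative), the JSONPath "$.store.book[2]" is compiled into the
// operator sequence:
//   ROOT, CHILD("store"), CHILD("book"), CHILD_INDEX(2), END
// where the CHILD operators expect an OBJECT and CHILD_INDEX expects an ARRAY.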
/**
* @brief Parsing class that holds the current state of the JSONPath string to be parsed
* and provides functions for navigating through it. This is only called on the host
* during the preprocess step which builds a command buffer that the gpu uses.
*/
class path_state : private parser {
public:
path_state(char const* _path, size_t _path_len) : parser(_path, _path_len) {}
// get the next operator in the JSONPath string
path_operator get_next_operator()
{
if (eof()) { return {path_operator_type::END}; }
switch (*pos++) {
case '$': return {path_operator_type::ROOT};
case '.': {
path_operator op;
string_view term{".[", 2};
if (parse_path_name(op.name, term)) {
// this is another potential use case for __SPARK_BEHAVIORS / configurability
// Spark currently only handles the wildcard operator inside [*], it does
// not handle .*
if (op.name.size_bytes() == 1 && op.name.data()[0] == '*') {
op.type = path_operator_type::CHILD_WILDCARD;
op.expected_type = NONE;
} else {
op.type = path_operator_type::CHILD;
op.expected_type = OBJECT;
}
return op;
}
} break;
// 3 ways this can be used
// indices: [0]
// name: ['book']
// wildcard: [*]
case '[': {
path_operator op;
string_view term{"]", 1};
bool const is_string = *pos == '\'';
if (parse_path_name(op.name, term)) {
pos++;
if (op.name.size_bytes() == 1 && op.name.data()[0] == '*') {
op.type = path_operator_type::CHILD_WILDCARD;
op.expected_type = NONE;
} else {
if (is_string) {
op.type = path_operator_type::CHILD;
op.expected_type = OBJECT;
} else {
op.type = path_operator_type::CHILD_INDEX;
auto const value = cudf::io::parse_numeric<int>(
op.name.data(), op.name.data() + op.name.size_bytes(), json_opts);
op.index = value.value_or(-1);
CUDF_EXPECTS(op.index >= 0, "Invalid numeric index specified in JSONPath");
op.expected_type = ARRAY;
}
}
return op;
}
} break;
// wildcard operator
case '*': {
pos++;
return path_operator{path_operator_type::CHILD_WILDCARD};
} break;
default: CUDF_FAIL("Unrecognized JSONPath operator", std::invalid_argument); break;
}
return {path_operator_type::ERROR};
}
private:
cudf::io::parse_options_view json_opts{',', '\n', '\"', '.'};
bool parse_path_name(string_view& name, string_view const& terminators)
{
switch (*pos) {
case '*':
name = string_view(pos, 1);
pos++;
break;
case '\'':
if (parse_string(name, false, '\'') != parse_result::SUCCESS) { return false; }
break;
default: {
size_t const chars_left = input_len - (pos - input);
char const* end = std::find_first_of(
pos, pos + chars_left, terminators.data(), terminators.data() + terminators.size_bytes());
if (end) {
name = string_view(pos, end - pos);
pos = end;
} else {
name = string_view(pos, chars_left);
pos = input + input_len;
}
break;
}
}
// an empty name is not valid
CUDF_EXPECTS(
name.size_bytes() > 0, "Invalid empty name in JSONPath query string", std::invalid_argument);
return true;
}
};
/**
* @brief Preprocess the incoming JSONPath string on the host to generate a
* command buffer for use by the GPU.
*
* @param json_path The incoming json path
* @param stream Cuda stream to perform any gpu actions on
* @returns A pair containing the command buffer, and maximum stack depth required.
*/
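// For example (illustrative): "$.store.book" produces [ROOT, CHILD, CHILD, END]
// with a maximum stack depth of 1, while "$.a[*].b[*]" contains two wildcards and
// therefore reports a maximum stack depth of 3.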
std::pair<thrust::optional<rmm::device_uvector<path_operator>>, int> build_command_buffer(
cudf::string_scalar const& json_path, rmm::cuda_stream_view stream)
{
std::string h_json_path = json_path.to_string(stream);
path_state p_state(h_json_path.data(), static_cast<size_type>(h_json_path.size()));
std::vector<path_operator> h_operators;
path_operator op;
int max_stack_depth = 1;
do {
op = p_state.get_next_operator();
if (op.type == path_operator_type::ERROR) {
CUDF_FAIL("Encountered invalid JSONPath input string");
}
if (op.type == path_operator_type::CHILD_WILDCARD) { max_stack_depth++; }
// convert pointer to device pointer
if (op.name.size_bytes() > 0) {
op.name =
string_view(json_path.data() + (op.name.data() - h_json_path.data()), op.name.size_bytes());
}
if (op.type == path_operator_type::ROOT) {
CUDF_EXPECTS(h_operators.size() == 0, "Root operator ($) can only exist at the root");
}
// if we have not gotten a root operator to start, and we're not empty, quietly push a
// root operator now.
if (h_operators.size() == 0 && op.type != path_operator_type::ROOT &&
op.type != path_operator_type::END) {
h_operators.push_back(path_operator{path_operator_type::ROOT});
}
h_operators.push_back(op);
} while (op.type != path_operator_type::END);
auto const is_empty = h_operators.size() == 1 && h_operators[0].type == path_operator_type::END;
return is_empty ? std::pair(thrust::nullopt, 0)
: std::pair(thrust::make_optional(cudf::detail::make_device_uvector_sync(
h_operators, stream, rmm::mr::get_current_device_resource())),
max_stack_depth);
}
#define PARSE_TRY(_x) \
do { \
last_result = _x; \
if (last_result == parse_result::ERROR) { return parse_result::ERROR; } \
} while (0)
/**
* @brief Parse a single json string using the provided command buffer
*
* @param j_state The incoming json string and associated parser
* @param commands The command buffer to be applied to the string. Always ends with a
* path_operator_type::END
 * @param output Buffer used to store the results of the query
* @returns A result code indicating success/fail/empty.
*/
template <int max_command_stack_depth>
__device__ parse_result parse_json_path(json_state& j_state,
path_operator const* commands,
json_output& output)
{
// manually maintained context stack in lieu of calling parse_json_path recursively.
struct context {
json_state j_state;
path_operator const* commands;
bool list_element;
bool state_flag;
};
context stack[max_command_stack_depth];
int stack_pos = 0;
auto push_context = [&stack, &stack_pos](json_state const& _j_state,
path_operator const* _commands,
bool _list_element = false,
bool _state_flag = false) {
if (stack_pos == max_command_stack_depth - 1) { return false; }
stack[stack_pos++] = context{_j_state, _commands, _list_element, _state_flag};
return true;
};
auto pop_context = [&stack, &stack_pos](context& c) {
if (stack_pos > 0) {
c = stack[--stack_pos];
return true;
}
return false;
};
push_context(j_state, commands, false);
parse_result last_result = parse_result::SUCCESS;
context ctx;
int element_count = 0;
while (pop_context(ctx)) {
path_operator op = *ctx.commands;
switch (op.type) {
// whatever the first object is
case path_operator_type::ROOT:
PARSE_TRY(ctx.j_state.next_element());
push_context(ctx.j_state, ctx.commands + 1);
break;
// .name
// ['name']
// [1]
// will return a single thing
case path_operator_type::CHILD: {
PARSE_TRY(ctx.j_state.child_element(op.expected_type));
if (last_result == parse_result::SUCCESS) {
PARSE_TRY(ctx.j_state.next_matching_element(op.name, true));
if (last_result == parse_result::SUCCESS) {
push_context(ctx.j_state, ctx.commands + 1, ctx.list_element);
} else if (last_result == parse_result::MISSING_FIELD) {
if (ctx.list_element && element_count > 0) {
output.add_output({"," DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN});
}
output.add_output({"null", 4});
element_count++;
}
}
} break;
// .*
// [*]
// will return an array of things
case path_operator_type::CHILD_WILDCARD: {
// if we're on the first element of this wildcard
if (!ctx.state_flag) {
// we will only ever be returning 1 array
if (!ctx.list_element) { output.add_output({"[" DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN}); }
// step into the child element
PARSE_TRY(ctx.j_state.child_element(op.expected_type));
if (last_result == parse_result::EMPTY) {
if (!ctx.list_element) {
output.add_output({"]" DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN});
}
last_result = parse_result::SUCCESS;
break;
}
// first element
PARSE_TRY(ctx.j_state.next_matching_element({"*", 1}, true));
if (last_result == parse_result::EMPTY) {
if (!ctx.list_element) {
output.add_output({"]" DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN});
}
last_result = parse_result::SUCCESS;
break;
}
// re-push ourselves
push_context(ctx.j_state, ctx.commands, ctx.list_element, true);
// push the next command
push_context(ctx.j_state, ctx.commands + 1, true);
} else {
// next element
PARSE_TRY(ctx.j_state.next_matching_element({"*", 1}, false));
if (last_result == parse_result::EMPTY) {
if (!ctx.list_element) {
output.add_output({"]" DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN});
}
last_result = parse_result::SUCCESS;
break;
}
// re-push ourselves
push_context(ctx.j_state, ctx.commands, ctx.list_element, true);
// push the next command
push_context(ctx.j_state, ctx.commands + 1, true);
}
} break;
// [0]
// [1]
// etc
// returns a single thing
case path_operator_type::CHILD_INDEX: {
PARSE_TRY(ctx.j_state.child_element(op.expected_type));
if (last_result == parse_result::SUCCESS) {
string_view const any{"*", 1};
PARSE_TRY(ctx.j_state.next_matching_element(any, true));
if (last_result == parse_result::SUCCESS) {
int idx;
for (idx = 1; idx <= op.index; idx++) {
PARSE_TRY(ctx.j_state.next_matching_element(any, false));
if (last_result == parse_result::EMPTY) { break; }
}
// if we didn't end up at the index we requested, this is an invalid index
if (idx - 1 != op.index) { return parse_result::ERROR; }
push_context(ctx.j_state, ctx.commands + 1, ctx.list_element);
}
}
} break;
// some sort of error.
case path_operator_type::ERROR: return parse_result::ERROR; break;
// END case
default: {
if (ctx.list_element && element_count > 0) {
output.add_output({"," DEBUG_NEWLINE, 1 + DEBUG_NEWLINE_LEN});
}
PARSE_TRY(ctx.j_state.extract_element(&output, ctx.list_element));
if (ctx.list_element && last_result != parse_result::EMPTY) { element_count++; }
} break;
}
}
return parse_result::SUCCESS;
}
// hardcoding this for now. to reach a stack depth of 8 would require
// a JSONPath containing 7 nested wildcards so this is probably reasonable.
constexpr int max_command_stack_depth = 8;
/**
* @brief Parse a single json string using the provided command buffer
*
* This function exists primarily as a shim for debugging purposes.
*
* @param input The incoming json string
* @param input_len Size of the incoming json string
* @param commands The command buffer to be applied to the string. Always ends with a
* path_operator_type::END
 * @param out_buf Buffer used to store the results of the query (nullptr in the size computation
* step)
* @param out_buf_size Size of the output buffer
* @param options Options controlling behavior
 * @returns A pair containing the result code and the output buffer.
*/
__device__ thrust::pair<parse_result, json_output> get_json_object_single(
char const* input,
size_t input_len,
path_operator const* const commands,
char* out_buf,
size_t out_buf_size,
get_json_object_options options)
{
json_state j_state(input, input_len, options);
json_output output{out_buf_size, out_buf};
auto const result = parse_json_path<max_command_stack_depth>(j_state, commands, output);
return {result, output};
}
/**
* @brief Kernel for running the JSONPath query.
*
* This kernel operates in a 2-pass way. On the first pass, it computes
* output sizes. On the second pass it fills in the provided output buffers
* (chars and validity)
*
 * @param col Device view of the incoming strings column
* @param commands JSONPath command buffer
* @param output_offsets Buffer used to store the string offsets for the results of the query
* @param out_buf Buffer used to store the results of the query
* @param out_validity Output validity buffer
* @param out_valid_count Output count of # of valid bits
* @param options Options controlling behavior
*/
template <int block_size>
__launch_bounds__(block_size) __global__
void get_json_object_kernel(column_device_view col,
path_operator const* const commands,
size_type* output_offsets,
thrust::optional<char*> out_buf,
thrust::optional<bitmask_type*> out_validity,
thrust::optional<size_type*> out_valid_count,
get_json_object_options options)
{
auto tid = cudf::detail::grid_1d::global_thread_id();
auto const stride = cudf::thread_index_type{blockDim.x} * cudf::thread_index_type{gridDim.x};
size_type warp_valid_count{0};
auto active_threads = __ballot_sync(0xffff'ffffu, tid < col.size());
while (tid < col.size()) {
bool is_valid = false;
string_view const str = col.element<string_view>(tid);
size_type output_size = 0;
if (str.size_bytes() > 0) {
char* dst = out_buf.has_value() ? out_buf.value() + output_offsets[tid] : nullptr;
size_t const dst_size =
out_buf.has_value() ? output_offsets[tid + 1] - output_offsets[tid] : 0;
parse_result result;
json_output out;
thrust::tie(result, out) =
get_json_object_single(str.data(), str.size_bytes(), commands, dst, dst_size, options);
output_size = out.output_len.value_or(0);
if (out.output_len.has_value() && result == parse_result::SUCCESS) { is_valid = true; }
}
// filled in only during the precompute step. during the compute step, the offsets
// are fed back in so we do -not- want to write them out
if (!out_buf.has_value()) { output_offsets[tid] = static_cast<size_type>(output_size); }
// validity filled in only during the output step
if (out_validity.has_value()) {
uint32_t mask = __ballot_sync(active_threads, is_valid);
// 0th lane of the warp writes the validity
if (!(tid % cudf::detail::warp_size)) {
out_validity.value()[cudf::word_index(tid)] = mask;
warp_valid_count += __popc(mask);
}
}
tid += stride;
active_threads = __ballot_sync(active_threads, tid < col.size());
}
// sum the valid counts across the whole block
if (out_valid_count) {
size_type block_valid_count =
cudf::detail::single_lane_block_sum_reduce<block_size, 0>(warp_valid_count);
if (threadIdx.x == 0) { atomicAdd(out_valid_count.value(), block_valid_count); }
}
}
std::unique_ptr<cudf::column> get_json_object(cudf::strings_column_view const& col,
cudf::string_scalar const& json_path,
get_json_object_options options,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// preprocess the json_path into a command buffer
auto preprocess = build_command_buffer(json_path, stream);
CUDF_EXPECTS(std::get<1>(preprocess) <= max_command_stack_depth,
"Encountered JSONPath string that is too complex");
if (col.is_empty()) return make_empty_column(type_id::STRING);
// allocate output offsets buffer.
auto offsets = cudf::make_fixed_width_column(
data_type{type_id::INT32}, col.size() + 1, mask_state::UNALLOCATED, stream, mr);
cudf::mutable_column_view offsets_view(*offsets);
// if the query is empty, return a string column containing all nulls
if (!std::get<0>(preprocess).has_value()) {
return std::make_unique<column>(
data_type{type_id::STRING},
col.size(),
rmm::device_buffer{0, stream, mr}, // no data
cudf::detail::create_null_mask(col.size(), mask_state::ALL_NULL, stream, mr),
col.size()); // null count
}
constexpr int block_size = 512;
cudf::detail::grid_1d const grid{col.size(), block_size};
auto cdv = column_device_view::create(col.parent(), stream);
// preprocess sizes (returned in the offsets buffer)
get_json_object_kernel<block_size>
<<<grid.num_blocks, grid.num_threads_per_block, 0, stream.value()>>>(
*cdv,
std::get<0>(preprocess).value().data(),
offsets_view.head<size_type>(),
thrust::nullopt,
thrust::nullopt,
thrust::nullopt,
options);
// convert sizes to offsets
thrust::exclusive_scan(rmm::exec_policy(stream),
offsets_view.head<size_type>(),
offsets_view.head<size_type>() + col.size() + 1,
offsets_view.head<size_type>(),
0);
size_type const output_size =
cudf::detail::get_value<size_type>(offsets_view, col.size(), stream);
// allocate output string column
auto chars = cudf::strings::detail::create_chars_child_column(output_size, stream, mr);
// potential optimization : if we know that all outputs are valid, we could skip creating
// the validity mask altogether
rmm::device_buffer validity =
cudf::detail::create_null_mask(col.size(), mask_state::UNINITIALIZED, stream, mr);
// compute results
cudf::mutable_column_view chars_view(*chars);
rmm::device_scalar<size_type> d_valid_count{0, stream};
get_json_object_kernel<block_size>
<<<grid.num_blocks, grid.num_threads_per_block, 0, stream.value()>>>(
*cdv,
std::get<0>(preprocess).value().data(),
offsets_view.head<size_type>(),
chars_view.head<char>(),
static_cast<bitmask_type*>(validity.data()),
d_valid_count.data(),
options);
return make_strings_column(col.size(),
std::move(offsets),
std::move(chars),
col.size() - d_valid_count.value(stream),
std::move(validity));
}
} // namespace
} // namespace detail
std::unique_ptr<cudf::column> get_json_object(cudf::strings_column_view const& col,
cudf::string_scalar const& json_path,
get_json_object_options options,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::get_json_object(col, json_path, options, stream, mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/lists/segmented_sort.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/lists/sorting.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/transform.h>
namespace cudf {
namespace lists {
namespace detail {
namespace {
/**
* @brief Create output offsets for segmented sort
*
* This creates a normalized set of offsets from the offsets child column of the input.
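 *
 * For example (illustrative), a sliced input whose relevant offsets read {5, 8, 8, 10}
 * produces output offsets {0, 3, 3, 5}.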
*/
std::unique_ptr<column> build_output_offsets(lists_column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto output_offset = make_numeric_column(
input.offsets().type(), input.size() + 1, mask_state::UNALLOCATED, stream, mr);
thrust::transform(rmm::exec_policy(stream),
input.offsets_begin(),
input.offsets_end(),
output_offset->mutable_view().begin<size_type>(),
[first = input.offsets_begin()] __device__(auto offset_index) {
return offset_index - *first;
});
return output_offset;
}
} // namespace
std::unique_ptr<column> sort_lists(lists_column_view const& input,
order column_order,
null_order null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) return empty_like(input.parent());
auto output_offset = build_output_offsets(input, stream, mr);
auto const child = input.get_sliced_child(stream);
auto const sorted_child_table = cudf::detail::segmented_sort_by_key(table_view{{child}},
table_view{{child}},
output_offset->view(),
{column_order},
{null_precedence},
stream,
mr);
return make_lists_column(input.size(),
std::move(output_offset),
std::move(sorted_child_table->release().front()),
input.null_count(),
cudf::detail::copy_bitmask(input.parent(), stream, mr),
stream,
mr);
}
std::unique_ptr<column> stable_sort_lists(lists_column_view const& input,
order column_order,
null_order null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) { return empty_like(input.parent()); }
auto output_offset = build_output_offsets(input, stream, mr);
auto const child = input.get_sliced_child(stream);
auto const sorted_child_table = cudf::detail::stable_segmented_sort_by_key(table_view{{child}},
table_view{{child}},
output_offset->view(),
{column_order},
{null_precedence},
stream,
mr);
return make_lists_column(input.size(),
std::move(output_offset),
std::move(sorted_child_table->release().front()),
input.null_count(),
cudf::detail::copy_bitmask(input.parent(), stream, mr),
stream,
mr);
}
} // namespace detail
std::unique_ptr<column> sort_lists(lists_column_view const& input,
order column_order,
null_order null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::sort_lists(input, column_order, null_precedence, stream, mr);
}
std::unique_ptr<column> stable_sort_lists(lists_column_view const& input,
order column_order,
null_order null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::stable_sort_lists(input, column_order, null_precedence, stream, mr);
}
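// Example usage (illustrative sketch): sorting each list independently, ascending, with
// nulls placed last. `input` is assumed to be a caller-provided LIST column.
//
//   auto sorted = cudf::lists::sort_lists(cudf::lists_column_view{input},
//                                         cudf::order::ASCENDING,
//                                         cudf::null_order::AFTER,
//                                         cudf::get_default_stream(),
//                                         rmm::mr::get_current_device_resource());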
} // namespace lists
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/lists/contains.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/valid_if.cuh>
#include <cudf/lists/detail/contains.hpp>
#include <cudf/lists/detail/lists_column_factories.hpp>
#include <cudf/lists/list_device_view.cuh>
#include <cudf/lists/lists_column_device_view.cuh>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/table/experimental/row_operators.cuh>
#include <cudf/table/row_operators.cuh>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/execution_policy.h>
#include <thrust/find.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/reverse_iterator.h>
#include <thrust/logical.h>
#include <thrust/optional.h>
#include <thrust/pair.h>
#include <thrust/tabulate.h>
#include <thrust/transform.h>
#include <type_traits>
namespace cudf::lists {
namespace {
/**
* @brief A sentinel value used for marking that a given key has not been found in the search list.
*
* The value should be `-1` as indicated in the public API documentation.
*/
auto constexpr __device__ NOT_FOUND_SENTINEL = size_type{-1};
/**
* @brief A sentinel value used for marking that a given output row should be null.
*
* This value should be different from `NOT_FOUND_SENTINEL`.
*/
auto constexpr __device__ NULL_SENTINEL = std::numeric_limits<size_type>::min();
/**
* @brief Check if the given type is a supported non-nested type in `cudf::lists::contains`.
*/
template <typename Element>
static auto constexpr is_supported_non_nested_type()
{
return cudf::is_fixed_width<Element>() || std::is_same_v<Element, cudf::string_view>;
}
/**
* @brief Check if the given type is supported in `cudf::lists::contains`.
*/
struct is_supported_type_fn {
template <typename Element>
auto constexpr operator()()
{
return is_supported_non_nested_type<Element>() || cudf::is_nested<Element>();
}
};
/**
* @brief Return a pair of index iterators {begin, end} to loop through elements within a
* list.
*
* Depending on the value of `forward`, a pair of forward or reverse iterators will be
* returned, allowing to loop through elements in the list in first-to-last or last-to-first
* order.
*
* Note that the element indices always restart to `0` at the first position in each list.
*
* @tparam forward A boolean value indicating whether we want to iterate elements in the list
* by forward or reverse order.
* @param size The number of elements in the list.
* @return A pair of {begin, end} iterators to iterate through the range `[0, size)`.
*/
template <bool forward>
__device__ auto element_index_pair_iter(size_type const size)
{
auto const begin = thrust::make_counting_iterator(0);
auto const end = thrust::make_counting_iterator(size);
if constexpr (forward) {
return thrust::pair{begin, end};
} else {
return thrust::pair{thrust::make_reverse_iterator(end), thrust::make_reverse_iterator(begin)};
}
}
/**
* @brief Functor to perform searching for index of a key element in a given list, specialized
* for nested types.
*/
template <typename KeyValidityIter, typename EqComparator>
struct search_list_fn {
duplicate_find_option const find_option;
KeyValidityIter const key_validity_iter;
EqComparator const d_comp;
search_list_fn(duplicate_find_option const find_option,
KeyValidityIter const key_validity_iter,
EqComparator const& d_comp)
: find_option(find_option), key_validity_iter(key_validity_iter), d_comp(d_comp)
{
}
__device__ size_type operator()(list_device_view const list) const
{
// A null list or null key will result in a null output row.
if (list.is_null() || !key_validity_iter[list.row_index()]) { return NULL_SENTINEL; }
return find_option == duplicate_find_option::FIND_FIRST ? search_list_op<true>(list)
: search_list_op<false>(list);
}
private:
template <bool forward>
__device__ inline size_type search_list_op(list_device_view const list) const
{
using cudf::experimental::row::lhs_index_type;
using cudf::experimental::row::rhs_index_type;
auto const [begin, end] = element_index_pair_iter<forward>(list.size());
auto const found_iter =
thrust::find_if(thrust::seq, begin, end, [=] __device__(auto const idx) {
return !list.is_null(idx) && d_comp(static_cast<lhs_index_type>(list.element_offset(idx)),
static_cast<rhs_index_type>(list.row_index()));
});
// If the key is found, return its found position in the list from `found_iter`.
return found_iter == end ? NOT_FOUND_SENTINEL : *found_iter;
}
};
/**
* @brief Function to search for index of key element(s) in the corresponding rows of a lists
* column, specialized for nested types.
*/
template <typename InputIterator, typename OutputIterator, typename DeviceComp>
void index_of(InputIterator input_it,
size_type num_rows,
OutputIterator output_it,
column_view const& child,
column_view const& search_keys,
duplicate_find_option find_option,
DeviceComp d_comp,
rmm::cuda_stream_view stream)
{
auto const keys_dv_ptr = column_device_view::create(search_keys, stream);
auto const key_validity_iter = cudf::detail::make_validity_iterator<true>(*keys_dv_ptr);
thrust::transform(rmm::exec_policy(stream),
input_it,
input_it + num_rows,
output_it,
search_list_fn{find_option, key_validity_iter, d_comp});
}
/**
* @brief Dispatch function to search for index of key element(s) in the corresponding rows of a
* lists column.
*/
std::unique_ptr<column> dispatch_index_of(lists_column_view const& lists,
column_view const& search_keys,
duplicate_find_option find_option,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(cudf::type_dispatcher(search_keys.type(), is_supported_type_fn{}),
"Unsupported type in `dispatch_index_of` function.");
// Access the child column through `child()` method, not `get_sliced_child()`.
// This is because slicing offset has already been taken into account during row
// comparisons.
auto const child = lists.child();
CUDF_EXPECTS(child.type() == search_keys.type(),
"Type/Scale of search key does not match list column element type.",
cudf::data_type_error);
CUDF_EXPECTS(search_keys.type().id() != type_id::EMPTY, "Type cannot be empty.");
auto const search_keys_have_nulls = search_keys.has_nulls();
auto const num_rows = lists.size();
auto const lists_cdv_ptr = column_device_view::create(lists.parent(), stream);
auto const input_it = cudf::detail::make_counting_transform_iterator(
size_type{0},
[lists = cudf::detail::lists_column_device_view{*lists_cdv_ptr}] __device__(auto const idx) {
return list_device_view{lists, idx};
});
auto out_positions = make_numeric_column(
data_type{type_to_id<size_type>()}, num_rows, cudf::mask_state::UNALLOCATED, stream, mr);
auto const output_it = out_positions->mutable_view().template begin<size_type>();
auto const keys_tview = cudf::table_view{{search_keys}};
auto const child_tview = cudf::table_view{{child}};
auto const has_nulls = has_nested_nulls(child_tview) || has_nested_nulls(keys_tview);
auto const comparator =
cudf::experimental::row::equality::two_table_comparator(child_tview, keys_tview, stream);
if (cudf::is_nested(search_keys.type())) {
auto const d_comp = comparator.equal_to<true>(nullate::DYNAMIC{has_nulls});
index_of(input_it, num_rows, output_it, child, search_keys, find_option, d_comp, stream);
} else {
auto const d_comp = comparator.equal_to<false>(nullate::DYNAMIC{has_nulls});
index_of(input_it, num_rows, output_it, child, search_keys, find_option, d_comp, stream);
}
if (search_keys_have_nulls || lists.has_nulls()) {
auto [null_mask, null_count] = cudf::detail::valid_if(
output_it,
output_it + num_rows,
[] __device__(auto const idx) { return idx != NULL_SENTINEL; },
stream,
mr);
out_positions->set_null_mask(std::move(null_mask), null_count);
}
return out_positions;
}
/**
* @brief Converts key-positions vector (from `index_of()`) to a BOOL8 vector, indicating if
* the search key(s) were found.
*/
std::unique_ptr<column> to_contains(std::unique_ptr<column>&& key_positions,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(key_positions->type().id() == type_to_id<size_type>(),
"Expected input column of type cudf::size_type.");
auto const positions_begin = key_positions->view().template begin<size_type>();
auto result = make_numeric_column(
data_type{type_id::BOOL8}, key_positions->size(), mask_state::UNALLOCATED, stream, mr);
thrust::transform(rmm::exec_policy(stream),
positions_begin,
positions_begin + key_positions->size(),
result->mutable_view().template begin<bool>(),
[] __device__(auto const i) {
// position == NOT_FOUND_SENTINEL: the list does not contain the search key.
return i != NOT_FOUND_SENTINEL;
});
auto const null_count = key_positions->null_count();
[[maybe_unused]] auto [data, null_mask, children] = key_positions->release();
result->set_null_mask(std::move(*null_mask.release()), null_count);
return result;
}
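// Example (illustrative): applied to key positions {2, -1, <null>}, to_contains yields
// BOOL8 values {true, false, null}; only NOT_FOUND_SENTINEL maps to false, and rows that
// were already null in the positions column stay null.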
} // namespace
namespace detail {
std::unique_ptr<column> index_of(lists_column_view const& lists,
cudf::scalar const& search_key,
duplicate_find_option find_option,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (!search_key.is_valid(stream)) {
return make_numeric_column(
data_type{cudf::type_to_id<size_type>()},
lists.size(),
cudf::detail::create_null_mask(lists.size(), mask_state::ALL_NULL, stream, mr),
lists.size(),
stream,
mr);
}
if (lists.size() == 0) {
return make_numeric_column(
data_type{type_to_id<size_type>()}, 0, cudf::mask_state::UNALLOCATED, stream, mr);
}
auto search_key_col = cudf::make_column_from_scalar(search_key, lists.size(), stream, mr);
return detail::index_of(lists, search_key_col->view(), find_option, stream, mr);
}
std::unique_ptr<column> index_of(lists_column_view const& lists,
column_view const& search_keys,
duplicate_find_option find_option,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(search_keys.size() == lists.size(),
"Number of search keys must match list column size.");
return dispatch_index_of(lists, search_keys, find_option, stream, mr);
}
std::unique_ptr<column> contains(lists_column_view const& lists,
cudf::scalar const& search_key,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto key_indices = detail::index_of(lists,
search_key,
duplicate_find_option::FIND_FIRST,
stream,
rmm::mr::get_current_device_resource());
return to_contains(std::move(key_indices), stream, mr);
}
std::unique_ptr<column> contains(lists_column_view const& lists,
column_view const& search_keys,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(search_keys.size() == lists.size(),
"Number of search keys must match list column size.");
auto key_indices = detail::index_of(lists,
search_keys,
duplicate_find_option::FIND_FIRST,
stream,
rmm::mr::get_current_device_resource());
return to_contains(std::move(key_indices), stream, mr);
}
std::unique_ptr<column> contains_nulls(lists_column_view const& lists,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const lists_cv = lists.parent();
auto output = make_numeric_column(data_type{type_to_id<bool>()},
lists.size(),
cudf::detail::copy_bitmask(lists_cv, stream, mr),
lists_cv.null_count(),
stream,
mr);
auto const out_begin = output->mutable_view().template begin<bool>();
auto const lists_cdv_ptr = column_device_view::create(lists_cv, stream);
thrust::tabulate(rmm::exec_policy(stream),
out_begin,
out_begin + lists.size(),
[lists = cudf::detail::lists_column_device_view{*lists_cdv_ptr}] __device__(
auto const list_idx) {
auto const list = list_device_view{lists, list_idx};
return list.is_null() ||
thrust::any_of(thrust::seq,
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(list.size()),
[&list](auto const idx) { return list.is_null(idx); });
});
return output;
}
} // namespace detail
std::unique_ptr<column> contains(lists_column_view const& lists,
cudf::scalar const& search_key,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::contains(lists, search_key, stream, mr);
}
std::unique_ptr<column> contains(lists_column_view const& lists,
column_view const& search_keys,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::contains(lists, search_keys, stream, mr);
}
std::unique_ptr<column> contains_nulls(lists_column_view const& lists,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::contains_nulls(lists, stream, mr);
}
std::unique_ptr<column> index_of(lists_column_view const& lists,
cudf::scalar const& search_key,
duplicate_find_option find_option,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::index_of(lists, search_key, find_option, stream, mr);
}
std::unique_ptr<column> index_of(lists_column_view const& lists,
column_view const& search_keys,
duplicate_find_option find_option,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::index_of(lists, search_keys, find_option, stream, mr);
}
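// Example usage (illustrative sketch): `lists_col` is a caller-provided LIST<INT32> column
// holding [[1, 2, 3], [4, 5], null] and `key` is a valid int32 scalar equal to 5. Then
// contains() returns BOOL8 {false, true, null} and index_of(FIND_FIRST) returns the
// 0-based positions {-1, 1, null}.
//
//   auto found = cudf::lists::contains(cudf::lists_column_view{lists_col}, key,
//                                      cudf::get_default_stream(),
//                                      rmm::mr::get_current_device_resource());
//   auto pos   = cudf::lists::index_of(cudf::lists_column_view{lists_col}, key,
//                                      cudf::lists::duplicate_find_option::FIND_FIRST,
//                                      cudf::get_default_stream(),
//                                      rmm::mr::get_current_device_resource());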
} // namespace cudf::lists
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/lists/dremel.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/lists/detail/dremel.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/table/table_device_view.cuh>
#include <rmm/exec_policy.hpp>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/gather.h>
#include <thrust/host_vector.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
namespace cudf::detail {
namespace {
/**
* @brief Functor to get definition level value for a nested struct column until the leaf level or
* the first list level.
*
*/
struct def_level_fn {
column_device_view const* parent_col;
uint8_t const* d_nullability;
uint8_t sub_level_start;
uint8_t curr_def_level;
bool always_nullable;
__device__ uint32_t operator()(size_type i)
{
uint32_t def = curr_def_level;
uint8_t l = sub_level_start;
bool is_col_struct = false;
auto col = *parent_col;
do {
// If col not nullable then it does not contribute to def levels
if (always_nullable or d_nullability[l]) {
if (not col.nullable() or bit_is_set(col.null_mask(), i)) {
++def;
} else { // We have found the shallowest level at which this row is null
break;
}
}
is_col_struct = (col.type().id() == type_id::STRUCT);
if (is_col_struct) {
col = col.child(0);
++l;
}
} while (is_col_struct);
return def;
}
};
dremel_data get_encoding(column_view h_col,
std::vector<uint8_t> nullability,
bool output_as_byte_array,
bool always_nullable,
rmm::cuda_stream_view stream)
{
auto get_list_level = [](column_view col) {
while (col.type().id() == type_id::STRUCT) {
col = col.child(0);
}
return col;
};
auto get_empties = [&](column_view col, size_type start, size_type end) {
auto lcv = lists_column_view(get_list_level(col));
rmm::device_uvector<size_type> empties_idx(lcv.size(), stream);
rmm::device_uvector<size_type> empties(lcv.size(), stream);
auto d_off = lcv.offsets().data<size_type>();
auto empties_idx_end =
thrust::copy_if(rmm::exec_policy(stream),
thrust::make_counting_iterator(start),
thrust::make_counting_iterator(end),
empties_idx.begin(),
[d_off] __device__(auto i) { return d_off[i] == d_off[i + 1]; });
auto empties_end = thrust::gather(rmm::exec_policy(stream),
empties_idx.begin(),
empties_idx_end,
lcv.offsets().begin<size_type>(),
empties.begin());
auto empties_size = empties_end - empties.begin();
return std::make_tuple(std::move(empties), std::move(empties_idx), empties_size);
};
// Check if there are empty lists with empty offsets in this column
bool has_empty_list_offsets = false;
{
auto curr_col = h_col;
while (is_nested(curr_col.type())) {
if (curr_col.type().id() == type_id::LIST) {
auto lcv = lists_column_view(curr_col);
if (lcv.offsets().size() == 0) {
has_empty_list_offsets = true;
break;
}
curr_col = lcv.child();
} else if (curr_col.type().id() == type_id::STRUCT) {
curr_col = curr_col.child(0);
}
}
}
std::unique_ptr<column> empty_list_offset_col;
if (has_empty_list_offsets) {
empty_list_offset_col = make_fixed_width_column(data_type(type_id::INT32), 1);
CUDF_CUDA_TRY(cudaMemsetAsync(
empty_list_offset_col->mutable_view().head(), 0, sizeof(size_type), stream.value()));
std::function<column_view(column_view const&)> normalize_col = [&](column_view const& col) {
auto children = [&]() -> std::vector<column_view> {
if (col.type().id() == type_id::LIST) {
auto lcol = lists_column_view(col);
auto offset_col =
lcol.offsets().head() == nullptr ? empty_list_offset_col->view() : lcol.offsets();
return {offset_col, normalize_col(lcol.child())};
} else if (col.type().id() == type_id::STRUCT) {
return {normalize_col(col.child(0))};
} else {
return {col.child_begin(), col.child_end()};
}
}();
return column_view(col.type(),
col.size(),
col.head(),
col.null_mask(),
col.null_count(),
col.offset(),
std::move(children));
};
h_col = normalize_col(h_col);
}
auto curr_col = h_col;
std::vector<column_view> nesting_levels;
std::vector<uint8_t> def_at_level;
std::vector<uint8_t> start_at_sub_level;
uint8_t curr_nesting_level_idx = 0;
if (nullability.empty()) {
while (is_nested(curr_col.type())) {
nullability.push_back(curr_col.nullable());
curr_col = curr_col.type().id() == type_id::LIST ? curr_col.child(1) : curr_col.child(0);
}
nullability.push_back(curr_col.nullable());
}
curr_col = h_col;
auto add_def_at_level = [&](column_view col) {
// Add up all def level contributions in this column all the way till the first list column
// appears in the hierarchy or until we get to leaf
uint32_t def = 0;
start_at_sub_level.push_back(curr_nesting_level_idx);
while (col.type().id() == type_id::STRUCT) {
def += (always_nullable or nullability[curr_nesting_level_idx]) ? 1 : 0;
col = col.child(0);
++curr_nesting_level_idx;
}
// At the end of all those structs is either a list column or the leaf. List column contributes
// at least one def level. Leaf contributes 1 level only if it is nullable.
def += (col.type().id() == type_id::LIST ? 1 : 0) +
(always_nullable or nullability[curr_nesting_level_idx] ? 1 : 0);
def_at_level.push_back(def);
++curr_nesting_level_idx;
};
while (cudf::is_nested(curr_col.type())) {
nesting_levels.push_back(curr_col);
add_def_at_level(curr_col);
while (curr_col.type().id() == type_id::STRUCT) {
// Go down the hierarchy until we get to the LIST or the leaf level
curr_col = curr_col.child(0);
}
if (curr_col.type().id() == type_id::LIST) {
auto child = curr_col.child(lists_column_view::child_column_index);
if (output_as_byte_array && child.type().id() == type_id::UINT8) {
// consider this the bottom
break;
}
curr_col = child;
if (not is_nested(curr_col.type())) {
// Special case: when the leaf data column is the immediate child of the list col then we
// want it to be included right away. Otherwise the struct containing it will be included in
// the next iteration of this loop.
nesting_levels.push_back(curr_col);
add_def_at_level(curr_col);
break;
}
}
}
[[maybe_unused]] auto [device_view_owners, d_nesting_levels] =
contiguous_copy_column_device_views<column_device_view>(nesting_levels, stream);
auto max_def_level = def_at_level.back();
thrust::exclusive_scan(
thrust::host, def_at_level.begin(), def_at_level.end(), def_at_level.begin());
max_def_level += def_at_level.back();
// Sliced list column views only have offsets applied to top level. Get offsets for each level.
rmm::device_uvector<size_type> d_column_offsets(nesting_levels.size(), stream);
rmm::device_uvector<size_type> d_column_ends(nesting_levels.size(), stream);
auto d_col = column_device_view::create(h_col, stream);
cudf::detail::device_single_thread(
[offset_at_level = d_column_offsets.data(),
end_idx_at_level = d_column_ends.data(),
level_max = d_column_offsets.size(),
col = *d_col] __device__() {
auto curr_col = col;
size_type off = curr_col.offset();
size_type end = off + curr_col.size();
size_type level = 0;
offset_at_level[level] = off;
end_idx_at_level[level] = end;
++level;
// Apply offset recursively until we get to leaf data
// Skip doing the following for any structs we encounter in between.
while (curr_col.type().id() == type_id::LIST or curr_col.type().id() == type_id::STRUCT) {
if (curr_col.type().id() == type_id::LIST) {
off = curr_col.child(lists_column_view::offsets_column_index).element<size_type>(off);
end = curr_col.child(lists_column_view::offsets_column_index).element<size_type>(end);
if (level < level_max) {
offset_at_level[level] = off;
end_idx_at_level[level] = end;
++level;
}
curr_col = curr_col.child(lists_column_view::child_column_index);
} else {
curr_col = curr_col.child(0);
}
}
},
stream);
thrust::host_vector<size_type> column_offsets =
cudf::detail::make_host_vector_async(d_column_offsets, stream);
thrust::host_vector<size_type> column_ends =
cudf::detail::make_host_vector_async(d_column_ends, stream);
stream.synchronize();
size_t max_vals_size = 0;
for (size_t l = 0; l < column_offsets.size(); ++l) {
max_vals_size += column_ends[l] - column_offsets[l];
}
auto d_nullability = cudf::detail::make_device_uvector_async(
nullability, stream, rmm::mr::get_current_device_resource());
rmm::device_uvector<uint8_t> rep_level(max_vals_size, stream);
rmm::device_uvector<uint8_t> def_level(max_vals_size, stream);
rmm::device_uvector<uint8_t> temp_rep_vals(max_vals_size, stream);
rmm::device_uvector<uint8_t> temp_def_vals(max_vals_size, stream);
rmm::device_uvector<size_type> new_offsets(0, stream);
size_type curr_rep_values_size = 0;
{
// At this point, curr_col contains the leaf column. Max nesting level is
// nesting_levels.size().
// We are going to start by merging the last column in nesting_levels (the leaf, which is at the
// index `nesting_levels.size() - 1`) with the second-to-last (which is at
// `nesting_levels.size() - 2`).
size_t level = nesting_levels.size() - 2;
curr_col = nesting_levels[level];
auto lcv = lists_column_view(get_list_level(curr_col));
auto offset_size_at_level = column_ends[level] - column_offsets[level] + 1;
// Get empties at this level
auto [empties, empties_idx, empties_size] =
get_empties(nesting_levels[level], column_offsets[level], column_ends[level]);
// Merge empty at deepest parent level with the rep, def level vals at leaf level
auto input_parent_rep_it = thrust::make_constant_iterator(level);
auto input_parent_def_it =
thrust::make_transform_iterator(empties_idx.begin(),
def_level_fn{d_nesting_levels + level,
d_nullability.data(),
start_at_sub_level[level],
def_at_level[level],
always_nullable});
// `nesting_levels.size()` == no of list levels + leaf. Max repetition level = no of list levels
auto input_child_rep_it = thrust::make_constant_iterator(nesting_levels.size() - 1);
auto input_child_def_it =
thrust::make_transform_iterator(thrust::make_counting_iterator(column_offsets[level + 1]),
def_level_fn{d_nesting_levels + level + 1,
d_nullability.data(),
start_at_sub_level[level + 1],
def_at_level[level + 1],
always_nullable});
// Zip the input and output value iterators so that merge operation is done only once
auto input_parent_zip_it =
thrust::make_zip_iterator(thrust::make_tuple(input_parent_rep_it, input_parent_def_it));
auto input_child_zip_it =
thrust::make_zip_iterator(thrust::make_tuple(input_child_rep_it, input_child_def_it));
auto output_zip_it =
thrust::make_zip_iterator(thrust::make_tuple(rep_level.begin(), def_level.begin()));
auto ends = thrust::merge_by_key(rmm::exec_policy(stream),
empties.begin(),
empties.begin() + empties_size,
thrust::make_counting_iterator(column_offsets[level + 1]),
thrust::make_counting_iterator(column_ends[level + 1]),
input_parent_zip_it,
input_child_zip_it,
thrust::make_discard_iterator(),
output_zip_it);
curr_rep_values_size = ends.second - output_zip_it;
// Scan to get distance by which each offset value is shifted due to the insertion of empties
auto scan_it = cudf::detail::make_counting_transform_iterator(
column_offsets[level],
[off = lcv.offsets().data<size_type>(), size = lcv.offsets().size()] __device__(
auto i) -> int { return (i + 1 < size) && (off[i] == off[i + 1]); });
rmm::device_uvector<size_type> scan_out(offset_size_at_level, stream);
thrust::exclusive_scan(
rmm::exec_policy(stream), scan_it, scan_it + offset_size_at_level, scan_out.begin());
// Add scan output to existing offsets to get new offsets into merged rep level values
new_offsets = rmm::device_uvector<size_type>(offset_size_at_level, stream);
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
offset_size_at_level,
[off = lcv.offsets().data<size_type>() + column_offsets[level],
scan_out = scan_out.data(),
new_off = new_offsets.data()] __device__(auto i) {
new_off[i] = off[i] - off[0] + scan_out[i];
});
// Set rep level values at level starts to appropriate rep level
auto scatter_it = thrust::make_constant_iterator(level);
thrust::scatter(rmm::exec_policy(stream),
scatter_it,
scatter_it + new_offsets.size() - 1,
new_offsets.begin(),
rep_level.begin());
}
// Having already merged the last two levels, we are now going to merge the result with the
// third-last level which is at index `nesting_levels.size() - 3`.
for (int level = nesting_levels.size() - 3; level >= 0; level--) {
curr_col = nesting_levels[level];
auto lcv = lists_column_view(get_list_level(curr_col));
auto offset_size_at_level = column_ends[level] - column_offsets[level] + 1;
// Get empties at this level
auto [empties, empties_idx, empties_size] =
get_empties(nesting_levels[level], column_offsets[level], column_ends[level]);
auto offset_transformer = [new_child_offsets = new_offsets.data(),
child_start = column_offsets[level + 1]] __device__(auto x) {
return new_child_offsets[x - child_start]; // (x - child's offset)
};
// We will be reading from old rep_levels and writing again to rep_levels. Swap the current
// rep values into temp_rep_vals so it can become the input and rep_levels can again be output.
std::swap(temp_rep_vals, rep_level);
std::swap(temp_def_vals, def_level);
// Merge empty at parent level with the rep, def level vals at current level
auto transformed_empties = thrust::make_transform_iterator(empties.begin(), offset_transformer);
auto input_parent_rep_it = thrust::make_constant_iterator(level);
auto input_parent_def_it =
thrust::make_transform_iterator(empties_idx.begin(),
def_level_fn{d_nesting_levels + level,
d_nullability.data(),
start_at_sub_level[level],
def_at_level[level],
always_nullable});
// Zip the input and output value iterators so that merge operation is done only once
auto input_parent_zip_it =
thrust::make_zip_iterator(thrust::make_tuple(input_parent_rep_it, input_parent_def_it));
auto input_child_zip_it =
thrust::make_zip_iterator(thrust::make_tuple(temp_rep_vals.begin(), temp_def_vals.begin()));
auto output_zip_it =
thrust::make_zip_iterator(thrust::make_tuple(rep_level.begin(), def_level.begin()));
auto ends = thrust::merge_by_key(rmm::exec_policy(stream),
transformed_empties,
transformed_empties + empties_size,
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(curr_rep_values_size),
input_parent_zip_it,
input_child_zip_it,
thrust::make_discard_iterator(),
output_zip_it);
curr_rep_values_size = ends.second - output_zip_it;
// Scan to get distance by which each offset value is shifted due to the insertion of dremel
// level value for an empty list
auto scan_it = cudf::detail::make_counting_transform_iterator(
column_offsets[level],
[off = lcv.offsets().data<size_type>(), size = lcv.offsets().size()] __device__(
auto i) -> int { return (i + 1 < size) && (off[i] == off[i + 1]); });
rmm::device_uvector<size_type> scan_out(offset_size_at_level, stream);
thrust::exclusive_scan(
rmm::exec_policy(stream), scan_it, scan_it + offset_size_at_level, scan_out.begin());
// Add scan output to existing offsets to get new offsets into merged rep level values
rmm::device_uvector<size_type> temp_new_offsets(offset_size_at_level, stream);
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
offset_size_at_level,
[off = lcv.offsets().data<size_type>() + column_offsets[level],
scan_out = scan_out.data(),
new_off = temp_new_offsets.data(),
offset_transformer] __device__(auto i) {
new_off[i] = offset_transformer(off[i]) + scan_out[i];
});
new_offsets = std::move(temp_new_offsets);
// Set rep level values at level starts to appropriate rep level
auto scatter_it = thrust::make_constant_iterator(level);
thrust::scatter(rmm::exec_policy(stream),
scatter_it,
scatter_it + new_offsets.size() - 1,
new_offsets.begin(),
rep_level.begin());
}
size_t level_vals_size = new_offsets.back_element(stream);
rep_level.resize(level_vals_size, stream);
def_level.resize(level_vals_size, stream);
stream.synchronize();
size_type leaf_data_size = column_ends.back() - column_offsets.back();
return dremel_data{std::move(new_offsets),
std::move(rep_level),
std::move(def_level),
leaf_data_size,
max_def_level};
}
} // namespace
dremel_data get_dremel_data(column_view h_col,
std::vector<uint8_t> nullability,
bool output_as_byte_array,
rmm::cuda_stream_view stream)
{
return get_encoding(h_col, nullability, output_as_byte_array, false, stream);
}
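// Example (illustrative): for a nullable LIST<INT32> column holding [[1, 2], [], null, [3]]
// with nullability {1, 0} (nullable list, non-nullable leaf), the encoding is expected to be
// roughly
//   rep_level = {0, 1, 0, 0, 0}
//   def_level = {2, 2, 1, 0, 2}
// i.e. one entry per leaf value plus one entry for each empty or null list, where def level 2
// marks a present leaf value, 1 an empty list and 0 a null list.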
dremel_data get_comparator_data(column_view h_col,
std::vector<uint8_t> nullability,
bool output_as_byte_array,
rmm::cuda_stream_view stream)
{
return get_encoding(h_col, nullability, output_as_byte_array, true, stream);
}
} // namespace cudf::detail
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/lists/utilities.hpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_view.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
namespace cudf::lists::detail {
/**
* @brief Generate list labels for elements in the child column of the input lists column.
*
* @param input The input lists column
* @param n_elements The number of elements in the child column of the input lists column
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned object
* @return A column containing list labels corresponding to each element in the child column
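 *
 * For example (illustrative), an input lists column [[0, 1], [], [2, 3, 4]] with
 * `n_elements == 5` yields the labels {0, 0, 2, 2, 2}.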
*/
std::unique_ptr<column> generate_labels(lists_column_view const& input,
size_type n_elements,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @brief Reconstruct an offsets column from the input list labels column.
*
* @param labels The list labels corresponding to each list element
* @param n_lists The number of lists to build the offsets column
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned object
* @return The output offsets column
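 *
 * For example (illustrative), labels {0, 0, 2, 2, 2} with `n_lists == 3` reconstruct the
 * offsets {0, 2, 2, 5}.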
*/
std::unique_ptr<column> reconstruct_offsets(column_view const& labels,
size_type n_lists,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @brief Generate 0-based list offsets from the offsets of the input lists column.
*
* @param input The input lists column
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned object
* @return The output offsets column with values start from 0
*/
std::unique_ptr<column> get_normalized_offsets(lists_column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
} // namespace cudf::lists::detail
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/lists/lists_column_view.cu
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/lists/list_view.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
lists_column_view::lists_column_view(column_view const& lists_column) : column_view(lists_column)
{
CUDF_EXPECTS(type().id() == type_id::LIST, "lists_column_view only supports lists");
}
column_view lists_column_view::parent() const { return static_cast<column_view>(*this); }
column_view lists_column_view::offsets() const
{
CUDF_EXPECTS(num_children() == 2, "lists column has an incorrect number of children");
return column_view::child(offsets_column_index);
}
column_view lists_column_view::child() const
{
CUDF_EXPECTS(num_children() == 2, "lists column has an incorrect number of children");
return column_view::child(child_column_index);
}
column_view lists_column_view::get_sliced_child(rmm::cuda_stream_view stream) const
{
// if I have a positive offset, I need to slice my child
if (offset() > 0) {
// theoretically this function could always do this step and be correct, but get_value<>
// actually hits the gpu so it's best to avoid it if possible.
size_type child_offset_start = cudf::detail::get_value<size_type>(offsets(), offset(), stream);
size_type child_offset_end =
cudf::detail::get_value<size_type>(offsets(), offset() + size(), stream);
return cudf::detail::slice(child(), {child_offset_start, child_offset_end}, stream).front();
}
// if I don't have a positive offset, but I am shorter than my offsets() would otherwise indicate,
// I need to do a split and return the front.
if (size() < offsets().size() - 1) {
size_type child_offset = cudf::detail::get_value<size_type>(offsets(), size(), stream);
return cudf::detail::slice(child(), {0, child_offset}, stream).front();
}
// otherwise just return the child directly
return child();
}
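// Example (illustrative): for a lists column whose offsets are {0, 2, 5, 7}, slicing the
// column to rows [1, 3) gives offset() == 1 and size() == 2, so get_sliced_child() returns
// the child rows [2, 7) rather than the whole child column.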
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/lists/lists_column_factories.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/sizes_to_offsets_iterator.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/lists/detail/lists_column_factories.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/sequence.h>
namespace cudf {
namespace lists {
namespace detail {
std::unique_ptr<cudf::column> make_lists_column_from_scalar(list_scalar const& value,
size_type size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (size == 0) {
return make_lists_column(0,
make_empty_column(type_to_id<size_type>()),
empty_like(value.view()),
0,
cudf::detail::create_null_mask(0, mask_state::UNALLOCATED, stream, mr),
stream,
mr);
}
auto mr_final = size == 1 ? mr : rmm::mr::get_current_device_resource();
// Handcraft a 1-row column
auto sizes_itr = thrust::constant_iterator<size_type>(value.view().size());
auto offsets = std::get<0>(
cudf::detail::make_offsets_child_column(sizes_itr, sizes_itr + 1, stream, mr_final));
size_type null_count = value.is_valid(stream) ? 0 : 1;
auto null_mask_state = null_count ? mask_state::ALL_NULL : mask_state::UNALLOCATED;
auto null_mask = cudf::detail::create_null_mask(1, null_mask_state, stream, mr_final);
if (size == 1) {
auto child = std::make_unique<column>(value.view(), stream, mr_final);
return make_lists_column(
1, std::move(offsets), std::move(child), null_count, std::move(null_mask), stream, mr_final);
}
auto children_views = std::vector<column_view>{offsets->view(), value.view()};
auto one_row_col_view = column_view(data_type{type_id::LIST},
1,
nullptr,
static_cast<bitmask_type const*>(null_mask.data()),
null_count,
0,
children_views);
auto begin = thrust::make_constant_iterator(0);
auto res = cudf::detail::gather(table_view({one_row_col_view}),
begin,
begin + size,
out_of_bounds_policy::DONT_CHECK,
stream,
mr);
return std::move(res->release()[0]);
}
std::unique_ptr<column> make_empty_lists_column(data_type child_type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto offsets = make_empty_column(data_type(type_to_id<size_type>()));
auto child = make_empty_column(child_type);
return make_lists_column(
0, std::move(offsets), std::move(child), 0, rmm::device_buffer{}, stream, mr);
}
std::unique_ptr<column> make_all_nulls_lists_column(size_type size,
data_type child_type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto offsets = [&] {
auto offsets_buff =
cudf::detail::make_zeroed_device_uvector_async<size_type>(size + 1, stream, mr);
return std::make_unique<column>(std::move(offsets_buff), rmm::device_buffer{}, 0);
}();
auto child = make_empty_column(child_type);
auto null_mask = cudf::detail::create_null_mask(size, mask_state::ALL_NULL, stream, mr);
return make_lists_column(
size, std::move(offsets), std::move(child), size, std::move(null_mask), stream, mr);
}
} // namespace detail
} // namespace lists
/**
* @copydoc cudf::make_lists_column
*/
std::unique_ptr<column> make_lists_column(size_type num_rows,
std::unique_ptr<column> offsets_column,
std::unique_ptr<column> child_column,
size_type null_count,
rmm::device_buffer&& null_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (null_count > 0) { CUDF_EXPECTS(null_mask.size() > 0, "Column with nulls must be nullable."); }
CUDF_EXPECTS(
(num_rows == 0 && offsets_column->size() == 0) || num_rows == offsets_column->size() - 1,
"Invalid offsets column size for lists column.");
CUDF_EXPECTS(offsets_column->null_count() == 0, "Offsets column should not contain nulls");
CUDF_EXPECTS(child_column != nullptr, "Must pass a valid child column");
// Save type_id of the child column for later use.
auto const child_type_id = child_column->type().id();
std::vector<std::unique_ptr<column>> children;
children.emplace_back(std::move(offsets_column));
children.emplace_back(std::move(child_column));
auto output = std::make_unique<column>(cudf::data_type{type_id::LIST},
num_rows,
rmm::device_buffer{},
std::move(null_mask),
null_count,
std::move(children));
// We need to enforce all null lists to be empty.
// `has_nonempty_nulls` is less expensive than `purge_nonempty_nulls` and can save some
// run time if we don't have any non-empty nulls.
if (auto const output_cv = output->view(); detail::has_nonempty_nulls(output_cv, stream)) {
return detail::purge_nonempty_nulls(output_cv, stream, mr);
}
return output;
}
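// Example usage (illustrative sketch): building the lists column [[1, 2], [], [3]] from a
// caller-provided offsets column `offsets` holding {0, 2, 2, 3} and a child column `child`
// holding {1, 2, 3}, with no nulls.
//
//   auto lists = cudf::make_lists_column(3,
//                                        std::move(offsets),
//                                        std::move(child),
//                                        0,                     // null count
//                                        rmm::device_buffer{},  // no null mask
//                                        cudf::get_default_stream(),
//                                        rmm::mr::get_current_device_resource());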
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/lists/explode.cu
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/lists/explode.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/advance.h>
#include <thrust/binary_search.h>
#include <thrust/distance.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/optional.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <memory>
#include <type_traits>
namespace cudf {
namespace detail {
// explode column gather map uses cudf::out_of_bounds_policy::NULLIFY to
// fill nulls where there are invalid indices
constexpr size_type InvalidIndex = -1;
namespace {
std::unique_ptr<table> build_table(
table_view const& input_table,
size_type const explode_column_idx,
column_view const& sliced_child,
cudf::device_span<size_type const> gather_map,
thrust::optional<cudf::device_span<size_type const>> explode_col_gather_map,
thrust::optional<rmm::device_uvector<size_type>> position_array,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto select_iter = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
[explode_column_idx](size_type i) { return i >= explode_column_idx ? i + 1 : i; });
auto gathered_table =
detail::gather(input_table.select(select_iter, select_iter + input_table.num_columns() - 1),
gather_map.begin(),
gather_map.end(),
cudf::out_of_bounds_policy::DONT_CHECK,
stream,
mr);
std::vector<std::unique_ptr<column>> columns = gathered_table->release();
columns.insert(columns.begin() + explode_column_idx,
explode_col_gather_map
? std::move(detail::gather(table_view({sliced_child}),
explode_col_gather_map->begin(),
explode_col_gather_map->end(),
cudf::out_of_bounds_policy::NULLIFY,
stream,
mr)
->release()[0])
: std::make_unique<column>(sliced_child, stream, mr));
if (position_array) {
size_type position_size = position_array->size();
// build the null mask for position based on invalid entries in gather map
auto nullmask = explode_col_gather_map ? valid_if(
explode_col_gather_map->begin(),
explode_col_gather_map->end(),
[] __device__(auto i) { return i != InvalidIndex; },
stream,
mr)
: std::pair<rmm::device_buffer, size_type>{
rmm::device_buffer(0, stream), size_type{0}};
columns.insert(columns.begin() + explode_column_idx,
std::make_unique<column>(data_type(type_to_id<size_type>()),
position_size,
position_array->release(),
std::move(nullmask.first),
nullmask.second));
}
return std::make_unique<table>(std::move(columns));
}
} // namespace
std::unique_ptr<table> explode(table_view const& input_table,
size_type const explode_column_idx,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
lists_column_view explode_col{input_table.column(explode_column_idx)};
auto sliced_child = explode_col.get_sliced_child(stream);
rmm::device_uvector<size_type> gather_map(sliced_child.size(), stream);
// Sliced columns may require rebasing of the offsets.
auto offsets = explode_col.offsets_begin();
// offsets + 1 here to skip the 0th offset, which removes a - 1 operation later.
auto offsets_minus_one = thrust::make_transform_iterator(
thrust::next(offsets), [offsets] __device__(auto i) { return (i - offsets[0]) - 1; });
auto counting_iter = thrust::make_counting_iterator(0);
// This looks like an off-by-one bug, but what is going on here is that we need to reduce each
// result from `lower_bound` by 1 to build the correct gather map. This can be accomplished by
// skipping the first entry and using the result of `lower_bound` directly.
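  // For example (illustrative): offsets {0, 2, 2, 3} give offsets_minus_one = {1, 1, 2};
  // searching for {0, 1, 2} with lower_bound then yields gather_map = {0, 0, 2}, i.e. the
  // first parent row repeats twice, the empty second row is skipped, and the third row
  // appears once.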
thrust::lower_bound(rmm::exec_policy(stream),
offsets_minus_one,
offsets_minus_one + explode_col.size(),
counting_iter,
counting_iter + gather_map.size(),
gather_map.begin());
return build_table(input_table,
explode_column_idx,
sliced_child,
gather_map,
thrust::nullopt,
thrust::nullopt,
stream,
mr);
}
std::unique_ptr<table> explode_position(table_view const& input_table,
size_type const explode_column_idx,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
lists_column_view explode_col{input_table.column(explode_column_idx)};
auto sliced_child = explode_col.get_sliced_child(stream);
rmm::device_uvector<size_type> gather_map(sliced_child.size(), stream);
// Sliced columns may require rebasing of the offsets.
auto offsets = explode_col.offsets_begin();
// offsets + 1 here to skip the 0th offset, which removes a - 1 operation later.
auto offsets_minus_one = thrust::make_transform_iterator(
offsets + 1, [offsets] __device__(auto i) { return (i - offsets[0]) - 1; });
auto counting_iter = thrust::make_counting_iterator(0);
rmm::device_uvector<size_type> pos(sliced_child.size(), stream, mr);
// This looks like an off-by-one bug, but what is going on here is that we need to reduce each
// result from `lower_bound` by 1 to build the correct gather map. This can be accomplished by
// skipping the first entry and using the result of `lower_bound` directly.
thrust::transform(
rmm::exec_policy(stream),
counting_iter,
counting_iter + gather_map.size(),
gather_map.begin(),
[position_array = pos.data(),
offsets_minus_one,
offsets,
offset_size = explode_col.size()] __device__(auto idx) -> size_type {
auto lb_idx = thrust::distance(
offsets_minus_one,
thrust::lower_bound(thrust::seq, offsets_minus_one, offsets_minus_one + offset_size, idx));
position_array[idx] = idx - (offsets[lb_idx] - offsets[0]);
return lb_idx;
});
return build_table(input_table,
explode_column_idx,
sliced_child,
gather_map,
thrust::nullopt,
std::move(pos),
stream,
mr);
}
std::unique_ptr<table> explode_outer(table_view const& input_table,
size_type const explode_column_idx,
bool include_position,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
lists_column_view explode_col{input_table.column(explode_column_idx)};
auto sliced_child = explode_col.get_sliced_child(stream);
auto counting_iter = thrust::make_counting_iterator(0);
auto offsets = explode_col.offsets_begin();
// number of nulls or empty lists found so far in the explode column
rmm::device_uvector<size_type> null_or_empty_offset(explode_col.size(), stream);
auto null_or_empty = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
[offsets, offsets_size = explode_col.size() - 1] __device__(int idx) {
return (idx > offsets_size || (offsets[idx + 1] != offsets[idx])) ? 0 : 1;
});
thrust::inclusive_scan(rmm::exec_policy(stream),
null_or_empty,
null_or_empty + explode_col.size(),
null_or_empty_offset.begin());
auto null_or_empty_count =
null_or_empty_offset.size() > 0 ? null_or_empty_offset.back_element(stream) : 0;
if (null_or_empty_count == 0) {
// performance penalty to run the below loop if there are no nulls or empty lists.
// run simple explode instead
return include_position ? explode_position(input_table, explode_column_idx, stream, mr)
: explode(input_table, explode_column_idx, stream, mr);
}
auto gather_map_size = sliced_child.size() + null_or_empty_count;
rmm::device_uvector<size_type> gather_map(gather_map_size, stream);
rmm::device_uvector<size_type> explode_col_gather_map(gather_map_size, stream);
rmm::device_uvector<size_type> pos(include_position ? gather_map_size : 0, stream, mr);
// offsets + 1 here to skip the 0th offset, which removes a - 1 operation later.
auto offsets_minus_one = thrust::make_transform_iterator(
thrust::next(offsets), [offsets] __device__(auto i) { return (i - offsets[0]) - 1; });
auto fill_gather_maps = [offsets_minus_one,
gather_map_p = gather_map.begin(),
explode_col_gather_map_p = explode_col_gather_map.begin(),
position_array = pos.begin(),
sliced_child_size = sliced_child.size(),
null_or_empty_offset_p = null_or_empty_offset.begin(),
include_position,
offsets,
null_or_empty,
offset_size = explode_col.offsets().size() - 1] __device__(auto idx) {
if (idx < sliced_child_size) {
auto lb_idx =
thrust::distance(offsets_minus_one,
thrust::lower_bound(
thrust::seq, offsets_minus_one, offsets_minus_one + (offset_size), idx));
auto index_to_write = null_or_empty_offset_p[lb_idx] + idx;
gather_map_p[index_to_write] = lb_idx;
explode_col_gather_map_p[index_to_write] = idx;
if (include_position) {
position_array[index_to_write] = idx - (offsets[lb_idx] - offsets[0]);
}
}
if (null_or_empty[idx]) {
auto invalid_index = null_or_empty_offset_p[idx] == 0
? offsets[idx]
: offsets[idx] + null_or_empty_offset_p[idx] - 1;
gather_map_p[invalid_index] = idx;
explode_col_gather_map_p[invalid_index] = InvalidIndex;
if (include_position) { position_array[invalid_index] = 0; }
}
};
// we need to do this loop at least explode_col.size() times or we may not properly fill in null and
// empty entries.
auto loop_count = std::max(sliced_child.size(), explode_col.size());
// Fill in gather map with all the child column's entries
thrust::for_each(
rmm::exec_policy(stream), counting_iter, counting_iter + loop_count, fill_gather_maps);
return build_table(
input_table,
explode_column_idx,
sliced_child,
gather_map,
explode_col_gather_map,
include_position ? std::move(pos) : thrust::optional<rmm::device_uvector<size_type>>{},
stream,
mr);
}
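// Example (illustrative): exploding [[5, 6], [], null, [7]] with explode_outer keeps the
// empty and null rows, producing an exploded column {5, 6, null, null, 7} whose parent rows
// are gathered as {0, 0, 1, 2, 3}; plain explode() would drop rows 1 and 2 entirely.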
} // namespace detail
/**
* @copydoc cudf::explode(table_view const&, size_type, rmm::mr::device_memory_resource*)
*/
std::unique_ptr<table> explode(table_view const& input_table,
size_type explode_column_idx,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
CUDF_EXPECTS(input_table.column(explode_column_idx).type().id() == type_id::LIST,
"Unsupported non-list column");
return detail::explode(input_table, explode_column_idx, cudf::get_default_stream(), mr);
}
/**
* @copydoc cudf::explode_position(table_view const&, size_type, rmm::mr::device_memory_resource*)
*/
std::unique_ptr<table> explode_position(table_view const& input_table,
size_type explode_column_idx,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
CUDF_EXPECTS(input_table.column(explode_column_idx).type().id() == type_id::LIST,
"Unsupported non-list column");
return detail::explode_position(input_table, explode_column_idx, cudf::get_default_stream(), mr);
}
/**
* @copydoc cudf::explode_outer(table_view const&, size_type, rmm::mr::device_memory_resource*)
*/
std::unique_ptr<table> explode_outer(table_view const& input_table,
size_type explode_column_idx,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
CUDF_EXPECTS(input_table.column(explode_column_idx).type().id() == type_id::LIST,
"Unsupported non-list column");
return detail::explode_outer(
input_table, explode_column_idx, false, cudf::get_default_stream(), mr);
}
/**
* @copydoc cudf::explode_outer_position(table_view const&, size_type,
* rmm::mr::device_memory_resource*)
*/
std::unique_ptr<table> explode_outer_position(table_view const& input_table,
size_type explode_column_idx,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
CUDF_EXPECTS(input_table.column(explode_column_idx).type().id() == type_id::LIST,
"Unsupported non-list column");
return detail::explode_outer(
input_table, explode_column_idx, true, cudf::get_default_stream(), mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/lists/interleave_columns.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/concatenate.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/valid_if.cuh>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/strings/detail/strings_children.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
namespace cudf {
namespace lists {
namespace detail {
namespace {
/**
* @brief Generate list offsets and list validities for the output lists column from the table_view
* of the input lists columns.
*/
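// A hedged illustration (example values only): interleaving col0 = { {1,2}, {3} } with
// col1 = { {4}, {5,6} } visits (col0,row0), (col1,row0), (col0,row1), (col1,row1), so the computed
// sizes are {2, 1, 1, 2} and the exclusive scan below turns them into offsets {0, 2, 3, 4, 6}.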
std::pair<std::unique_ptr<column>, rmm::device_uvector<int8_t>>
generate_list_offsets_and_validities(table_view const& input,
bool has_null_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const num_cols = input.num_columns();
auto const num_rows = input.num_rows();
auto const num_output_lists = num_rows * num_cols;
auto const table_dv_ptr = table_device_view::create(input, stream);
// The output offsets column.
auto list_offsets = make_numeric_column(
data_type{type_to_id<size_type>()}, num_output_lists + 1, mask_state::UNALLOCATED, stream, mr);
auto const d_offsets = list_offsets->mutable_view().template begin<size_type>();
// The array of int8_t to store validities for list elements.
auto validities = rmm::device_uvector<int8_t>(has_null_mask ? num_output_lists : 0, stream);
// Compute list sizes and validities.
thrust::transform(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(num_output_lists),
d_offsets,
[num_cols,
table_dv = *table_dv_ptr,
d_validities = validities.begin(),
has_null_mask] __device__(size_type const idx) {
auto const col_id = idx % num_cols;
auto const list_id = idx / num_cols;
auto const& lists_col = table_dv.column(col_id);
if (has_null_mask) { d_validities[idx] = static_cast<int8_t>(lists_col.is_valid(list_id)); }
auto const list_offsets =
lists_col.child(lists_column_view::offsets_column_index).template data<size_type>() +
lists_col.offset();
return list_offsets[list_id + 1] - list_offsets[list_id];
});
// Compute offsets from sizes.
thrust::exclusive_scan(
rmm::exec_policy(stream), d_offsets, d_offsets + num_output_lists + 1, d_offsets);
return {std::move(list_offsets), std::move(validities)};
}
/**
* @brief Concatenate all input columns into one column and gather its rows to generate an output
* column that is the result of interleaving the input columns.
*/
std::unique_ptr<column> concatenate_and_gather_lists(host_span<column_view const> columns_to_concat,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// Concatenate all columns into a single (temporary) column.
auto const concatenated_col =
cudf::detail::concatenate(columns_to_concat, stream, rmm::mr::get_current_device_resource());
  // The number of input columns is known to be non-zero, thus it's safe to call `front()` here.
auto const num_cols = columns_to_concat.size();
auto const num_input_rows = columns_to_concat.front().size();
// Generate the gather map that interleaves the input columns.
auto const iter_gather = cudf::detail::make_counting_transform_iterator(
0, [num_cols, num_input_rows] __device__(auto const idx) {
auto const source_col_idx = idx % num_cols;
auto const source_row_idx = idx / num_cols;
return source_col_idx * num_input_rows + source_row_idx;
});
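  // Hedged illustration (hypothetical values): with num_cols = 2 and num_input_rows = 3, the
  // gather map evaluates to {0, 3, 1, 4, 2, 5}, i.e. the output alternates between the two
  // concatenated column segments row by row.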
// The gather API should be able to handle any data type for the input columns.
auto result = cudf::detail::gather(table_view{{concatenated_col->view()}},
iter_gather,
iter_gather + concatenated_col->size(),
out_of_bounds_policy::DONT_CHECK,
stream,
mr);
return std::move(result->release()[0]);
}
/**
* @brief Compute string sizes, string validities, and interleave string lists functor.
*
* This functor is executed twice. In the first pass, the sizes and validities of the output strings
* will be computed. In the second pass, this will interleave the lists of strings of the given
* table containing those lists.
*/
struct compute_string_sizes_and_interleave_lists_fn {
table_device_view const table_dv;
// Store list offsets of the output lists column.
size_type const* const dst_list_offsets;
// Flag to specify whether to compute string validities.
bool const has_null_mask;
// Store offsets of the strings.
size_type* d_offsets{nullptr};
// If d_chars == nullptr: only compute sizes and validities of the output strings.
// If d_chars != nullptr: only interleave lists of strings.
char* d_chars{nullptr};
// We need to set `1` or `0` for the validities of the strings in the child column.
int8_t* d_validities{nullptr};
__device__ void operator()(size_type const idx)
{
auto const num_cols = table_dv.num_columns();
auto const col_id = idx % num_cols;
auto const list_id = idx / num_cols;
auto const& lists_col = table_dv.column(col_id);
if (has_null_mask and lists_col.is_null(list_id)) { return; }
auto const list_offsets =
lists_col.child(lists_column_view::offsets_column_index).template data<size_type>() +
lists_col.offset();
auto const& str_col = lists_col.child(lists_column_view::child_column_index);
auto const str_offsets =
str_col.child(strings_column_view::offsets_column_index).template data<size_type>();
// The range of indices of the strings within the source list.
auto const start_str_idx = list_offsets[list_id];
auto const end_str_idx = list_offsets[list_id + 1];
    // In the case of an empty list (i.e. one that doesn't contain any string element), we just
    // ignore it because there will not be anything to store for that list in the child column.
if (start_str_idx == end_str_idx) { return; }
// read_idx and write_idx are indices of string elements.
size_type write_idx = dst_list_offsets[idx];
if (not d_chars) { // just compute sizes and validities of strings within a list
for (auto read_idx = start_str_idx; read_idx < end_str_idx; ++read_idx, ++write_idx) {
if (has_null_mask) {
d_validities[write_idx] = static_cast<int8_t>(str_col.is_valid(read_idx));
}
d_offsets[write_idx] = str_offsets[read_idx + 1] - str_offsets[read_idx];
}
} else { // just copy the entire memory region containing all strings in the list
// start_byte and end_byte are indices of character of the string elements.
auto const start_byte = str_offsets[start_str_idx];
auto const end_byte = str_offsets[end_str_idx];
if (start_byte < end_byte) {
auto const input_ptr =
str_col.child(strings_column_view::chars_column_index).template data<char>() + start_byte;
auto const output_ptr = d_chars + d_offsets[write_idx];
thrust::copy(thrust::seq, input_ptr, input_ptr + end_byte - start_byte, output_ptr);
}
}
}
};
// Error case when no other overload or specialization is available
template <typename T, typename Enable = void>
struct interleave_list_entries_impl {
template <typename... Args>
std::unique_ptr<column> operator()(Args&&...)
{
CUDF_FAIL("Called `interleave_list_entries_fn()` on non-supported types.");
}
};
template <typename T>
struct interleave_list_entries_impl<T, std::enable_if_t<std::is_same_v<T, cudf::string_view>>> {
std::unique_ptr<column> operator()(table_view const& input,
column_view const& output_list_offsets,
size_type num_output_lists,
size_type num_output_entries,
bool data_has_null_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const noexcept
{
auto const table_dv_ptr = table_device_view::create(input, stream);
auto comp_fn = compute_string_sizes_and_interleave_lists_fn{
*table_dv_ptr, output_list_offsets.template begin<size_type>(), data_has_null_mask};
auto validities =
rmm::device_uvector<int8_t>(data_has_null_mask ? num_output_entries : 0, stream);
comp_fn.d_validities = validities.data();
auto [offsets_column, chars_column] = cudf::strings::detail::make_strings_children(
comp_fn, num_output_lists, num_output_entries, stream, mr);
auto [null_mask, null_count] =
cudf::detail::valid_if(validities.begin(), validities.end(), thrust::identity{}, stream, mr);
return make_strings_column(num_output_entries,
std::move(offsets_column),
std::move(chars_column),
null_count,
std::move(null_mask));
}
};
template <typename T>
struct interleave_list_entries_impl<T, std::enable_if_t<cudf::is_fixed_width<T>()>> {
std::unique_ptr<column> operator()(table_view const& input,
column_view const& output_list_offsets,
size_type num_output_lists,
size_type num_output_entries,
bool data_has_null_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const noexcept
{
auto const num_cols = input.num_columns();
auto const table_dv_ptr = table_device_view::create(input, stream);
// The output child column.
auto output = cudf::detail::allocate_like(lists_column_view(*input.begin()).child(),
num_output_entries,
mask_allocation_policy::NEVER,
stream,
mr);
auto output_dv_ptr = mutable_column_device_view::create(*output, stream);
// The array of int8_t to store entry validities.
auto validities =
rmm::device_uvector<int8_t>(data_has_null_mask ? num_output_entries : 0, stream);
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
num_output_lists,
[num_cols,
table_dv = *table_dv_ptr,
d_validities = validities.begin(),
d_offsets = output_list_offsets.template begin<size_type>(),
d_output = output_dv_ptr->template begin<T>(),
data_has_null_mask] __device__(size_type const idx) {
auto const col_id = idx % num_cols;
auto const list_id = idx / num_cols;
auto const& lists_col = table_dv.column(col_id);
auto const list_offsets =
lists_col.child(lists_column_view::offsets_column_index).template data<size_type>() +
lists_col.offset();
auto const& data_col = lists_col.child(lists_column_view::child_column_index);
// The range of indices of the entries within the source list.
auto const start_idx = list_offsets[list_id];
auto const end_idx = list_offsets[list_id + 1];
auto const write_start = d_offsets[idx];
// Fill the validities array if necessary.
if (data_has_null_mask) {
for (auto read_idx = start_idx, write_idx = write_start; read_idx < end_idx;
++read_idx, ++write_idx) {
d_validities[write_idx] = static_cast<int8_t>(data_col.is_valid(read_idx));
}
}
// Do a copy for the entire list entries.
auto const input_ptr =
reinterpret_cast<char const*>(data_col.template data<T>() + start_idx);
auto const output_ptr = reinterpret_cast<char*>(&d_output[write_start]);
thrust::copy(
thrust::seq, input_ptr, input_ptr + sizeof(T) * (end_idx - start_idx), output_ptr);
});
if (data_has_null_mask) {
auto [null_mask, null_count] = cudf::detail::valid_if(
validities.begin(), validities.end(), thrust::identity{}, stream, mr);
if (null_count > 0) { output->set_null_mask(null_mask, null_count); }
}
return output;
}
};
/**
* @brief Struct used in type_dispatcher to interleave list entries of the input lists columns and
* output the results into a destination column.
*/
struct interleave_list_entries_fn {
template <class T>
std::unique_ptr<column> operator()(table_view const& input,
column_view const& output_list_offsets,
size_type num_output_lists,
size_type num_output_entries,
bool data_has_null_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
return interleave_list_entries_impl<T>{}(input,
output_list_offsets,
num_output_lists,
num_output_entries,
data_has_null_mask,
stream,
mr);
}
};
} // anonymous namespace
/**
* @copydoc cudf::lists::detail::interleave_columns
*
*/
std::unique_ptr<column> interleave_columns(table_view const& input,
bool has_null_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const entry_type = lists_column_view(*input.begin()).child().type();
for (auto const& col : input) {
CUDF_EXPECTS(col.type().id() == type_id::LIST,
"All columns of the input table must be of lists column type.");
auto const child_col = lists_column_view(col).child();
CUDF_EXPECTS(entry_type == child_col.type(),
"The types of entries in the input columns must be the same.");
}
if (input.num_rows() == 0) { return cudf::empty_like(input.column(0)); }
if (input.num_columns() == 1) { return std::make_unique<column>(*(input.begin()), stream, mr); }
  // For nested types, we rely on the `concatenate_and_gather_lists` method, which costs more
  // memory due to concatenation of the input columns into a temporary column. For non-nested
  // types, we can directly interleave the input columns into the output column for better
  // efficiency.
if (cudf::is_nested(entry_type)) {
auto const input_columns = std::vector<column_view>(input.begin(), input.end());
return concatenate_and_gather_lists(host_span<column_view const>{input_columns}, stream, mr);
}
// Generate offsets of the output lists column.
auto [list_offsets, list_validities] =
generate_list_offsets_and_validities(input, has_null_mask, stream, mr);
auto const offsets_view = list_offsets->view();
  // Copy entries from the input lists columns to the output lists column - this needs to be
  // specialized for different types.
auto const num_output_lists = input.num_rows() * input.num_columns();
auto const num_output_entries =
cudf::detail::get_value<size_type>(offsets_view, num_output_lists, stream);
auto const data_has_null_mask =
std::any_of(std::cbegin(input), std::cend(input), [](auto const& col) {
return col.child(lists_column_view::child_column_index).nullable();
});
auto list_entries = type_dispatcher<dispatch_storage_type>(entry_type,
interleave_list_entries_fn{},
input,
offsets_view,
num_output_lists,
num_output_entries,
data_has_null_mask,
stream,
mr);
if (not has_null_mask) {
return make_lists_column(num_output_lists,
std::move(list_offsets),
std::move(list_entries),
0,
rmm::device_buffer{},
stream,
mr);
}
auto [null_mask, null_count] = cudf::detail::valid_if(
list_validities.begin(), list_validities.end(), thrust::identity{}, stream, mr);
return make_lists_column(num_output_lists,
std::move(list_offsets),
std::move(list_entries),
null_count,
null_count ? std::move(null_mask) : rmm::device_buffer{},
stream,
mr);
}
} // namespace detail
} // namespace lists
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/lists/count_elements.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/lists/count_elements.hpp>
#include <cudf/lists/list_device_view.cuh>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
#include <thrust/transform_scan.h>
namespace cudf {
namespace lists {
namespace detail {
/**
* @brief Returns a numeric column containing lengths of each element
*
* @param input Input lists column
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New size_type column with lengths
*/
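// Hedged usage sketch (illustrative values only): for an input lists column { {1,2}, null, {3} }
// the result is the size_type column {2, null, 1}, which shares the parent's null mask.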
std::unique_ptr<column> count_elements(lists_column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto device_column = cudf::column_device_view::create(input.parent(), stream);
auto d_column = *device_column;
// create output column
auto output = make_fixed_width_column(data_type{type_to_id<size_type>()},
input.size(),
cudf::detail::copy_bitmask(input.parent(), stream, mr),
input.null_count(),
stream,
mr);
// fill in the sizes
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(input.size()),
output->mutable_view().begin<size_type>(),
list_size_functor{d_column});
output->set_null_count(input.null_count()); // reset null count
return output;
}
} // namespace detail
// external APIS
std::unique_ptr<column> count_elements(lists_column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::count_elements(input, stream, mr);
}
} // namespace lists
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/lists/utilities.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "utilities.hpp"
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/labeling/label_segments.cuh>
namespace cudf::lists::detail {
std::unique_ptr<column> generate_labels(lists_column_view const& input,
size_type n_elements,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto labels = make_numeric_column(
data_type(type_to_id<size_type>()), n_elements, cudf::mask_state::UNALLOCATED, stream, mr);
auto const labels_begin = labels->mutable_view().template begin<size_type>();
cudf::detail::label_segments(
input.offsets_begin(), input.offsets_end(), labels_begin, labels_begin + n_elements, stream);
return labels;
}
std::unique_ptr<column> reconstruct_offsets(column_view const& labels,
size_type n_lists,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto out_offsets = make_numeric_column(
data_type{type_to_id<size_type>()}, n_lists + 1, mask_state::UNALLOCATED, stream, mr);
auto const labels_begin = labels.template begin<size_type>();
auto const offsets_begin = out_offsets->mutable_view().template begin<size_type>();
cudf::detail::labels_to_offsets(labels_begin,
labels_begin + labels.size(),
offsets_begin,
offsets_begin + out_offsets->size(),
stream);
return out_offsets;
}
std::unique_ptr<column> get_normalized_offsets(lists_column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) { return empty_like(input.offsets()); }
auto out_offsets = make_numeric_column(data_type(type_to_id<size_type>()),
input.size() + 1,
cudf::mask_state::UNALLOCATED,
stream,
mr);
thrust::transform(rmm::exec_policy(stream),
input.offsets_begin(),
input.offsets_end(),
out_offsets->mutable_view().begin<size_type>(),
[d_offsets = input.offsets_begin()] __device__(auto const offset_val) {
// The first offset value, used for zero-normalizing offsets.
return offset_val - *d_offsets;
});
return out_offsets;
}
} // namespace cudf::lists::detail
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/lists/extract.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/sequence.hpp>
#include <cudf/detail/structs/utilities.hpp>
#include <cudf/lists/detail/extract.hpp>
#include <cudf/lists/detail/gather.cuh>
#include <cudf/lists/extract.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/iterator/constant_iterator.h>
#include <limits>
namespace cudf {
namespace lists {
namespace detail {
namespace {
/**
* @brief Helper to construct a column of indices, for use with `segmented_gather()`.
*
* When indices are specified as a column, e.g. `{5, -4, 3, -2, 1, null}`,
* the column returned is: `{5, -4, 3, -2, 1, MAX_SIZE_TYPE}`.
* All null indices are replaced with `MAX_SIZE_TYPE = numeric_limits<size_type>::max()`.
*
* The returned column can then be used to construct a lists column, for use
* with `segmented_gather()`.
*/
std::unique_ptr<cudf::column> make_index_child(column_view const& indices,
size_type,
rmm::cuda_stream_view stream)
{
// New column, near identical to `indices`, except with null values replaced.
// `segmented_gather()` on a null index should produce a null row.
if (not indices.nullable()) { return std::make_unique<column>(indices, stream); }
auto const d_indices = column_device_view::create(indices, stream);
// Replace null indices with MAX_SIZE_TYPE, so that gather() returns null for them.
auto const null_replaced_iter_begin =
cudf::detail::make_null_replacement_iterator(*d_indices, std::numeric_limits<size_type>::max());
auto index_child =
make_numeric_column(data_type{type_id::INT32}, indices.size(), mask_state::UNALLOCATED, stream);
thrust::copy_n(rmm::exec_policy(stream),
null_replaced_iter_begin,
indices.size(),
index_child->mutable_view().begin<size_type>());
return index_child;
}
/**
* @brief Helper to construct a column of indices, for use with `segmented_gather()`.
*
* When indices are specified as a size_type, e.g. `7`,
* the column returned is: `{ 7, 7, 7, 7, 7 }`.
*
* The returned column can then be used to construct a lists column, for use
* with `segmented_gather()`.
*/
std::unique_ptr<cudf::column> make_index_child(size_type index,
size_type num_rows,
rmm::cuda_stream_view stream)
{
auto index_child = // [index, index, index, ..., index]
make_numeric_column(data_type{type_id::INT32}, num_rows, mask_state::UNALLOCATED, stream);
thrust::fill_n(
rmm::exec_policy(stream), index_child->mutable_view().begin<size_type>(), num_rows, index);
return index_child;
}
/**
* @brief Helper to construct offsets column for an index vector.
*
 * Constructs the sequence: `{ 0, 1, 2, 3, ..., num_lists }` (i.e. `num_lists + 1` values).
* This may be used to construct an "index-list" column, where each list row
* has a single element.
*/
std::unique_ptr<cudf::column> make_index_offsets(size_type num_lists, rmm::cuda_stream_view stream)
{
return cudf::detail::sequence(num_lists + 1,
cudf::scalar_type_t<size_type>(0, true, stream),
stream,
rmm::mr::get_current_device_resource());
}
} // namespace
/**
* @copydoc cudf::lists::extract_list_element
* @tparam index_t The type used to specify the index values (either column_view or size_type)
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
template <typename index_t>
std::unique_ptr<column> extract_list_element_impl(lists_column_view lists_column,
index_t const& index,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const num_lists = lists_column.size();
if (num_lists == 0) { return empty_like(lists_column.child()); }
// Given an index (or indices vector), an index lists column may be constructed,
// with each list row having a single element.
// E.g.
// 1. If index = 7, index_lists_column = { {7}, {7}, {7}, {7}, ... }.
// 2. If indices = {4, 3, 2, 1, null},
// index_lists_column = { {4}, {3}, {2}, {1}, {MAX_SIZE_TYPE} }.
auto const index_lists_column = make_lists_column(num_lists,
make_index_offsets(num_lists, stream),
make_index_child(index, num_lists, stream),
0,
{},
stream);
// We want the output of `segmented_gather` to be a lists column in which each list has exactly
// one element, even for the null lists.
// Thus, the input into `segmented_gather` should not be nullable.
auto const lists_column_removed_null_mask = lists_column_view{
column_view{data_type{type_id::LIST},
lists_column.size(),
nullptr, // data
nullptr, // null_mask
0, // null_count
lists_column.offset(),
std::vector<column_view>{lists_column.child_begin(), lists_column.child_end()}}};
auto extracted_lists = segmented_gather(lists_column_removed_null_mask,
index_lists_column->view(),
out_of_bounds_policy::NULLIFY,
stream,
mr);
auto output =
std::move(extracted_lists->release().children[lists_column_view::child_column_index]);
if (!lists_column.has_nulls()) { return output; }
// The input lists column may have non-empty nulls if it is nullable, although this is rare.
// In such cases, the extracted elements corresponding to these non-empty nulls may not be null.
// Thus, we need to superimpose nulls from the input column into the output to make sure each
// input null list always results in a null output row.
return cudf::structs::detail::superimpose_nulls(
lists_column.null_mask(), lists_column.null_count(), std::move(output), stream, mr);
}
/**
* @copydoc cudf::lists::extract_list_element
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> extract_list_element(lists_column_view lists_column,
size_type const index,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return detail::extract_list_element_impl(lists_column, index, stream, mr);
}
std::unique_ptr<column> extract_list_element(lists_column_view lists_column,
column_view const& indices,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return detail::extract_list_element_impl(lists_column, indices, stream, mr);
}
} // namespace detail
/**
* @copydoc cudf::lists::extract_list_element(lists_column_view const&,
* size_type,
* rmm::mr::device_memory_resource*)
*/
std::unique_ptr<column> extract_list_element(lists_column_view const& lists_column,
size_type index,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::extract_list_element(lists_column, index, stream, mr);
}
/**
* @copydoc cudf::lists::extract_list_element(lists_column_view const&,
* column_view const&,
* rmm::mr::device_memory_resource*)
*/
std::unique_ptr<column> extract_list_element(lists_column_view const& lists_column,
column_view const& indices,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
CUDF_EXPECTS(indices.size() == lists_column.size(),
"Index column must have as many elements as lists column.");
return detail::extract_list_element(lists_column, indices, stream, mr);
}
} // namespace lists
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/lists/set_operations.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "utilities.hpp"
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/copy_if.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/search.hpp>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/lists/detail/combine.hpp>
#include <cudf/lists/detail/set_operations.hpp>
#include <cudf/lists/detail/stream_compaction.hpp>
#include <cudf/utilities/type_checks.hpp>
#include <thrust/distance.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>
#include <thrust/scatter.h>
#include <thrust/uninitialized_fill.h>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
namespace cudf::lists {
namespace detail {
namespace {
/**
* @brief Check if two input lists columns are valid input into the list operations.
* @param lhs The left lists column
* @param rhs The right lists column
*/
void check_compatibility(lists_column_view const& lhs, lists_column_view const& rhs)
{
  CUDF_EXPECTS(lhs.size() == rhs.size(), "The input lists columns must have the same size.");
CUDF_EXPECTS(column_types_equal(lhs.child(), rhs.child()),
"The input lists columns must have children having the same type structure");
}
} // namespace
std::unique_ptr<column> have_overlap(lists_column_view const& lhs,
lists_column_view const& rhs,
null_equality nulls_equal,
nan_equality nans_equal,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
check_compatibility(lhs, rhs);
// Algorithm:
// - Generate labels for lhs and rhs child elements.
// - Check existence for rows of the table {rhs_labels, rhs_child} in the table
// {lhs_labels, lhs_child}.
  // - `reduce_by_key` with rhs_labels as keys and a `logical_or` reduction on the existence
  //   results computed in the previous step.
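  // Hedged illustration (example values only): for lhs = { {1,2}, {3} } and rhs = { {2,5}, {4} },
  // the rhs rows {0,2}, {0,5}, {1,4} probed against the lhs table give contained =
  // {true, false, false}; reduce_by_key over rhs labels {0, 0, 1} with logical_or then yields
  // {true, false}.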
auto const lhs_child = lhs.get_sliced_child(stream);
auto const rhs_child = rhs.get_sliced_child(stream);
auto const lhs_labels =
generate_labels(lhs, lhs_child.size(), stream, rmm::mr::get_current_device_resource());
auto const rhs_labels =
generate_labels(rhs, rhs_child.size(), stream, rmm::mr::get_current_device_resource());
auto const lhs_table = table_view{{lhs_labels->view(), lhs_child}};
auto const rhs_table = table_view{{rhs_labels->view(), rhs_child}};
// Check existence for each row of the rhs_table in lhs_table.
auto const contained = cudf::detail::contains(
lhs_table, rhs_table, nulls_equal, nans_equal, stream, rmm::mr::get_current_device_resource());
auto const num_rows = lhs.size();
// This stores the unique label values, used as scatter map.
auto list_indices = rmm::device_uvector<size_type>(num_rows, stream);
// Stores the result of checking overlap for non-empty lists.
auto overlap_results = rmm::device_uvector<bool>(num_rows, stream);
auto const labels_begin = rhs_labels->view().begin<size_type>();
auto const end = thrust::reduce_by_key(rmm::exec_policy(stream),
labels_begin, // keys
labels_begin + rhs_labels->size(), // keys
contained.begin(), // values to reduce
list_indices.begin(), // out keys
overlap_results.begin(), // out values
thrust::equal_to{}, // comp for keys
thrust::logical_or{}); // reduction op for values
auto const num_non_empty_segments = thrust::distance(overlap_results.begin(), end.second);
auto [null_mask, null_count] =
cudf::detail::bitmask_and(table_view{{lhs.parent(), rhs.parent()}}, stream, mr);
auto result = make_numeric_column(
data_type{type_to_id<bool>()}, num_rows, std::move(null_mask), null_count, stream, mr);
auto const result_begin = result->mutable_view().begin<bool>();
  // `overlap_results` only stores the results of non-empty lists.
  // We need to initialize the entire output array to `false`, then scatter these results over it.
thrust::uninitialized_fill(
rmm::exec_policy(stream), result_begin, result_begin + num_rows, false);
thrust::scatter(rmm::exec_policy(stream),
overlap_results.begin(),
overlap_results.begin() + num_non_empty_segments,
list_indices.begin(),
result_begin);
// Reset null count, which was invalidated when calling to `mutable_view()`.
result->set_null_count(null_count);
return result;
}
std::unique_ptr<column> intersect_distinct(lists_column_view const& lhs,
lists_column_view const& rhs,
null_equality nulls_equal,
nan_equality nans_equal,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
check_compatibility(lhs, rhs);
// Algorithm:
// - Generate labels for lhs and rhs child elements.
// - Check existence for rows of the table {rhs_labels, rhs_child} in the table
// {lhs_labels, lhs_child}.
// - Extract rows of the rhs table using the existence results computed in the previous step.
// - Remove duplicate rows, and build the output lists.
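  // Hedged illustration (example values only): for lhs = { {1,2,2}, {3} } and
  // rhs = { {2,2,5}, {3,4} }, the rhs rows found in the lhs table are {0,2}, {0,2} and {1,3};
  // stable_distinct reduces them to {0,2} and {1,3}, so the output is { {2}, {3} }.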
auto const lhs_child = lhs.get_sliced_child(stream);
auto const rhs_child = rhs.get_sliced_child(stream);
auto const lhs_labels =
generate_labels(lhs, lhs_child.size(), stream, rmm::mr::get_current_device_resource());
auto const rhs_labels =
generate_labels(rhs, rhs_child.size(), stream, rmm::mr::get_current_device_resource());
auto const lhs_table = table_view{{lhs_labels->view(), lhs_child}};
auto const rhs_table = table_view{{rhs_labels->view(), rhs_child}};
auto const contained = cudf::detail::contains(
lhs_table, rhs_table, nulls_equal, nans_equal, stream, rmm::mr::get_current_device_resource());
auto const intersect_table = cudf::detail::copy_if(
rhs_table,
[contained = contained.begin()] __device__(auto const idx) { return contained[idx]; },
stream,
rmm::mr::get_current_device_resource());
// A stable algorithm is required to ensure that list labels remain contiguous.
auto out_table = cudf::detail::stable_distinct(intersect_table->view(),
{0, 1}, // indices of key columns
duplicate_keep_option::KEEP_ANY,
nulls_equal,
nans_equal,
stream,
mr);
auto const num_rows = lhs.size();
auto out_offsets = reconstruct_offsets(out_table->get_column(0).view(), num_rows, stream, mr);
auto [null_mask, null_count] =
cudf::detail::bitmask_and(table_view{{lhs.parent(), rhs.parent()}}, stream, mr);
auto output = make_lists_column(num_rows,
std::move(out_offsets),
std::move(out_table->release().back()),
null_count,
std::move(null_mask),
stream,
mr);
if (auto const output_cv = output->view(); cudf::detail::has_nonempty_nulls(output_cv, stream)) {
return cudf::detail::purge_nonempty_nulls(output_cv, stream, mr);
}
return output;
}
std::unique_ptr<column> union_distinct(lists_column_view const& lhs,
lists_column_view const& rhs,
null_equality nulls_equal,
nan_equality nans_equal,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
check_compatibility(lhs, rhs);
// Algorithm: `return distinct(concatenate_rows(lhs, rhs))`.
auto const union_col =
lists::detail::concatenate_rows(table_view{{lhs.parent(), rhs.parent()}},
concatenate_null_policy::NULLIFY_OUTPUT_ROW,
stream,
rmm::mr::get_current_device_resource());
return cudf::lists::detail::distinct(
lists_column_view{union_col->view()}, nulls_equal, nans_equal, stream, mr);
}
std::unique_ptr<column> difference_distinct(lists_column_view const& lhs,
lists_column_view const& rhs,
null_equality nulls_equal,
nan_equality nans_equal,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
check_compatibility(lhs, rhs);
// Algorithm:
// - Generate labels for lhs and rhs child elements.
// - Check existence for rows of the table {lhs_labels, lhs_child} in the table
// {rhs_labels, rhs_child}.
// - Invert the existence results computed in the previous step, resulting in difference results.
// - Extract rows of the lhs table using that difference results.
// - Remove duplicate rows, and build the output lists.
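  // Hedged illustration (example values only): for lhs = { {1,2}, {3,3} } and rhs = { {2}, {4} },
  // contained over the lhs rows is {false, true, false, false}; inverting and copying keeps
  // {0,1}, {1,3}, {1,3}, and stable_distinct yields the output { {1}, {3} }.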
auto const lhs_child = lhs.get_sliced_child(stream);
auto const rhs_child = rhs.get_sliced_child(stream);
auto const lhs_labels =
generate_labels(lhs, lhs_child.size(), stream, rmm::mr::get_current_device_resource());
auto const rhs_labels =
generate_labels(rhs, rhs_child.size(), stream, rmm::mr::get_current_device_resource());
auto const lhs_table = table_view{{lhs_labels->view(), lhs_child}};
auto const rhs_table = table_view{{rhs_labels->view(), rhs_child}};
auto const contained = cudf::detail::contains(
rhs_table, lhs_table, nulls_equal, nans_equal, stream, rmm::mr::get_current_device_resource());
auto const difference_table = cudf::detail::copy_if(
lhs_table,
[contained = contained.begin()] __device__(auto const idx) { return !contained[idx]; },
stream,
rmm::mr::get_current_device_resource());
// A stable algorithm is required to ensure that list labels remain contiguous.
auto out_table = cudf::detail::stable_distinct(difference_table->view(),
{0, 1}, // indices of key columns
duplicate_keep_option::KEEP_ANY,
nulls_equal,
nans_equal,
stream,
mr);
auto const num_rows = lhs.size();
auto out_offsets = reconstruct_offsets(out_table->get_column(0).view(), num_rows, stream, mr);
auto [null_mask, null_count] =
cudf::detail::bitmask_and(table_view{{lhs.parent(), rhs.parent()}}, stream, mr);
auto output = make_lists_column(num_rows,
std::move(out_offsets),
std::move(out_table->release().back()),
null_count,
std::move(null_mask),
stream,
mr);
if (auto const output_cv = output->view(); cudf::detail::has_nonempty_nulls(output_cv, stream)) {
return cudf::detail::purge_nonempty_nulls(output_cv, stream, mr);
}
return output;
}
} // namespace detail
std::unique_ptr<column> have_overlap(lists_column_view const& lhs,
lists_column_view const& rhs,
null_equality nulls_equal,
nan_equality nans_equal,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::have_overlap(lhs, rhs, nulls_equal, nans_equal, stream, mr);
}
std::unique_ptr<column> intersect_distinct(lists_column_view const& lhs,
lists_column_view const& rhs,
null_equality nulls_equal,
nan_equality nans_equal,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::intersect_distinct(lhs, rhs, nulls_equal, nans_equal, stream, mr);
}
std::unique_ptr<column> union_distinct(lists_column_view const& lhs,
lists_column_view const& rhs,
null_equality nulls_equal,
nan_equality nans_equal,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::union_distinct(lhs, rhs, nulls_equal, nans_equal, stream, mr);
}
std::unique_ptr<column> difference_distinct(lists_column_view const& lhs,
lists_column_view const& rhs,
null_equality nulls_equal,
nan_equality nans_equal,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::difference_distinct(lhs, rhs, nulls_equal, nans_equal, stream, mr);
}
} // namespace cudf::lists
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/lists/reverse.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "utilities.hpp"
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/lists/reverse.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
namespace cudf::lists {
namespace detail {
std::unique_ptr<column> reverse(lists_column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) { return cudf::empty_like(input.parent()); }
auto const child = input.get_sliced_child(stream);
// The labels are also a map from each list element to its corresponding zero-based list index.
auto const labels =
generate_labels(input, child.size(), stream, rmm::mr::get_current_device_resource());
// The offsets of the output lists column.
auto out_offsets = get_normalized_offsets(input, stream, mr);
// Build a gather map to copy the output list elements from the input list elements.
auto gather_map = rmm::device_uvector<size_type>(child.size(), stream);
// Build a segmented reversed order for the child column.
thrust::for_each_n(rmm::exec_policy(stream),
thrust::counting_iterator<size_type>(0),
child.size(),
[list_offsets = out_offsets->view().begin<size_type>(),
list_indices = labels->view().begin<size_type>(),
gather_map = gather_map.begin()] __device__(auto const idx) {
auto const list_idx = list_indices[idx];
auto const begin_offset = list_offsets[list_idx];
auto const end_offset = list_offsets[list_idx + 1];
// Reverse the order of elements within each list.
gather_map[idx] = begin_offset + (end_offset - idx - 1);
});
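  // Hedged illustration (example values only): for a list spanning offsets [2, 5), child indices
  // 2, 3, 4 map to 4, 3, 2 respectively, so each list segment is written out in reverse order.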
auto child_segmented_reversed =
cudf::detail::gather(table_view{{child}},
device_span<size_type const>{gather_map.data(), gather_map.size()},
out_of_bounds_policy::DONT_CHECK,
cudf::detail::negative_index_policy::NOT_ALLOWED,
stream,
mr);
return cudf::make_lists_column(input.size(),
std::move(out_offsets),
std::move(child_segmented_reversed->release().front()),
input.null_count(),
cudf::detail::copy_bitmask(input.parent(), stream, mr),
stream,
mr);
}
} // namespace detail
std::unique_ptr<column> reverse(lists_column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::reverse(input, stream, mr);
}
} // namespace cudf::lists
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/lists/sequences.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/indexalator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/sizes_to_offsets_iterator.cuh>
#include <cudf/lists/detail/lists_column_factories.hpp>
#include <cudf/lists/filling.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/distance.h>
#include <thrust/execution_policy.h>
#include <thrust/scan.h>
#include <thrust/tabulate.h>
#include <limits>
#include <optional>
#include <stdexcept>
namespace cudf::lists {
namespace detail {
namespace {
template <typename T>
struct tabulator {
size_type const n_lists;
size_type const n_elements;
T const* const starts;
T const* const steps;
size_type const* const offsets;
template <typename U>
static std::enable_if_t<!cudf::is_duration<U>(), T> __device__ multiply(U x, size_type times)
{
return x * static_cast<T>(times);
}
template <typename U>
static std::enable_if_t<cudf::is_duration<U>(), T> __device__ multiply(U x, size_type times)
{
return T{x.count() * times};
}
auto __device__ operator()(size_type idx) const
{
auto const list_idx_end = thrust::upper_bound(thrust::seq, offsets, offsets + n_lists, idx);
auto const list_idx = thrust::distance(offsets, list_idx_end) - 1;
auto const list_offset = offsets[list_idx];
auto const list_step = steps ? steps[list_idx] : T{1};
return starts[list_idx] + multiply(list_step, idx - list_offset);
}
};
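// Hedged illustration (example values only): with starts = {0, 10}, steps = {1, 5} and
// offsets = {0, 3, 5}, element indices 0..4 evaluate to {0, 1, 2, 10, 15}, i.e. the per-list
// sequences {0,1,2} and {10,15}.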
template <typename T, typename Enable = void>
struct sequences_functor {
template <typename... Args>
static std::unique_ptr<column> invoke(Args&&...)
{
CUDF_FAIL("Unsupported per-list sequence type-agg combination.");
}
};
struct sequences_dispatcher {
template <typename T>
std::unique_ptr<column> operator()(size_type n_lists,
size_type n_elements,
column_view const& starts,
std::optional<column_view> const& steps,
size_type const* offsets,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return sequences_functor<T>::invoke(n_lists, n_elements, starts, steps, offsets, stream, mr);
}
};
template <typename T>
static constexpr bool is_supported()
{
return (cudf::is_numeric<T>() && !cudf::is_boolean<T>()) || cudf::is_duration<T>();
}
template <typename T>
struct sequences_functor<T, std::enable_if_t<is_supported<T>()>> {
static std::unique_ptr<column> invoke(size_type n_lists,
size_type n_elements,
column_view const& starts,
std::optional<column_view> const& steps,
size_type const* offsets,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto result =
make_fixed_width_column(starts.type(), n_elements, mask_state::UNALLOCATED, stream, mr);
if (starts.is_empty()) { return result; }
auto const result_begin = result->mutable_view().template begin<T>();
    // Using pointers instead of column_device_view to access start and step values should be
    // enough. This is because we don't need to check for nulls and only support numeric and
    // duration types.
auto const starts_begin = starts.template begin<T>();
auto const steps_begin = steps ? steps.value().template begin<T>() : nullptr;
auto const op = tabulator<T>{n_lists, n_elements, starts_begin, steps_begin, offsets};
thrust::tabulate(rmm::exec_policy(stream), result_begin, result_begin + n_elements, op);
return result;
}
};
std::unique_ptr<column> sequences(column_view const& starts,
std::optional<column_view> const& steps,
column_view const& sizes,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(!starts.has_nulls() && !sizes.has_nulls(),
"starts and sizes input columns must not have nulls.");
CUDF_EXPECTS(starts.size() == sizes.size(),
"starts and sizes input columns must have the same number of rows.");
CUDF_EXPECTS(cudf::is_index_type(sizes.type()), "Input sizes column must be of integer types.");
if (steps) {
auto const& steps_cv = steps.value();
CUDF_EXPECTS(!steps_cv.has_nulls(), "steps input column must not have nulls.");
CUDF_EXPECTS(starts.size() == steps_cv.size(),
"starts and steps input columns must have the same number of rows.");
CUDF_EXPECTS(starts.type() == steps_cv.type(),
"starts and steps input columns must have the same type.");
}
auto const n_lists = starts.size();
if (n_lists == 0) { return make_empty_lists_column(starts.type(), stream, mr); }
// Generate list offsets for the output.
auto list_offsets = make_numeric_column(
data_type(type_to_id<size_type>()), n_lists + 1, mask_state::UNALLOCATED, stream, mr);
auto const offsets_begin = list_offsets->mutable_view().template begin<size_type>();
auto const sizes_input_it = cudf::detail::indexalator_factory::make_input_iterator(sizes);
// First copy the sizes since the exclusive_scan tries to read (n_lists+1) values
thrust::copy_n(rmm::exec_policy(stream), sizes_input_it, sizes.size(), offsets_begin);
auto const n_elements = cudf::detail::sizes_to_offsets(
offsets_begin, offsets_begin + list_offsets->size(), offsets_begin, stream);
CUDF_EXPECTS(n_elements <= std::numeric_limits<size_type>::max(),
"Size of output exceeds the column size limit",
std::overflow_error);
auto child = type_dispatcher(starts.type(),
sequences_dispatcher{},
n_lists,
static_cast<size_type>(n_elements),
starts,
steps,
offsets_begin,
stream,
mr);
return make_lists_column(n_lists,
std::move(list_offsets),
std::move(child),
0,
rmm::device_buffer(0, stream, mr),
stream,
mr);
}
} // anonymous namespace
std::unique_ptr<column> sequences(column_view const& starts,
column_view const& sizes,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return sequences(starts, std::nullopt, sizes, stream, mr);
}
std::unique_ptr<column> sequences(column_view const& starts,
column_view const& steps,
column_view const& sizes,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return sequences(starts, std::optional<column_view>{steps}, sizes, stream, mr);
}
} // namespace detail
std::unique_ptr<column> sequences(column_view const& starts,
column_view const& sizes,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::sequences(starts, sizes, stream, mr);
}
std::unique_ptr<column> sequences(column_view const& starts,
column_view const& steps,
column_view const& sizes,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::sequences(starts, steps, sizes, stream, mr);
}
} // namespace cudf::lists
| 0 |
rapidsai_public_repos/cudf/cpp/src/lists
|
rapidsai_public_repos/cudf/cpp/src/lists/stream_compaction/distinct.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <lists/utilities.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <memory>
#include <utility>
namespace cudf::lists {
namespace detail {
std::unique_ptr<column> distinct(lists_column_view const& input,
null_equality nulls_equal,
nan_equality nans_equal,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// Algorithm:
// - Generate labels for the child elements.
// - Get distinct rows of the table {labels, child} using `stable_distinct`.
// - Build the output lists column from the output distinct rows above.
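  // Hedged illustration (example values only): for input { {1,1,2}, {}, {3,3} } the labels are
  // {0,0,0,2,2}; stable_distinct over {labels, child} keeps rows {0,1}, {0,2}, {2,3}, and the
  // reconstructed offsets {0, 2, 2, 3} give the output { {1,2}, {}, {3} }.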
if (input.is_empty()) { return empty_like(input.parent()); }
auto const child = input.get_sliced_child(stream);
auto const labels =
generate_labels(input, child.size(), stream, rmm::mr::get_current_device_resource());
auto const distinct_table =
cudf::detail::stable_distinct(table_view{{labels->view(), child}}, // input table
std::vector<size_type>{0, 1}, // keys
duplicate_keep_option::KEEP_ANY,
nulls_equal,
nans_equal,
stream,
mr);
auto out_offsets =
reconstruct_offsets(distinct_table->get_column(0).view(), input.size(), stream, mr);
return make_lists_column(input.size(),
std::move(out_offsets),
std::move(distinct_table->release().back()),
input.null_count(),
cudf::detail::copy_bitmask(input.parent(), stream, mr),
stream,
mr);
}
} // namespace detail
std::unique_ptr<column> distinct(lists_column_view const& input,
null_equality nulls_equal,
nan_equality nans_equal,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::distinct(input, nulls_equal, nans_equal, stream, mr);
}
} // namespace cudf::lists
| 0 |
rapidsai_public_repos/cudf/cpp/src/lists
|
rapidsai_public_repos/cudf/cpp/src/lists/stream_compaction/apply_boolean_mask.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/fill.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/replace.hpp>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/lists/detail/stream_compaction.hpp>
#include <cudf/lists/stream_compaction.hpp>
#include <cudf/reduction/detail/segmented_reduction_functions.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/reduce.h>
#include <thrust/scan.h>
namespace cudf::lists {
namespace detail {
std::unique_ptr<column> apply_boolean_mask(lists_column_view const& input,
lists_column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(boolean_mask.child().type().id() == type_id::BOOL8, "Mask must be of type BOOL8.");
CUDF_EXPECTS(input.size() == boolean_mask.size(),
"Boolean masks column must have same number of rows as input.");
auto const num_rows = input.size();
if (num_rows == 0) { return cudf::empty_like(input.parent()); }
auto constexpr offset_data_type = data_type{type_id::INT32};
auto const boolean_mask_sliced_child = boolean_mask.get_sliced_child(stream);
auto const make_filtered_child = [&] {
auto filtered =
cudf::detail::apply_boolean_mask(
cudf::table_view{{input.get_sliced_child(stream)}}, boolean_mask_sliced_child, stream, mr)
->release();
return std::move(filtered.front());
};
auto const make_output_offsets = [&] {
auto boolean_mask_sliced_offsets =
cudf::detail::slice(
boolean_mask.offsets(), {boolean_mask.offset(), boolean_mask.size() + 1}, stream)
.front();
auto const sizes =
cudf::reduction::detail::segmented_sum(boolean_mask_sliced_child,
boolean_mask_sliced_offsets,
offset_data_type,
null_policy::EXCLUDE,
std::nullopt,
stream,
rmm::mr::get_current_device_resource());
auto const d_sizes = column_device_view::create(*sizes, stream);
auto const sizes_begin = cudf::detail::make_null_replacement_iterator(*d_sizes, size_type{0});
auto const sizes_end = sizes_begin + sizes->size();
auto output_offsets = cudf::make_numeric_column(
offset_data_type, num_rows + 1, mask_state::UNALLOCATED, stream, mr);
auto output_offsets_view = output_offsets->mutable_view();
// Could have attempted an exclusive_scan(), but it would not compute the last entry.
// Instead, inclusive_scan(), followed by writing `0` to the head of the offsets column.
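    // Hedged illustration (example values only): for per-row true counts {2, 0, 1}, the
    // inclusive_scan writes {2, 2, 3} starting at position 1 and the memset below supplies the
    // leading 0, producing offsets {0, 2, 2, 3}.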
thrust::inclusive_scan(
rmm::exec_policy(stream), sizes_begin, sizes_end, output_offsets_view.begin<size_type>() + 1);
CUDF_CUDA_TRY(cudaMemsetAsync(
output_offsets_view.begin<size_type>(), 0, sizeof(size_type), stream.value()));
return output_offsets;
};
return cudf::make_lists_column(input.size(),
make_output_offsets(),
make_filtered_child(),
input.null_count(),
cudf::detail::copy_bitmask(input.parent(), stream, mr),
stream,
mr);
}
} // namespace detail
std::unique_ptr<column> apply_boolean_mask(lists_column_view const& input,
lists_column_view const& boolean_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::apply_boolean_mask(input, boolean_mask, stream, mr);
}
} // namespace cudf::lists
| 0 |
rapidsai_public_repos/cudf/cpp/src/lists
|
rapidsai_public_repos/cudf/cpp/src/lists/copying/gather.cu
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/gather.cuh>
#include <cudf/lists/detail/gather.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/binary_search.h>
#include <thrust/distance.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
namespace cudf {
namespace lists {
namespace detail {
/**
* @brief List gatherer function object.
*
* The iterator needed for gathering at level N+1 needs to reference the offsets
* from level N and the "base" offsets used from level N-1. An example of
 * the gather map needed for level N+1 (see the documentation for make_gather_data
 * for the full example) is shown below:
*
* @code{.pseudo}
 * level N-1 offsets : [0, 2, 5, 10], gather map [0, 2]
*
* level N offsets : [0, 2, 7]
* "base" offsets from level N-1 : [0, 5]
*
* desired output sequence for the level N+1 gather map
* [0, 1, 5, 6, 7, 8, 9]
*
* The generation of this sequence in this functor works as follows
*
* step 1, generate row index sequence
* [0, 0, 1, 1, 1, 1, 1]
* step 2, generate row subindex sequence
* [0, 1, 0, 1, 2, 3, 4]
* step 3, add base offsets to get the final sequence
* [0, 1, 5, 6, 7, 8, 9]
* @endcode
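 *
 * For example, output index 2 resolves as follows: the upper_bound over the
 * level N offsets starting at position 1 (i.e. [2, 7]) yields offset_index 1,
 * the subindex is 2 - 2 = 0, and adding the base offset 5 gives 5, matching
 * the sequence above.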
*/
struct list_gatherer {
using argument_type = size_type;
using result_type = size_type;
size_t offset_count;
size_type const* base_offsets;
size_type const* offsets;
list_gatherer(gather_data const& gd)
: offset_count{gd.base_offsets.size()},
base_offsets{gd.base_offsets.data()},
offsets{gd.offsets->mutable_view().data<size_type>()}
{
}
__device__ result_type operator()(argument_type index)
{
// the "upper bound" of the span for a given offset is always offsets+1;
size_type const* upper_bound_start = offsets + 1;
// "step 1" from above
auto const bound =
thrust::upper_bound(thrust::seq, upper_bound_start, upper_bound_start + offset_count, index);
size_type offset_index = thrust::distance(upper_bound_start, bound);
// "step 2" from above
size_type offset_subindex = offset_index == 0 ? index : index - offsets[offset_index];
// "step 3" from above
return offset_subindex + base_offsets[offset_index];
}
};
/**
* @copydoc cudf::lists::detail::gather_list_leaf
*/
std::unique_ptr<column> gather_list_leaf(column_view const& column,
gather_data const& gd,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// gather map iterator for this level (N)
auto gather_map_begin = thrust::make_transform_iterator(
thrust::make_counting_iterator<size_type>(0), list_gatherer{gd});
size_type gather_map_size = gd.gather_map_size;
// call the normal gather
// note : we don't need to bother checking for out-of-bounds here since
// our inputs at this stage aren't coming from the user.
auto gather_table = cudf::detail::gather(cudf::table_view({column}),
gather_map_begin,
gather_map_begin + gather_map_size,
out_of_bounds_policy::DONT_CHECK,
stream,
mr);
auto leaf_column = std::move(gather_table->release().front());
if (column.null_count() == 0) { leaf_column->set_null_mask(rmm::device_buffer{}, 0); }
return leaf_column;
}
/**
* @copydoc cudf::lists::detail::gather_list_nested
*/
std::unique_ptr<column> gather_list_nested(cudf::lists_column_view const& list,
gather_data& gd,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// gather map iterator for this level (N)
auto gather_map_begin = thrust::make_transform_iterator(
thrust::make_counting_iterator<size_type>(0), list_gatherer{gd});
size_type gather_map_size = gd.gather_map_size;
// if the gather map is empty, return an empty column
if (gather_map_size == 0) { return empty_like(list.parent()); }
// gather the bitmask, if relevant
rmm::device_buffer null_mask{0, stream, mr};
size_type null_count = list.null_count();
if (null_count > 0) {
auto list_cdv = column_device_view::create(list.parent(), stream);
auto validity = cudf::detail::valid_if(
gather_map_begin,
gather_map_begin + gather_map_size,
[cdv = *list_cdv] __device__(int index) { return cdv.is_valid(index); },
stream,
mr);
null_mask = std::move(validity.first);
null_count = validity.second;
}
// generate gather_data for next level (N+1), potentially recycling the temporary
// base_offsets buffer.
gather_data child_gd = make_gather_data<false>(
list, gather_map_begin, gather_map_size, std::move(gd.base_offsets), stream, mr);
// the nesting case.
if (list.child().type() == cudf::data_type{type_id::LIST}) {
// gather children.
auto child = gather_list_nested(list.get_sliced_child(stream), child_gd, stream, mr);
// return the nested column
return make_lists_column(gather_map_size,
std::move(child_gd.offsets),
std::move(child),
null_count,
std::move(null_mask),
stream,
mr);
}
// it's a leaf. do a regular gather
auto child = gather_list_leaf(list.get_sliced_child(stream), child_gd, stream, mr);
// assemble final column
return make_lists_column(gather_map_size,
std::move(child_gd.offsets),
std::move(child),
null_count,
std::move(null_mask),
stream,
mr);
}
} // namespace detail
} // namespace lists
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/lists
|
rapidsai_public_repos/cudf/cpp/src/lists/copying/concatenate.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/concatenate.hpp>
#include <cudf/detail/concatenate_masks.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/null_mask.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/transform.h>
#include <algorithm>
#include <memory>
#include <numeric>
namespace cudf {
namespace lists {
namespace detail {
namespace {
/**
* @brief Merges the offsets child columns of multiple list columns into one.
*
* Since offsets are all relative to the start of their respective column,
* all offsets are shifted to account for the new starting position
*
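 * For example (illustrative): merging a column with offsets [0, 2, 5] and one
 * with offsets [0, 3] produces [0, 2, 5, 8]; the second column's offsets are
 * shifted by 5, the number of child elements that precede it.
 *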
* @param[in] columns Vector of lists columns to concatenate
* @param[in] total_list_count Total number of lists contained in the columns
* @param[in] stream CUDA stream used for device memory operations
* and kernel launches.
* @param[in] mr Device memory resource used to allocate the
* returned column's device memory.
*/
std::unique_ptr<column> merge_offsets(host_span<lists_column_view const> columns,
size_type total_list_count,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// outgoing offsets
auto merged_offsets = cudf::make_fixed_width_column(
data_type{type_id::INT32}, total_list_count + 1, mask_state::UNALLOCATED, stream, mr);
mutable_column_device_view d_merged_offsets(*merged_offsets, 0, 0);
// merge offsets
// TODO : this could probably be done as a single gpu operation if done as a kernel.
size_type shift = 0;
size_type count = 0;
std::for_each(columns.begin(), columns.end(), [&](lists_column_view const& c) {
if (c.size() > 0) {
// handle sliced columns
int const local_shift =
shift -
(c.offset() > 0 ? cudf::detail::get_value<size_type>(c.offsets(), c.offset(), stream) : 0);
column_device_view offsets(c.offsets(), nullptr, nullptr);
thrust::transform(
rmm::exec_policy(stream),
offsets.begin<size_type>() + c.offset(),
offsets.begin<size_type>() + c.offset() + c.size() + 1,
d_merged_offsets.begin<size_type>() + count,
[local_shift] __device__(size_type offset) { return offset + local_shift; });
shift += c.get_sliced_child(stream).size();
count += c.size();
}
});
return merged_offsets;
}
} // namespace
/**
* @copydoc cudf::lists::detail::concatenate
*/
std::unique_ptr<column> concatenate(host_span<column_view const> columns,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
std::vector<lists_column_view> lists_columns;
lists_columns.reserve(columns.size());
std::transform(
columns.begin(), columns.end(), std::back_inserter(lists_columns), [](column_view const& c) {
return lists_column_view(c);
});
// concatenate children. also prep data needed for offset merging
std::vector<column_view> children;
children.reserve(columns.size());
size_type total_list_count = 0;
std::for_each(lists_columns.begin(),
lists_columns.end(),
[&total_list_count, &children, stream](lists_column_view const& l) {
// count total # of lists
total_list_count += l.size();
children.push_back(l.get_sliced_child(stream));
});
auto data = cudf::detail::concatenate(children, stream, mr);
// merge offsets
auto offsets = merge_offsets(lists_columns, total_list_count, stream, mr);
// if any of the input columns have nulls, construct the output mask
bool const has_nulls =
std::any_of(columns.begin(), columns.end(), [](auto const& col) { return col.has_nulls(); });
rmm::device_buffer null_mask = cudf::detail::create_null_mask(
total_list_count, has_nulls ? mask_state::UNINITIALIZED : mask_state::UNALLOCATED, stream, mr);
auto null_mask_data = static_cast<bitmask_type*>(null_mask.data());
auto const null_count =
has_nulls ? cudf::detail::concatenate_masks(columns, null_mask_data, stream) : size_type{0};
// assemble into outgoing list column
return make_lists_column(total_list_count,
std::move(offsets),
std::move(data),
null_count,
std::move(null_mask),
stream,
mr);
}
} // namespace detail
} // namespace lists
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/lists
|
rapidsai_public_repos/cudf/cpp/src/lists/copying/segmented_gather.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/copy_range.cuh>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/indexalator.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/lists/detail/gather.cuh>
#include <cudf/utilities/default_stream.hpp>
#include <thrust/binary_search.h>
#include <thrust/execution_policy.h>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace lists {
namespace detail {
std::unique_ptr<column> segmented_gather(lists_column_view const& value_column,
lists_column_view const& gather_map,
out_of_bounds_policy bounds_policy,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(is_index_type(gather_map.child().type()),
"Gather map should be list column of index type");
CUDF_EXPECTS(!gather_map.has_nulls(), "Gather map contains nulls", std::invalid_argument);
CUDF_EXPECTS(value_column.size() == gather_map.size(),
"Gather map and list column should be same size");
auto const gather_map_sliced_child = gather_map.get_sliced_child(stream);
auto const gather_map_size = gather_map_sliced_child.size();
auto const gather_index_begin = gather_map.offsets_begin() + 1;
auto const gather_index_end = gather_map.offsets_end();
auto const value_offsets = value_column.offsets_begin();
auto const value_device_view = column_device_view::create(value_column.parent(), stream);
auto const map_begin =
cudf::detail::indexalator_factory::make_input_iterator(gather_map_sliced_child);
auto const out_of_bounds = [] __device__(auto const index, auto const list_size) {
return index >= list_size || (index < 0 && -index > list_size);
};
  // Calculate flattened gather indices into the child column (value_offsets[row] + sub_index).
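  // Illustrative example: for values = [{a, b, c}, {d, e}] (value_offsets = [0, 3, 5])
  // and gather_map = [{1, -1}, {0}], the flattened indices are [1, 2, 3]:
  // row 0 maps 1 -> 0 + 1 and -1 -> 0 + (-1 + 3); row 1 maps 0 -> 3 + 0.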
auto transformer = [values_lists_view = *value_device_view,
value_offsets,
map_begin,
gather_index_begin,
gather_index_end,
bounds_policy,
out_of_bounds] __device__(size_type index) -> size_type {
// Get each row's offset. (Each row is a list).
auto offset_idx =
thrust::upper_bound(
thrust::seq, gather_index_begin, gather_index_end, gather_index_begin[-1] + index) -
gather_index_begin;
// Get each sub_index in list in each row of gather_map.
auto sub_index = map_begin[index];
auto list_is_null = values_lists_view.is_null(offset_idx);
auto list_size = list_is_null ? 0 : (value_offsets[offset_idx + 1] - value_offsets[offset_idx]);
auto wrapped_sub_index = sub_index < 0 ? sub_index + list_size : sub_index;
auto constexpr null_idx = cuda::std::numeric_limits<cudf::size_type>::max();
// Add sub_index to value_column offsets, to get gather indices of child of value_column
return (bounds_policy == out_of_bounds_policy::NULLIFY && out_of_bounds(sub_index, list_size))
? null_idx
: value_offsets[offset_idx] + wrapped_sub_index - value_offsets[0];
};
auto child_gather_index_begin = cudf::detail::make_counting_transform_iterator(0, transformer);
// Call gather on child of value_column
auto child_table = cudf::detail::gather(table_view({value_column.get_sliced_child(stream)}),
child_gather_index_begin,
child_gather_index_begin + gather_map_size,
bounds_policy,
stream,
mr);
auto child = std::move(child_table->release().front());
// Create list offsets from gather_map.
auto output_offset = cudf::detail::allocate_like(
gather_map.offsets(), gather_map.size() + 1, mask_allocation_policy::RETAIN, stream, mr);
auto output_offset_view = output_offset->mutable_view();
cudf::detail::copy_range_in_place(gather_map.offsets(),
output_offset_view,
gather_map.offset(),
gather_map.offset() + output_offset_view.size(),
0,
stream);
// Assemble list column & return
auto null_mask = cudf::detail::copy_bitmask(value_column.parent(), stream, mr);
size_type null_count = value_column.null_count();
return make_lists_column(gather_map.size(),
std::move(output_offset),
std::move(child),
null_count,
std::move(null_mask),
stream,
mr);
}
} // namespace detail
std::unique_ptr<column> segmented_gather(lists_column_view const& source_column,
lists_column_view const& gather_map_list,
out_of_bounds_policy bounds_policy,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::segmented_gather(source_column, gather_map_list, bounds_policy, stream, mr);
}
} // namespace lists
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/lists
|
rapidsai_public_repos/cudf/cpp/src/lists/copying/scatter_helper.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/valid_if.cuh>
#include <cudf/lists/detail/copying.hpp>
#include <cudf/lists/detail/scatter_helper.cuh>
#include <cudf/strings/detail/strings_children.cuh>
#include <cudf/utilities/span.hpp>
#include <thrust/binary_search.h>
#include <thrust/distance.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/transform.h>
namespace cudf {
namespace lists {
namespace detail {
/**
* @brief Constructs null mask for a scattered list's child column
*
* @param parent_list_vector Vector of unbound_list_view, for parent lists column
* @param parent_list_offsets List column offsets for parent lists column
* @param source_lists Source lists column for scatter operation
* @param target_lists Target lists column for scatter operation
* @param num_child_rows Number of rows in child column
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate child column's null mask
* @return std::pair<rmm::device_buffer, size_type> Child column's null mask and null row count
*/
std::pair<rmm::device_buffer, size_type> construct_child_nullmask(
rmm::device_uvector<unbound_list_view> const& parent_list_vector,
column_view const& parent_list_offsets,
cudf::detail::lists_column_device_view const& source_lists,
cudf::detail::lists_column_device_view const& target_lists,
size_type num_child_rows,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto is_valid_predicate = [d_list_vector = parent_list_vector.begin(),
d_offsets = parent_list_offsets.template data<size_type>(),
d_offsets_size = parent_list_offsets.size(),
source_lists,
target_lists] __device__(auto const& i) {
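    // Map the flat child-row index `i` back to its parent list: locate the owning
    // list via upper_bound on the parent offsets, compute the element's position
    // within that list, then bind the unbound_list_view to the source or target
    // column it references and query that element's validity.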
auto list_start =
thrust::upper_bound(thrust::seq, d_offsets, d_offsets + d_offsets_size, i) - 1;
auto list_index = list_start - d_offsets;
auto element_index = i - *list_start;
auto list_row = d_list_vector[list_index];
return !list_row.bind_to_column(source_lists, target_lists).is_null(element_index);
};
return cudf::detail::valid_if(thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(num_child_rows),
is_valid_predicate,
stream,
mr);
}
/**
* @brief (type_dispatch endpoint) Functor that constructs the child column result
* of `scatter()`ing a list column.
*
* The protocol is as follows:
*
* Inputs:
* 1. list_vector: A device_uvector of unbound_list_view, with each element
* indicating the position, size, and which column the list
* row came from.
* 2. list_offsets: The offsets column for the (outer) lists column, each offset
* marking the beginning of a list row.
* 3. source_list: The lists-column that is the source of the scatter().
* 4. target_list: The lists-column that is the target of the scatter().
*
* Output: A (possibly non-list) child column, which may be used in combination
* with list_offsets to fully construct the outer list.
*
* Example:
*
* Consider the following scatter operation of two `list<int>` columns:
*
* 1. Source: [{9,9,9,9}, {8,8,8}], i.e.
* a. Child: [9,9,9,9,8,8,8]
* b. Offsets: [0, 4, 7]
*
* 2. Target: [{1,1}, {2,2}, {3,3}], i.e.
* a. Child: [1,1,2,2,3,3]
* b. Offsets: [0, 2, 4, 6]
*
* 3. Scatter-map: [2, 0]
*
* 4. Expected output: [{8,8,8}, {2,2}, {9,9,9,9}], i.e.
* a. Child: [8,8,8,2,2,9,9,9,9] <--- THIS
* b. Offsets: [0, 3, 5, 9]
*
* `list_child_constructor` constructs the Expected Child column indicated above.
*
* `list_child_constructor` expects to be called with the `Source`/`Target`
* lists columns, along with the following:
*
* 1. list_vector: [ S[1](3), T[1](2), S[0](4) ]
* Each unbound_list_view (e.g. S[1](3)) indicates:
* a. Which column the row is bound to: S == Source, T == Target
* b. The list index. E.g. S[1] indicates the 2nd list row of the Source column.
* c. The row size. E.g. S[1](3) indicates that the row has 3 elements.
*
* 2. list_offsets: [0, 3, 5, 9]
* The caller may construct this with an `inclusive_scan()` on `list_vector`
* element sizes.
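 *    For this example, the element sizes [3, 2, 4] scan (with a leading zero)
 *    to [0, 3, 5, 9], matching the offsets shown above.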
*/
struct list_child_constructor {
private:
/**
* @brief Determine whether the child column type is supported with scattering lists.
*
* @tparam T The data type of the child column of the list being scattered.
*/
template <typename T>
struct is_supported_child_type {
static bool const value = cudf::is_fixed_width<T>() || std::is_same_v<T, string_view> ||
std::is_same_v<T, list_view> || std::is_same_v<T, struct_view>;
};
public:
// SFINAE catch-all, for unsupported child column types.
template <typename T, typename... Args>
std::enable_if_t<!is_supported_child_type<T>::value, std::unique_ptr<column>> operator()(
Args&&... args)
{
CUDF_FAIL("list_child_constructor unsupported!");
}
/**
* @brief Implementation for fixed_width child column types.
*/
template <typename T>
std::enable_if_t<cudf::is_fixed_width<T>(), std::unique_ptr<column>> operator()(
rmm::device_uvector<unbound_list_view> const& list_vector,
cudf::column_view const& list_offsets,
cudf::lists_column_view const& source_lists_column_view,
cudf::lists_column_view const& target_lists_column_view,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
auto source_column_device_view =
column_device_view::create(source_lists_column_view.parent(), stream);
auto target_column_device_view =
column_device_view::create(target_lists_column_view.parent(), stream);
auto source_lists = cudf::detail::lists_column_device_view(*source_column_device_view);
auto target_lists = cudf::detail::lists_column_device_view(*target_column_device_view);
auto const num_child_rows{
cudf::detail::get_value<size_type>(list_offsets, list_offsets.size() - 1, stream)};
auto child_null_mask =
source_lists_column_view.child().nullable() || target_lists_column_view.child().nullable()
? construct_child_nullmask(
list_vector, list_offsets, source_lists, target_lists, num_child_rows, stream, mr)
: std::pair(rmm::device_buffer{}, 0);
auto child_column = cudf::make_fixed_width_column(source_lists_column_view.child().type(),
num_child_rows,
std::move(child_null_mask.first),
child_null_mask.second,
stream,
mr);
thrust::transform(
rmm::exec_policy_nosync(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(child_column->size()),
child_column->mutable_view().begin<T>(),
[offset_begin = list_offsets.begin<size_type>(),
offset_size = list_offsets.size(),
d_list_vector = list_vector.begin(),
source_lists,
target_lists] __device__(auto index) {
auto const list_index_iter =
thrust::upper_bound(thrust::seq, offset_begin, offset_begin + offset_size, index);
auto const list_index =
static_cast<size_type>(thrust::distance(offset_begin, list_index_iter) - 1);
auto const intra_index = static_cast<size_type>(index - offset_begin[list_index]);
auto actual_list_row = d_list_vector[list_index].bind_to_column(source_lists, target_lists);
return actual_list_row.template element<T>(intra_index);
});
child_column->set_null_count(child_null_mask.second);
return child_column;
}
/**
* @brief Implementation for list child columns that contain strings.
*/
template <typename T>
std::enable_if_t<std::is_same_v<T, string_view>, std::unique_ptr<column>> operator()(
rmm::device_uvector<unbound_list_view> const& list_vector,
cudf::column_view const& list_offsets,
cudf::lists_column_view const& source_lists_column_view,
cudf::lists_column_view const& target_lists_column_view,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
auto source_column_device_view =
column_device_view::create(source_lists_column_view.parent(), stream);
auto target_column_device_view =
column_device_view::create(target_lists_column_view.parent(), stream);
auto source_lists = cudf::detail::lists_column_device_view(*source_column_device_view);
auto target_lists = cudf::detail::lists_column_device_view(*target_column_device_view);
auto const num_child_rows{
cudf::detail::get_value<size_type>(list_offsets, list_offsets.size() - 1, stream)};
if (num_child_rows == 0) { return make_empty_column(type_id::STRING); }
auto string_views = rmm::device_uvector<string_view>(num_child_rows, stream);
auto const null_string_view = string_view{nullptr, 0}; // placeholder for factory function
thrust::transform(
rmm::exec_policy_nosync(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(string_views.size()),
string_views.begin(),
[offset_begin = list_offsets.begin<size_type>(),
offset_size = list_offsets.size(),
d_list_vector = list_vector.begin(),
source_lists,
target_lists,
null_string_view] __device__(auto index) {
auto const list_index_iter =
thrust::upper_bound(thrust::seq, offset_begin, offset_begin + offset_size, index);
auto const list_index =
static_cast<size_type>(thrust::distance(offset_begin, list_index_iter) - 1);
auto const intra_index = static_cast<size_type>(index - offset_begin[list_index]);
auto row_index = d_list_vector[list_index].row_index();
auto actual_list_row = d_list_vector[list_index].bind_to_column(source_lists, target_lists);
auto lists_column = actual_list_row.get_column();
auto lists_offsets_ptr = lists_column.offsets().template data<size_type>();
auto child_strings_column = lists_column.child();
auto strings_offset = lists_offsets_ptr[row_index] + intra_index;
if (child_strings_column.is_null(strings_offset)) { return null_string_view; }
auto const d_str = child_strings_column.template element<string_view>(strings_offset);
// ensure a string from an all-empty column is not mapped to the null placeholder
auto const empty_string_view = string_view{};
return d_str.empty() ? empty_string_view : d_str;
});
// string_views should now have been populated with source and target references.
auto sv_span = cudf::device_span<string_view const>(string_views);
return cudf::make_strings_column(sv_span, null_string_view, stream, mr);
}
/**
* @brief (Recursively) Constructs a child column that is itself a list column.
*/
template <typename T>
std::enable_if_t<std::is_same_v<T, list_view>, std::unique_ptr<column>> operator()(
rmm::device_uvector<unbound_list_view> const& list_vector,
cudf::column_view const& list_offsets,
cudf::lists_column_view const& source_lists_column_view,
cudf::lists_column_view const& target_lists_column_view,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
auto source_column_device_view =
column_device_view::create(source_lists_column_view.parent(), stream);
auto target_column_device_view =
column_device_view::create(target_lists_column_view.parent(), stream);
auto source_lists = cudf::detail::lists_column_device_view(*source_column_device_view);
auto target_lists = cudf::detail::lists_column_device_view(*target_column_device_view);
auto const num_child_rows{
cudf::detail::get_value<size_type>(list_offsets, list_offsets.size() - 1, stream)};
if (num_child_rows == 0) {
// make an empty lists column using the input child type
return empty_like(source_lists_column_view.child());
}
auto child_list_views = rmm::device_uvector<unbound_list_view>(num_child_rows, stream, mr);
// Convert from parent list_device_view instances to child list_device_views.
// For instance, if a parent list_device_view has 3 elements, it should have 3 corresponding
// child list_device_view instances.
thrust::transform(
rmm::exec_policy_nosync(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(child_list_views.size()),
child_list_views.begin(),
[offset_begin = list_offsets.begin<size_type>(),
offset_size = list_offsets.size(),
d_list_vector = list_vector.begin(),
source_lists,
target_lists] __device__(auto index) {
auto const list_index_iter =
thrust::upper_bound(thrust::seq, offset_begin, offset_begin + offset_size, index);
auto const list_index =
static_cast<size_type>(thrust::distance(offset_begin, list_index_iter) - 1);
auto const intra_index = static_cast<size_type>(index - offset_begin[list_index]);
auto label = d_list_vector[list_index].label();
auto row_index = d_list_vector[list_index].row_index();
auto actual_list_row = d_list_vector[list_index].bind_to_column(source_lists, target_lists);
auto lists_column = actual_list_row.get_column();
auto child_lists_column = lists_column.child();
auto lists_offsets_ptr = lists_column.offsets().template data<size_type>();
auto child_lists_offsets_ptr =
child_lists_column.child(lists_column_view::offsets_column_index)
.template data<size_type>();
auto child_row_index = lists_offsets_ptr[row_index] + intra_index;
auto size =
child_lists_offsets_ptr[child_row_index + 1] - child_lists_offsets_ptr[child_row_index];
return unbound_list_view{label, child_row_index, size};
});
// child_list_views should now have been populated, with source and target references.
auto begin = thrust::make_transform_iterator(
child_list_views.begin(), [] __device__(auto const& row) { return row.size(); });
auto child_offsets = std::get<0>(
cudf::detail::make_offsets_child_column(begin, begin + child_list_views.size(), stream, mr));
auto child_column = cudf::type_dispatcher<dispatch_storage_type>(
source_lists_column_view.child().child(1).type(),
list_child_constructor{},
child_list_views,
child_offsets->view(),
cudf::lists_column_view(source_lists_column_view.child()),
cudf::lists_column_view(target_lists_column_view.child()),
stream,
mr);
auto child_null_mask =
source_lists_column_view.child().nullable() || target_lists_column_view.child().nullable()
? construct_child_nullmask(
list_vector, list_offsets, source_lists, target_lists, num_child_rows, stream, mr)
: std::pair(rmm::device_buffer{}, 0);
return cudf::make_lists_column(num_child_rows,
std::move(child_offsets),
std::move(child_column),
child_null_mask.second, // Null count
std::move(child_null_mask.first), // Null mask
stream,
mr);
}
/**
* @brief (Recursively) constructs child columns that are structs.
*/
template <typename T>
std::enable_if_t<std::is_same_v<T, struct_view>, std::unique_ptr<column>> operator()(
rmm::device_uvector<unbound_list_view> const& list_vector,
cudf::column_view const& list_offsets,
cudf::lists_column_view const& source_lists_column_view,
cudf::lists_column_view const& target_lists_column_view,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
auto const source_column_device_view =
column_device_view::create(source_lists_column_view.parent(), stream);
auto const target_column_device_view =
column_device_view::create(target_lists_column_view.parent(), stream);
auto const source_lists = cudf::detail::lists_column_device_view(*source_column_device_view);
auto const target_lists = cudf::detail::lists_column_device_view(*target_column_device_view);
auto const source_structs = source_lists_column_view.child();
auto const target_structs = target_lists_column_view.child();
auto const num_child_rows{
cudf::detail::get_value<size_type>(list_offsets, list_offsets.size() - 1, stream)};
auto const num_struct_members =
std::distance(source_structs.child_begin(), source_structs.child_end());
std::vector<std::unique_ptr<column>> child_columns;
child_columns.reserve(num_struct_members);
auto project_member_as_list_view = [](column_view const& structs_member,
cudf::size_type const& structs_list_num_rows,
column_view const& structs_list_offsets,
bitmask_type const* structs_list_nullmask,
cudf::size_type const& structs_list_null_count) {
return lists_column_view(column_view(data_type{type_id::LIST},
structs_list_num_rows,
nullptr,
structs_list_nullmask,
structs_list_null_count,
0,
{structs_list_offsets, structs_member}));
};
auto const iter_source_member_as_list = thrust::make_transform_iterator(
thrust::make_counting_iterator<cudf::size_type>(0), [&](auto child_idx) {
return project_member_as_list_view(source_structs.child(child_idx),
source_lists_column_view.size(),
source_lists_column_view.offsets(),
source_lists_column_view.null_mask(),
source_lists_column_view.null_count());
});
auto const iter_target_member_as_list = thrust::make_transform_iterator(
thrust::make_counting_iterator<cudf::size_type>(0), [&](auto child_idx) {
return project_member_as_list_view(target_structs.child(child_idx),
target_lists_column_view.size(),
target_lists_column_view.offsets(),
target_lists_column_view.null_mask(),
target_lists_column_view.null_count());
});
std::transform(iter_source_member_as_list,
iter_source_member_as_list + num_struct_members,
iter_target_member_as_list,
std::back_inserter(child_columns),
[&](auto source_struct_member_list_view, auto target_struct_member_list_view) {
return cudf::type_dispatcher<dispatch_storage_type>(
source_struct_member_list_view.child().type(),
list_child_constructor{},
list_vector,
list_offsets,
source_struct_member_list_view,
target_struct_member_list_view,
stream,
mr);
});
auto child_null_mask =
source_lists_column_view.child().nullable() || target_lists_column_view.child().nullable()
? construct_child_nullmask(
list_vector, list_offsets, source_lists, target_lists, num_child_rows, stream, mr)
: std::pair(rmm::device_buffer{}, 0);
return cudf::make_structs_column(num_child_rows,
std::move(child_columns),
child_null_mask.second,
std::move(child_null_mask.first),
stream,
mr);
}
};
std::unique_ptr<column> build_lists_child_column_recursive(
data_type child_column_type,
rmm::device_uvector<unbound_list_view> const& list_vector,
cudf::column_view const& list_offsets,
cudf::lists_column_view const& source_lists_column_view,
cudf::lists_column_view const& target_lists_column_view,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return cudf::type_dispatcher<dispatch_storage_type>(child_column_type,
list_child_constructor{},
list_vector,
list_offsets,
source_lists_column_view,
target_lists_column_view,
stream,
mr);
}
} // namespace detail
} // namespace lists
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/lists
|
rapidsai_public_repos/cudf/cpp/src/lists/copying/copying.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/copy_range.cuh>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/types.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
#include <iostream>
namespace cudf {
namespace lists {
namespace detail {
// New lists column from a subset of a lists_column_view
std::unique_ptr<cudf::column> copy_slice(lists_column_view const& lists,
size_type start,
size_type end,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (lists.is_empty() or start == end) { return cudf::empty_like(lists.parent()); }
if (end < 0 || end > lists.size()) end = lists.size();
CUDF_EXPECTS(((start >= 0) && (start < end)), "Invalid slice range.");
auto lists_count = end - start;
auto offsets_count = lists_count + 1; // num_offsets always 1 more than num_lists
// Account for the offset of the view:
start += lists.offset();
end += lists.offset();
// Offsets at the beginning and end of the slice:
auto offsets_data = lists.offsets().data<cudf::size_type>();
auto start_offset = cudf::detail::get_value<size_type>(lists.offsets(), start, stream);
auto end_offset = cudf::detail::get_value<size_type>(lists.offsets(), end, stream);
rmm::device_uvector<cudf::size_type> out_offsets(offsets_count, stream);
// Compute the offsets column of the result:
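  // For example (illustrative): slicing offsets [0, 2, 5, 9] over rows [1, 3)
  // reads [2, 5, 9] and rebases by start_offset = 2, giving [0, 3, 7].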
thrust::transform(
rmm::exec_policy(stream),
offsets_data + start,
offsets_data + end + 1, // size of offsets column is 1 greater than slice length
out_offsets.data(),
[start_offset] __device__(cudf::size_type i) { return i - start_offset; });
auto offsets = std::make_unique<cudf::column>(cudf::data_type{cudf::type_id::INT32},
offsets_count,
out_offsets.release(),
rmm::device_buffer{},
0);
// Compute the child column of the result.
// If the child of this lists column is itself a lists column, we call copy_slice() on it.
// Otherwise, it is a column of the leaf type, so we call slice() on it and copy the resulting
// view into a cudf::column:
auto child =
(lists.child().type() == cudf::data_type{type_id::LIST})
? copy_slice(lists_column_view(lists.child()), start_offset, end_offset, stream, mr)
: std::make_unique<cudf::column>(
cudf::detail::slice(lists.child(), {start_offset, end_offset}, stream).front(),
stream,
mr);
// Compute the null mask of the result:
auto null_mask = cudf::detail::copy_bitmask(lists.null_mask(), start, end, stream, mr);
auto null_count = cudf::detail::null_count(
static_cast<bitmask_type const*>(null_mask.data()), 0, end - start, stream);
return make_lists_column(lists_count,
std::move(offsets),
std::move(child),
null_count,
std::move(null_mask),
stream,
mr);
}
} // namespace detail
} // namespace lists
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/lists
|
rapidsai_public_repos/cudf/cpp/src/lists/combine/concatenate_rows.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/concatenate.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/lists/combine.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/type_checks.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
namespace cudf {
namespace lists {
namespace detail {
namespace {
/**
* @brief Generates the new set of offsets that regroups the concatenated-by-column inputs
* into concatenated-by-rows inputs, and the associated null mask.
*
* If we have the following input columns:
*
* s1 = [{0, 1}, {2, 3, 4}, {5}, {}, {6, 7}]
* s2 = [{8}, {9}, {}, {10, 11, 12}, {13, 14, 15, 16}]
*
* We can rearrange the child data using a normal concatenate and a gather such that
* the resulting values are in the correct order. For the above example, the
* child column would look like:
*
 * {0, 1, 8, 2, 3, 4, 9, 5, 10, 11, 12, 6, 7, 13, 14, 15, 16}
*
* Because we did a regular concatenate (and a subsequent gather to reorder the rows),
* the top level rows of the list column would look like:
*
* (2N rows)
 * [{0, 1}, {8}, {2, 3, 4}, {9}, {5}, {}, {}, {10, 11, 12}, {6, 7}, {13, 14, 15, 16}]
*
* What we really want is:
*
* (N rows)
* [{0, 1, 8}, {2, 3, 4, 9}, {5}, {10, 11, 12}, {6, 7, 13, 14, 15, 16}]
*
* We can do this by recomputing a new offsets column that does this regrouping.
*
*/
std::tuple<std::unique_ptr<column>, rmm::device_buffer, size_type>
generate_regrouped_offsets_and_null_mask(table_device_view const& input,
bool build_null_mask,
concatenate_null_policy null_policy,
device_span<size_type const> row_null_counts,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// outgoing offsets.
auto offsets = cudf::make_fixed_width_column(
data_type{type_to_id<size_type>()}, input.num_rows() + 1, mask_state::UNALLOCATED, stream, mr);
auto keys = thrust::make_transform_iterator(thrust::make_counting_iterator(size_t{0}),
[num_columns = input.num_columns()] __device__(
size_t i) -> size_type { return i / num_columns; });
// generate sizes for the regrouped rows
auto values = thrust::make_transform_iterator(
thrust::make_counting_iterator(size_t{0}),
[input, row_null_counts = row_null_counts.data(), null_policy] __device__(
size_t i) -> size_type {
auto const col_index = i % input.num_columns();
auto const row_index = i / input.num_columns();
// nullify the whole output row
if (row_null_counts) {
if ((null_policy == concatenate_null_policy::NULLIFY_OUTPUT_ROW &&
row_null_counts[row_index] > 0) ||
(null_policy == concatenate_null_policy::IGNORE &&
row_null_counts[row_index] == input.num_columns())) {
return 0;
}
}
auto offsets =
input.column(col_index).child(lists_column_view::offsets_column_index).data<size_type>() +
input.column(col_index).offset();
return offsets[row_index + 1] - offsets[row_index];
});
thrust::reduce_by_key(rmm::exec_policy(stream),
keys,
keys + (input.num_rows() * input.num_columns()),
values,
thrust::make_discard_iterator(),
offsets->mutable_view().begin<size_type>());
// convert to offsets
thrust::exclusive_scan(rmm::exec_policy(stream),
offsets->view().begin<size_type>(),
offsets->view().begin<size_type>() + input.num_rows() + 1,
offsets->mutable_view().begin<size_type>(),
0);
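  // For the s1/s2 example above (assuming no nulls), the regrouped sizes are
  // [3, 4, 1, 3, 6], which the scan converts to offsets [0, 3, 7, 8, 11, 17].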
// generate appropriate null mask
auto [null_mask, null_count] = [&]() {
// if the input doesn't contain nulls, no work to do
if (!build_null_mask) {
return std::pair<rmm::device_buffer, size_type>{rmm::device_buffer{}, 0};
}
// row is null if -all- input rows are null
if (null_policy == concatenate_null_policy::IGNORE) {
return cudf::detail::valid_if(
row_null_counts.begin(),
row_null_counts.begin() + input.num_rows(),
[num_columns = input.num_columns()] __device__(size_type null_count) {
return null_count != num_columns;
},
stream,
mr);
}
// row is null if -any- input rows are null
return cudf::detail::valid_if(
row_null_counts.begin(),
row_null_counts.begin() + input.num_rows(),
[] __device__(size_type null_count) { return null_count == 0; },
stream,
mr);
}();
return {std::move(offsets), std::move(null_mask), null_count};
}
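/**
 * @brief Computes, for each input row, how many of the input columns hold a
 * null at that row. The resulting per-row null counts drive the null-policy
 * handling in this file.
 */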
rmm::device_uvector<size_type> generate_null_counts(table_device_view const& input,
rmm::cuda_stream_view stream)
{
rmm::device_uvector<size_type> null_counts(input.num_rows(), stream);
auto keys = thrust::make_transform_iterator(thrust::make_counting_iterator(size_t{0}),
[num_columns = input.num_columns()] __device__(
size_t i) -> size_type { return i / num_columns; });
auto null_values = thrust::make_transform_iterator(
thrust::make_counting_iterator(size_t{0}), [input] __device__(size_t i) -> size_type {
auto const col_index = i % input.num_columns();
auto const row_index = i / input.num_columns();
auto const& col = input.column(col_index);
return col.null_mask() ? (bit_is_set(col.null_mask(), row_index + col.offset()) ? 0 : 1) : 0;
});
thrust::reduce_by_key(rmm::exec_policy(stream),
keys,
keys + (input.num_rows() * input.num_columns()),
null_values,
thrust::make_discard_iterator(),
null_counts.data());
return null_counts;
}
} // anonymous namespace
/**
* @copydoc cudf::lists::concatenate_rows
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> concatenate_rows(table_view const& input,
concatenate_null_policy null_policy,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(input.num_columns() > 0, "The input table must have at least one column.");
auto const entry_type = lists_column_view(*input.begin()).child().type();
CUDF_EXPECTS(
std::all_of(input.begin(),
input.end(),
[](column_view const& col) { return col.type().id() == cudf::type_id::LIST; }),
"All columns of the input table must be of lists column type.");
CUDF_EXPECTS(
std::all_of(std::next(input.begin()),
input.end(),
[a = *input.begin()](column_view const& b) { return column_types_equal(a, b); }),
"The types of entries in the input columns must be the same.");
auto const num_rows = input.num_rows();
auto const num_cols = input.num_columns();
if (num_rows == 0) { return cudf::empty_like(input.column(0)); }
if (num_cols == 1) { return std::make_unique<column>(*(input.begin()), stream, mr); }
// concatenate the input table into one column.
std::vector<column_view> cols(input.num_columns());
std::copy(input.begin(), input.end(), cols.begin());
auto concat = cudf::detail::concatenate(cols, stream, rmm::mr::get_current_device_resource());
// whether or not we should be generating a null mask at all
auto const build_null_mask = concat->has_nulls();
auto input_dv = table_device_view::create(input, stream);
// if the output needs a null mask, generate a vector of null counts per row of input, where the
// count is the number of columns that contain a null for a given row.
auto row_null_counts = build_null_mask ? generate_null_counts(*input_dv, stream)
: rmm::device_uvector<size_type>{0, stream};
// if we have nulls, overlay an appropriate null mask onto the
// concatenated column so that gather() sanitizes out the child data of rows that will ultimately
// be nullified.
if (build_null_mask) {
auto [null_mask, null_count] = [&]() {
auto iter = thrust::make_counting_iterator(size_t{0});
// IGNORE. Output row is nullified if all input rows are null.
if (null_policy == concatenate_null_policy::IGNORE) {
return cudf::detail::valid_if(
iter,
iter + (input.num_rows() * input.num_columns()),
[num_rows = input.num_rows(),
num_columns = input.num_columns(),
row_null_counts = row_null_counts.data()] __device__(size_t i) -> size_type {
auto const row_index = i % num_rows;
return row_null_counts[row_index] != num_columns;
},
stream,
rmm::mr::get_current_device_resource());
}
      // NULLIFY_OUTPUT_ROW. Output row is nullified if any input row is null.
return cudf::detail::valid_if(
iter,
iter + (input.num_rows() * input.num_columns()),
[num_rows = input.num_rows(),
row_null_counts = row_null_counts.data()] __device__(size_t i) -> size_type {
auto const row_index = i % num_rows;
return row_null_counts[row_index] == 0;
},
stream,
rmm::mr::get_current_device_resource());
}();
concat->set_null_mask(std::move(null_mask), null_count);
}
// perform the gather to rearrange the rows in desired child order. this will produce -almost-
// what we want. the data of the children will be exactly what we want, but will be grouped as if
// we had concatenated all the rows together instead of concatenating within the rows. To fix
  // this we can simply swap in a new set of offsets that re-groups them.
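  // For example, with 2 columns and 5 rows the iterator maps the output row
  // sequence 0..9 to concatenated rows [0, 5, 1, 6, 2, 7, 3, 8, 4, 9],
  // interleaving one row from each input column.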
auto iter = thrust::make_transform_iterator(
thrust::make_counting_iterator(size_t{0}),
[num_columns = input.num_columns(),
num_rows = input.num_rows()] __device__(size_t i) -> size_type {
auto const src_col_index = i % num_columns;
auto const src_row_index = i / num_columns;
auto const concat_row_index = (src_col_index * num_rows) + src_row_index;
return concat_row_index;
});
auto gathered = cudf::detail::gather(table_view({*concat}),
iter,
iter + (input.num_columns() * input.num_rows()),
out_of_bounds_policy::DONT_CHECK,
stream,
mr);
// generate regrouped offsets and null mask
auto [offsets, null_mask, null_count] = generate_regrouped_offsets_and_null_mask(
*input_dv, build_null_mask, null_policy, row_null_counts, stream, mr);
// reassemble the underlying child data with the regrouped offsets and null mask
column& col = gathered->get_column(0);
auto contents = col.release();
return cudf::make_lists_column(
input.num_rows(),
std::move(offsets),
std::move(contents.children[lists_column_view::child_column_index]),
null_count,
std::move(null_mask),
stream,
mr);
}
} // namespace detail
/**
* @copydoc cudf::lists::concatenate_rows
*/
std::unique_ptr<column> concatenate_rows(table_view const& input,
concatenate_null_policy null_policy,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::concatenate_rows(input, null_policy, stream, mr);
}
} // namespace lists
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/lists
|
rapidsai_public_repos/cudf/cpp/src/lists/combine/concatenate_list_elements.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/sizes_to_offsets_iterator.cuh>
#include <cudf/detail/valid_if.cuh>
#include <cudf/lists/combine.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/logical.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
namespace cudf {
namespace lists {
namespace detail {
namespace {
/**
* @brief Concatenate lists within the same row into one list, ignoring any null list during
* concatenation.
*/
std::unique_ptr<column> concatenate_lists_ignore_null(column_view const& input,
bool build_null_mask,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const num_rows = input.size();
auto out_offsets = make_numeric_column(
data_type{type_to_id<size_type>()}, num_rows + 1, mask_state::UNALLOCATED, stream, mr);
auto const d_out_offsets = out_offsets->mutable_view().template begin<size_type>();
auto const d_row_offsets = lists_column_view(input).offsets_begin();
auto const d_list_offsets = lists_column_view(lists_column_view(input).child()).offsets_begin();
// Concatenating the lists at the same row by converting the entry offsets from the child column
// into row offsets of the root column. Those entry offsets are subtracted by the first entry
// offset to output zero-based offsets.
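  // Illustrative example: for input [[{1, 2}, {3}], [{4, 5, 6}]] the row offsets
  // are [0, 2, 3] and the child list offsets are [0, 2, 3, 6], so the output
  // offsets become [0, 3, 6].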
auto const iter = thrust::make_counting_iterator<size_type>(0);
thrust::transform(rmm::exec_policy(stream),
iter,
iter + num_rows + 1,
d_out_offsets,
[d_row_offsets, d_list_offsets] __device__(auto const idx) {
auto const start_offset = d_list_offsets[d_row_offsets[0]];
return d_list_offsets[d_row_offsets[idx]] - start_offset;
});
// The child column of the output lists column is just copied from the input column.
auto out_entries = std::make_unique<column>(
lists_column_view(lists_column_view(input).get_sliced_child(stream)).get_sliced_child(stream),
stream,
mr);
auto [null_mask, null_count] = [&] {
if (!build_null_mask)
return std::pair(cudf::detail::copy_bitmask(input, stream, mr), input.null_count());
// The output row will be null only if all lists on the input row are null.
auto const lists_dv_ptr = column_device_view::create(lists_column_view(input).child(), stream);
return cudf::detail::valid_if(
iter,
iter + num_rows,
[d_row_offsets, lists_dv = *lists_dv_ptr, iter] __device__(auto const idx) {
return thrust::any_of(
thrust::seq,
iter + d_row_offsets[idx],
iter + d_row_offsets[idx + 1],
[&] __device__(auto const list_idx) { return lists_dv.is_valid(list_idx); });
},
stream,
mr);
}();
return make_lists_column(num_rows,
std::move(out_offsets),
std::move(out_entries),
null_count,
null_count > 0 ? std::move(null_mask) : rmm::device_buffer{},
stream,
mr);
}
/**
* @brief Generate list offsets and list validities for the output lists column.
*
* This function is called only when (has_null_list == true and null_policy == NULLIFY_OUTPUT_ROW).
*/
std::pair<std::unique_ptr<column>, rmm::device_uvector<int8_t>>
generate_list_offsets_and_validities(column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const num_rows = input.size();
auto const lists_of_lists_dv_ptr = column_device_view::create(input, stream);
auto const lists_dv_ptr = column_device_view::create(lists_column_view(input).child(), stream);
auto const d_row_offsets = lists_column_view(input).offsets_begin();
auto const d_list_offsets = lists_column_view(lists_column_view(input).child()).offsets_begin();
// The array of int8_t stores validities for the output list elements.
auto validities = rmm::device_uvector<int8_t>(num_rows, stream);
// Compute output list sizes and validities.
auto sizes_itr = cudf::detail::make_counting_transform_iterator(
0,
[lists_of_lists_dv = *lists_of_lists_dv_ptr,
lists_dv = *lists_dv_ptr,
d_row_offsets,
d_list_offsets,
d_validities = validities.begin()] __device__(auto const idx) {
if (d_row_offsets[idx] == d_row_offsets[idx + 1]) { // This is a null/empty row.
d_validities[idx] = static_cast<int8_t>(lists_of_lists_dv.is_valid(idx));
return size_type{0};
}
      // The output row is valid only if all lists on the input row are valid.
auto const iter = thrust::make_counting_iterator<size_type>(0);
auto const is_valid =
thrust::all_of(thrust::seq,
iter + d_row_offsets[idx],
iter + d_row_offsets[idx + 1],
[&] __device__(auto const list_idx) { return lists_dv.is_valid(list_idx); });
d_validities[idx] = static_cast<int8_t>(is_valid);
if (!is_valid) { return size_type{0}; }
// Compute size of the output list as sum of sizes of all lists in the current input row.
return d_list_offsets[d_row_offsets[idx + 1]] - d_list_offsets[d_row_offsets[idx]];
});
// Compute offsets from sizes.
auto out_offsets = std::get<0>(
cudf::detail::make_offsets_child_column(sizes_itr, sizes_itr + num_rows, stream, mr));
return {std::move(out_offsets), std::move(validities)};
}
/**
* @brief Gather entries from the input lists column, ignoring rows that have null list elements.
*
* This function is called only when (has_null_list == true and null_policy == NULLIFY_OUTPUT_ROW).
*/
std::unique_ptr<column> gather_list_entries(column_view const& input,
column_view const& output_list_offsets,
size_type num_rows,
size_type num_output_entries,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const child_col = lists_column_view(input).child();
auto const entry_col = lists_column_view(child_col).child();
auto const d_row_offsets = lists_column_view(input).offsets_begin();
auto const d_list_offsets = lists_column_view(child_col).offsets_begin();
auto gather_map = rmm::device_uvector<size_type>(num_output_entries, stream);
// Fill the gather map with indices of the lists from the child column of the input column.
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
num_rows,
[d_row_offsets,
d_list_offsets,
d_indices = gather_map.begin(),
d_out_list_offsets =
output_list_offsets.template begin<size_type>()] __device__(size_type const idx) {
// The output row has been identified as a null/empty list during list size computation.
if (d_out_list_offsets[idx + 1] == d_out_list_offsets[idx]) { return; }
// The indices of the list elements on the row `idx` of the input column.
thrust::sequence(thrust::seq,
d_indices + d_out_list_offsets[idx],
d_indices + d_out_list_offsets[idx + 1],
d_list_offsets[d_row_offsets[idx]]);
});
auto result = cudf::detail::gather(table_view{{entry_col}},
gather_map,
out_of_bounds_policy::DONT_CHECK,
cudf::detail::negative_index_policy::NOT_ALLOWED,
stream,
mr);
return std::move(result->release()[0]);
}
std::unique_ptr<column> concatenate_lists_nullifying_rows(column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// Generate offsets and validities of the output lists column.
auto [list_offsets, list_validities] = generate_list_offsets_and_validities(input, stream, mr);
auto const offsets_view = list_offsets->view();
auto const num_rows = input.size();
auto const num_output_entries =
cudf::detail::get_value<size_type>(offsets_view, num_rows, stream);
auto list_entries =
gather_list_entries(input, offsets_view, num_rows, num_output_entries, stream, mr);
auto [null_mask, null_count] = cudf::detail::valid_if(
list_validities.begin(), list_validities.end(), thrust::identity{}, stream, mr);
return make_lists_column(num_rows,
std::move(list_offsets),
std::move(list_entries),
null_count,
null_count ? std::move(null_mask) : rmm::device_buffer{},
stream,
mr);
}
} // namespace
/**
* @copydoc cudf::lists::concatenate_list_elements
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> concatenate_list_elements(column_view const& input,
concatenate_null_policy null_policy,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(input.type().id() == type_id::LIST,
"Input column must be a lists column.",
std::invalid_argument);
auto const child = lists_column_view(input).child();
CUDF_EXPECTS(child.type().id() == type_id::LIST,
"Child of the input lists column must also be a lists column.",
std::invalid_argument);
if (input.size() == 0) { return cudf::empty_like(input); }
bool const has_null_list = child.has_nulls();
return (null_policy == concatenate_null_policy::IGNORE || !has_null_list)
? concatenate_lists_ignore_null(input, has_null_list, stream, mr)
: concatenate_lists_nullifying_rows(input, stream, mr);
}
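// Illustrative behavior (not part of the original source): for an input column
// [ [[1, 2], [3, 4]], [[5], [6, 7]] ] the result is [ [1, 2, 3, 4], [5, 6, 7] ].
// With concatenate_null_policy::NULLIFY_OUTPUT_ROW, a row containing any null
// list produces a null output row; with IGNORE, null lists are treated as empty
// and skipped during concatenation.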
} // namespace detail
/**
* @copydoc cudf::lists::concatenate_list_elements
*/
std::unique_ptr<column> concatenate_list_elements(column_view const& input,
concatenate_null_policy null_policy,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::concatenate_list_elements(input, null_policy, stream, mr);
}
} // namespace lists
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/hash/murmurhash3_x64_128.cu
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/algorithm.cuh>
#include <cudf/hashing/detail/hashing.hpp>
#include <cudf/hashing/detail/murmurhash3_x64_128.cuh>
#include <cudf/table/table_device_view.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/for_each.h>
namespace cudf {
namespace hashing {
namespace detail {
namespace {
using hash_value_type = thrust::pair<uint64_t, uint64_t>;
/**
* @brief Computes the hash value of a row in the given table.
*
* @tparam Nullate A cudf::nullate type describing whether to check for nulls.
*/
template <typename Nullate>
class murmur_device_row_hasher {
public:
murmur_device_row_hasher(Nullate nulls,
table_device_view const& t,
uint64_t seed,
uint64_t* d_output1,
uint64_t* d_output2)
: _check_nulls(nulls), _input(t), _seed(seed), _output1(d_output1), _output2(d_output2)
{
}
/**
* @brief Return the hash value of a row in the given table.
*
* @param row_index The row index to compute the hash value of
* @return The hash value of the row
*/
__device__ void operator()(size_type row_index) const noexcept
{
auto h = cudf::detail::accumulate(
_input.begin(),
_input.end(),
hash_value_type{_seed, 0},
[row_index, nulls = this->_check_nulls] __device__(auto hash, auto column) {
return cudf::type_dispatcher(
column.type(), element_hasher_adapter{}, column, row_index, nulls, hash);
});
_output1[row_index] = h.first;
_output2[row_index] = h.second;
}
/**
* @brief Computes the hash value of an element in the given column.
*/
class element_hasher_adapter {
public:
template <typename T, CUDF_ENABLE_IF(column_device_view::has_element_accessor<T>())>
__device__ hash_value_type operator()(column_device_view const& col,
size_type row_index,
Nullate const _check_nulls,
hash_value_type const _seed) const noexcept
{
if (_check_nulls && col.is_null(row_index)) {
return {std::numeric_limits<uint64_t>::max(), std::numeric_limits<uint64_t>::max()};
}
auto const hasher = MurmurHash3_x64_128<T>{_seed.first};
return hasher(col.element<T>(row_index));
}
template <typename T, CUDF_ENABLE_IF(not column_device_view::has_element_accessor<T>())>
__device__ hash_value_type operator()(column_device_view const&,
size_type,
Nullate const,
hash_value_type const) const noexcept
{
CUDF_UNREACHABLE("Unsupported type for MurmurHash3_x64_128");
}
};
Nullate const _check_nulls;
table_device_view const _input;
uint64_t const _seed;
uint64_t* _output1;
uint64_t* _output2;
};
} // namespace
std::unique_ptr<table> murmurhash3_x64_128(table_view const& input,
uint64_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto output1 = make_numeric_column(
data_type(type_id::UINT64), input.num_rows(), mask_state::UNALLOCATED, stream, mr);
auto output2 = make_numeric_column(
data_type(type_id::UINT64), input.num_rows(), mask_state::UNALLOCATED, stream, mr);
if (!input.is_empty()) {
bool const nullable = has_nulls(input);
auto const input_view = table_device_view::create(input, stream);
auto d_output1 = output1->mutable_view().data<uint64_t>();
auto d_output2 = output2->mutable_view().data<uint64_t>();
// Compute the hash value for each row
thrust::for_each_n(rmm::exec_policy(stream),
thrust::counting_iterator<size_type>(0),
input.num_rows(),
murmur_device_row_hasher(nullable, *input_view, seed, d_output1, d_output2));
}
std::vector<std::unique_ptr<column>> out_columns(2);
out_columns.front() = std::move(output1);
out_columns.back() = std::move(output2);
return std::make_unique<table>(std::move(out_columns));
}
} // namespace detail
std::unique_ptr<table> murmurhash3_x64_128(table_view const& input,
uint64_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::murmurhash3_x64_128(input, seed, stream, mr);
}
} // namespace hashing
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/hash/managed.cuh
|
/*
* Copyright (c) 2017-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <new>
struct managed {
static void* operator new(size_t n)
{
void* ptr = nullptr;
cudaError_t result = cudaMallocManaged(&ptr, n);
if (cudaSuccess != result || 0 == ptr) throw std::bad_alloc();
return ptr;
}
static void operator delete(void* ptr) noexcept
{
auto const free_result = cudaFree(ptr);
assert(free_result == cudaSuccess);
}
};
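// Illustrative usage sketch (not part of the original source; `my_state` is a
// hypothetical type): deriving from `managed` places heap allocations of the
// derived type in CUDA managed memory, so the same object can be dereferenced
// from both host and device code.
//
//   struct my_state : managed {
//     int counter;
//   };
//   auto* s = new my_state{};  // allocated with cudaMallocManaged
//   delete s;                  // released with cudaFree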
inline bool isPtrManaged(cudaPointerAttributes attr)
{
#if CUDART_VERSION >= 10000
return (attr.type == cudaMemoryTypeManaged);
#else
return attr.isManaged;
#endif
}
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/hash/md5_hash.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/hashing/detail/hash_functions.cuh>
#include <cudf/hashing/detail/hashing.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/detail/strings_children.cuh>
#include <cudf/strings/string_view.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/for_each.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <iterator>
namespace cudf {
namespace hashing {
namespace detail {
namespace {
template <int capacity, typename hash_step_callable>
struct hash_circular_buffer {
uint8_t storage[capacity];
uint8_t* cur;
int available_space{capacity};
hash_step_callable hash_step;
__device__ inline hash_circular_buffer(hash_step_callable hash_step)
: cur{storage}, hash_step{hash_step}
{
}
__device__ inline void put(uint8_t const* in, int size)
{
int copy_start = 0;
while (size >= available_space) {
// The buffer will be filled by this chunk of data. Copy a chunk of the
// data to fill the buffer and trigger a hash step.
memcpy(cur, in + copy_start, available_space);
hash_step(storage);
size -= available_space;
copy_start += available_space;
cur = storage;
available_space = capacity;
}
// The buffer will not be filled by the remaining data. That is, `size >= 0
// && size < available_space`. We copy the remaining data into the buffer but
// do not trigger a hash step.
memcpy(cur, in + copy_start, size);
cur += size;
available_space -= size;
}
__device__ inline void pad(int const space_to_leave)
{
if (space_to_leave > available_space) {
memset(cur, 0x00, available_space);
hash_step(storage);
cur = storage;
available_space = capacity;
}
memset(cur, 0x00, available_space - space_to_leave);
cur += available_space - space_to_leave;
available_space = space_to_leave;
}
__device__ inline uint8_t const& operator[](int idx) const { return storage[idx]; }
};
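// Illustrative usage sketch (not part of the original source; `my_hash_step`
// and `state` are hypothetical): a hasher owns a buffer sized to its block
// size and streams bytes into it; the buffer invokes the supplied callable
// each time a full block has been accumulated.
//
//   hash_circular_buffer<64, my_hash_step> buffer{my_hash_step{state}};
//   buffer.put(data_ptr, num_bytes);  // may trigger zero or more hash steps
//   buffer.pad(8);                    // zero-fill, leaving 8 bytes of space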
// Get a uint8_t pointer to a column element and its size as a pair.
template <typename Element>
auto __device__ inline get_element_pointer_and_size(Element const& element)
{
if constexpr (is_fixed_width<Element>() && !is_chrono<Element>()) {
return thrust::make_pair(reinterpret_cast<uint8_t const*>(&element), sizeof(Element));
} else {
CUDF_UNREACHABLE("Unsupported type.");
}
}
template <>
auto __device__ inline get_element_pointer_and_size(string_view const& element)
{
return thrust::make_pair(reinterpret_cast<uint8_t const*>(element.data()), element.size_bytes());
}
/**
* Modified GPU implementation of
* https://johnnylee-sde.github.io/Fast-unsigned-integer-to-hex-string/
* Copyright (c) 2015 Barry Clark
* Licensed under the MIT license.
* See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
void __device__ inline uint32ToLowercaseHexString(uint32_t num, char* destination)
{
// Transform 0xABCD'1234 => 0x0000'ABCD'0000'1234 => 0x0B0A'0D0C'0201'0403
uint64_t x = num;
x = ((x & 0xFFFF'0000u) << 16) | ((x & 0xFFFF));
x = ((x & 0x000F'0000'000Fu) << 8) | ((x & 0x00F0'0000'00F0u) >> 4) |
((x & 0x0F00'0000'0F00u) << 16) | ((x & 0xF000'0000'F000) << 4);
// Calculate a mask of ascii value offsets for bytes that contain alphabetical hex digits
uint64_t offsets = (((x + 0x0606'0606'0606'0606) >> 4) & 0x0101'0101'0101'0101) * 0x27;
x |= 0x3030'3030'3030'3030;
x += offsets;
std::memcpy(destination, reinterpret_cast<uint8_t*>(&x), 8);
}
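// Illustrative example (not part of the original source): for num = 0xABCD'1234
// the eight characters written are "3412cdab", i.e. the hexadecimal digits of
// the value's bytes in little-endian (memory) order, which is the byte order
// used when emitting each 32-bit word of an MD5 digest.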
// The MD5 algorithm and its hash/shift constants are officially specified in
// RFC 1321. For convenience, these values can also be found on Wikipedia:
// https://en.wikipedia.org/wiki/MD5
const __constant__ uint32_t md5_shift_constants[16] = {
7, 12, 17, 22, 5, 9, 14, 20, 4, 11, 16, 23, 6, 10, 15, 21};
const __constant__ uint32_t md5_hash_constants[64] = {
0xd76aa478, 0xe8c7b756, 0x242070db, 0xc1bdceee, 0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501,
0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be, 0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821,
0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa, 0xd62f105d, 0x02441453, 0xd8a1e681, 0xe7d3fbc8,
0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed, 0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a,
0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c, 0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70,
0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x04881d05, 0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665,
0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039, 0x655b59c3, 0x8f0ccc92, 0xffeff47d, 0x85845dd1,
0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1, 0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391,
};
struct MD5Hasher {
static constexpr int message_chunk_size = 64;
__device__ inline MD5Hasher(char* result_location)
: result_location(result_location), buffer(md5_hash_step{hash_values})
{
}
__device__ inline ~MD5Hasher()
{
// On destruction, finalize the message buffer and write out the current
// hexadecimal hash value to the result location.
// Add a one byte flag 0b10000000 to signal the end of the message.
uint8_t constexpr end_of_message = 0x80;
// The message length is appended to the end of the last chunk processed.
uint64_t const message_length_in_bits = message_length * 8;
buffer.put(&end_of_message, sizeof(end_of_message));
buffer.pad(sizeof(message_length_in_bits));
buffer.put(reinterpret_cast<uint8_t const*>(&message_length_in_bits),
sizeof(message_length_in_bits));
for (int i = 0; i < 4; ++i) {
uint32ToLowercaseHexString(hash_values[i], result_location + (8 * i));
}
}
MD5Hasher(MD5Hasher const&) = delete;
MD5Hasher& operator=(MD5Hasher const&) = delete;
MD5Hasher(MD5Hasher&&) = delete;
MD5Hasher& operator=(MD5Hasher&&) = delete;
template <typename Element>
void __device__ inline process(Element const& element)
{
auto const normalized_element = normalize_nans_and_zeros(element);
auto const [element_ptr, size] = get_element_pointer_and_size(normalized_element);
buffer.put(element_ptr, size);
message_length += size;
}
/**
* @brief Core MD5 algorithm implementation. Processes a single 64-byte chunk,
* updating the hash value so far. Does not zero out the buffer contents.
*/
struct md5_hash_step {
uint32_t (&hash_values)[4];
void __device__ inline operator()(uint8_t const (&buffer)[message_chunk_size])
{
uint32_t A = hash_values[0];
uint32_t B = hash_values[1];
uint32_t C = hash_values[2];
uint32_t D = hash_values[3];
for (int j = 0; j < message_chunk_size; j++) {
uint32_t F;
uint32_t g;
// No default case is needed because j < 64. j / 16 is always 0, 1, 2, or 3.
switch (j / 16) {
case 0:
F = (B & C) | ((~B) & D);
g = j;
break;
case 1:
F = (D & B) | ((~D) & C);
g = (5 * j + 1) % 16;
break;
case 2:
F = B ^ C ^ D;
g = (3 * j + 5) % 16;
break;
case 3:
F = C ^ (B | (~D));
g = (7 * j) % 16;
break;
}
uint32_t buffer_element_as_int;
memcpy(&buffer_element_as_int, &buffer[g * 4], 4);
F = F + A + md5_hash_constants[j] + buffer_element_as_int;
A = D;
D = C;
C = B;
B = B + rotate_bits_left(F, md5_shift_constants[((j / 16) * 4) + (j % 4)]);
}
hash_values[0] += A;
hash_values[1] += B;
hash_values[2] += C;
hash_values[3] += D;
}
};
char* result_location;
hash_circular_buffer<message_chunk_size, md5_hash_step> buffer;
uint64_t message_length = 0;
uint32_t hash_values[4] = {0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476};
};
template <typename Hasher>
struct HasherDispatcher {
Hasher* hasher;
column_device_view const& input_col;
__device__ inline HasherDispatcher(Hasher* hasher, column_device_view const& input_col)
: hasher{hasher}, input_col{input_col}
{
}
template <typename Element>
void __device__ inline operator()(size_type const row_index) const
{
if constexpr ((is_fixed_width<Element>() && !is_chrono<Element>()) ||
std::is_same_v<Element, string_view>) {
hasher->process(input_col.element<Element>(row_index));
} else {
(void)row_index;
CUDF_UNREACHABLE("Unsupported type for hash function.");
}
}
};
template <typename Hasher>
struct ListHasherDispatcher {
Hasher* hasher;
column_device_view const& input_col;
__device__ inline ListHasherDispatcher(Hasher* hasher, column_device_view const& input_col)
: hasher{hasher}, input_col{input_col}
{
}
template <typename Element>
void __device__ inline operator()(size_type const offset_begin, size_type const offset_end) const
{
if constexpr ((is_fixed_width<Element>() && !is_chrono<Element>()) ||
std::is_same_v<Element, string_view>) {
for (size_type i = offset_begin; i < offset_end; i++) {
if (input_col.is_valid(i)) { hasher->process(input_col.element<Element>(i)); }
}
} else {
(void)offset_begin;
(void)offset_end;
CUDF_UNREACHABLE("Unsupported type for hash function.");
}
}
};
// MD5 supported leaf data type check
inline bool md5_leaf_type_check(data_type dt)
{
return (is_fixed_width(dt) && !is_chrono(dt)) || (dt.id() == type_id::STRING);
}
} // namespace
std::unique_ptr<column> md5(table_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.num_columns() == 0 || input.num_rows() == 0) {
// Return the MD5 hash of a zero-length input.
string_scalar const string_128bit("d41d8cd98f00b204e9800998ecf8427e");
return make_column_from_scalar(string_128bit, input.num_rows(), stream, mr);
}
// Accepts string and fixed width columns, or single layer list columns holding those types
CUDF_EXPECTS(std::all_of(input.begin(),
input.end(),
[](auto const& col) {
if (col.type().id() == type_id::LIST) {
return md5_leaf_type_check(lists_column_view(col).child().type());
}
return md5_leaf_type_check(col.type());
}),
"Unsupported column type for hash function.");
// Hex digest size in characters (the 16-byte MD5 digest is rendered as 32 hex characters)
auto constexpr digest_size = 32;
// Result column allocation and creation
auto begin = thrust::make_constant_iterator(digest_size);
auto [offsets_column, bytes] =
cudf::detail::make_offsets_child_column(begin, begin + input.num_rows(), stream, mr);
auto chars_column = strings::detail::create_chars_child_column(bytes, stream, mr);
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.data<char>();
auto const device_input = table_device_view::create(input, stream);
// Hash each row, hashing each element sequentially left to right
thrust::for_each(
rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(input.num_rows()),
[d_chars, device_input = *device_input] __device__(auto row_index) {
MD5Hasher hasher(d_chars + (row_index * digest_size));
for (auto const& col : device_input) {
if (col.is_valid(row_index)) {
if (col.type().id() == type_id::LIST) {
auto const data_col = col.child(lists_column_view::child_column_index);
auto const offsets = col.child(lists_column_view::offsets_column_index);
if (data_col.type().id() == type_id::LIST) {
CUDF_UNREACHABLE("Nested list unsupported");
}
auto const offset_begin = offsets.element<size_type>(row_index);
auto const offset_end = offsets.element<size_type>(row_index + 1);
cudf::type_dispatcher<dispatch_storage_type>(
data_col.type(), ListHasherDispatcher(&hasher, data_col), offset_begin, offset_end);
} else {
cudf::type_dispatcher<dispatch_storage_type>(
col.type(), HasherDispatcher(&hasher, col), row_index);
}
}
}
});
return make_strings_column(
input.num_rows(), std::move(offsets_column), std::move(chars_column), 0, {});
}
} // namespace detail
std::unique_ptr<column> md5(table_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::md5(input, stream, mr);
}
} // namespace hashing
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/hash/murmurhash3_x86_32.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/hashing/detail/hashing.hpp>
#include <cudf/hashing/detail/murmurhash3_x86_32.cuh>
#include <cudf/table/experimental/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/tabulate.h>
namespace cudf {
namespace hashing {
namespace detail {
std::unique_ptr<column> murmurhash3_x86_32(table_view const& input,
uint32_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto output = make_numeric_column(data_type(type_to_id<hash_value_type>()),
input.num_rows(),
mask_state::UNALLOCATED,
stream,
mr);
// Return early if there's nothing to hash
if (input.num_columns() == 0 || input.num_rows() == 0) { return output; }
bool const nullable = has_nulls(input);
auto const row_hasher = cudf::experimental::row::hash::row_hasher(input, stream);
auto output_view = output->mutable_view();
// Compute the hash value for each row
thrust::tabulate(rmm::exec_policy(stream),
output_view.begin<hash_value_type>(),
output_view.end<hash_value_type>(),
row_hasher.device_hasher<MurmurHash3_x86_32>(nullable, seed));
return output;
}
} // namespace detail
std::unique_ptr<column> murmurhash3_x86_32(table_view const& input,
uint32_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::murmurhash3_x86_32(input, seed, stream, mr);
}
} // namespace hashing
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/hash/hashing.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/hashing/detail/hashing.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace hashing {
namespace detail {
std::unique_ptr<column> hash(table_view const& input,
hash_id hash_function,
uint32_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
switch (hash_function) {
case (hash_id::HASH_MURMUR3): return murmurhash3_x86_32(input, seed, stream, mr);
case (hash_id::HASH_SPARK_MURMUR3): return spark_murmurhash3_x86_32(input, seed, stream, mr);
case (hash_id::HASH_MD5): return md5(input, stream, mr);
default: CUDF_FAIL("Unsupported hash function.");
}
}
} // namespace detail
} // namespace hashing
std::unique_ptr<column> hash(table_view const& input,
hash_id hash_function,
uint32_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return hashing::detail::hash(input, hash_function, seed, stream, mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/hash/spark_murmurhash3_x86_32.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/hashing/detail/hash_functions.cuh>
#include <cudf/hashing/detail/hashing.hpp>
#include <cudf/table/experimental/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/tabulate.h>
namespace cudf {
namespace hashing {
namespace detail {
namespace {
using spark_hash_value_type = int32_t;
template <typename Key, CUDF_ENABLE_IF(not cudf::is_nested<Key>())>
struct Spark_MurmurHash3_x86_32 {
using result_type = spark_hash_value_type;
constexpr Spark_MurmurHash3_x86_32() = default;
constexpr Spark_MurmurHash3_x86_32(uint32_t seed) : m_seed(seed) {}
[[nodiscard]] __device__ inline uint32_t fmix32(uint32_t h) const
{
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return h;
}
[[nodiscard]] __device__ inline uint32_t getblock32(std::byte const* data,
cudf::size_type offset) const
{
// Read a 4-byte value from the data pointer as individual bytes for safe
// unaligned access (very likely for string types).
auto block = reinterpret_cast<uint8_t const*>(data + offset);
return block[0] | (block[1] << 8) | (block[2] << 16) | (block[3] << 24);
}
[[nodiscard]] result_type __device__ inline operator()(Key const& key) const
{
return compute(key);
}
template <typename T>
result_type __device__ inline compute(T const& key) const
{
return compute_bytes(reinterpret_cast<std::byte const*>(&key), sizeof(T));
}
result_type __device__ inline compute_remaining_bytes(std::byte const* data,
cudf::size_type len,
cudf::size_type tail_offset,
result_type h) const
{
// Process remaining bytes that do not fill a four-byte chunk using Spark's approach
// (does not conform to normal MurmurHash3).
for (auto i = tail_offset; i < len; i++) {
// We require a two-step cast to get the k1 value from the byte. First,
// we must cast to a signed int8_t. Then, the sign bit is preserved when
// casting to uint32_t under 2's complement. Java preserves the sign when
// casting byte-to-int, but C++ does not.
uint32_t k1 = static_cast<uint32_t>(std::to_integer<int8_t>(data[i]));
k1 *= c1;
k1 = rotate_bits_left(k1, rot_c1);
k1 *= c2;
h ^= k1;
h = rotate_bits_left(static_cast<uint32_t>(h), rot_c2);
h = h * 5 + c3;
}
return h;
}
result_type __device__ compute_bytes(std::byte const* data, cudf::size_type const len) const
{
constexpr cudf::size_type BLOCK_SIZE = 4;
cudf::size_type const nblocks = len / BLOCK_SIZE;
cudf::size_type const tail_offset = nblocks * BLOCK_SIZE;
result_type h = m_seed;
// Process all four-byte chunks.
for (cudf::size_type i = 0; i < nblocks; i++) {
uint32_t k1 = getblock32(data, i * BLOCK_SIZE);
k1 *= c1;
k1 = rotate_bits_left(k1, rot_c1);
k1 *= c2;
h ^= k1;
h = rotate_bits_left(static_cast<uint32_t>(h), rot_c2);
h = h * 5 + c3;
}
h = compute_remaining_bytes(data, len, tail_offset, h);
// Finalize hash.
h ^= len;
h = fmix32(h);
return h;
}
private:
uint32_t m_seed{cudf::DEFAULT_HASH_SEED};
static constexpr uint32_t c1 = 0xcc9e2d51;
static constexpr uint32_t c2 = 0x1b873593;
static constexpr uint32_t c3 = 0xe6546b64;
static constexpr uint32_t rot_c1 = 15;
static constexpr uint32_t rot_c2 = 13;
};
template <>
spark_hash_value_type __device__ inline Spark_MurmurHash3_x86_32<bool>::operator()(
bool const& key) const
{
return compute<uint32_t>(key);
}
template <>
spark_hash_value_type __device__ inline Spark_MurmurHash3_x86_32<int8_t>::operator()(
int8_t const& key) const
{
return compute<uint32_t>(key);
}
template <>
spark_hash_value_type __device__ inline Spark_MurmurHash3_x86_32<uint8_t>::operator()(
uint8_t const& key) const
{
return compute<uint32_t>(key);
}
template <>
spark_hash_value_type __device__ inline Spark_MurmurHash3_x86_32<int16_t>::operator()(
int16_t const& key) const
{
return compute<uint32_t>(key);
}
template <>
spark_hash_value_type __device__ inline Spark_MurmurHash3_x86_32<uint16_t>::operator()(
uint16_t const& key) const
{
return compute<uint32_t>(key);
}
template <>
spark_hash_value_type __device__ inline Spark_MurmurHash3_x86_32<float>::operator()(
float const& key) const
{
return compute<float>(normalize_nans(key));
}
template <>
spark_hash_value_type __device__ inline Spark_MurmurHash3_x86_32<double>::operator()(
double const& key) const
{
return compute<double>(normalize_nans(key));
}
template <>
spark_hash_value_type __device__ inline Spark_MurmurHash3_x86_32<cudf::string_view>::operator()(
cudf::string_view const& key) const
{
auto const data = reinterpret_cast<std::byte const*>(key.data());
auto const len = key.size_bytes();
return compute_bytes(data, len);
}
template <>
spark_hash_value_type __device__ inline Spark_MurmurHash3_x86_32<numeric::decimal32>::operator()(
numeric::decimal32 const& key) const
{
return compute<uint64_t>(key.value());
}
template <>
spark_hash_value_type __device__ inline Spark_MurmurHash3_x86_32<numeric::decimal64>::operator()(
numeric::decimal64 const& key) const
{
return compute<uint64_t>(key.value());
}
template <>
spark_hash_value_type __device__ inline Spark_MurmurHash3_x86_32<numeric::decimal128>::operator()(
numeric::decimal128 const& key) const
{
// Generates the Spark MurmurHash3 hash value, mimicking the conversion:
// java.math.BigDecimal.valueOf(unscaled_value, _scale).unscaledValue().toByteArray()
// https://github.com/apache/spark/blob/master/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/hash.scala#L381
__int128_t const val = key.value();
constexpr cudf::size_type key_size = sizeof(__int128_t);
std::byte const* data = reinterpret_cast<std::byte const*>(&val);
// Small negative values start with 0xff..., small positive values start with 0x00...
bool const is_negative = val < 0;
std::byte const zero_value = is_negative ? std::byte{0xff} : std::byte{0x00};
// If the value can be represented with a shorter than 16-byte integer, the
// leading bytes of the little-endian value are truncated and are not hashed.
auto const reverse_begin = thrust::reverse_iterator(data + key_size);
auto const reverse_end = thrust::reverse_iterator(data);
auto const first_nonzero_byte =
thrust::find_if_not(thrust::seq, reverse_begin, reverse_end, [zero_value](std::byte const& v) {
return v == zero_value;
}).base();
// Max handles special case of 0 and -1 which would shorten to 0 length otherwise
cudf::size_type length =
std::max(1, static_cast<cudf::size_type>(thrust::distance(data, first_nonzero_byte)));
// Preserve the 2's complement sign bit by adding a byte back on if necessary.
// e.g. 0x0000ff would shorten to 0x00ff. The 0x00 byte is retained to
// preserve the sign bit, rather than leaving an "f" at the front which would
// change the sign bit. However, 0x00007f would shorten to 0x7f. No extra byte
// is needed because the leftmost bit matches the sign bit. Similarly for
// negative values: 0xffff00 --> 0xff00 and 0xffff80 --> 0x80.
if ((length < key_size) && (is_negative ^ bool(data[length - 1] & std::byte{0x80}))) { ++length; }
// Convert to big endian by reversing the range of nonzero bytes. Only those bytes are hashed.
__int128_t big_endian_value = 0;
auto big_endian_data = reinterpret_cast<std::byte*>(&big_endian_value);
thrust::reverse_copy(thrust::seq, data, data + length, big_endian_data);
return compute_bytes(big_endian_data, length);
}
/**
* @brief Computes the hash value of a row in the given table.
*
* This functor uses Spark conventions for Murmur hashing, which differs from
* the Murmur implementation used in the rest of libcudf. These differences
* include:
* - Serially using the output hash as an input seed for the next item
* - Ignoring null values (a null element leaves the running seed unchanged)
*
* The serial use of hashes as seeds means that data of different nested types
* can exhibit hash collisions. For example, a row of an integer column
* containing a 1 will have the same hash as a lists column of integers
* containing a list of [1] and a struct column of a single integer column
* containing a struct of {1}.
*
* As a consequence of ignoring null values, inputs like [1], [1, null], and
* [null, 1] have the same hash (an expected hash collision). This kind of
* collision can also occur across a table of nullable columns and with nulls
* in structs ({1, null} and {null, 1} have the same hash). The seed value (the
* previous element's hash value) is returned as the hash if an element is
* null.
*
* For additional differences such as special tail processing and decimal type
* handling, refer to the Spark_MurmurHash3_x86_32 functor.
*
* @tparam hash_function Hash functor to use for hashing elements. Must be Spark_MurmurHash3_x86_32.
* @tparam Nullate A cudf::nullate type describing whether to check for nulls.
*/
template <template <typename> class hash_function, typename Nullate>
class spark_murmur_device_row_hasher {
friend class cudf::experimental::row::hash::row_hasher; ///< Allow row_hasher to access private
///< members.
public:
/**
* @brief Return the hash value of a row in the given table.
*
* @param row_index The row index to compute the hash value of
* @return The hash value of the row
*/
__device__ auto operator()(size_type row_index) const noexcept
{
return cudf::detail::accumulate(
_table.begin(),
_table.end(),
_seed,
[row_index, nulls = this->_check_nulls] __device__(auto hash, auto column) {
return cudf::type_dispatcher(
column.type(), element_hasher_adapter<hash_function>{nulls, hash}, column, row_index);
});
}
private:
/**
* @brief Computes the hash value of an element in the given column.
*
* When the column is non-nested, this is a simple wrapper around the element_hasher.
* When the column is nested, this uses a seed value to serially compute each
* nested element, with the output hash becoming the seed for the next value.
* This requires constructing a new hash functor for each nested element,
* using the new seed from the previous element's hash. The hash of a null
* element is the input seed (the previous element's hash).
*/
template <template <typename> class hash_fn>
class element_hasher_adapter {
public:
__device__ element_hasher_adapter(Nullate check_nulls, uint32_t seed) noexcept
: _check_nulls(check_nulls), _seed(seed)
{
}
using hash_functor = cudf::experimental::row::hash::element_hasher<hash_fn, Nullate>;
template <typename T, CUDF_ENABLE_IF(not cudf::is_nested<T>())>
__device__ spark_hash_value_type operator()(column_device_view const& col,
size_type row_index) const noexcept
{
auto const hasher = hash_functor{_check_nulls, _seed, _seed};
return hasher.template operator()<T>(col, row_index);
}
template <typename T, CUDF_ENABLE_IF(cudf::is_nested<T>())>
__device__ spark_hash_value_type operator()(column_device_view const& col,
size_type row_index) const noexcept
{
column_device_view curr_col = col.slice(row_index, 1);
while (curr_col.type().id() == type_id::STRUCT || curr_col.type().id() == type_id::LIST) {
if (curr_col.type().id() == type_id::STRUCT) {
if (curr_col.num_child_columns() == 0) { return _seed; }
// Non-empty structs are assumed to be decomposed and contain only one child
curr_col = cudf::detail::structs_column_device_view(curr_col).get_sliced_child(0);
} else if (curr_col.type().id() == type_id::LIST) {
curr_col = cudf::detail::lists_column_device_view(curr_col).get_sliced_child();
}
}
return cudf::detail::accumulate(
thrust::counting_iterator(0),
thrust::counting_iterator(curr_col.size()),
_seed,
[curr_col, nulls = this->_check_nulls] __device__(auto hash, auto element_index) {
auto const hasher = hash_functor{nulls, hash, hash};
return cudf::type_dispatcher<cudf::experimental::dispatch_void_if_nested>(
curr_col.type(), hasher, curr_col, element_index);
});
}
Nullate const _check_nulls; ///< Whether to check for nulls
uint32_t const _seed; ///< The seed to use for hashing, also returned for null elements
};
CUDF_HOST_DEVICE spark_murmur_device_row_hasher(Nullate check_nulls,
table_device_view t,
uint32_t seed = DEFAULT_HASH_SEED) noexcept
: _check_nulls{check_nulls}, _table{t}, _seed(seed)
{
// Error out if passed an unsupported hash_function
static_assert(
std::is_base_of_v<Spark_MurmurHash3_x86_32<int>, hash_function<int>>,
"spark_murmur_device_row_hasher only supports the Spark_MurmurHash3_x86_32 hash function");
}
Nullate const _check_nulls;
table_device_view const _table;
uint32_t const _seed;
};
void check_hash_compatibility(table_view const& input)
{
using column_checker_fn_t = std::function<void(column_view const&)>;
column_checker_fn_t check_column = [&](column_view const& c) {
if (c.type().id() == type_id::LIST) {
auto const& list_col = lists_column_view(c);
CUDF_EXPECTS(list_col.child().type().id() != type_id::STRUCT,
"Cannot compute hash of a table with a LIST of STRUCT columns.");
check_column(list_col.child());
} else if (c.type().id() == type_id::STRUCT) {
for (auto child = c.child_begin(); child != c.child_end(); ++child) {
check_column(*child);
}
}
};
for (column_view const& c : input) {
check_column(c);
}
}
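// For example (illustrative, not part of the original source): a column of type
// LIST<STRUCT<INT32>> is rejected by this check, while STRUCT<LIST<INT32>> and
// LIST<LIST<INT32>> are accepted and hashed by descending into their children.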
} // namespace
std::unique_ptr<column> spark_murmurhash3_x86_32(table_view const& input,
uint32_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto output = make_numeric_column(data_type(type_to_id<spark_hash_value_type>()),
input.num_rows(),
mask_state::UNALLOCATED,
stream,
mr);
// Return early if there's nothing to hash
if (input.num_columns() == 0 || input.num_rows() == 0) { return output; }
// Lists of structs are not supported
check_hash_compatibility(input);
bool const nullable = has_nested_nulls(input);
auto const row_hasher = cudf::experimental::row::hash::row_hasher(input, stream);
auto output_view = output->mutable_view();
// Compute the hash value for each row
thrust::tabulate(
rmm::exec_policy(stream),
output_view.begin<spark_hash_value_type>(),
output_view.end<spark_hash_value_type>(),
row_hasher.device_hasher<Spark_MurmurHash3_x86_32, spark_murmur_device_row_hasher>(nullable,
seed));
return output;
}
} // namespace detail
std::unique_ptr<column> spark_murmurhash3_x86_32(table_view const& input,
uint32_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::spark_murmurhash3_x86_32(input, seed, stream, mr);
}
} // namespace hashing
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/hash/concurrent_unordered_map.cuh
|
/*
* Copyright (c) 2017-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <hash/managed.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/hashing/detail/default_hash.cuh>
#include <cudf/hashing/detail/hash_allocator.cuh>
#include <cudf/hashing/detail/helper_functions.cuh>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/pair.h>
#include <iostream>
#include <iterator>
#include <limits>
#include <type_traits>
#include <cuda/atomic>
namespace {
template <std::size_t N>
struct packed {
using type = void;
};
template <>
struct packed<sizeof(uint64_t)> {
using type = uint64_t;
};
template <>
struct packed<sizeof(uint32_t)> {
using type = uint32_t;
};
template <typename pair_type>
using packed_t = typename packed<sizeof(pair_type)>::type;
/**
* @brief Indicates if a pair type can be packed.
*
* When the size of the key,value pair being inserted into the hash table is
* equal in size to a type where atomicCAS is natively supported, it is more
* efficient to "pack" the pair and insert it with a single atomicCAS.
*
* Only integral key and value types may be packed because we use
* bitwise equality comparison, which may not be valid for non-integral
* types.
*
* Also, the `pair_type` must not contain any padding bits otherwise
* accessing the packed value would be undefined.
*
* @tparam pair_type The pair type that will be packed
* @return true If the pair type can be packed
* @return false If the pair type cannot be packed
*/
template <typename pair_type,
typename key_type = typename pair_type::first_type,
typename value_type = typename pair_type::second_type>
constexpr bool is_packable()
{
return std::is_integral_v<key_type> and std::is_integral_v<value_type> and
not std::is_void_v<packed_t<pair_type>> and
std::has_unique_object_representations_v<pair_type>;
}
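// Illustrative examples (not part of the original source, assuming a typical
// 64-bit target): thrust::pair<int32_t, int32_t> is 8 bytes with no padding, so
// it is packable into a single uint64_t and can be inserted with one atomicCAS.
// thrust::pair<int32_t, int8_t> also occupies 8 bytes, but its padding bytes
// make std::has_unique_object_representations_v false, so it is not packable.
//
//   static_assert(is_packable<thrust::pair<int32_t, int32_t>>());
//   static_assert(not is_packable<thrust::pair<int32_t, int8_t>>());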
/**
* @brief Allows viewing a pair in a packed representation
*
* Used as an optimization for inserting when a pair can be inserted with a
* single atomicCAS
*/
template <typename pair_type, typename Enable = void>
union pair_packer;
template <typename pair_type>
union pair_packer<pair_type, std::enable_if_t<is_packable<pair_type>()>> {
using packed_type = packed_t<pair_type>;
packed_type packed;
pair_type pair;
__device__ pair_packer(pair_type _pair) : pair{_pair} {}
__device__ pair_packer(packed_type _packed) : packed{_packed} {}
};
} // namespace
/**
* Supports concurrent insert, but not concurrent insert and find.
*
* @note The user is responsible for the following stream semantics:
* - Either the same stream should be used to create the map as is used by the kernels that access
* it, or
* - the stream used to create the map should be synchronized before it is accessed from a different
* stream or from host code.
*
* TODO:
* - add constructor that takes pointer to hash_table to avoid allocations
*/
template <typename Key,
typename Element,
typename Hasher = cudf::hashing::detail::default_hash<Key>,
typename Equality = equal_to<Key>,
typename Allocator = default_allocator<thrust::pair<Key, Element>>>
class concurrent_unordered_map {
public:
using size_type = size_t;
using hasher = Hasher;
using key_equal = Equality;
using allocator_type = Allocator;
using key_type = Key;
using mapped_type = Element;
using value_type = thrust::pair<Key, Element>;
using iterator = cycle_iterator_adapter<value_type*>;
using const_iterator = cycle_iterator_adapter<value_type*> const;
public:
/**
* @brief Factory to construct a new concurrent unordered map.
*
* Returns a `std::unique_ptr` to a new concurrent unordered map object. The
* map is non-owning and trivially copyable and should be passed by value into
* kernels. The `unique_ptr` contains a custom deleter that will free the
* map's contents.
*
* @note The implementation of this unordered_map uses sentinel values to
* indicate an entry in the hash table that is empty, i.e., if a hash bucket
* is empty, the pair residing there will be equal to (unused_key,
* unused_element). As a result, attempting to insert a key equal to
*`unused_key` results in undefined behavior.
*
* @note All allocations, kernels and copies in the constructor take place
* on stream but the constructor does not synchronize the stream. It is the user's
* responsibility to synchronize or use the same stream to access the map.
*
* @param capacity The maximum number of pairs the map may hold
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param unused_element The sentinel value to use for an empty value
* @param unused_key The sentinel value to use for an empty key
* @param hash_function The hash function to use for hashing keys
* @param equal The equality comparison function for comparing if two keys are
* equal
* @param allocator The allocator to use for allocating the hash table's
* storage
*/
static auto create(size_type capacity,
rmm::cuda_stream_view stream,
mapped_type const unused_element = std::numeric_limits<mapped_type>::max(),
key_type const unused_key = std::numeric_limits<key_type>::max(),
Hasher const& hash_function = hasher(),
Equality const& equal = key_equal(),
allocator_type const& allocator = allocator_type())
{
CUDF_FUNC_RANGE();
using Self = concurrent_unordered_map<Key, Element, Hasher, Equality, Allocator>;
// Note: need `(*p).destroy` instead of `p->destroy` here
// due to compiler bug: https://github.com/rapidsai/cudf/pull/5692
auto deleter = [stream](Self* p) { (*p).destroy(stream); };
return std::unique_ptr<Self, std::function<void(Self*)>>{
new Self(capacity, unused_element, unused_key, hash_function, equal, allocator, stream),
deleter};
}
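// Illustrative host-side usage sketch (not part of the original source;
// `compute_hash_table_size`, `num_keys`, and `stream` are assumed to be
// provided by the caller):
//
//   using map_type = concurrent_unordered_map<int64_t, cudf::size_type>;
//   auto map = map_type::create(compute_hash_table_size(num_keys), stream);
//   // Pass *map by value into a kernel that calls map.insert(...), then
//   // synchronize `stream` before accessing the results elsewhere.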
/**
* @brief Returns an iterator to the first element in the map
*
* @note `__device__` code that calls this function should either run on the
* same stream as `create()`, or the accessing stream should be appropriately
* synchronized with the creating stream.
*
* @returns iterator to the first element in the map.
*/
__device__ iterator begin()
{
return iterator(m_hashtbl_values, m_hashtbl_values + m_capacity, m_hashtbl_values);
}
/**
* @brief Returns a constant iterator to the first element in the map
*
* @note `__device__` code that calls this function should either run on the
* same stream as `create()`, or the accessing stream should be appropriately
* synchronized with the creating stream.
*
* @returns constant iterator to the first element in the map.
*/
__device__ const_iterator begin() const
{
return const_iterator(m_hashtbl_values, m_hashtbl_values + m_capacity, m_hashtbl_values);
}
/**
* @brief Returns an iterator to the one past the last element in the map
*
* @note `__device__` code that calls this function should either run on the
* same stream as `create()`, or the accessing stream should be appropriately
* synchronized with the creating stream.
*
* @returns iterator to the one past the last element in the map.
*/
__device__ iterator end()
{
return iterator(m_hashtbl_values, m_hashtbl_values + m_capacity, m_hashtbl_values + m_capacity);
}
/**
* @brief Returns a constant iterator to the one past the last element in the map
*
* @note When called in device code, the caller should ensure it runs on the
* same stream as create(), or that the accessing stream is appropriately
* synchronized with the creating stream.
*
* @returns constant iterator to the one past the last element in the map.
*/
__device__ const_iterator end() const
{
return const_iterator(
m_hashtbl_values, m_hashtbl_values + m_capacity, m_hashtbl_values + m_capacity);
}
__host__ __device__ value_type* data() const { return m_hashtbl_values; }
__host__ __device__ key_type get_unused_key() const { return m_unused_key; }
__host__ __device__ mapped_type get_unused_element() const { return m_unused_element; }
[[nodiscard]] __host__ __device__ size_type capacity() const { return m_capacity; }
private:
/**
* @brief Enumeration of the possible results of attempting to insert into
*a hash bucket
*/
enum class insert_result {
CONTINUE, ///< Insert did not succeed, continue trying to insert
///< (collision)
SUCCESS, ///< New pair inserted successfully
DUPLICATE ///< Insert did not succeed, key is already present
};
/**
* @brief Specialization for value types that can be packed.
*
* When the size of the key,value pair being inserted is equal in size to
*a type where atomicCAS is natively supported, this optimization path
*will insert the pair in a single atomicCAS operation.
*/
template <typename pair_type = value_type>
__device__ std::enable_if_t<is_packable<pair_type>(), insert_result> attempt_insert(
value_type* const __restrict__ insert_location, value_type const& insert_pair)
{
pair_packer<pair_type> expected{thrust::make_pair(m_unused_key, m_unused_element)};
pair_packer<pair_type> desired{insert_pair};
using packed_type = typename pair_packer<pair_type>::packed_type;
auto* insert_ptr = reinterpret_cast<packed_type*>(insert_location);
cuda::atomic_ref<packed_type, cuda::thread_scope_device> ref{*insert_ptr};
auto const success =
ref.compare_exchange_strong(expected.packed, desired.packed, cuda::std::memory_order_relaxed);
if (success) {
return insert_result::SUCCESS;
} else if (m_equal(expected.pair.first, insert_pair.first)) {
return insert_result::DUPLICATE;
}
return insert_result::CONTINUE;
}
/**
* @brief Attempts to insert a key,value pair at the specified hash bucket.
*
* @param[in] insert_location Pointer to hash bucket to attempt insert
* @param[in] insert_pair The pair to insert
* @return Enum indicating result of insert attempt.
*/
template <typename pair_type = value_type>
__device__ std::enable_if_t<not is_packable<pair_type>(), insert_result> attempt_insert(
value_type* const __restrict__ insert_location, value_type const& insert_pair)
{
auto expected = m_unused_key;
cuda::atomic_ref<key_type, cuda::thread_scope_device> ref{insert_location->first};
auto const key_success =
ref.compare_exchange_strong(expected, insert_pair.first, cuda::std::memory_order_relaxed);
// Hash bucket empty
if (key_success) {
insert_location->second = insert_pair.second;
return insert_result::SUCCESS;
}
// Key already exists
else if (m_equal(expected, insert_pair.first)) {
return insert_result::DUPLICATE;
}
return insert_result::CONTINUE;
}
public:
/**
* @brief Attempts to insert a key, value pair into the map.
*
* Returns an iterator, boolean pair.
*
* If the new key already present in the map, the iterator points to
* the location of the existing key and the boolean is `false` indicating
* that the insert did not succeed.
*
* If the new key was not present, the iterator points to the location
* where the insert occurred and the boolean is `true` indicating that the
*insert succeeded.
*
* @param insert_pair The key and value pair to insert
* @return Iterator, Boolean pair. Iterator is to the location of the
*newly inserted pair, or the existing pair that prevented the insert.
*Boolean indicates insert success.
*/
__device__ thrust::pair<iterator, bool> insert(value_type const& insert_pair)
{
size_type const key_hash{m_hf(insert_pair.first)};
size_type index{key_hash % m_capacity};
insert_result status{insert_result::CONTINUE};
value_type* current_bucket{nullptr};
while (status == insert_result::CONTINUE) {
current_bucket = &m_hashtbl_values[index];
status = attempt_insert(current_bucket, insert_pair);
index = (index + 1) % m_capacity;
}
bool const insert_success = status == insert_result::SUCCESS;
return thrust::make_pair(
iterator(m_hashtbl_values, m_hashtbl_values + m_capacity, current_bucket), insert_success);
}
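// Illustrative device-side usage sketch (not part of the original source;
// `map_type` and the kernel are hypothetical):
//
//   __global__ void build_map(map_type map, int64_t const* keys, cudf::size_type n)
//   {
//     auto const tid = static_cast<cudf::size_type>(threadIdx.x + blockIdx.x * blockDim.x);
//     if (tid < n) { map.insert(thrust::make_pair(keys[tid], tid)); }
//   }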
/**
* @brief Searches the map for the specified key.
*
* @note `find` is not threadsafe with `insert`. I.e., it is not safe to
*do concurrent `insert` and `find` operations.
*
* @param k The key to search for
* @return An iterator to the key if it exists, else map.end()
*/
__device__ const_iterator find(key_type const& k) const
{
size_type const key_hash = m_hf(k);
size_type index = key_hash % m_capacity;
value_type* current_bucket = &m_hashtbl_values[index];
while (true) {
key_type const existing_key = current_bucket->first;
if (m_unused_key == existing_key) { return this->end(); }
if (m_equal(k, existing_key)) {
return const_iterator(m_hashtbl_values, m_hashtbl_values + m_capacity, current_bucket);
}
index = (index + 1) % m_capacity;
current_bucket = &m_hashtbl_values[index];
}
}
/**
* @brief Searches the map for the specified key.
*
* This version of the find function specifies a hashing function and an
* equality comparison. This allows the caller to use different functions
* for insert and find (for example, when you want to insert keys from
* one table and use find to match keys from a different table with the
* keys from the first table).
*
* @note `find` is not threadsafe with `insert`. I.e., it is not safe to
* do concurrent `insert` and `find` operations.
*
* @tparam find_hasher Type of hashing function
* @tparam find_key_equal Type of equality comparison
*
* @param k The key to search for
* @param f_hash The hashing function to use to hash this key
* @param f_equal The equality function to use to compare this key with the
* contents of the hash table
* @return An iterator to the key if it exists, else map.end()
*/
template <typename find_hasher, typename find_key_equal>
__device__ const_iterator find(key_type const& k,
find_hasher f_hash,
find_key_equal f_equal) const
{
size_type const key_hash = f_hash(k);
size_type index = key_hash % m_capacity;
value_type* current_bucket = &m_hashtbl_values[index];
while (true) {
key_type const existing_key = current_bucket->first;
if (m_unused_key == existing_key) { return this->end(); }
if (f_equal(k, existing_key)) {
return const_iterator(m_hashtbl_values, m_hashtbl_values + m_capacity, current_bucket);
}
index = (index + 1) % m_capacity;
current_bucket = &m_hashtbl_values[index];
}
}
void assign_async(concurrent_unordered_map const& other, rmm::cuda_stream_view stream)
{
if (other.m_capacity <= m_capacity) {
m_capacity = other.m_capacity;
} else {
m_allocator.deallocate(m_hashtbl_values, m_capacity, stream);
m_capacity = other.m_capacity;
m_hashtbl_values = m_allocator.allocate(m_capacity, stream);
}
CUDF_CUDA_TRY(cudaMemcpyAsync(m_hashtbl_values,
other.m_hashtbl_values,
m_capacity * sizeof(value_type),
cudaMemcpyDefault,
stream.value()));
}
void clear_async(rmm::cuda_stream_view stream)
{
constexpr int block_size = 128;
init_hashtbl<<<((m_capacity - 1) / block_size) + 1, block_size, 0, stream.value()>>>(
m_hashtbl_values, m_capacity, m_unused_key, m_unused_element);
}
void print()
{
for (size_type i = 0; i < m_capacity; ++i) {
std::cout << i << ": " << m_hashtbl_values[i].first << "," << m_hashtbl_values[i].second
<< std::endl;
}
}
void prefetch(int const dev_id, rmm::cuda_stream_view stream)
{
cudaPointerAttributes hashtbl_values_ptr_attributes;
cudaError_t status = cudaPointerGetAttributes(&hashtbl_values_ptr_attributes, m_hashtbl_values);
if (cudaSuccess == status && isPtrManaged(hashtbl_values_ptr_attributes)) {
CUDF_CUDA_TRY(cudaMemPrefetchAsync(
m_hashtbl_values, m_capacity * sizeof(value_type), dev_id, stream.value()));
}
CUDF_CUDA_TRY(cudaMemPrefetchAsync(this, sizeof(*this), dev_id, stream.value()));
}
/**
* @brief Frees the contents of the map and destroys the map object.
*
* This function is invoked as the deleter of the `std::unique_ptr` returned
* from the `create()` factory function.
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
void destroy(rmm::cuda_stream_view stream)
{
m_allocator.deallocate(m_hashtbl_values, m_capacity, stream);
delete this;
}
concurrent_unordered_map() = delete;
concurrent_unordered_map(concurrent_unordered_map const&) = default;
concurrent_unordered_map(concurrent_unordered_map&&) = default;
concurrent_unordered_map& operator=(concurrent_unordered_map const&) = default;
concurrent_unordered_map& operator=(concurrent_unordered_map&&) = default;
~concurrent_unordered_map() = default;
private:
hasher m_hf;
key_equal m_equal;
mapped_type m_unused_element;
key_type m_unused_key;
allocator_type m_allocator;
size_type m_capacity;
value_type* m_hashtbl_values;
/**
* @brief Private constructor used by `create` factory function.
*
* @param capacity The desired capacity of the hash table
* @param unused_element The sentinel value to use for an empty value
* @param unused_key The sentinel value to use for an empty key
* @param hash_function The hash function to use for hashing keys
* @param equal The equality comparison function for comparing if two keys
*are equal
* @param allocator The allocator to use for allocating the hash table's
* storage
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
concurrent_unordered_map(size_type capacity,
mapped_type const unused_element,
key_type const unused_key,
Hasher const& hash_function,
Equality const& equal,
allocator_type const& allocator,
rmm::cuda_stream_view stream)
: m_hf(hash_function),
m_equal(equal),
m_allocator(allocator),
m_capacity(capacity),
m_unused_element(unused_element),
m_unused_key(unused_key)
{
m_hashtbl_values = m_allocator.allocate(m_capacity, stream);
constexpr int block_size = 128;
{
cudaPointerAttributes hashtbl_values_ptr_attributes;
cudaError_t status =
cudaPointerGetAttributes(&hashtbl_values_ptr_attributes, m_hashtbl_values);
if (cudaSuccess == status && isPtrManaged(hashtbl_values_ptr_attributes)) {
int dev_id = 0;
CUDF_CUDA_TRY(cudaGetDevice(&dev_id));
CUDF_CUDA_TRY(cudaMemPrefetchAsync(
m_hashtbl_values, m_capacity * sizeof(value_type), dev_id, stream.value()));
}
}
if (m_capacity > 0) {
init_hashtbl<<<((m_capacity - 1) / block_size) + 1, block_size, 0, stream.value()>>>(
m_hashtbl_values, m_capacity, m_unused_key, m_unused_element);
}
CUDF_CHECK_CUDA(stream.value());
}
};
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/hash/xxhash_64.cu
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/algorithm.cuh>
#include <cudf/hashing/detail/hash_functions.cuh>
#include <cudf/hashing/detail/hashing.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/tabulate.h>
namespace cudf {
namespace hashing {
namespace detail {
namespace {
using hash_value_type = uint64_t;
template <typename Key>
struct XXHash_64 {
using result_type = hash_value_type;
constexpr XXHash_64() = default;
constexpr XXHash_64(hash_value_type seed) : m_seed(seed) {}
__device__ inline uint32_t getblock32(std::byte const* data, std::size_t offset) const
{
// Read a 4-byte value from the data pointer as individual bytes for safe
// unaligned access (very likely for string types).
auto block = reinterpret_cast<uint8_t const*>(data + offset);
return block[0] | (block[1] << 8) | (block[2] << 16) | (block[3] << 24);
}
__device__ inline uint64_t getblock64(std::byte const* data, std::size_t offset) const
{
uint64_t result = getblock32(data, offset + 4);
result = result << 32;
return result | getblock32(data, offset);
}
result_type __device__ inline operator()(Key const& key) const { return compute(key); }
template <typename T>
result_type __device__ inline compute(T const& key) const
{
auto data = device_span<std::byte const>(reinterpret_cast<std::byte const*>(&key), sizeof(T));
return compute_bytes(data);
}
result_type __device__ inline compute_remaining_bytes(device_span<std::byte const>& in,
std::size_t offset,
result_type h64) const
{
// remaining data can be processed in 8-byte chunks
if ((in.size() % 32) >= 8) {
for (; offset <= in.size() - 8; offset += 8) {
uint64_t k1 = getblock64(in.data(), offset) * prime2;
k1 = rotate_bits_left(k1, 31) * prime1;
h64 ^= k1;
h64 = rotate_bits_left(h64, 27) * prime1 + prime4;
}
}
// remaining data can be processed in 4-byte chunks
if ((in.size() % 8) >= 4) {
for (; offset <= in.size() - 4; offset += 4) {
h64 ^= (getblock32(in.data(), offset) & 0xfffffffful) * prime1;
h64 = rotate_bits_left(h64, 23) * prime2 + prime3;
}
}
// and the rest
if (in.size() % 4) {
while (offset < in.size()) {
h64 ^= (std::to_integer<uint8_t>(in[offset]) & 0xff) * prime5;
h64 = rotate_bits_left(h64, 11) * prime1;
++offset;
}
}
return h64;
}
result_type __device__ compute_bytes(device_span<std::byte const>& in) const
{
uint64_t offset = 0;
uint64_t h64;
// data can be processed in 32-byte chunks
if (in.size() >= 32) {
auto limit = in.size() - 32;
uint64_t v1 = m_seed + prime1 + prime2;
uint64_t v2 = m_seed + prime2;
uint64_t v3 = m_seed;
uint64_t v4 = m_seed - prime1;
do {
// pipeline 4*8byte computations
v1 += getblock64(in.data(), offset) * prime2;
v1 = rotate_bits_left(v1, 31);
v1 *= prime1;
offset += 8;
v2 += getblock64(in.data(), offset) * prime2;
v2 = rotate_bits_left(v2, 31);
v2 *= prime1;
offset += 8;
v3 += getblock64(in.data(), offset) * prime2;
v3 = rotate_bits_left(v3, 31);
v3 *= prime1;
offset += 8;
v4 += getblock64(in.data(), offset) * prime2;
v4 = rotate_bits_left(v4, 31);
v4 *= prime1;
offset += 8;
} while (offset <= limit);
h64 = rotate_bits_left(v1, 1) + rotate_bits_left(v2, 7) + rotate_bits_left(v3, 12) +
rotate_bits_left(v4, 18);
v1 *= prime2;
v1 = rotate_bits_left(v1, 31);
v1 *= prime1;
h64 ^= v1;
h64 = h64 * prime1 + prime4;
v2 *= prime2;
v2 = rotate_bits_left(v2, 31);
v2 *= prime1;
h64 ^= v2;
h64 = h64 * prime1 + prime4;
v3 *= prime2;
v3 = rotate_bits_left(v3, 31);
v3 *= prime1;
h64 ^= v3;
h64 = h64 * prime1 + prime4;
v4 *= prime2;
v4 = rotate_bits_left(v4, 31);
v4 *= prime1;
h64 ^= v4;
h64 = h64 * prime1 + prime4;
} else {
h64 = m_seed + prime5;
}
h64 += in.size();
h64 = compute_remaining_bytes(in, offset, h64);
return finalize(h64);
}
constexpr __host__ __device__ std::uint64_t finalize(std::uint64_t h) const noexcept
{
h ^= h >> 33;
h *= prime2;
h ^= h >> 29;
h *= prime3;
h ^= h >> 32;
return h;
}
private:
hash_value_type m_seed{};
static constexpr uint64_t prime1 = 0x9e3779b185ebca87ul;
static constexpr uint64_t prime2 = 0xc2b2ae3d27d4eb4ful;
static constexpr uint64_t prime3 = 0x165667b19e3779f9ul;
static constexpr uint64_t prime4 = 0x85ebca77c2b2ae63ul;
static constexpr uint64_t prime5 = 0x27d4eb2f165667c5ul;
};
template <>
hash_value_type __device__ inline XXHash_64<bool>::operator()(bool const& key) const
{
return compute(static_cast<uint8_t>(key));
}
template <>
hash_value_type __device__ inline XXHash_64<float>::operator()(float const& key) const
{
return compute(normalize_nans(key));
}
template <>
hash_value_type __device__ inline XXHash_64<double>::operator()(double const& key) const
{
return compute(normalize_nans(key));
}
template <>
hash_value_type __device__ inline XXHash_64<cudf::string_view>::operator()(
cudf::string_view const& key) const
{
auto const len = key.size_bytes();
auto data = device_span<std::byte const>(reinterpret_cast<std::byte const*>(key.data()), len);
return compute_bytes(data);
}
template <>
hash_value_type __device__ inline XXHash_64<numeric::decimal32>::operator()(
numeric::decimal32 const& key) const
{
return compute(key.value());
}
template <>
hash_value_type __device__ inline XXHash_64<numeric::decimal64>::operator()(
numeric::decimal64 const& key) const
{
return compute(key.value());
}
template <>
hash_value_type __device__ inline XXHash_64<numeric::decimal128>::operator()(
numeric::decimal128 const& key) const
{
return compute(key.value());
}
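// Minimal usage sketch (illustrative only): hashing a single fixed-width key from device code
// with an explicit seed. Other key types route through the specializations above, which either
// normalize the value (bool, float, double) or hash the underlying representation (decimals,
// strings) before reaching the generic compute() path.
//
//   XXHash_64<int32_t> hasher{/*seed=*/0};
//   hash_value_type h = hasher(int32_t{42});  // operator() is __device__, so call from a kernel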
/**
* @brief Computes the hash value of a row in the given table.
*
* @tparam Nullate A cudf::nullate type describing whether to check for nulls.
*/
template <typename Nullate>
class device_row_hasher {
public:
device_row_hasher(Nullate nulls, table_device_view const& t, hash_value_type seed)
: _check_nulls(nulls), _table(t), _seed(seed)
{
}
__device__ auto operator()(size_type row_index) const noexcept
{
return cudf::detail::accumulate(
_table.begin(),
_table.end(),
_seed,
[row_index, nulls = _check_nulls] __device__(auto hash, auto column) {
return cudf::type_dispatcher(
column.type(), element_hasher_adapter{}, column, row_index, nulls, hash);
});
}
/**
* @brief Computes the hash value of an element in the given column.
*/
class element_hasher_adapter {
public:
template <typename T, CUDF_ENABLE_IF(column_device_view::has_element_accessor<T>())>
__device__ hash_value_type operator()(column_device_view const& col,
size_type const row_index,
Nullate const _check_nulls,
hash_value_type const _seed) const noexcept
{
if (_check_nulls && col.is_null(row_index)) {
return std::numeric_limits<hash_value_type>::max();
}
auto const hasher = XXHash_64<T>{_seed};
return hasher(col.element<T>(row_index));
}
template <typename T, CUDF_ENABLE_IF(not column_device_view::has_element_accessor<T>())>
__device__ hash_value_type operator()(column_device_view const&,
size_type const,
Nullate const,
hash_value_type const) const noexcept
{
CUDF_UNREACHABLE("Unsupported type for XXHash_64");
}
};
Nullate const _check_nulls;
table_device_view const _table;
hash_value_type const _seed;
};
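// Put differently, the row hash is a left fold over the row's columns: starting from the
// user-provided seed, each column's element hash (seeded with the running value) becomes the
// seed for the next column, and null elements contribute
// std::numeric_limits<hash_value_type>::max() instead of a computed hash.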
} // namespace
std::unique_ptr<column> xxhash_64(table_view const& input,
uint64_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto output = make_numeric_column(data_type(type_to_id<hash_value_type>()),
input.num_rows(),
mask_state::UNALLOCATED,
stream,
mr);
// Return early if there's nothing to hash
if (input.num_columns() == 0 || input.num_rows() == 0) { return output; }
bool const nullable = has_nulls(input);
auto const input_view = table_device_view::create(input, stream);
auto output_view = output->mutable_view();
// Compute the hash value for each row
thrust::tabulate(rmm::exec_policy(stream),
output_view.begin<hash_value_type>(),
output_view.end<hash_value_type>(),
device_row_hasher(nullable, *input_view, seed));
return output;
}
} // namespace detail
std::unique_ptr<column> xxhash_64(table_view const& input,
uint64_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::xxhash_64(input, seed, stream, mr);
}
} // namespace hashing
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/transform/one_hot_encode.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/table/experimental/row_operators.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <algorithm>
namespace cudf {
namespace detail {
template <typename DeviceComparatorType>
struct ohe_equality_functor {
ohe_equality_functor(size_type input_size, DeviceComparatorType d_equal)
: _input_size(input_size), _d_equal(d_equal)
{
}
auto __device__ operator()(size_type i) const noexcept
{
auto const element_index = cudf::experimental::row::lhs_index_type{i % _input_size};
auto const category_index = cudf::experimental::row::rhs_index_type{i / _input_size};
return _d_equal(element_index, category_index);
}
private:
size_type _input_size;
DeviceComparatorType _d_equal;
};
std::pair<std::unique_ptr<column>, table_view> one_hot_encode(column_view const& input,
column_view const& categories,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(input.type() == categories.type(), "Mismatched types between input and categories.");
if (categories.is_empty()) { return {make_empty_column(type_id::BOOL8), table_view{}}; }
if (input.is_empty()) {
auto empty_data = make_empty_column(type_id::BOOL8);
std::vector<column_view> views(categories.size(), empty_data->view());
return {std::move(empty_data), table_view{views}};
}
auto const total_size = input.size() * categories.size();
auto all_encodings =
make_numeric_column(data_type{type_id::BOOL8}, total_size, mask_state::UNALLOCATED, stream, mr);
auto const t_lhs = table_view{{input}};
auto const t_rhs = table_view{{categories}};
auto const comparator =
cudf::experimental::row::equality::two_table_comparator{t_lhs, t_rhs, stream};
auto const comparator_helper = [&](auto const d_equal) {
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(total_size),
all_encodings->mutable_view().begin<bool>(),
ohe_equality_functor<decltype(d_equal)>(input.size(), d_equal));
};
if (cudf::detail::has_nested_columns(t_lhs) or cudf::detail::has_nested_columns(t_rhs)) {
auto const d_equal = comparator.equal_to<true>(
nullate::DYNAMIC{has_nested_nulls(t_lhs) || has_nested_nulls(t_rhs)});
comparator_helper(d_equal);
} else {
auto const d_equal = comparator.equal_to<false>(
nullate::DYNAMIC{has_nested_nulls(t_lhs) || has_nested_nulls(t_rhs)});
comparator_helper(d_equal);
}
auto const split_iter =
make_counting_transform_iterator(1, [width = input.size()](auto i) { return i * width; });
std::vector<size_type> split_indices(split_iter, split_iter + categories.size() - 1);
auto encodings_view = table_view{detail::split(all_encodings->view(), split_indices, stream)};
return {std::move(all_encodings), encodings_view};
}
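// Worked example (illustrative): input = [a, b, a], categories = [a, b].
// The flat `all_encodings` buffer is written category-major,
//   [a==a, b==a, a==a, a==b, b==b, a==b] -> [1, 0, 1, 0, 1, 0],
// and then split every input.size() rows, yielding one BOOL8 column per category:
//   column for 'a': [1, 0, 1], column for 'b': [0, 1, 0].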
} // namespace detail
std::pair<std::unique_ptr<column>, table_view> one_hot_encode(column_view const& input,
column_view const& categories,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::one_hot_encode(input, categories, cudf::get_default_stream(), mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/transform/nans_to_nulls.cu
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/transform.hpp>
#include <cudf/detail/valid_if.cuh>
#include <cudf/null_mask.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/iterator/counting_iterator.h>
namespace cudf {
namespace detail {
struct dispatch_nan_to_null {
template <typename T>
std::enable_if_t<std::is_floating_point_v<T>,
std::pair<std::unique_ptr<rmm::device_buffer>, cudf::size_type>>
operator()(column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto input_device_view_ptr = column_device_view::create(input, stream);
auto input_device_view = *input_device_view_ptr;
if (input.nullable()) {
auto pred = [input_device_view] __device__(cudf::size_type idx) {
return not(std::isnan(input_device_view.element<T>(idx)) ||
input_device_view.is_null_nocheck(idx));
};
auto mask = detail::valid_if(thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(input.size()),
pred,
stream,
mr);
return std::pair(std::make_unique<rmm::device_buffer>(std::move(mask.first)), mask.second);
} else {
auto pred = [input_device_view] __device__(cudf::size_type idx) {
return not(std::isnan(input_device_view.element<T>(idx)));
};
auto mask = detail::valid_if(thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(input.size()),
pred,
stream,
mr);
return std::pair(std::make_unique<rmm::device_buffer>(std::move(mask.first)), mask.second);
}
}
template <typename T>
std::enable_if_t<!std::is_floating_point_v<T>,
std::pair<std::unique_ptr<rmm::device_buffer>, cudf::size_type>>
operator()(column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FAIL("Input column can't be a non-floating type");
}
};
std::pair<std::unique_ptr<rmm::device_buffer>, cudf::size_type> nans_to_nulls(
column_view const& input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) { return std::pair(std::make_unique<rmm::device_buffer>(), 0); }
return cudf::type_dispatcher(input.type(), dispatch_nan_to_null{}, input, stream, mr);
}
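// Worked example (illustrative): for a FLOAT32 column [1.0, NaN, 2.0, null], a row is valid
// only if it is neither NaN nor already null, so the returned mask is [1, 0, 1, 0] with a
// null count of 2.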
} // namespace detail
std::pair<std::unique_ptr<rmm::device_buffer>, cudf::size_type> nans_to_nulls(
column_view const& input, rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::nans_to_nulls(input, cudf::get_default_stream(), mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/transform/compute_column.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/ast/detail/expression_evaluator.cuh>
#include <cudf/ast/detail/expression_parser.hpp>
#include <cudf/ast/expressions.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/transform.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/transform.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
namespace cudf {
namespace detail {
/**
* @brief Kernel for evaluating an expression on a table to produce a new column.
*
* This evaluates an expression over a table to produce a new column. Also called an n-ary
* transform.
*
* @tparam max_block_size The size of the thread block, used to set launch
* bounds and minimize register usage.
* @tparam has_nulls whether or not the output column may contain nulls.
*
* @param table The table device view used for evaluation.
* @param device_expression_data Container of device data required to evaluate the desired
* expression.
* @param output_column The destination for the results of evaluating the expression.
*/
template <cudf::size_type max_block_size, bool has_nulls>
__launch_bounds__(max_block_size) __global__
void compute_column_kernel(table_device_view const table,
ast::detail::expression_device_view device_expression_data,
mutable_column_device_view output_column)
{
// The (required) extern storage of the shared memory array leads to
// conflicting declarations between different templates. The easiest
// workaround is to declare an arbitrary (here char) array type then cast it
// after the fact to the appropriate type.
extern __shared__ char raw_intermediate_storage[];
ast::detail::IntermediateDataType<has_nulls>* intermediate_storage =
reinterpret_cast<ast::detail::IntermediateDataType<has_nulls>*>(raw_intermediate_storage);
auto thread_intermediate_storage =
&intermediate_storage[threadIdx.x * device_expression_data.num_intermediates];
auto start_idx = cudf::detail::grid_1d::global_thread_id();
auto const stride = cudf::detail::grid_1d::grid_stride();
auto evaluator =
cudf::ast::detail::expression_evaluator<has_nulls>(table, device_expression_data);
for (thread_index_type row_index = start_idx; row_index < table.num_rows(); row_index += stride) {
auto output_dest = ast::detail::mutable_column_expression_result<has_nulls>(output_column);
evaluator.evaluate(output_dest, row_index, thread_intermediate_storage);
}
}
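// Usage sketch (illustrative; relies on the cudf::ast expression API): evaluating col0 + col1
// over every row of a two-column table.
//
//   auto col0   = cudf::ast::column_reference(0);
//   auto col1   = cudf::ast::column_reference(1);
//   auto expr   = cudf::ast::operation(cudf::ast::ast_operator::ADD, col0, col1);
//   auto result = cudf::compute_column(table, expr);  // one output value per input row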
std::unique_ptr<column> compute_column(table_view const& table,
ast::expression const& expr,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// If evaluating the expression may produce null outputs we create a nullable
// output column and follow the null-supporting expression evaluation code
// path.
auto const has_nulls = expr.may_evaluate_null(table, stream);
auto const parser = ast::detail::expression_parser{expr, table, has_nulls, stream, mr};
auto const output_column_mask_state =
has_nulls ? mask_state::UNINITIALIZED : mask_state::UNALLOCATED;
auto output_column = cudf::make_fixed_width_column(
parser.output_type(), table.num_rows(), output_column_mask_state, stream, mr);
if (table.num_rows() == 0) { return output_column; }
auto mutable_output_device =
cudf::mutable_column_device_view::create(output_column->mutable_view(), stream);
// Configure kernel parameters
auto const& device_expression_data = parser.device_expression_data;
int device_id;
CUDF_CUDA_TRY(cudaGetDevice(&device_id));
int shmem_limit_per_block;
CUDF_CUDA_TRY(
cudaDeviceGetAttribute(&shmem_limit_per_block, cudaDevAttrMaxSharedMemoryPerBlock, device_id));
auto constexpr MAX_BLOCK_SIZE = 128;
auto const block_size =
parser.shmem_per_thread != 0
? std::min(MAX_BLOCK_SIZE, shmem_limit_per_block / parser.shmem_per_thread)
: MAX_BLOCK_SIZE;
auto const config = cudf::detail::grid_1d{table.num_rows(), block_size};
auto const shmem_per_block = parser.shmem_per_thread * config.num_threads_per_block;
// Execute the kernel
auto table_device = table_device_view::create(table, stream);
if (has_nulls) {
cudf::detail::compute_column_kernel<MAX_BLOCK_SIZE, true>
<<<config.num_blocks, config.num_threads_per_block, shmem_per_block, stream.value()>>>(
*table_device, device_expression_data, *mutable_output_device);
} else {
cudf::detail::compute_column_kernel<MAX_BLOCK_SIZE, false>
<<<config.num_blocks, config.num_threads_per_block, shmem_per_block, stream.value()>>>(
*table_device, device_expression_data, *mutable_output_device);
}
CUDF_CHECK_CUDA(stream.value());
output_column->set_null_count(
cudf::detail::null_count(mutable_output_device->null_mask(), 0, output_column->size(), stream));
return output_column;
}
} // namespace detail
std::unique_ptr<column> compute_column(table_view const& table,
ast::expression const& expr,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::compute_column(table, expr, cudf::get_default_stream(), mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/transform/bools_to_mask.cu
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/transform.hpp>
#include <cudf/detail/valid_if.cuh>
#include <cudf/null_mask.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace detail {
std::pair<std::unique_ptr<rmm::device_buffer>, cudf::size_type> bools_to_mask(
column_view const& input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(input.type().id() == type_id::BOOL8, "Input is not of type bool");
if (input.is_empty()) { return std::pair(std::make_unique<rmm::device_buffer>(), 0); }
auto input_device_view_ptr = column_device_view::create(input, stream);
auto input_device_view = *input_device_view_ptr;
auto pred = [] __device__(bool element) { return element; };
if (input.nullable()) {
// Nulls are considered false
auto input_begin = make_null_replacement_iterator<bool>(input_device_view, false);
auto mask = detail::valid_if(input_begin, input_begin + input.size(), pred, stream, mr);
return std::pair(std::make_unique<rmm::device_buffer>(std::move(mask.first)), mask.second);
} else {
auto mask = detail::valid_if(
input_device_view.begin<bool>(), input_device_view.end<bool>(), pred, stream, mr);
return std::pair(std::make_unique<rmm::device_buffer>(std::move(mask.first)), mask.second);
}
}
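// Worked example (illustrative): for a BOOL8 column [true, false, null, true], nulls count as
// false, so the resulting bitmask is [1, 0, 0, 1] and the returned null count is 2.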
} // namespace detail
std::pair<std::unique_ptr<rmm::device_buffer>, cudf::size_type> bools_to_mask(
column_view const& input, rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::bools_to_mask(input, cudf::get_default_stream(), mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/transform/encode.cu
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/search.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/detail/transform.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <memory>
#include <numeric>
#include <utility>
#include <vector>
namespace cudf {
namespace detail {
std::pair<std::unique_ptr<table>, std::unique_ptr<column>> encode(
table_view const& input_table, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr)
{
auto const num_cols = input_table.num_columns();
std::vector<size_type> drop_keys(num_cols);
std::iota(drop_keys.begin(), drop_keys.end(), 0);
auto distinct_keys = cudf::detail::distinct(input_table,
drop_keys,
duplicate_keep_option::KEEP_ANY,
null_equality::EQUAL,
nan_equality::ALL_EQUAL,
stream,
mr);
std::vector<order> column_order(num_cols, order::ASCENDING);
std::vector<null_order> null_precedence(num_cols, null_order::AFTER);
auto sorted_unique_keys =
cudf::detail::sort(distinct_keys->view(), column_order, null_precedence, stream, mr);
auto indices_column = cudf::detail::lower_bound(
sorted_unique_keys->view(), input_table, column_order, null_precedence, stream, mr);
return std::pair(std::move(sorted_unique_keys), std::move(indices_column));
}
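// Worked example (illustrative): for a single input column ['b', 'a', 'c', 'a'], the distinct
// keys sorted ascending are ['a', 'b', 'c'] and the returned index column holds each row's
// lower-bound position in those keys: [1, 0, 2, 0].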
} // namespace detail
std::pair<std::unique_ptr<cudf::table>, std::unique_ptr<cudf::column>> encode(
cudf::table_view const& input, rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::encode(input, cudf::get_default_stream(), mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/transform/mask_to_bools.cu
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/transform.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
namespace cudf {
namespace detail {
std::unique_ptr<column> mask_to_bools(bitmask_type const* bitmask,
size_type begin_bit,
size_type end_bit,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const length = end_bit - begin_bit;
CUDF_EXPECTS(length >= 0, "begin_bit should be less than or equal to end_bit");
CUDF_EXPECTS((bitmask != nullptr) or (length == 0), "nullmask is null");
auto out_col =
make_fixed_width_column(data_type(type_id::BOOL8), length, mask_state::UNALLOCATED, stream, mr);
if (length > 0) {
auto mutable_view = out_col->mutable_view();
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(begin_bit),
thrust::make_counting_iterator<cudf::size_type>(end_bit),
mutable_view.begin<bool>(),
[bitmask] __device__(auto index) { return bit_is_set(bitmask, index); });
}
return out_col;
}
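// Worked example (illustrative): for a bitmask whose low-order bits are 0b1011 and
// [begin_bit, end_bit) = [0, 4), the output BOOL8 column is [true, true, false, true]
// (bit i of the mask maps to row i of the result).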
} // namespace detail
std::unique_ptr<column> mask_to_bools(bitmask_type const* bitmask,
size_type begin_bit,
size_type end_bit,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::mask_to_bools(bitmask, begin_bit, end_bit, cudf::get_default_stream(), mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/transform/row_bit_count.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/structs/structs_column_view.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <thrust/fill.h>
#include <thrust/optional.h>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
namespace cudf {
namespace detail {
namespace {
/**
* @brief Struct which contains per-column information necessary to
* traverse a column hierarchy on the gpu.
*
* When `row_bit_count` is called, the input column hierarchy is flattened into a
* vector of column_device_views. For each one of them, we store a column_info
* struct. The `depth` field represents the depth of the column in the original
* hierarchy.
*
* As we traverse the hierarchy for each input row, we maintain a span representing
* the start and end rows for the current nesting depth. At depth 0, this span is
* always just 1 row. As we cross list boundaries in the hierarchy, this span
* grows. So for each column we visit we always know how many rows of it are relevant
* and can compute its contribution to the overall size.
*
* An example using a list<list<int>> column, computing the size of row 1.
*
* { {{1, 2}, {3, 4}, {5, 6}}, {{7}, {8, 9, 10}, {11, 12, 13, 14}} }
*
* L0 = List<List<int32_t>>:
* Length : 2
* Offsets : 0, 3, 6
* L1 = List<int32_t>:
* Length : 6
* Offsets : 0, 2, 4, 6, 7, 10, 14
* I = 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
*
*
* span0 = [1, 2] row 1 is represented by the span [1, 2]
* span1 = [L0.offsets[span0[0]], L0.offsets[span0[1]]] expand by the offsets of L0
* span1 = [3, 6] span applied to children of L0
* span2 = [L1.offsets[span1[0]], L1.offsets[span1[1]]] expand by the offsets of L1
* span2 = [6, 14] span applied to children of L1
*
* The total size of our row is computed as:
* (span0[1] - span0[0]) * sizeof(int) the cost of the offsets for L0
* +
* (span1[1] - span1[0]) * sizeof(int) the cost of the offsets for L1
* +
* (span2[1] - span2[0]) * sizeof(int) the cost of the integers in I
*
* `depth` represents our depth in the source column hierarchy.
*
* "branches" within the spans can occur when we have lists inside of structs.
* consider a case where we are entering a struct<list, float> with a span of [4, 8].
* The internal list column will change that span to something else, say [5, 9].
* But when we finish processing the list column, the final float column wants to
* go back and use the original span [4, 8].
*
* [4, 8] [5, 9] [4, 8]
* struct< list<> float>
*
* To accomplish this we maintain a stack of spans. Pushing the current span
* whenever we enter a branch, and popping a span whenever we leave a branch.
*
* `branch_depth_start` represents the branch depth as we reach a new column.
* if `branch_depth_start` is < the last branch depth we saw, we are returning
* from a branch and should pop off the stack.
*
* `branch_depth_end` represents the new branch depth caused by this column.
* if branch_depth_end > branch_depth_start, we are branching and need to
* push the current span on the stack.
*
*/
struct column_info {
size_type depth;
size_type branch_depth_start;
size_type branch_depth_end;
};
/**
* @brief Struct which contains hierarchy information precomputed on the host.
*
* If the input data contains only fixed-width types, this preprocess step
* produces the value `simple_per_row_size` which is a constant for every
* row in the output. We can use this value and skip the more complicated
* processing for lists, structs and strings entirely if `complex_type_count`
* is 0.
*
*/
struct hierarchy_info {
hierarchy_info() {}
// These two fields act as an optimization. If we find that the entire table
// is just fixed-width types, we do not need to do the more expensive kernel call that
// traverses the individual columns. So if complex_type_count is 0, we can just
// return a column where every row contains the value simple_per_row_size
size_type simple_per_row_size{0}; // in bits
size_type complex_type_count{0};
// max depth of span branches present in the hierarchy.
size_type max_branch_depth{0};
};
/**
* @brief Function which flattens the incoming column hierarchy into a vector
* of column_views and produces accompanying column_info and hierarchy_info
* metadata.
*
* @param begin: Beginning of a range of column views
* @param end: End of a range of column views
* @param out: (output) Flattened vector of output column_views
* @param info: (output) Additional per-output column_view metadata needed by the gpu
* @param h_info: (output) Information about the hierarchy
* @param cur_depth: Current absolute depth in the hierarchy
* @param cur_branch_depth: Current branch depth
* @param parent_index: Index into `out` representing our owning parent column
*/
template <typename ColIter>
void flatten_hierarchy(ColIter begin,
ColIter end,
std::vector<cudf::column_view>& out,
std::vector<column_info>& info,
hierarchy_info& h_info,
rmm::cuda_stream_view stream,
size_type cur_depth = 0,
size_type cur_branch_depth = 0,
thrust::optional<int> parent_index = {});
/**
* @brief Type-dispatched functor called by flatten_hierarchy.
*
*/
struct flatten_functor {
// fixed width
template <typename T, std::enable_if_t<cudf::is_fixed_width<T>()>* = nullptr>
void operator()(column_view const& col,
std::vector<cudf::column_view>& out,
std::vector<column_info>& info,
hierarchy_info& h_info,
rmm::cuda_stream_view,
size_type cur_depth,
size_type cur_branch_depth,
thrust::optional<int>)
{
out.push_back(col);
info.push_back({cur_depth, cur_branch_depth, cur_branch_depth});
h_info.simple_per_row_size +=
(sizeof(device_storage_type_t<T>) * CHAR_BIT) + (col.nullable() ? 1 : 0);
}
// strings
template <typename T, std::enable_if_t<std::is_same_v<T, string_view>>* = nullptr>
void operator()(column_view const& col,
std::vector<cudf::column_view>& out,
std::vector<column_info>& info,
hierarchy_info& h_info,
rmm::cuda_stream_view,
size_type cur_depth,
size_type cur_branch_depth,
thrust::optional<int>)
{
out.push_back(col);
info.push_back({cur_depth, cur_branch_depth, cur_branch_depth});
h_info.complex_type_count++;
}
// lists
template <typename T, std::enable_if_t<std::is_same_v<T, list_view>>* = nullptr>
void operator()(column_view const& col,
std::vector<cudf::column_view>& out,
std::vector<column_info>& info,
hierarchy_info& h_info,
rmm::cuda_stream_view stream,
size_type cur_depth,
size_type cur_branch_depth,
thrust::optional<int> parent_index)
{
// track branch depth as we reach this list and after we pass it
auto const branch_depth_start = cur_branch_depth;
auto const is_list_inside_struct =
parent_index && out[parent_index.value()].type().id() == type_id::STRUCT;
if (is_list_inside_struct) {
cur_branch_depth++;
h_info.max_branch_depth = max(h_info.max_branch_depth, cur_branch_depth);
}
size_type const branch_depth_end = cur_branch_depth;
out.push_back(col);
info.push_back({cur_depth, branch_depth_start, branch_depth_end});
lists_column_view lcv(col);
auto iter = cudf::detail::make_counting_transform_iterator(
0, [col = lcv.get_sliced_child(stream)](auto) { return col; });
h_info.complex_type_count++;
flatten_hierarchy(
iter, iter + 1, out, info, h_info, stream, cur_depth + 1, cur_branch_depth, out.size() - 1);
}
// structs
template <typename T, std::enable_if_t<std::is_same_v<T, struct_view>>* = nullptr>
void operator()(column_view const& col,
std::vector<cudf::column_view>& out,
std::vector<column_info>& info,
hierarchy_info& h_info,
rmm::cuda_stream_view stream,
size_type cur_depth,
size_type cur_branch_depth,
thrust::optional<int>)
{
out.push_back(col);
info.push_back({cur_depth, cur_branch_depth, cur_branch_depth});
h_info.simple_per_row_size += col.nullable() ? 1 : 0;
structs_column_view scv(col);
auto iter = cudf::detail::make_counting_transform_iterator(
0, [&scv, &stream](auto i) { return scv.get_sliced_child(i, stream); });
flatten_hierarchy(iter,
iter + scv.num_children(),
out,
info,
h_info,
stream,
cur_depth + 1,
cur_branch_depth,
out.size() - 1);
}
// everything else
template <typename T, typename... Args>
std::enable_if_t<!cudf::is_fixed_width<T>() && !std::is_same_v<T, string_view> &&
!std::is_same_v<T, list_view> && !std::is_same_v<T, struct_view>,
void>
operator()(Args&&...)
{
CUDF_FAIL("Unsupported column type in row_bit_count");
}
};
template <typename ColIter>
void flatten_hierarchy(ColIter begin,
ColIter end,
std::vector<cudf::column_view>& out,
std::vector<column_info>& info,
hierarchy_info& h_info,
rmm::cuda_stream_view stream,
size_type cur_depth,
size_type cur_branch_depth,
thrust::optional<int> parent_index)
{
std::for_each(begin, end, [&](column_view const& col) {
cudf::type_dispatcher(col.type(),
flatten_functor{},
col,
out,
info,
h_info,
stream,
cur_depth,
cur_branch_depth,
parent_index);
});
}
/**
* @brief Struct representing a span of rows.
*
*/
struct row_span {
size_type row_start, row_end;
};
/**
* @brief Functor for computing the size, in bits, of a `row_span` of rows for a given
* `column_device_view`
*
*/
struct row_size_functor {
/**
* @brief Computes size in bits of a span of rows in a fixed-width column.
*
* Computed as : ((# of rows) * sizeof(data type) * 8)
* +
* 1 bit per row for validity if applicable.
*/
template <typename T>
__device__ size_type operator()(column_device_view const& col, row_span const& span)
{
auto const num_rows{span.row_end - span.row_start};
auto const element_size = sizeof(device_storage_type_t<T>) * CHAR_BIT;
auto const validity_size = col.nullable() ? 1 : 0;
return (element_size + validity_size) * num_rows;
}
};
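// Worked example for the fixed-width overload above (illustrative): a nullable INT32 column
// spanning 4 rows contributes (32 data bits + 1 validity bit) * 4 rows = 132 bits.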
/**
* @brief Computes size in bits of a span of rows in a strings column.
*
* Computed as : ((# of rows) * sizeof(offset) * 8) + (total # of characters * 8)
* +
* 1 bit per row for validity if applicable.
*/
template <>
__device__ size_type row_size_functor::operator()<string_view>(column_device_view const& col,
row_span const& span)
{
auto const num_rows{span.row_end - span.row_start};
if (num_rows == 0) {
// For empty columns, the `span` cannot have a row size.
return 0;
}
auto const& offsets = col.child(strings_column_view::offsets_column_index);
auto const row_start{span.row_start + col.offset()};
auto const row_end{span.row_end + col.offset()};
if (row_start == row_end) {
// Empty row contributes 0 bits to row_bit_count().
// Note: Validity bit doesn't count either. There are no rows in the child column
// corresponding to this span.
return 0;
}
auto const offsets_size = sizeof(size_type) * CHAR_BIT;
auto const validity_size = col.nullable() ? 1 : 0;
auto const chars_size =
(offsets.data<size_type>()[row_end] - offsets.data<size_type>()[row_start]) * CHAR_BIT;
return ((offsets_size + validity_size) * num_rows) + chars_size;
}
/**
* @brief Computes size in bits of a span of rows in a list column.
*
* Computed as : ((# of rows) * sizeof(offset) * 8)
* +
* 1 bit per row for validity if applicable.
*/
template <>
__device__ size_type row_size_functor::operator()<list_view>(column_device_view const& col,
row_span const& span)
{
auto const num_rows{span.row_end - span.row_start};
auto const offsets_size = sizeof(size_type) * CHAR_BIT;
auto const validity_size = col.nullable() ? 1 : 0;
return (offsets_size + validity_size) * num_rows;
}
/**
* @brief Computes size in bits of a span of rows in a struct column.
*
* Computed as : 1 bit per row for validity if applicable.
*/
template <>
__device__ size_type row_size_functor::operator()<struct_view>(column_device_view const& col,
row_span const& span)
{
auto const num_rows{span.row_end - span.row_start};
return (col.nullable() ? 1 : 0) * num_rows; // cost of validity
}
/**
* @brief Kernel for computing per-row sizes in bits.
*
* @param cols An span of column_device_views representing a column hierarchy
* @param info An span of column_info structs corresponding the elements in `cols`
* @param output Output span of size (# rows) where per-row bit sizes are stored
* @param max_branch_depth Maximum depth of the span stack needed per-thread
*/
__global__ void compute_row_sizes(device_span<column_device_view const> cols,
device_span<column_info const> info,
device_span<size_type> output,
size_type max_branch_depth)
{
extern __shared__ row_span thread_branch_stacks[];
int const tid = threadIdx.x + blockIdx.x * blockDim.x;
auto const num_rows = output.size();
if (tid >= num_rows) { return; }
// my_branch_stack points to the last span prior to branching. A branch occurs only
// when we are inside of a list contained within a struct column.
row_span* my_branch_stack = thread_branch_stacks + (threadIdx.x * max_branch_depth);
size_type branch_depth{0};
// current row span - always starts at 1 row.
row_span cur_span{tid, tid + 1};
// output size
size_type& size = output[tid];
size = 0;
size_type last_branch_depth{0};
for (size_type idx = 0; idx < cols.size(); idx++) {
column_device_view const& col = cols[idx];
// if we've returned from a branch, pop to the proper span
if (info[idx].branch_depth_start < last_branch_depth) {
branch_depth = info[idx].branch_depth_start;
cur_span = my_branch_stack[branch_depth];
}
// if we're entering a new branch, push the current span
// NOTE: this case can happen (a pop and a push by the same column)
// when we have a struct<list, list>
if (info[idx].branch_depth_end > info[idx].branch_depth_start) {
my_branch_stack[branch_depth++] = cur_span;
}
// if we're back at depth 0, this is a new top-level column, so reset
// span info
if (info[idx].depth == 0) {
branch_depth = 0;
last_branch_depth = 0;
cur_span = row_span{tid, tid + 1};
}
// add the contributing size of this row
size += cudf::type_dispatcher(col.type(), row_size_functor{}, col, cur_span);
// if this is a list column, update the working span from our offsets
if (col.type().id() == type_id::LIST && col.size() > 0) {
column_device_view const& offsets = col.child(lists_column_view::offsets_column_index);
auto const base_offset = offsets.data<size_type>()[col.offset()];
cur_span.row_start =
offsets.data<size_type>()[cur_span.row_start + col.offset()] - base_offset;
cur_span.row_end = offsets.data<size_type>()[cur_span.row_end + col.offset()] - base_offset;
}
last_branch_depth = info[idx].branch_depth_end;
}
}
} // anonymous namespace
/**
* @copydoc cudf::detail::row_bit_count
*
*/
std::unique_ptr<column> row_bit_count(table_view const& t,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// no rows
if (t.num_rows() <= 0) { return cudf::make_empty_column(type_id::INT32); }
// flatten the hierarchy and determine some information about it.
std::vector<cudf::column_view> cols;
std::vector<column_info> info;
hierarchy_info h_info;
flatten_hierarchy(t.begin(), t.end(), cols, info, h_info, stream);
CUDF_EXPECTS(info.size() == cols.size(), "Size/info mismatch");
// create output buffer and view
auto output = cudf::make_fixed_width_column(
data_type{type_id::INT32}, t.num_rows(), mask_state::UNALLOCATED, stream, mr);
mutable_column_view mcv = output->mutable_view();
// simple case. if we have no complex types (lists, strings, etc), the per-row size is already
// trivially computed
if (h_info.complex_type_count <= 0) {
thrust::fill(rmm::exec_policy(stream),
mcv.begin<size_type>(),
mcv.end<size_type>(),
h_info.simple_per_row_size);
return output;
}
// create a contiguous block of column_device_views
auto d_cols = contiguous_copy_column_device_views<column_device_view>(cols, stream);
// move stack info to the gpu
rmm::device_uvector<column_info> d_info =
cudf::detail::make_device_uvector_async(info, stream, rmm::mr::get_current_device_resource());
// each thread needs to maintain a stack of row spans of size max_branch_depth. we will use
// shared memory to do this rather than allocating a potentially gigantic temporary buffer
// of memory of size (# input rows * sizeof(row_span) * max_branch_depth).
auto const shmem_per_thread = sizeof(row_span) * h_info.max_branch_depth;
int device_id;
CUDF_CUDA_TRY(cudaGetDevice(&device_id));
int shmem_limit_per_block;
CUDF_CUDA_TRY(
cudaDeviceGetAttribute(&shmem_limit_per_block, cudaDevAttrMaxSharedMemoryPerBlock, device_id));
constexpr int max_block_size = 256;
auto const block_size =
shmem_per_thread != 0
? std::min(max_block_size, shmem_limit_per_block / static_cast<int>(shmem_per_thread))
: max_block_size;
auto const shared_mem_size = shmem_per_thread * block_size;
// should we be aborting if we reach some extremely small block size, or just if we hit 0?
CUDF_EXPECTS(block_size > 0, "Encountered a column hierarchy too complex for row_bit_count");
cudf::detail::grid_1d grid{t.num_rows(), block_size, 1};
compute_row_sizes<<<grid.num_blocks, block_size, shared_mem_size, stream.value()>>>(
{std::get<1>(d_cols), cols.size()},
{d_info.data(), info.size()},
{mcv.data<size_type>(), static_cast<std::size_t>(t.num_rows())},
h_info.max_branch_depth);
return output;
}
} // namespace detail
/**
* @copydoc cudf::row_bit_count
*
*/
std::unique_ptr<column> row_bit_count(table_view const& t, rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::row_bit_count(t, cudf::get_default_stream(), mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/transform/transform.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/transform.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <jit_preprocessed_files/transform/jit/kernel.cu.jit.hpp>
#include <jit/cache.hpp>
#include <jit/parser.hpp>
#include <jit/util.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace transformation {
namespace jit {
void unary_operation(mutable_column_view output,
column_view input,
std::string const& udf,
data_type output_type,
bool is_ptx,
rmm::cuda_stream_view stream)
{
std::string kernel_name =
jitify2::reflection::Template("cudf::transformation::jit::kernel") //
.instantiate(cudf::type_to_name(output.type()), // list of template arguments
cudf::type_to_name(input.type()));
std::string cuda_source =
is_ptx ? cudf::jit::parse_single_function_ptx(udf, //
"GENERIC_UNARY_OP",
cudf::type_to_name(output_type),
{0})
: cudf::jit::parse_single_function_cuda(udf, //
"GENERIC_UNARY_OP");
cudf::jit::get_program_cache(*transform_jit_kernel_cu_jit)
.get_kernel(
kernel_name, {}, {{"transform/jit/operation-udf.hpp", cuda_source}}, {"-arch=sm_."}) //
->configure_1d_max_occupancy(0, 0, 0, stream.value()) //
->launch(output.size(), //
cudf::jit::get_data_ptr(output),
cudf::jit::get_data_ptr(input));
}
} // namespace jit
} // namespace transformation
namespace detail {
std::unique_ptr<column> transform(column_view const& input,
std::string const& unary_udf,
data_type output_type,
bool is_ptx,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(is_fixed_width(input.type()), "Unexpected non-fixed-width type.");
std::unique_ptr<column> output = make_fixed_width_column(
output_type, input.size(), copy_bitmask(input), input.null_count(), stream, mr);
if (input.is_empty()) { return output; }
mutable_column_view output_view = *output;
// transform
transformation::jit::unary_operation(output_view, input, unary_udf, output_type, is_ptx, stream);
return output;
}
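// Usage sketch (a minimal, illustrative example assuming the UDF is written directly against
// the GENERIC_UNARY_OP entry point invoked by the JIT-compiled kernel): doubling a FLOAT32
// column.
//
//   std::string udf = R"***(
//     __device__ inline void GENERIC_UNARY_OP(float* out, float in) { *out = in * 2.0f; }
//   )***";
//   auto doubled = cudf::transform(input_column, udf,
//                                  cudf::data_type{cudf::type_id::FLOAT32}, /*is_ptx=*/false);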
} // namespace detail
std::unique_ptr<column> transform(column_view const& input,
std::string const& unary_udf,
data_type output_type,
bool is_ptx,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::transform(input, unary_udf, output_type, is_ptx, cudf::get_default_stream(), mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/transform
|
rapidsai_public_repos/cudf/cpp/src/transform/jit/kernel.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Include Jitify's cstddef header first
#include <cstddef>
#include <cuda/std/climits>
#include <cuda/std/cstddef>
#include <cuda/std/limits>
#include <cuda/std/type_traits>
#include <cudf/wrappers/durations.hpp>
#include <cudf/wrappers/timestamps.hpp>
#include <transform/jit/operation-udf.hpp>
#include <cudf/types.hpp>
namespace cudf {
namespace transformation {
namespace jit {
template <typename TypeOut, typename TypeIn>
__global__ void kernel(cudf::size_type size, TypeOut* out_data, TypeIn* in_data)
{
// cannot use the global_thread_id utility due to a JIT build issue when including
// the `cudf/detail/utilities/cuda.cuh` header
thread_index_type const start = threadIdx.x + blockIdx.x * blockDim.x;
thread_index_type const stride = blockDim.x * gridDim.x;
for (auto i = start; i < static_cast<thread_index_type>(size); i += stride) {
GENERIC_UNARY_OP(&out_data[i], in_data[i]);
}
}
} // namespace jit
} // namespace transformation
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/transform
|
rapidsai_public_repos/cudf/cpp/src/transform/jit/operation-udf.hpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
// This file serves as a placeholder for user defined functions, so jitify can choose to override it
// at runtime.
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/filling/sequence.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/filling.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/sequence.h>
#include <thrust/tabulate.h>
namespace cudf {
namespace detail {
namespace {
// This functor only exists here because using a lambda directly in the tabulate() call generates
// the cryptic
// __T289 link error. This seems to be related to lambda usage within functions using SFINAE.
template <typename T>
struct tabulator {
cudf::numeric_scalar_device_view<T> const n_init;
cudf::numeric_scalar_device_view<T> const n_step;
T __device__ operator()(cudf::size_type i)
{
return n_init.value() + (static_cast<T>(i) * n_step.value());
}
};
template <typename T>
struct const_tabulator {
cudf::numeric_scalar_device_view<T> const n_init;
T __device__ operator()(cudf::size_type i) { return n_init.value() + static_cast<T>(i); }
};
/**
* @brief Functor called by the `type_dispatcher` to generate the sequence specified
* by init and step.
*/
struct sequence_functor {
template <typename T,
std::enable_if_t<cudf::is_numeric<T>() and not cudf::is_boolean<T>()>* = nullptr>
std::unique_ptr<column> operator()(size_type size,
scalar const& init,
scalar const& step,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto result = make_fixed_width_column(init.type(), size, mask_state::UNALLOCATED, stream, mr);
auto result_device_view = mutable_column_device_view::create(*result, stream);
auto n_init =
get_scalar_device_view(static_cast<cudf::scalar_type_t<T>&>(const_cast<scalar&>(init)));
auto n_step =
get_scalar_device_view(static_cast<cudf::scalar_type_t<T>&>(const_cast<scalar&>(step)));
    // Not using thrust::sequence because it requires init and step to be passed as
    // constants, not iterators. To do that we would have to retrieve the scalar values off the
    // GPU, which is undesirable from a performance perspective.
thrust::tabulate(rmm::exec_policy(stream),
result_device_view->begin<T>(),
result_device_view->end<T>(),
tabulator<T>{n_init, n_step});
return result;
}
template <typename T,
std::enable_if_t<cudf::is_numeric<T>() and not cudf::is_boolean<T>()>* = nullptr>
std::unique_ptr<column> operator()(size_type size,
scalar const& init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto result = make_fixed_width_column(init.type(), size, mask_state::UNALLOCATED, stream, mr);
auto result_device_view = mutable_column_device_view::create(*result, stream);
auto n_init =
get_scalar_device_view(static_cast<cudf::scalar_type_t<T>&>(const_cast<scalar&>(init)));
    // Not using thrust::sequence because it requires init and step to be passed as
    // constants, not iterators. To do that we would have to retrieve the scalar values off the
    // GPU, which is undesirable from a performance perspective.
thrust::tabulate(rmm::exec_policy(stream),
result_device_view->begin<T>(),
result_device_view->end<T>(),
const_tabulator<T>{n_init});
return result;
}
template <typename T, typename... Args>
std::enable_if_t<not cudf::is_numeric<T>() or cudf::is_boolean<T>(), std::unique_ptr<column>>
operator()(Args&&...)
{
CUDF_FAIL("Unsupported sequence scalar type");
}
};
} // anonymous namespace
std::unique_ptr<column> sequence(size_type size,
scalar const& init,
scalar const& step,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(init.type() == step.type(), "init and step must be of the same type.");
CUDF_EXPECTS(size >= 0, "size must be >= 0");
CUDF_EXPECTS(is_numeric(init.type()), "Input scalar types must be numeric");
return type_dispatcher(init.type(), sequence_functor{}, size, init, step, stream, mr);
}
std::unique_ptr<column> sequence(size_type size,
scalar const& init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(size >= 0, "size must be >= 0");
CUDF_EXPECTS(is_numeric(init.type()), "init scalar type must be numeric");
return type_dispatcher(init.type(), sequence_functor{}, size, init, stream, mr);
}
} // namespace detail
std::unique_ptr<column> sequence(size_type size,
scalar const& init,
scalar const& step,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::sequence(size, init, step, stream, mr);
}
std::unique_ptr<column> sequence(size_type size,
scalar const& init,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::sequence(size, init, stream, mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/filling/repeat.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/repeat.hpp>
#include <cudf/filling.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <thrust/binary_search.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <limits>
#include <memory>
namespace {
struct count_accessor {
cudf::scalar const* p_scalar = nullptr;
template <typename T>
std::enable_if_t<std::is_integral_v<T>, cudf::size_type> operator()(rmm::cuda_stream_view stream)
{
using ScalarType = cudf::scalar_type_t<T>;
#if 1
// TODO: temporary till cudf::scalar's value() function is marked as const
auto p_count = const_cast<ScalarType*>(static_cast<ScalarType const*>(this->p_scalar));
#else
auto p_count = static_cast<ScalarType const*>(this->p_scalar);
#endif
auto count = p_count->value(stream);
// static_cast is necessary due to bool
CUDF_EXPECTS(static_cast<int64_t>(count) <= std::numeric_limits<cudf::size_type>::max(),
"count should not exceed the column size limit",
std::overflow_error);
return static_cast<cudf::size_type>(count);
}
template <typename T>
std::enable_if_t<not std::is_integral_v<T>, cudf::size_type> operator()(rmm::cuda_stream_view)
{
CUDF_FAIL("count value should be a integral type.");
}
};
struct count_checker {
cudf::column_view const& count;
template <typename T>
std::enable_if_t<std::is_integral_v<T>, void> operator()(rmm::cuda_stream_view stream)
{
// static_cast is necessary due to bool
if (static_cast<int64_t>(std::numeric_limits<T>::max()) >
std::numeric_limits<cudf::size_type>::max()) {
auto max = thrust::reduce(
rmm::exec_policy(stream), count.begin<T>(), count.end<T>(), 0, thrust::maximum<T>());
CUDF_EXPECTS(max <= std::numeric_limits<cudf::size_type>::max(),
"count exceeds the column size limit",
std::overflow_error);
}
}
template <typename T>
std::enable_if_t<not std::is_integral_v<T>, void> operator()(rmm::cuda_stream_view)
{
CUDF_FAIL("count value type should be integral.");
}
};
} // namespace
namespace cudf {
namespace detail {
std::unique_ptr<table> repeat(table_view const& input_table,
column_view const& count,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(input_table.num_rows() == count.size(), "in and count must have equal size");
CUDF_EXPECTS(not count.has_nulls(), "count cannot contain nulls");
if (input_table.num_rows() == 0) { return cudf::empty_like(input_table); }
auto count_iter = cudf::detail::indexalator_factory::make_input_iterator(count);
rmm::device_uvector<cudf::size_type> offsets(count.size(), stream);
thrust::inclusive_scan(
rmm::exec_policy(stream), count_iter, count_iter + count.size(), offsets.begin());
size_type output_size{offsets.back_element(stream)};
rmm::device_uvector<size_type> indices(output_size, stream);
thrust::upper_bound(rmm::exec_policy(stream),
offsets.begin(),
offsets.end(),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(output_size),
indices.begin());
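  // Worked example (hypothetical values): count = {2, 1, 3} gives offsets = {2, 3, 6} after the
  // inclusive scan, and the upper_bound above produces indices = {0, 0, 1, 2, 2, 2}; gathering
  // with this map repeats row i of the input count[i] times.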
return gather(
input_table, indices.begin(), indices.end(), out_of_bounds_policy::DONT_CHECK, stream, mr);
}
std::unique_ptr<table> repeat(table_view const& input_table,
size_type count,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if ((input_table.num_rows() == 0) || (count == 0)) { return cudf::empty_like(input_table); }
CUDF_EXPECTS(count >= 0, "count value should be non-negative");
CUDF_EXPECTS(input_table.num_rows() <= std::numeric_limits<size_type>::max() / count,
"The resulting table exceeds the column size limit",
std::overflow_error);
auto output_size = input_table.num_rows() * count;
auto map_begin = cudf::detail::make_counting_transform_iterator(
0, [count] __device__(auto i) { return i / count; });
auto map_end = map_begin + output_size;
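  // For example (hypothetical value): with count == 3, output rows 0..5 map to input rows
  // 0, 0, 0, 1, 1, 1, so every input row appears `count` times in order.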
return gather(input_table, map_begin, map_end, out_of_bounds_policy::DONT_CHECK, stream, mr);
}
} // namespace detail
std::unique_ptr<table> repeat(table_view const& input_table,
column_view const& count,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::repeat(input_table, count, stream, mr);
}
std::unique_ptr<table> repeat(table_view const& input_table,
size_type count,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::repeat(input_table, count, stream, mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/filling/fill.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/copy_range.cuh>
#include <cudf/detail/fill.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/dictionary/detail/encode.hpp>
#include <cudf/dictionary/detail/search.hpp>
#include <cudf/dictionary/detail/update_keys.hpp>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <cudf/filling.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/strings/detail/fill.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <thrust/iterator/constant_iterator.h>
#include <memory>
namespace {
template <typename T>
void in_place_fill(cudf::mutable_column_view& destination,
cudf::size_type begin,
cudf::size_type end,
cudf::scalar const& value,
rmm::cuda_stream_view stream)
{
using ScalarType = cudf::scalar_type_t<T>;
auto p_scalar = static_cast<ScalarType const*>(&value);
T fill_value = p_scalar->value(stream);
bool is_valid = p_scalar->is_valid(stream);
cudf::detail::copy_range(thrust::make_constant_iterator(fill_value),
thrust::make_constant_iterator(is_valid),
destination,
begin,
end,
stream);
}
struct in_place_fill_range_dispatch {
cudf::scalar const& value;
cudf::mutable_column_view& destination;
template <typename T>
std::enable_if_t<cudf::is_fixed_width<T>() && not cudf::is_fixed_point<T>(), void> operator()(
cudf::size_type begin, cudf::size_type end, rmm::cuda_stream_view stream)
{
in_place_fill<T>(destination, begin, end, value, stream);
}
template <typename T>
std::enable_if_t<cudf::is_fixed_point<T>(), void> operator()(cudf::size_type begin,
cudf::size_type end,
rmm::cuda_stream_view stream)
{
auto unscaled = static_cast<cudf::fixed_point_scalar<T> const&>(value).value(stream);
using RepType = typename T::rep;
auto s = cudf::numeric_scalar<RepType>(unscaled, value.is_valid(stream));
in_place_fill<RepType>(destination, begin, end, s, stream);
}
template <typename T, typename... Args>
std::enable_if_t<not cudf::is_fixed_width<T>(), void> operator()(Args&&...)
{
CUDF_FAIL("in-place fill does not work for variable width types.");
}
};
struct out_of_place_fill_range_dispatch {
cudf::scalar const& value;
cudf::column_view const& input;
template <typename T, typename... Args>
std::enable_if_t<not cudf::is_rep_layout_compatible<T>() and not cudf::is_fixed_point<T>(),
std::unique_ptr<cudf::column>>
operator()(Args...)
{
CUDF_FAIL("Unsupported type in fill.");
}
template <typename T,
CUDF_ENABLE_IF(cudf::is_rep_layout_compatible<T>() or cudf::is_fixed_point<T>())>
std::unique_ptr<cudf::column> operator()(cudf::size_type begin,
cudf::size_type end,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(input.type() == value.type(), "Data type mismatch.");
auto p_ret = std::make_unique<cudf::column>(input, stream, mr);
if (end != begin) { // otherwise no fill
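      // If the fill value is null but the column has no null mask yet, allocate an all-valid
      // mask first so the in-place fill below can mark the filled range as null.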
if (!p_ret->nullable() && !value.is_valid(stream)) {
p_ret->set_null_mask(
cudf::detail::create_null_mask(p_ret->size(), cudf::mask_state::ALL_VALID, stream, mr),
0);
}
auto ret_view = p_ret->mutable_view();
using DeviceType = cudf::device_storage_type_t<T>;
in_place_fill<DeviceType>(ret_view, begin, end, value, stream);
p_ret->set_null_count(ret_view.null_count());
}
return p_ret;
}
};
template <>
std::unique_ptr<cudf::column> out_of_place_fill_range_dispatch::operator()<cudf::string_view>(
cudf::size_type begin,
cudf::size_type end,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(input.type() == value.type(), "Data type mismatch.");
using ScalarType = cudf::scalar_type_t<cudf::string_view>;
auto p_scalar = static_cast<ScalarType const*>(&value);
return cudf::strings::detail::fill(
cudf::strings_column_view(input), begin, end, *p_scalar, stream, mr);
}
template <>
std::unique_ptr<cudf::column> out_of_place_fill_range_dispatch::operator()<cudf::dictionary32>(
cudf::size_type begin,
cudf::size_type end,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) return std::make_unique<cudf::column>(input, stream, mr);
cudf::dictionary_column_view const target(input);
CUDF_EXPECTS(target.keys().type() == value.type(), "Data type mismatch.");
// if the scalar is invalid, then just copy the column and fill the null mask
if (!value.is_valid(stream)) {
auto result = std::make_unique<cudf::column>(input, stream, mr);
auto mview = result->mutable_view();
cudf::detail::set_null_mask(mview.null_mask(), begin, end, false, stream);
result->set_null_count(input.null_count() + (end - begin));
return result;
}
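  // Sketch of the remaining steps (hypothetical values): for keys {"a", "b"} and fill value "c",
  // add_keys yields keys {"a", "b", "c"}, get_index returns 2, and the recursive fill below
  // writes index 2 into positions [begin, end) of the indices column before the dictionary is
  // reassembled from the new keys and indices.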
// add the scalar to get the output dictionary key-set
auto scalar_column = cudf::make_column_from_scalar(value, 1, stream);
auto target_matched =
cudf::dictionary::detail::add_keys(target, scalar_column->view(), stream, mr);
cudf::column_view const target_indices =
cudf::dictionary_column_view(target_matched->view()).get_indices_annotated();
// get the index of the key just added
auto index_of_value = cudf::dictionary::detail::get_index(
target_matched->view(), value, stream, rmm::mr::get_current_device_resource());
// now call fill using just the indices column and the new index
auto new_indices =
cudf::type_dispatcher(target_indices.type(),
out_of_place_fill_range_dispatch{*index_of_value, target_indices},
begin,
end,
stream,
mr);
auto const indices_type = new_indices->type();
auto const output_size = new_indices->size(); // record these
auto const null_count = new_indices->null_count(); // before the release()
auto contents = new_indices->release();
// create the new indices column from the result
auto indices_column = std::make_unique<cudf::column>(indices_type,
static_cast<cudf::size_type>(output_size),
std::move(*(contents.data.release())),
rmm::device_buffer{0, stream, mr},
0);
// take the keys from matched column
std::unique_ptr<cudf::column> keys_column(std::move(target_matched->release().children.back()));
// create column with keys_column and indices_column
return cudf::make_dictionary_column(std::move(keys_column),
std::move(indices_column),
std::move(*(contents.null_mask.release())),
null_count);
}
} // namespace
namespace cudf {
namespace detail {
void fill_in_place(mutable_column_view& destination,
size_type begin,
size_type end,
scalar const& value,
rmm::cuda_stream_view stream)
{
CUDF_EXPECTS(cudf::is_fixed_width(destination.type()),
"In-place fill does not support variable-sized types.");
CUDF_EXPECTS((begin >= 0) && (end <= destination.size()) && (begin <= end),
"Range is out of bounds.");
CUDF_EXPECTS(destination.nullable() || value.is_valid(stream),
"destination should be nullable or value should be non-null.");
CUDF_EXPECTS(destination.type() == value.type(), "Data type mismatch.");
if (end != begin) { // otherwise no-op
cudf::type_dispatcher(
destination.type(), in_place_fill_range_dispatch{value, destination}, begin, end, stream);
}
return;
}
std::unique_ptr<column> fill(column_view const& input,
size_type begin,
size_type end,
scalar const& value,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS((begin >= 0) && (end <= input.size()) && (begin <= end), "Range is out of bounds.");
return cudf::type_dispatcher(
input.type(), out_of_place_fill_range_dispatch{value, input}, begin, end, stream, mr);
}
} // namespace detail
void fill_in_place(mutable_column_view& destination,
size_type begin,
size_type end,
scalar const& value,
rmm::cuda_stream_view stream)
{
CUDF_FUNC_RANGE();
return detail::fill_in_place(destination, begin, end, value, stream);
}
std::unique_ptr<column> fill(column_view const& input,
size_type begin,
size_type end,
scalar const& value,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::fill(input, begin, end, value, stream, mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/filling/calendrical_month_sequence.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/detail/calendrical_month_sequence.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
namespace cudf {
namespace detail {
std::unique_ptr<cudf::column> calendrical_month_sequence(size_type size,
scalar const& init,
size_type months,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return type_dispatcher(
init.type(), calendrical_month_sequence_functor{}, size, init, months, stream, mr);
}
} // namespace detail
std::unique_ptr<cudf::column> calendrical_month_sequence(size_type size,
scalar const& init,
size_type months,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::calendrical_month_sequence(size, init, months, stream, mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/sort/sort_column.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "sort_column_impl.cuh"
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <thrust/sequence.h>
namespace cudf {
namespace detail {
/**
* @copydoc
* sorted_order(column_view&,order,null_order,rmm::cuda_stream_view,rmm::mr::device_memory_resource*)
*/
template <>
std::unique_ptr<column> sorted_order<false>(column_view const& input,
order column_order,
null_order null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto sorted_indices = cudf::make_numeric_column(
data_type(type_to_id<size_type>()), input.size(), mask_state::UNALLOCATED, stream, mr);
mutable_column_view indices_view = sorted_indices->mutable_view();
thrust::sequence(
rmm::exec_policy(stream), indices_view.begin<size_type>(), indices_view.end<size_type>(), 0);
cudf::type_dispatcher<dispatch_storage_type>(input.type(),
column_sorted_order_fn<false>{},
input,
indices_view,
column_order == order::ASCENDING,
null_precedence,
stream);
return sorted_indices;
}
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/sort/segmented_sort.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "segmented_sort_impl.cuh"
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/sorting.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/iterator/counting_iterator.h>
namespace cudf {
namespace detail {
rmm::device_uvector<size_type> get_segment_indices(size_type num_rows,
column_view const& offsets,
rmm::cuda_stream_view stream)
{
rmm::device_uvector<size_type> segment_ids(num_rows, stream);
auto offset_begin = offsets.begin<size_type>();
auto offset_end = offsets.end<size_type>();
auto counting_iter = thrust::make_counting_iterator<size_type>(0);
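  // Each row index is mapped to the offset value of the segment that contains it; rows before the
  // first offset keep their own index and rows at/after the last offset use index + 1, so
  // untouched leading/trailing rows each form their own group (see the examples documented on
  // get_segment_indices in segmented_sort_impl.cuh).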
thrust::transform(rmm::exec_policy(stream),
counting_iter,
counting_iter + segment_ids.size(),
segment_ids.begin(),
[offset_begin, offset_end] __device__(auto idx) {
if (offset_begin == offset_end || idx < *offset_begin) { return idx; }
if (idx >= *(offset_end - 1)) { return idx + 1; }
return static_cast<size_type>(
*thrust::upper_bound(thrust::seq, offset_begin, offset_end, idx));
});
return segment_ids;
}
std::unique_ptr<column> segmented_sorted_order(table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return segmented_sorted_order_common<sort_method::UNSTABLE>(
keys, segment_offsets, column_order, null_precedence, stream, mr);
}
std::unique_ptr<table> segmented_sort_by_key(table_view const& values,
table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return segmented_sort_by_key_common<sort_method::UNSTABLE>(
values, keys, segment_offsets, column_order, null_precedence, stream, mr);
}
} // namespace detail
std::unique_ptr<column> segmented_sorted_order(table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::segmented_sorted_order(
keys, segment_offsets, column_order, null_precedence, stream, mr);
}
std::unique_ptr<table> segmented_sort_by_key(table_view const& values,
table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::segmented_sort_by_key(
values, keys, segment_offsets, column_order, null_precedence, stream, mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/sort/stable_segmented_sort.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "segmented_sort_impl.cuh"
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/sorting.hpp>
#include <cudf/utilities/default_stream.hpp>
namespace cudf {
namespace detail {
std::unique_ptr<column> stable_segmented_sorted_order(
table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return segmented_sorted_order_common<sort_method::STABLE>(
keys, segment_offsets, column_order, null_precedence, stream, mr);
}
std::unique_ptr<table> stable_segmented_sort_by_key(table_view const& values,
table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return segmented_sort_by_key_common<sort_method::STABLE>(
values, keys, segment_offsets, column_order, null_precedence, stream, mr);
}
} // namespace detail
std::unique_ptr<column> stable_segmented_sorted_order(
table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::stable_segmented_sorted_order(
keys, segment_offsets, column_order, null_precedence, stream, mr);
}
std::unique_ptr<table> stable_segmented_sort_by_key(table_view const& values,
table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::stable_segmented_sort_by_key(
values, keys, segment_offsets, column_order, null_precedence, stream, mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/sort/sort_impl.cuh
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "sort_column_impl.cuh"
#include <cudf/column/column_factories.hpp>
namespace cudf {
namespace detail {
/**
* @copydoc
* sorted_order(table_view&,std::vector<order>,std::vector<null_order>,rmm::mr::device_memory_resource*)
*
* @tparam stable Whether to use stable sort
* @param stream CUDA stream used for device memory operations and kernel launches
*/
template <bool stable>
std::unique_ptr<column> sorted_order(table_view input,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.num_rows() == 0 or input.num_columns() == 0) {
return cudf::make_numeric_column(data_type(type_to_id<size_type>()), 0);
}
if (not column_order.empty()) {
CUDF_EXPECTS(static_cast<std::size_t>(input.num_columns()) == column_order.size(),
"Mismatch between number of columns and column order.");
}
if (not null_precedence.empty()) {
CUDF_EXPECTS(static_cast<std::size_t>(input.num_columns()) == null_precedence.size(),
"Mismatch between number of columns and null_precedence size.");
}
// fast-path for single column sort
if (input.num_columns() == 1 and not cudf::is_nested(input.column(0).type())) {
auto const single_col = input.column(0);
auto const col_order = column_order.empty() ? order::ASCENDING : column_order.front();
auto const null_prec = null_precedence.empty() ? null_order::BEFORE : null_precedence.front();
return sorted_order<stable>(single_col, col_order, null_prec, stream, mr);
}
std::unique_ptr<column> sorted_indices = cudf::make_numeric_column(
data_type(type_to_id<size_type>()), input.num_rows(), mask_state::UNALLOCATED, stream, mr);
mutable_column_view mutable_indices_view = sorted_indices->mutable_view();
thrust::sequence(rmm::exec_policy(stream),
mutable_indices_view.begin<size_type>(),
mutable_indices_view.end<size_type>(),
0);
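  // The indices [0, num_rows) are sorted below with a lexicographic row comparator; the result
  // is a gather map into the original table rather than a physically sorted table.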
auto const do_sort = [&](auto const comparator) {
// Compiling `thrust::*sort*` APIs is expensive.
// Thus, we should optimize that by using constexpr condition to only compile what we need.
if constexpr (stable) {
thrust::stable_sort(rmm::exec_policy(stream),
mutable_indices_view.begin<size_type>(),
mutable_indices_view.end<size_type>(),
comparator);
} else {
thrust::sort(rmm::exec_policy(stream),
mutable_indices_view.begin<size_type>(),
mutable_indices_view.end<size_type>(),
comparator);
}
};
auto const comp = cudf::experimental::row::lexicographic::self_comparator(
input, column_order, null_precedence, stream);
if (cudf::detail::has_nested_columns(input)) {
auto const comparator = comp.less<true>(nullate::DYNAMIC{has_nested_nulls(input)});
do_sort(comparator);
} else {
auto const comparator = comp.less<false>(nullate::DYNAMIC{has_nested_nulls(input)});
do_sort(comparator);
}
return sorted_indices;
}
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/sort/stable_sort_column.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "sort_column_impl.cuh"
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <thrust/sequence.h>
namespace cudf {
namespace detail {
/**
* @copydoc
* sorted_order(column_view&,order,null_order,rmm::cuda_stream_view,rmm::mr::device_memory_resource*)
*/
template <>
std::unique_ptr<column> sorted_order<true>(column_view const& input,
order column_order,
null_order null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto sorted_indices = cudf::make_numeric_column(
data_type(type_to_id<size_type>()), input.size(), mask_state::UNALLOCATED, stream, mr);
mutable_column_view indices_view = sorted_indices->mutable_view();
thrust::sequence(
rmm::exec_policy(stream), indices_view.begin<size_type>(), indices_view.end<size_type>(), 0);
cudf::type_dispatcher<dispatch_storage_type>(input.type(),
column_sorted_order_fn<true>{},
input,
indices_view,
column_order == order::ASCENDING,
null_precedence,
stream);
return sorted_indices;
}
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/sort/segmented_sort_impl.cuh
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/sequence.hpp>
#include <cudf/detail/sorting.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <cub/device/device_segmented_sort.cuh>
namespace cudf {
namespace detail {
/**
* @brief The enum specifying which sorting method to use (stable or unstable).
*/
enum class sort_method { STABLE, UNSTABLE };
/**
* @brief Functor performs faster segmented sort on eligible columns
*/
template <sort_method method>
struct column_fast_sort_fn {
/**
* @brief Run-time check for faster segmented sort on an eligible column
*
* Fast segmented sort can handle integral types including
* decimal types if dispatch_storage_type is used but it does not support int128.
*/
static bool is_fast_sort_supported(column_view const& col)
{
return !col.has_nulls() and
(cudf::is_integral(col.type()) ||
(cudf::is_fixed_point(col.type()) and (col.type().id() != type_id::DECIMAL128)));
}
/**
* @brief Compile-time check for supporting fast segmented sort for a specific type
*
* The dispatch_storage_type means we can check for integral types to
* include fixed-point types but the CUB limitation means we need to exclude int128.
*/
template <typename T>
static constexpr bool is_fast_sort_supported()
{
return cudf::is_integral<T>() and !std::is_same_v<__int128, T>;
}
template <typename T>
void fast_sort(column_view const& input,
column_view const& segment_offsets,
mutable_column_view& indices,
bool ascending,
rmm::cuda_stream_view stream)
{
// CUB's segmented sort functions cannot accept iterators.
// We create a temporary column here for it to use.
auto temp_col = cudf::detail::allocate_like(input,
input.size(),
mask_allocation_policy::NEVER,
stream,
rmm::mr::get_current_device_resource());
mutable_column_view output_view = temp_col->mutable_view();
// DeviceSegmentedSort is faster than DeviceSegmentedRadixSort at this time
auto fast_sort_impl = [stream](bool ascending, [[maybe_unused]] auto&&... args) {
rmm::device_buffer d_temp_storage;
size_t temp_storage_bytes = 0;
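      // Standard CUB two-phase pattern: the first call only computes temp_storage_bytes
      // (d_temp_storage is still empty/null), then the buffer is allocated and the call is
      // repeated to perform the actual sort.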
if (ascending) {
if constexpr (method == sort_method::STABLE) {
cub::DeviceSegmentedSort::StableSortPairs(
d_temp_storage.data(), temp_storage_bytes, std::forward<decltype(args)>(args)...);
d_temp_storage = rmm::device_buffer{temp_storage_bytes, stream};
cub::DeviceSegmentedSort::StableSortPairs(
d_temp_storage.data(), temp_storage_bytes, std::forward<decltype(args)>(args)...);
} else {
cub::DeviceSegmentedSort::SortPairs(
d_temp_storage.data(), temp_storage_bytes, std::forward<decltype(args)>(args)...);
d_temp_storage = rmm::device_buffer{temp_storage_bytes, stream};
cub::DeviceSegmentedSort::SortPairs(
d_temp_storage.data(), temp_storage_bytes, std::forward<decltype(args)>(args)...);
}
} else {
if constexpr (method == sort_method::STABLE) {
cub::DeviceSegmentedSort::StableSortPairsDescending(
d_temp_storage.data(), temp_storage_bytes, std::forward<decltype(args)>(args)...);
d_temp_storage = rmm::device_buffer{temp_storage_bytes, stream};
cub::DeviceSegmentedSort::StableSortPairsDescending(
d_temp_storage.data(), temp_storage_bytes, std::forward<decltype(args)>(args)...);
} else {
cub::DeviceSegmentedSort::SortPairsDescending(
d_temp_storage.data(), temp_storage_bytes, std::forward<decltype(args)>(args)...);
d_temp_storage = rmm::device_buffer{temp_storage_bytes, stream};
cub::DeviceSegmentedSort::SortPairsDescending(
d_temp_storage.data(), temp_storage_bytes, std::forward<decltype(args)>(args)...);
}
}
};
fast_sort_impl(ascending,
input.begin<T>(),
output_view.begin<T>(),
indices.begin<size_type>(),
indices.begin<size_type>(),
input.size(),
segment_offsets.size() - 1,
segment_offsets.begin<size_type>(),
segment_offsets.begin<size_type>() + 1,
stream.value());
}
template <typename T, CUDF_ENABLE_IF(is_fast_sort_supported<T>())>
void operator()(column_view const& input,
column_view const& segment_offsets,
mutable_column_view& indices,
bool ascending,
rmm::cuda_stream_view stream)
{
fast_sort<T>(input, segment_offsets, indices, ascending, stream);
}
template <typename T, CUDF_ENABLE_IF(!is_fast_sort_supported<T>())>
void operator()(
column_view const&, column_view const&, mutable_column_view&, bool, rmm::cuda_stream_view)
{
CUDF_FAIL("Column type cannot be used with fast-sort function");
}
};
/**
* @brief Performs faster sort on eligible columns
*
* Check the `is_fast_sort_supported()==true` on the input column before using this function.
*
* @param input Column to sort
* @param segment_offsets Identifies segments to sort within
* @param column_order Sort ascending or descending
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
*/
template <sort_method method>
std::unique_ptr<column> fast_segmented_sorted_order(column_view const& input,
column_view const& segment_offsets,
order const& column_order,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// Unfortunately, CUB's segmented sort functions cannot accept iterators.
// We have to build a pre-filled sequence of indices as input.
auto sorted_indices =
cudf::detail::sequence(input.size(), numeric_scalar<size_type>{0, true, stream}, stream, mr);
auto indices_view = sorted_indices->mutable_view();
cudf::type_dispatcher<dispatch_storage_type>(input.type(),
column_fast_sort_fn<method>{},
input,
segment_offsets,
indices_view,
column_order == order::ASCENDING,
stream);
return sorted_indices;
}
/**
* @brief Builds indices to identify segments to sort
*
* The segments are added to the input table-view keys so they
* are lexicographically sorted within the segmented groups.
*
* ```
* Example 1:
* num_rows = 10
* offsets = {0, 3, 7, 10}
* segment-indices -> { 3,3,3, 7,7,7,7, 10,10,10 }
* ```
*
* ```
* Example 2: (offsets do not cover all indices)
* num_rows = 10
* offsets = {3, 7}
* segment-indices -> { 0,1,2, 7,7,7,7, 8,9,10 }
* ```
*
* @param num_rows Total number of rows in the input keys to sort
* @param offsets The offsets identifying the segments
* @param stream CUDA stream used for device memory operations and kernel launches
*/
rmm::device_uvector<size_type> get_segment_indices(size_type num_rows,
column_view const& offsets,
rmm::cuda_stream_view stream);
/**
* @brief Segmented sorted-order utility
*
* Returns the indices that map the column to a segmented sorted table.
* Automatically handles calling accelerated code paths as appropriate.
*
* @tparam method Specifies sort is stable or not
* @param keys Table to sort
* @param segment_offsets Identifies the segments within the keys
* @param column_order Sort order for each column in the keys
* @param null_precedence Where to place the null entries for each column
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource to allocate any returned objects
*/
template <sort_method method>
std::unique_ptr<column> segmented_sorted_order_common(
table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (keys.num_rows() == 0 || keys.num_columns() == 0) {
return cudf::make_empty_column(type_to_id<size_type>());
}
CUDF_EXPECTS(segment_offsets.type() == data_type(type_to_id<size_type>()),
"segment offsets should be size_type");
if (not column_order.empty()) {
CUDF_EXPECTS(static_cast<std::size_t>(keys.num_columns()) == column_order.size(),
"Mismatch between number of columns and column order.");
}
if (not null_precedence.empty()) {
CUDF_EXPECTS(static_cast<std::size_t>(keys.num_columns()) == null_precedence.size(),
"Mismatch between number of columns and null_precedence size.");
}
// the average row size for which to prefer fast sort
constexpr cudf::size_type MAX_AVG_LIST_SIZE_FOR_FAST_SORT{100};
// the maximum row count for which to prefer fast sort
constexpr cudf::size_type MAX_LIST_SIZE_FOR_FAST_SORT{1 << 18};
// fast-path for single column sort:
// - single-column table
// - not stable-sort
// - no nulls and allowable fixed-width type
// - size and width are limited -- based on benchmark results
if (keys.num_columns() == 1 and
column_fast_sort_fn<method>::is_fast_sort_supported(keys.column(0)) and
(segment_offsets.size() > 0) and
(((keys.num_rows() / segment_offsets.size()) < MAX_AVG_LIST_SIZE_FOR_FAST_SORT) or
(keys.num_rows() < MAX_LIST_SIZE_FOR_FAST_SORT))) {
auto const col_order = column_order.empty() ? order::ASCENDING : column_order.front();
return fast_segmented_sorted_order<method>(
keys.column(0), segment_offsets, col_order, stream, mr);
}
// Get segment id of each element in all segments.
auto segment_ids = get_segment_indices(keys.num_rows(), segment_offsets, stream);
// insert segment id before all columns.
std::vector<column_view> keys_with_segid;
keys_with_segid.reserve(keys.num_columns() + 1);
keys_with_segid.push_back(column_view(
data_type(type_to_id<size_type>()), segment_ids.size(), segment_ids.data(), nullptr, 0));
keys_with_segid.insert(keys_with_segid.end(), keys.begin(), keys.end());
auto segid_keys = table_view(keys_with_segid);
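  // With the segment-id column prepended as the primary key, an ordinary lexicographic sort of
  // segid_keys can only reorder rows within a segment. For example (hypothetical values), with
  // offsets {0, 3, 7, 10} every row carries segment id 3, 7 or 10, so rows from different
  // segments never swap.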
auto prepend_default = [](auto const& vector, auto default_value) {
if (vector.empty()) return vector;
std::remove_cv_t<std::remove_reference_t<decltype(vector)>> pre_vector;
    pre_vector.reserve(vector.size() + 1);
pre_vector.push_back(default_value);
pre_vector.insert(pre_vector.end(), vector.begin(), vector.end());
return pre_vector;
};
auto child_column_order = prepend_default(column_order, order::ASCENDING);
auto child_null_precedence = prepend_default(null_precedence, null_order::AFTER);
// return sorted order of child columns
if constexpr (method == sort_method::STABLE) {
return detail::stable_sorted_order(
segid_keys, child_column_order, child_null_precedence, stream, mr);
} else {
return detail::sorted_order(segid_keys, child_column_order, child_null_precedence, stream, mr);
}
}
template <sort_method method>
std::unique_ptr<table> segmented_sort_by_key_common(table_view const& values,
table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(values.num_rows() == keys.num_rows(),
"Mismatch in number of rows for values and keys");
auto sorted_order = segmented_sorted_order_common<method>(keys,
segment_offsets,
column_order,
null_precedence,
stream,
rmm::mr::get_current_device_resource());
// Gather segmented sort of child value columns
return detail::gather(values,
sorted_order->view(),
out_of_bounds_policy::DONT_CHECK,
detail::negative_index_policy::NOT_ALLOWED,
stream,
mr);
}
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/sort/sort.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "sort_impl.cuh"
#include <cudf/column/column.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/sorting.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/functional.h>
#include <thrust/sort.h>
namespace cudf {
namespace detail {
std::unique_ptr<column> sorted_order(table_view const& input,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return sorted_order<false>(input, column_order, null_precedence, stream, mr);
}
std::unique_ptr<table> sort_by_key(table_view const& values,
table_view const& keys,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(values.num_rows() == keys.num_rows(),
"Mismatch in number of rows for values and keys");
auto sorted_order = detail::sorted_order(
keys, column_order, null_precedence, stream, rmm::mr::get_current_device_resource());
return detail::gather(values,
sorted_order->view(),
out_of_bounds_policy::DONT_CHECK,
detail::negative_index_policy::NOT_ALLOWED,
stream,
mr);
}
struct inplace_column_sort_fn {
template <typename T, std::enable_if_t<cudf::is_fixed_width<T>()>* = nullptr>
void operator()(mutable_column_view& col, bool ascending, rmm::cuda_stream_view stream) const
{
CUDF_EXPECTS(!col.has_nulls(), "Nulls not supported for in-place sort");
if (ascending) {
thrust::sort(rmm::exec_policy(stream), col.begin<T>(), col.end<T>(), thrust::less<T>());
} else {
thrust::sort(rmm::exec_policy(stream), col.begin<T>(), col.end<T>(), thrust::greater<T>());
}
}
template <typename T, std::enable_if_t<!cudf::is_fixed_width<T>()>* = nullptr>
void operator()(mutable_column_view&, bool, rmm::cuda_stream_view) const
{
CUDF_FAIL("Column type must be relationally comparable and fixed-width");
}
};
std::unique_ptr<table> sort(table_view const& input,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
// fast-path sort conditions: single, non-floating-point, fixed-width column with no nulls
if (input.num_columns() == 1 && !input.column(0).has_nulls() &&
cudf::is_fixed_width(input.column(0).type()) &&
!cudf::is_floating_point(input.column(0).type())) {
auto output = std::make_unique<column>(input.column(0), stream, mr);
auto view = output->mutable_view();
bool ascending = (column_order.empty() ? true : column_order.front() == order::ASCENDING);
cudf::type_dispatcher<dispatch_storage_type>(
output->type(), inplace_column_sort_fn{}, view, ascending, stream);
std::vector<std::unique_ptr<column>> columns;
columns.emplace_back(std::move(output));
return std::make_unique<table>(std::move(columns));
}
  return detail::sort_by_key(input, input, column_order, null_precedence, stream, mr);
}
} // namespace detail
std::unique_ptr<column> sorted_order(table_view const& input,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::sorted_order(input, column_order, null_precedence, stream, mr);
}
std::unique_ptr<table> sort(table_view const& input,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::sort(input, column_order, null_precedence, stream, mr);
}
std::unique_ptr<table> sort_by_key(table_view const& values,
table_view const& keys,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::sort_by_key(values, keys, column_order, null_precedence, stream, mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/sort/sort_column_impl.cuh
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_device_view.cuh>
#include <cudf/table/experimental/row_operators.cuh>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/sequence.h>
#include <thrust/sort.h>
namespace cudf {
namespace detail {
/**
* @brief Sort indices of a single column.
*
* This API offers fast sorting for primitive types. It cannot handle nested types and will not
* consider `NaN` as equivalent to other `NaN`.
*
* @tparam stable Whether to use stable sort
* @param input Column to sort. The column data is not modified.
* @param column_order Ascending or descending sort order
* @param null_precedence How null rows are to be ordered
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return Sorted indices for the input column.
*/
template <bool stable>
std::unique_ptr<column> sorted_order(column_view const& input,
order column_order,
null_order null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr);
/**
* @brief Comparator functor needed for single column sort.
*
* @tparam Column element type.
*/
template <typename T>
struct simple_comparator {
__device__ bool operator()(size_type lhs, size_type rhs)
{
if (has_nulls) {
bool lhs_null{d_column.is_null(lhs)};
bool rhs_null{d_column.is_null(rhs)};
if (lhs_null || rhs_null) {
if (!ascending) thrust::swap(lhs_null, rhs_null);
return (null_precedence == cudf::null_order::BEFORE ? !rhs_null : !lhs_null);
}
}
return relational_compare(d_column.element<T>(lhs), d_column.element<T>(rhs)) ==
(ascending ? weak_ordering::LESS : weak_ordering::GREATER);
}
column_device_view const d_column;
bool has_nulls;
bool ascending;
null_order null_precedence{};
};
template <bool stable>
struct column_sorted_order_fn {
/**
* @brief Compile time check for allowing faster sort.
*
* Faster sort is defined for fixed-width types where only
* the primitive comparators thrust::greater or thrust::less
* are needed.
*
   * Floating-point types are excluded here because the special handling of NaNs
   * requires the row comparator.
*/
template <typename T>
static constexpr bool is_faster_sort_supported()
{
return cudf::is_fixed_width<T>() && !cudf::is_floating_point<T>();
}
/**
* @brief Sorts fixed-width columns using faster thrust sort.
*
* Should not be called if `input.has_nulls()==true`
*
* @param input Column to sort
* @param indices Output sorted indices
* @param ascending True if sort order is ascending
* @param stream CUDA stream used for device memory operations and kernel launches
*/
template <typename T>
void faster_sort(column_view const& input,
mutable_column_view& indices,
bool ascending,
rmm::cuda_stream_view stream)
{
// A thrust sort on a column of primitive types will use a radix sort.
// For other fixed-width types, thrust will use merge-sort.
// But this also requires making a copy of the input data.
auto temp_col = column(input, stream);
auto d_col = temp_col.mutable_view();
auto const do_sort = [&](auto const comp) {
// Compiling `thrust::*sort*` APIs is expensive.
// Thus, we should optimize that by using constexpr condition to only compile what we need.
if constexpr (stable) {
thrust::stable_sort_by_key(rmm::exec_policy(stream),
d_col.begin<T>(),
d_col.end<T>(),
indices.begin<size_type>(),
comp);
} else {
thrust::sort_by_key(rmm::exec_policy(stream),
d_col.begin<T>(),
d_col.end<T>(),
indices.begin<size_type>(),
comp);
}
};
if (ascending) {
do_sort(thrust::less<T>{});
} else {
do_sort(thrust::greater<T>{});
}
}
/**
* @brief Sorts a single column with a relationally comparable type.
*
* This is used when a comparator is required.
*
* @param input Column to sort
* @param indices Output sorted indices
* @param ascending True if sort order is ascending
* @param null_precedence How null rows are to be ordered
* @param stream CUDA stream used for device memory operations and kernel launches
*/
template <typename T>
void sorted_order(column_view const& input,
mutable_column_view& indices,
bool ascending,
null_order null_precedence,
rmm::cuda_stream_view stream)
{
auto keys = column_device_view::create(input, stream);
auto comp = simple_comparator<T>{*keys, input.has_nulls(), ascending, null_precedence};
// Compiling `thrust::*sort*` APIs is expensive.
// Thus, we should optimize that by using constexpr condition to only compile what we need.
if constexpr (stable) {
thrust::stable_sort(
rmm::exec_policy(stream), indices.begin<size_type>(), indices.end<size_type>(), comp);
} else {
thrust::sort(
rmm::exec_policy(stream), indices.begin<size_type>(), indices.end<size_type>(), comp);
}
}
template <typename T, CUDF_ENABLE_IF(cudf::is_relationally_comparable<T, T>())>
void operator()(column_view const& input,
mutable_column_view& indices,
bool ascending,
null_order null_precedence,
rmm::cuda_stream_view stream)
{
if constexpr (is_faster_sort_supported<T>()) {
if (input.has_nulls()) {
sorted_order<T>(input, indices, ascending, null_precedence, stream);
} else {
faster_sort<T>(input, indices, ascending, stream);
}
} else {
sorted_order<T>(input, indices, ascending, null_precedence, stream);
}
}
template <typename T, CUDF_ENABLE_IF(not cudf::is_relationally_comparable<T, T>())>
void operator()(column_view const&, mutable_column_view&, bool, null_order, rmm::cuda_stream_view)
{
CUDF_FAIL("Column type must be relationally comparable");
}
};
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/sort/rank.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/sorting.hpp>
#include <cudf/table/experimental/row_operators.cuh>
#include <cudf/table/table.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/pair.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/scatter.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
namespace cudf {
namespace detail {
namespace {
template <typename PermutationIteratorType, typename DeviceComparatorType>
struct unique_functor {
unique_functor(PermutationIteratorType permute, DeviceComparatorType device_comparator)
: _permute(permute), _device_comparator(device_comparator)
{
}
auto __device__ operator()(size_type index) const noexcept
{
return static_cast<size_type>(index == 0 ||
not _device_comparator(_permute[index], _permute[index - 1]));
}
private:
PermutationIteratorType _permute;
DeviceComparatorType _device_comparator;
};
// Assigns ranks 1 to n to the n unique values. Equal values get the same rank.
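// For example (illustrative values):
//   input_col              = {30, 10, 20, 10}
//   sorted_order_view      = {1, 3, 2, 0}      (ascending)
//   values in sorted order = {10, 10, 20, 30}
//   unique flags           = {1, 0, 1, 1}
//   inclusive scan         = {1, 1, 2, 3}      (dense ranks in sorted order)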
rmm::device_uvector<size_type> sorted_dense_rank(column_view input_col,
column_view sorted_order_view,
rmm::cuda_stream_view stream)
{
auto const t_input = table_view{{input_col}};
auto const comparator = cudf::experimental::row::equality::self_comparator{t_input, stream};
auto const sorted_index_order = thrust::make_permutation_iterator(
sorted_order_view.begin<size_type>(), thrust::make_counting_iterator<size_type>(0));
auto const input_size = input_col.size();
rmm::device_uvector<size_type> dense_rank_sorted(input_size, stream);
auto const comparator_helper = [&](auto const device_comparator) {
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(input_size),
dense_rank_sorted.data(),
unique_functor<decltype(sorted_index_order), decltype(device_comparator)>{
sorted_index_order, device_comparator});
};
if (cudf::detail::has_nested_columns(t_input)) {
auto const device_comparator =
comparator.equal_to<true>(nullate::DYNAMIC{has_nested_nulls(t_input)});
comparator_helper(device_comparator);
} else {
auto const device_comparator =
comparator.equal_to<false>(nullate::DYNAMIC{has_nested_nulls(t_input)});
comparator_helper(device_comparator);
}
thrust::inclusive_scan(rmm::exec_policy(stream),
dense_rank_sorted.begin(),
dense_rank_sorted.end(),
dense_rank_sorted.data());
return dense_rank_sorted;
}
/**
* @brief Breaks the ties among equal value groups using binary operator and
* transform this tied value to final rank.
*
* @param dense_rank dense rank of sorted input column (acts as key for value
* groups).
* @param tie_iter iterator of rank to break ties among equal value groups.
* @param sorted_order_view sorted order indices of input column
* @param rank_iter output rank iterator
* @param tie_breaker tie breaking operator. For example, maximum & minimum.
* @param transformer transform after tie breaking (useful for average).
* @param stream CUDA stream used for device memory operations and kernel launches.
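 *
 * For example, with minimum tie breaking (illustrative values):
 * dense_rank_sorted = {1, 1, 2, 3} and a tie_iter counting from 1 gives values {1, 2, 3, 4};
 * reduce_by_key(min) yields {1, 3, 4}, so the ranks in sorted order become {1, 1, 3, 4}
 * before being scattered back to the original row positions.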
*/
template <typename TieType,
typename outputIterator,
typename TieBreaker,
typename Transformer,
typename TieIterator>
void tie_break_ranks_transform(cudf::device_span<size_type const> dense_rank_sorted,
TieIterator tie_iter,
column_view const& sorted_order_view,
outputIterator rank_iter,
TieBreaker tie_breaker,
Transformer transformer,
rmm::cuda_stream_view stream)
{
auto const input_size = sorted_order_view.size();
// algorithm: reduce_by_key(dense_rank, 1, n, reduction_tie_breaker)
// reduction_tie_breaker = min, max, min_count
rmm::device_uvector<TieType> tie_sorted(sorted_order_view.size(), stream);
thrust::reduce_by_key(rmm::exec_policy(stream),
dense_rank_sorted.begin(),
dense_rank_sorted.end(),
tie_iter,
thrust::make_discard_iterator(),
tie_sorted.begin(),
thrust::equal_to{},
tie_breaker);
auto sorted_tied_rank = thrust::make_transform_iterator(
dense_rank_sorted.begin(),
[tied_rank = tie_sorted.begin(), transformer] __device__(auto dense_pos) {
return transformer(tied_rank[dense_pos - 1]);
});
thrust::scatter(rmm::exec_policy(stream),
sorted_tied_rank,
sorted_tied_rank + input_size,
sorted_order_view.begin<size_type>(),
rank_iter);
}
template <typename outputType>
void rank_first(column_view sorted_order_view,
mutable_column_view rank_mutable_view,
rmm::cuda_stream_view stream)
{
// stable sort order ranking (no ties)
thrust::scatter(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(1),
thrust::make_counting_iterator<size_type>(rank_mutable_view.size() + 1),
sorted_order_view.begin<size_type>(),
rank_mutable_view.begin<outputType>());
}
template <typename outputType>
void rank_dense(cudf::device_span<size_type const> dense_rank_sorted,
column_view sorted_order_view,
mutable_column_view rank_mutable_view,
rmm::cuda_stream_view stream)
{
// All equal values have same rank and rank always increases by 1 between groups
thrust::scatter(rmm::exec_policy(stream),
dense_rank_sorted.begin(),
dense_rank_sorted.end(),
sorted_order_view.begin<size_type>(),
rank_mutable_view.begin<outputType>());
}
template <typename outputType>
void rank_min(cudf::device_span<size_type const> group_keys,
column_view sorted_order_view,
mutable_column_view rank_mutable_view,
rmm::cuda_stream_view stream)
{
  // minimum rank in the group (the rank of the first element in the group)
  // All equal values get the minimum of the ranks among them.
// algorithm: reduce_by_key(dense_rank, 1, n, min), scatter
tie_break_ranks_transform<size_type>(group_keys,
thrust::make_counting_iterator<size_type>(1),
sorted_order_view,
rank_mutable_view.begin<outputType>(),
thrust::minimum{},
thrust::identity{},
stream);
}
template <typename outputType>
void rank_max(cudf::device_span<size_type const> group_keys,
column_view sorted_order_view,
mutable_column_view rank_mutable_view,
rmm::cuda_stream_view stream)
{
  // maximum rank in the group (the rank of the last element in the group)
  // All equal values get the maximum of the ranks among them.
// algorithm: reduce_by_key(dense_rank, 1, n, max), scatter
tie_break_ranks_transform<size_type>(group_keys,
thrust::make_counting_iterator<size_type>(1),
sorted_order_view,
rank_mutable_view.begin<outputType>(),
thrust::maximum{},
thrust::identity{},
stream);
}
// Returns index, count
template <typename T>
struct index_counter {
__device__ T operator()(size_type i) { return T{i, 1}; }
};
void rank_average(cudf::device_span<size_type const> group_keys,
column_view sorted_order_view,
mutable_column_view rank_mutable_view,
rmm::cuda_stream_view stream)
{
  // ranks in the group are k, k+1, ..., k+n-1
  // average = (n*k + n*(n-1)/2) / n
  //         = k + (n-1)/2 = min + (count-1)/2
// Calculate Min of ranks and Count of equal values
// algorithm: reduce_by_key(dense_rank, 1, n, min_count)
// transform(min+(count-1)/2), scatter
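  // For example (illustrative): four equal values occupying ranks 3..6 give min = 3 and count = 4,
  // so each gets the average rank 3 + (4 - 1) / 2.0 = 4.5.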
using MinCount = thrust::pair<size_type, size_type>;
tie_break_ranks_transform<MinCount>(
group_keys,
// Use device functor with return type. Cannot use device lambda due to limitation.
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#extended-lambda-restrictions
cudf::detail::make_counting_transform_iterator(1, index_counter<MinCount>{}),
sorted_order_view,
rank_mutable_view.begin<double>(),
[] __device__(auto rank_count1, auto rank_count2) {
return MinCount{std::min(rank_count1.first, rank_count2.first),
rank_count1.second + rank_count2.second};
},
[] __device__(MinCount minrank_count) { // min+(count-1)/2
return static_cast<double>(thrust::get<0>(minrank_count)) +
(static_cast<double>(thrust::get<1>(minrank_count)) - 1) / 2.0;
},
stream);
}
} // anonymous namespace
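// For example, ranking the input {3, 1, 4, 1} in ascending order (illustrative, no nulls) yields:
//   FIRST   -> {3, 1, 4, 2}
//   DENSE   -> {2, 1, 3, 1}
//   MIN     -> {3, 1, 4, 1}
//   MAX     -> {3, 2, 4, 2}
//   AVERAGE -> {3.0, 1.5, 4.0, 1.5}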
std::unique_ptr<column> rank(column_view const& input,
rank_method method,
order column_order,
null_policy null_handling,
null_order null_precedence,
bool percentage,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
data_type const output_type = (percentage or method == rank_method::AVERAGE)
? data_type(type_id::FLOAT64)
: data_type(type_to_id<size_type>());
std::unique_ptr<column> rank_column = [&null_handling, &output_type, &input, &stream, &mr] {
    // na_option=keep: null input rows keep null ranks (null_policy::EXCLUDE copies the input null mask)
if (null_handling == null_policy::EXCLUDE)
return make_numeric_column(output_type,
input.size(),
detail::copy_bitmask(input, stream, mr),
input.null_count(),
stream,
mr);
else
return make_numeric_column(output_type, input.size(), mask_state::UNALLOCATED, stream, mr);
}();
auto rank_mutable_view = rank_column->mutable_view();
std::unique_ptr<column> sorted_order =
(method == rank_method::FIRST)
? detail::stable_sorted_order(
table_view{{input}}, {column_order}, {null_precedence}, stream, mr)
: detail::sorted_order(table_view{{input}}, {column_order}, {null_precedence}, stream, mr);
column_view sorted_order_view = sorted_order->view();
// dense: All equal values have same rank and rank always increases by 1 between groups
// acts as key for min, max, average to denote equal value groups
rmm::device_uvector<size_type> const dense_rank_sorted =
[&method, &input, &sorted_order_view, &stream] {
if (method != rank_method::FIRST)
return sorted_dense_rank(input, sorted_order_view, stream);
else
return rmm::device_uvector<size_type>(0, stream);
}();
if (output_type.id() == type_id::FLOAT64) {
switch (method) {
case rank_method::FIRST:
rank_first<double>(sorted_order_view, rank_mutable_view, stream);
break;
case rank_method::DENSE:
rank_dense<double>(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream);
break;
case rank_method::MIN:
rank_min<double>(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream);
break;
case rank_method::MAX:
rank_max<double>(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream);
break;
case rank_method::AVERAGE:
rank_average(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream);
break;
default: CUDF_FAIL("Unexpected rank_method for rank()");
}
} else {
switch (method) {
case rank_method::FIRST:
rank_first<size_type>(sorted_order_view, rank_mutable_view, stream);
break;
case rank_method::DENSE:
rank_dense<size_type>(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream);
break;
case rank_method::MIN:
rank_min<size_type>(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream);
break;
case rank_method::MAX:
rank_max<size_type>(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream);
break;
case rank_method::AVERAGE:
rank_average(dense_rank_sorted, sorted_order_view, rank_mutable_view, stream);
break;
default: CUDF_FAIL("Unexpected rank_method for rank()");
}
}
// pct inplace transform
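  // For DENSE, each rank is divided by the number of distinct values (the last entry of
  // dense_rank_sorted); for the other methods each rank is divided by the counted row total.
  // e.g. dense ranks {2, 1, 3, 1} become {2/3, 1/3, 3/3, 1/3}.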
if (percentage) {
auto rank_iter = rank_mutable_view.begin<double>();
size_type const count =
(null_handling == null_policy::EXCLUDE) ? input.size() - input.null_count() : input.size();
auto drs = dense_rank_sorted.data();
bool const is_dense = (method == rank_method::DENSE);
thrust::transform(rmm::exec_policy(stream),
rank_iter,
rank_iter + input.size(),
rank_iter,
[is_dense, drs, count] __device__(double r) -> double {
return is_dense ? r / drs[count - 1] : r / count;
});
}
return rank_column;
}
} // namespace detail
std::unique_ptr<column> rank(column_view const& input,
rank_method method,
order column_order,
null_policy null_handling,
null_order null_precedence,
bool percentage,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::rank(
input, method, column_order, null_handling, null_precedence, percentage, stream, mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/sort/stable_sort.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "sort_impl.cuh"
#include <cudf/column/column.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/sorting.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace detail {
std::unique_ptr<column> stable_sorted_order(table_view const& input,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return sorted_order<true>(input, column_order, null_precedence, stream, mr);
}
std::unique_ptr<table> stable_sort_by_key(table_view const& values,
table_view const& keys,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(values.num_rows() == keys.num_rows(),
"Mismatch in number of rows for values and keys");
auto sorted_order = detail::stable_sorted_order(
keys, column_order, null_precedence, stream, rmm::mr::get_current_device_resource());
return detail::gather(values,
sorted_order->view(),
out_of_bounds_policy::DONT_CHECK,
detail::negative_index_policy::NOT_ALLOWED,
stream,
mr);
}
} // namespace detail
std::unique_ptr<column> stable_sorted_order(table_view const& input,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::stable_sorted_order(input, column_order, null_precedence, stream, mr);
}
std::unique_ptr<table> stable_sort_by_key(table_view const& values,
table_view const& keys,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::stable_sort_by_key(values, keys, column_order, null_precedence, stream, mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/sort/is_sorted.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/table/experimental/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/count.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
namespace cudf {
namespace detail {
bool is_sorted(cudf::table_view const& in,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream)
{
auto const comparator =
experimental::row::lexicographic::self_comparator{in, column_order, null_precedence, stream};
if (cudf::detail::has_nested_columns(in)) {
auto const device_comparator = comparator.less<true>(has_nested_nulls(in));
    // Using a temporary buffer for intermediate transform results from the lambda containing
    // the comparator significantly speeds up compile time compared to using the comparator
    // directly in thrust::is_sorted.
auto d_results = rmm::device_uvector<bool>(in.num_rows(), stream);
thrust::transform(rmm::exec_policy(stream),
thrust::counting_iterator<size_type>(0),
thrust::counting_iterator<size_type>(in.num_rows()),
d_results.begin(),
[device_comparator] __device__(auto idx) -> bool {
return (idx == 0) || device_comparator(idx - 1, idx);
});
return thrust::count(rmm::exec_policy(stream), d_results.begin(), d_results.end(), false) == 0;
} else {
auto const device_comparator = comparator.less<false>(has_nested_nulls(in));
return thrust::is_sorted(rmm::exec_policy(stream),
thrust::counting_iterator<size_type>(0),
thrust::counting_iterator<size_type>(in.num_rows()),
device_comparator);
}
}
} // namespace detail
bool is_sorted(cudf::table_view const& in,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream)
{
CUDF_FUNC_RANGE();
if (in.num_columns() == 0 || in.num_rows() == 0) { return true; }
if (not column_order.empty()) {
CUDF_EXPECTS(static_cast<unsigned int>(in.num_columns()) == column_order.size(),
"Number of columns in the table doesn't match the vector column_order's size .\n");
}
if (not null_precedence.empty()) {
CUDF_EXPECTS(
static_cast<unsigned int>(in.num_columns()) == null_precedence.size(),
"Number of columns in the table doesn't match the vector null_precedence's size .\n");
}
return detail::is_sorted(in, column_order, null_precedence, stream);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/unary/null_ops.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/unary.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <thrust/iterator/counting_iterator.h>
namespace cudf {
namespace detail {
std::unique_ptr<column> is_null(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto input_device_view = column_device_view::create(input, stream);
auto device_view = *input_device_view;
auto predicate = [device_view] __device__(auto index) { return (device_view.is_null(index)); };
return detail::true_if(thrust::make_counting_iterator(0),
thrust::make_counting_iterator(input.size()),
input.size(),
predicate,
stream,
mr);
}
std::unique_ptr<column> is_valid(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto input_device_view = column_device_view::create(input, stream);
auto device_view = *input_device_view;
auto predicate = [device_view] __device__(auto index) { return device_view.is_valid(index); };
return detail::true_if(thrust::make_counting_iterator(0),
thrust::make_counting_iterator(input.size()),
input.size(),
predicate,
stream,
mr);
}
} // namespace detail
std::unique_ptr<column> is_null(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::is_null(input, stream, mr);
}
std::unique_ptr<column> is_valid(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::is_valid(input, stream, mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/unary/unary_ops.cuh
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/copying.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/unary.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/transform.h>
namespace cudf {
namespace unary {
template <typename T, typename Tout, typename F>
struct launcher {
static std::unique_ptr<cudf::column> launch(cudf::column_view const& input,
cudf::unary_operator op,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
std::unique_ptr<cudf::column> output = [&] {
if (op == cudf::unary_operator::NOT) {
auto type = cudf::data_type{cudf::type_id::BOOL8};
auto size = input.size();
return std::make_unique<column>(type,
size,
rmm::device_buffer{size * cudf::size_of(type), 0, mr},
cudf::detail::copy_bitmask(input, stream, mr),
input.null_count());
} else {
return cudf::detail::allocate_like(
input, input.size(), mask_allocation_policy::NEVER, stream, mr);
}
}();
if (input.is_empty()) return output;
auto output_view = output->mutable_view();
CUDF_EXPECTS(input.size() > 0, "Launcher requires input size to be non-zero.");
CUDF_EXPECTS(input.size() == output_view.size(),
"Launcher requires input and output size to be equal.");
if (input.nullable())
output->set_null_mask(
rmm::device_buffer{input.null_mask(), bitmask_allocation_size_bytes(input.size())},
input.null_count());
thrust::transform(
rmm::exec_policy(stream), input.begin<T>(), input.end<T>(), output_view.begin<Tout>(), F{});
CUDF_CHECK_CUDA(stream.value());
return output;
}
};
} // namespace unary
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/unary/nan_ops.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/unary.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace detail {
struct nan_dispatcher {
template <typename T, typename Predicate>
std::enable_if_t<std::is_floating_point_v<T>, std::unique_ptr<column>> operator()(
cudf::column_view const& input,
Predicate predicate,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto input_device_view = column_device_view::create(input, stream);
if (input.has_nulls()) {
auto input_pair_iterator = make_pair_iterator<T, true>(*input_device_view);
return true_if(input_pair_iterator,
input_pair_iterator + input.size(),
input.size(),
predicate,
stream,
mr);
} else {
auto input_pair_iterator = make_pair_iterator<T, false>(*input_device_view);
return true_if(input_pair_iterator,
input_pair_iterator + input.size(),
input.size(),
predicate,
stream,
mr);
}
}
template <typename T, typename Predicate>
std::enable_if_t<!std::is_floating_point_v<T>, std::unique_ptr<column>> operator()(
cudf::column_view const& input,
Predicate predicate,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FAIL("NAN is not supported in a Non-floating point type column");
}
};
std::unique_ptr<column> is_nan(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto predicate = [] __device__(auto element_validity_pair) {
return element_validity_pair.second and std::isnan(element_validity_pair.first);
};
return cudf::type_dispatcher(input.type(), nan_dispatcher{}, input, predicate, stream, mr);
}
std::unique_ptr<column> is_not_nan(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto predicate = [] __device__(auto element_validity_pair) {
return !element_validity_pair.second or !std::isnan(element_validity_pair.first);
};
return cudf::type_dispatcher(input.type(), nan_dispatcher{}, input, predicate, stream, mr);
}
} // namespace detail
std::unique_ptr<column> is_nan(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::is_nan(input, stream, mr);
}
std::unique_ptr<column> is_not_nan(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::is_not_nan(input, stream, mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/unary/math_ops.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/copying.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/unary.hpp>
#include <cudf/dictionary/detail/encode.hpp>
#include <cudf/dictionary/detail/iterator.cuh>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/transform.h>
#include <cmath>
#include <type_traits>
namespace cudf {
namespace detail {
namespace {
// trig functions
struct DeviceSin {
template <typename T>
__device__ T operator()(T data)
{
return std::sin(data);
}
};
struct DeviceCos {
template <typename T>
__device__ T operator()(T data)
{
return std::cos(data);
}
};
struct DeviceTan {
template <typename T>
__device__ T operator()(T data)
{
return std::tan(data);
}
};
struct DeviceArcSin {
template <typename T>
__device__ T operator()(T data)
{
return std::asin(data);
}
};
struct DeviceArcCos {
template <typename T>
__device__ T operator()(T data)
{
return std::acos(data);
}
};
struct DeviceArcTan {
template <typename T>
__device__ T operator()(T data)
{
return std::atan(data);
}
};
struct DeviceSinH {
template <typename T>
__device__ T operator()(T data)
{
return std::sinh(data);
}
};
struct DeviceCosH {
template <typename T>
__device__ T operator()(T data)
{
return std::cosh(data);
}
};
struct DeviceTanH {
template <typename T>
__device__ T operator()(T data)
{
return std::tanh(data);
}
};
struct DeviceArcSinH {
template <typename T>
__device__ T operator()(T data)
{
return std::asinh(data);
}
};
struct DeviceArcCosH {
template <typename T>
__device__ T operator()(T data)
{
return std::acosh(data);
}
};
struct DeviceArcTanH {
template <typename T>
__device__ T operator()(T data)
{
return std::atanh(data);
}
};
// exponential functions
struct DeviceExp {
template <typename T>
__device__ T operator()(T data)
{
return std::exp(data);
}
};
struct DeviceLog {
template <typename T>
__device__ T operator()(T data)
{
return std::log(data);
}
};
struct DeviceSqrt {
template <typename T>
__device__ T operator()(T data)
{
return std::sqrt(data);
}
};
struct DeviceCbrt {
template <typename T>
__device__ T operator()(T data)
{
return std::cbrt(data);
}
};
// rounding functions
struct DeviceCeil {
template <typename T>
__device__ T operator()(T data)
{
return std::ceil(data);
}
};
struct DeviceFloor {
template <typename T>
__device__ T operator()(T data)
{
return std::floor(data);
}
};
struct DeviceAbs {
template <typename T>
std::enable_if_t<std::is_signed_v<T>, T> __device__ operator()(T data)
{
return std::abs(data);
}
template <typename T>
std::enable_if_t<!std::is_signed_v<T>, T> __device__ operator()(T data)
{
return data;
}
};
struct DeviceRInt {
template <typename T>
std::enable_if_t<std::is_floating_point_v<T>, T> __device__ operator()(T data)
{
return std::rint(data);
}
// Dummy to handle other types, will never be executed
template <typename T>
std::enable_if_t<!std::is_floating_point_v<T>, T> __device__ operator()(T data)
{
return data;
}
};
// bitwise op
struct DeviceInvert {
template <typename T>
__device__ T operator()(T data)
{
return ~data;
}
};
// logical op
struct DeviceNot {
template <typename T>
__device__ bool operator()(T data)
{
return !data;
}
};
// fixed_point ops
/*
* Ceiling is calculated using integer division. When we divide by `n`, we get the integer part of
* the `fixed_point` number. For a negative number, this is all that is needed since the ceiling
* operation is defined as the least integer greater than the value. For a positive number, we may
* need to round up if the `fixed_point` number has a fractional part. This is handled by comparing
* the truncated value to the original value and if they are not equal, the result needs to be
* incremented by `n`.
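 *
 * For example, a decimal value 1.23 stored with scale -2 has data = 123 and n = 100:
 * a = (123 / 100) * 100 = 100; since data > 0 and a != data, the result is 100 + 100 = 200 (2.00).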
*/
template <typename T>
struct fixed_point_ceil {
T n; // 10^-scale (value required to determine integer part of fixed_point number)
__device__ T operator()(T data)
{
T const a = (data / n) * n; // result of integer division
    return a + (data > 0 && a != data ? n : 0);  // round up by n if positive and not a whole number
}
};
/*
* Floor is calculated using integer division. When we divide by `n`, we get the integer part of
* the `fixed_point` number. For a positive number, this is all that is needed since the floor
* operation is defined as the greatest integer less than the value. For a negative number, we may
* need to round down if the `fixed_point` number has a fractional part. This is handled by
* comparing the truncated value to the original value and if they are not equal, the result needs
* to be decremented by `n`.
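 *
 * For example, a decimal value -1.23 stored with scale -2 has data = -123 and n = 100:
 * a = (-123 / 100) * 100 = -100; since data < 0 and a != data, the result is -100 - 100 = -200 (-2.00).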
*/
template <typename T>
struct fixed_point_floor {
T n; // 10^-scale (value required to determine integer part of fixed_point number)
__device__ T operator()(T data)
{
T const a = (data / n) * n; // result of integer division
    return a - (data < 0 && a != data ? n : 0);  // round down by n if negative and not a whole number
}
};
template <typename T>
struct fixed_point_abs {
T n;
__device__ T operator()(T data) { return numeric::detail::abs(data); }
};
template <typename T, template <typename> typename FixedPointFunctor>
std::unique_ptr<column> unary_op_with(column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using Type = device_storage_type_t<T>;
using FixedPointUnaryOpFunctor = FixedPointFunctor<Type>;
// When scale is >= 0 and unary_operator is CEIL or FLOOR, the unary_operation is a no-op
if (input.type().scale() >= 0 &&
(std::is_same_v<FixedPointUnaryOpFunctor, fixed_point_ceil<Type>> ||
std::is_same_v<FixedPointUnaryOpFunctor, fixed_point_floor<Type>>))
return std::make_unique<cudf::column>(input, stream, mr);
auto result = cudf::make_fixed_width_column(input.type(),
input.size(),
detail::copy_bitmask(input, stream, mr),
input.null_count(),
stream,
mr);
auto out_view = result->mutable_view();
Type n = 10;
for (int i = 1; i < -input.type().scale(); ++i) {
n *= 10;
}
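  // e.g. a column with scale -3 gives n = 1000 (10^-scale)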
thrust::transform(rmm::exec_policy(stream),
input.begin<Type>(),
input.end<Type>(),
out_view.begin<Type>(),
FixedPointUnaryOpFunctor{n});
result->set_null_count(input.null_count());
return result;
}
template <typename OutputType, typename UFN, typename InputIterator>
std::unique_ptr<cudf::column> transform_fn(InputIterator begin,
InputIterator end,
rmm::device_buffer&& null_mask,
size_type null_count,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const size = cudf::distance(begin, end);
std::unique_ptr<cudf::column> output =
make_fixed_width_column(data_type{type_to_id<OutputType>()},
size,
std::forward<rmm::device_buffer>(null_mask),
null_count,
stream,
mr);
if (size == 0) return output;
auto output_view = output->mutable_view();
thrust::transform(rmm::exec_policy(stream), begin, end, output_view.begin<OutputType>(), UFN{});
output->set_null_count(null_count);
return output;
}
template <typename T, typename UFN>
std::unique_ptr<cudf::column> transform_fn(cudf::dictionary_column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto dictionary_view = cudf::column_device_view::create(input.parent(), stream);
auto dictionary_itr = dictionary::detail::make_dictionary_iterator<T>(*dictionary_view);
auto default_mr = rmm::mr::get_current_device_resource();
// call unary-op using temporary output buffer
auto output = transform_fn<T, UFN>(dictionary_itr,
dictionary_itr + input.size(),
detail::copy_bitmask(input.parent(), stream, default_mr),
input.null_count(),
stream,
default_mr);
return cudf::dictionary::detail::encode(
output->view(), dictionary::detail::get_indices_type_for_size(output->size()), stream, mr);
}
template <typename UFN>
struct MathOpDispatcher {
template <typename T, std::enable_if_t<std::is_arithmetic_v<T>>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return transform_fn<T, UFN>(input.begin<T>(),
input.end<T>(),
cudf::detail::copy_bitmask(input, stream, mr),
input.null_count(),
stream,
mr);
}
struct dictionary_dispatch {
template <typename T, std::enable_if_t<std::is_arithmetic_v<T>>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::dictionary_column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return transform_fn<T, UFN>(input, stream, mr);
}
template <typename T, typename... Args>
std::enable_if_t<!std::is_arithmetic_v<T>, std::unique_ptr<cudf::column>> operator()(Args&&...)
{
CUDF_FAIL("dictionary keys must be numeric for this operation");
}
};
template <
typename T,
std::enable_if_t<!std::is_arithmetic_v<T> and std::is_same_v<T, dictionary32>>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) return empty_like(input);
auto dictionary_col = dictionary_column_view(input);
return type_dispatcher(
dictionary_col.keys().type(), dictionary_dispatch{}, dictionary_col, stream, mr);
}
template <typename T, typename... Args>
std::enable_if_t<!std::is_arithmetic_v<T> and !std::is_same_v<T, dictionary32>,
std::unique_ptr<cudf::column>>
operator()(Args&&...)
{
CUDF_FAIL("Unsupported data type for operation");
}
};
template <typename UFN>
struct BitwiseOpDispatcher {
template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return transform_fn<T, UFN>(input.begin<T>(),
input.end<T>(),
cudf::detail::copy_bitmask(input, stream, mr),
input.null_count(),
stream,
mr);
}
struct dictionary_dispatch {
template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::dictionary_column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return transform_fn<T, UFN>(input, stream, mr);
}
template <typename T, typename... Args>
std::enable_if_t<!std::is_integral_v<T>, std::unique_ptr<cudf::column>> operator()(Args&&...)
{
CUDF_FAIL("dictionary keys type not supported for this operation");
}
};
template <typename T,
std::enable_if_t<!std::is_integral_v<T> and std::is_same_v<T, dictionary32>>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) return empty_like(input);
auto dictionary_col = dictionary_column_view(input);
return type_dispatcher(
dictionary_col.keys().type(), dictionary_dispatch{}, dictionary_col, stream, mr);
}
template <typename T, typename... Args>
std::enable_if_t<!std::is_integral_v<T> and !std::is_same_v<T, dictionary32>,
std::unique_ptr<cudf::column>>
operator()(Args&&...)
{
CUDF_FAIL("Unsupported datatype for operation");
}
};
template <typename UFN>
struct LogicalOpDispatcher {
private:
template <typename T>
static constexpr bool is_supported()
{
return std::is_arithmetic_v<T> || std::is_same_v<T, bool>;
}
public:
template <typename T, std::enable_if_t<is_supported<T>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return transform_fn<bool, UFN>(input.begin<T>(),
input.end<T>(),
cudf::detail::copy_bitmask(input, stream, mr),
input.null_count(),
stream,
mr);
}
struct dictionary_dispatch {
template <typename T, std::enable_if_t<is_supported<T>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::dictionary_column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto dictionary_view = cudf::column_device_view::create(input.parent(), stream);
auto dictionary_itr = dictionary::detail::make_dictionary_iterator<T>(*dictionary_view);
return transform_fn<bool, UFN>(dictionary_itr,
dictionary_itr + input.size(),
cudf::detail::copy_bitmask(input.parent(), stream, mr),
input.null_count(),
stream,
mr);
}
template <typename T, typename... Args>
std::enable_if_t<!is_supported<T>(), std::unique_ptr<cudf::column>> operator()(Args&&...)
{
CUDF_FAIL("dictionary keys type not supported for this operation");
}
};
template <typename T,
std::enable_if_t<!is_supported<T>() and std::is_same_v<T, dictionary32>>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) return make_empty_column(cudf::data_type{cudf::type_id::BOOL8});
auto dictionary_col = dictionary_column_view(input);
return type_dispatcher(
dictionary_col.keys().type(), dictionary_dispatch{}, dictionary_col, stream, mr);
}
template <typename T, typename... Args>
std::enable_if_t<!is_supported<T>() and !std::is_same_v<T, dictionary32>,
std::unique_ptr<cudf::column>>
operator()(Args&&...)
{
CUDF_FAIL("Unsupported datatype for operation");
}
};
struct FixedPointOpDispatcher {
template <typename T, typename... Args>
std::enable_if_t<not cudf::is_fixed_point<T>(), std::unique_ptr<column>> operator()(Args&&...)
{
CUDF_FAIL("FixedPointOpDispatcher only for fixed_point");
}
template <typename T>
std::enable_if_t<cudf::is_fixed_point<T>(), std::unique_ptr<column>> operator()(
column_view const& input,
cudf::unary_operator op,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// clang-format off
switch (op) {
case cudf::unary_operator::CEIL: return unary_op_with<T, fixed_point_ceil>(input, stream, mr);
case cudf::unary_operator::FLOOR: return unary_op_with<T, fixed_point_floor>(input, stream, mr);
case cudf::unary_operator::ABS: return unary_op_with<T, fixed_point_abs>(input, stream, mr);
default: CUDF_FAIL("Unsupported fixed_point unary operation");
}
// clang-format on
}
};
} // namespace
std::unique_ptr<cudf::column> unary_operation(cudf::column_view const& input,
cudf::unary_operator op,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (cudf::is_fixed_point(input.type()))
return type_dispatcher(input.type(), detail::FixedPointOpDispatcher{}, input, op, stream, mr);
switch (op) {
case cudf::unary_operator::SIN:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceSin>{}, input, stream, mr);
case cudf::unary_operator::COS:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceCos>{}, input, stream, mr);
case cudf::unary_operator::TAN:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceTan>{}, input, stream, mr);
case cudf::unary_operator::ARCSIN:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceArcSin>{}, input, stream, mr);
case cudf::unary_operator::ARCCOS:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceArcCos>{}, input, stream, mr);
case cudf::unary_operator::ARCTAN:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceArcTan>{}, input, stream, mr);
case cudf::unary_operator::SINH:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceSinH>{}, input, stream, mr);
case cudf::unary_operator::COSH:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceCosH>{}, input, stream, mr);
case cudf::unary_operator::TANH:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceTanH>{}, input, stream, mr);
case cudf::unary_operator::ARCSINH:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceArcSinH>{}, input, stream, mr);
case cudf::unary_operator::ARCCOSH:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceArcCosH>{}, input, stream, mr);
case cudf::unary_operator::ARCTANH:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceArcTanH>{}, input, stream, mr);
case cudf::unary_operator::EXP:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceExp>{}, input, stream, mr);
case cudf::unary_operator::LOG:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceLog>{}, input, stream, mr);
case cudf::unary_operator::SQRT:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceSqrt>{}, input, stream, mr);
case cudf::unary_operator::CBRT:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceCbrt>{}, input, stream, mr);
case cudf::unary_operator::CEIL:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceCeil>{}, input, stream, mr);
case cudf::unary_operator::FLOOR:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceFloor>{}, input, stream, mr);
case cudf::unary_operator::ABS:
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceAbs>{}, input, stream, mr);
case cudf::unary_operator::RINT:
CUDF_EXPECTS(
(input.type().id() == type_id::FLOAT32) or (input.type().id() == type_id::FLOAT64),
"rint expects floating point values");
return cudf::type_dispatcher(
input.type(), detail::MathOpDispatcher<detail::DeviceRInt>{}, input, stream, mr);
case cudf::unary_operator::BIT_INVERT:
return cudf::type_dispatcher(
input.type(), detail::BitwiseOpDispatcher<detail::DeviceInvert>{}, input, stream, mr);
case cudf::unary_operator::NOT:
return cudf::type_dispatcher(
input.type(), detail::LogicalOpDispatcher<detail::DeviceNot>{}, input, stream, mr);
default: CUDF_FAIL("Undefined unary operation");
}
}
} // namespace detail
std::unique_ptr<cudf::column> unary_operation(cudf::column_view const& input,
cudf::unary_operator op,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::unary_operation(input, op, stream, mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/unary/cast_ops.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/detail/binaryop.hpp>
#include <cudf/detail/fill.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/unary.hpp>
#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/unary.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/transform.h>
namespace cudf {
namespace detail {
namespace { // anonymous namespace
template <typename _TargetT>
struct unary_cast {
template <
typename SourceT,
typename TargetT = _TargetT,
std::enable_if_t<(cudf::is_numeric<SourceT>() && cudf::is_numeric<TargetT>())>* = nullptr>
__device__ inline TargetT operator()(SourceT const element)
{
return static_cast<TargetT>(element);
}
template <
typename SourceT,
typename TargetT = _TargetT,
std::enable_if_t<(cudf::is_timestamp<SourceT>() && cudf::is_timestamp<TargetT>())>* = nullptr>
__device__ inline TargetT operator()(SourceT const element)
{
// Convert source tick counts into target tick counts without blindly truncating them
// by dividing the respective duration time periods (which may not work for time before
// UNIX epoch)
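    // e.g. a timestamp of -1500 ms (before the epoch) floors to -2 s, whereas plain integer
    // division of the tick counts would truncate it to -1 s.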
return TargetT{cuda::std::chrono::floor<TargetT::duration>(element.time_since_epoch())};
}
template <
typename SourceT,
typename TargetT = _TargetT,
std::enable_if_t<(cudf::is_duration<SourceT>() && cudf::is_duration<TargetT>())>* = nullptr>
__device__ inline TargetT operator()(SourceT const element)
{
return TargetT{cuda::std::chrono::floor<TargetT>(element)};
}
template <
typename SourceT,
typename TargetT = _TargetT,
std::enable_if_t<cudf::is_numeric<SourceT>() && cudf::is_duration<TargetT>()>* = nullptr>
__device__ inline TargetT operator()(SourceT const element)
{
return TargetT{static_cast<typename TargetT::rep>(element)};
}
template <
typename SourceT,
typename TargetT = _TargetT,
std::enable_if_t<(cudf::is_timestamp<SourceT>() && cudf::is_duration<TargetT>())>* = nullptr>
__device__ inline TargetT operator()(SourceT const element)
{
return TargetT{cuda::std::chrono::floor<TargetT>(element.time_since_epoch())};
}
template <
typename SourceT,
typename TargetT = _TargetT,
std::enable_if_t<cudf::is_duration<SourceT>() && cudf::is_numeric<TargetT>()>* = nullptr>
__device__ inline TargetT operator()(SourceT const element)
{
return static_cast<TargetT>(element.count());
}
template <
typename SourceT,
typename TargetT = _TargetT,
std::enable_if_t<(cudf::is_duration<SourceT>() && cudf::is_timestamp<TargetT>())>* = nullptr>
__device__ inline TargetT operator()(SourceT const element)
{
return TargetT{cuda::std::chrono::floor<TargetT::duration>(element)};
}
};
template <typename _SourceT, typename _TargetT>
struct fixed_point_unary_cast {
numeric::scale_type scale;
using FixedPointT = std::conditional_t<cudf::is_fixed_point<_SourceT>(), _SourceT, _TargetT>;
using DeviceT = device_storage_type_t<FixedPointT>;
template <
typename SourceT = _SourceT,
typename TargetT = _TargetT,
std::enable_if_t<(cudf::is_fixed_point<_SourceT>() && cudf::is_numeric<TargetT>())>* = nullptr>
__device__ inline TargetT operator()(DeviceT const element)
{
auto const fp = SourceT{numeric::scaled_integer<DeviceT>{element, scale}};
return static_cast<TargetT>(fp);
}
template <
typename SourceT = _SourceT,
typename TargetT = _TargetT,
std::enable_if_t<(cudf::is_numeric<_SourceT>() && cudf::is_fixed_point<TargetT>())>* = nullptr>
__device__ inline DeviceT operator()(SourceT const element)
{
return TargetT{element, scale}.value();
}
};
template <typename From, typename To>
constexpr inline auto is_supported_non_fixed_point_cast()
{
return cudf::is_fixed_width<To>() &&
// Disallow fixed_point here (requires different specialization)
!(cudf::is_fixed_point<From>() || cudf::is_fixed_point<To>()) &&
// Disallow conversions between timestamps and numeric
!(cudf::is_timestamp<From>() && is_numeric<To>()) &&
!(cudf::is_timestamp<To>() && is_numeric<From>());
}
template <typename From, typename To>
constexpr inline auto is_supported_fixed_point_cast()
{
return (cudf::is_fixed_point<From>() && cudf::is_numeric<To>()) ||
(cudf::is_numeric<From>() && cudf::is_fixed_point<To>()) ||
(cudf::is_fixed_point<From>() && cudf::is_fixed_point<To>());
}
template <typename From, typename To>
constexpr inline auto is_supported_cast()
{
return is_supported_non_fixed_point_cast<From, To>() || is_supported_fixed_point_cast<From, To>();
}
template <typename From, typename To>
struct device_cast {
__device__ To operator()(From element) { return static_cast<To>(element); }
};
/**
* @brief Takes a `fixed_point` column_view as @p input and returns a `fixed_point` column with new
* @p scale
*
* @tparam T Type of the `fixed_point` column_view (`decimal32`, `decimal64` or `decimal128`)
* @param input Input `column_view`
* @param scale `scale` of the returned `column`
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
*
* @return std::unique_ptr<column> Returned column with new @p scale
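 *
 * For example (illustrative): rescaling from scale -1 to scale -3 adds a zero-valued scalar of
 * scale -3, widening the scale without changing values, while rescaling from scale -3 to scale -1
 * divides by 10^2 and drops the two extra fractional digits.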
*/
template <typename T, std::enable_if_t<is_fixed_point<T>()>* = nullptr>
std::unique_ptr<column> rescale(column_view input,
numeric::scale_type scale,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using namespace numeric;
using RepType = device_storage_type_t<T>;
auto const type = cudf::data_type{cudf::type_to_id<T>(), scale};
if (input.type().scale() >= scale) {
auto const scalar = make_fixed_point_scalar<T>(0, scale_type{scale}, stream);
return detail::binary_operation(input, *scalar, binary_operator::ADD, type, stream, mr);
} else {
auto const diff = input.type().scale() - scale;
// The value of fixed point scalar will overflow if the scale difference is larger than the
// max digits of underlying integral type. Under this condition, the output values can be
// nothing other than zero value. Therefore, we simply return a zero column.
if (-diff > cuda::std::numeric_limits<RepType>::digits10) {
auto const scalar = make_fixed_point_scalar<T>(0, scale_type{scale}, stream);
auto output_column = make_column_from_scalar(*scalar, input.size(), stream, mr);
if (input.nullable()) {
auto const null_mask = detail::copy_bitmask(input, stream, mr);
output_column->set_null_mask(std::move(null_mask), input.null_count());
}
return output_column;
}
RepType scalar_value = 10;
for (int i = 1; i < -diff; ++i) {
scalar_value *= 10;
}
auto const scalar = make_fixed_point_scalar<T>(scalar_value, scale_type{diff}, stream);
return detail::binary_operation(input, *scalar, binary_operator::DIV, type, stream, mr);
}
};
template <typename _SourceT>
struct dispatch_unary_cast_to {
column_view input;
dispatch_unary_cast_to(column_view inp) : input(inp) {}
template <typename TargetT,
typename SourceT = _SourceT,
std::enable_if_t<is_supported_non_fixed_point_cast<SourceT, TargetT>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const size = input.size();
auto output =
std::make_unique<column>(type,
size,
rmm::device_buffer{size * cudf::size_of(type), stream, mr},
detail::copy_bitmask(input, stream, mr),
input.null_count());
mutable_column_view output_mutable = *output;
thrust::transform(rmm::exec_policy(stream),
input.begin<SourceT>(),
input.end<SourceT>(),
output_mutable.begin<TargetT>(),
unary_cast<TargetT>{});
return output;
}
template <
typename TargetT,
typename SourceT = _SourceT,
std::enable_if_t<cudf::is_fixed_point<SourceT>() && cudf::is_numeric<TargetT>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const size = input.size();
auto output =
std::make_unique<column>(type,
size,
rmm::device_buffer{size * cudf::size_of(type), stream, mr},
detail::copy_bitmask(input, stream, mr),
input.null_count());
mutable_column_view output_mutable = *output;
using DeviceT = device_storage_type_t<SourceT>;
auto const scale = numeric::scale_type{input.type().scale()};
thrust::transform(rmm::exec_policy(stream),
input.begin<DeviceT>(),
input.end<DeviceT>(),
output_mutable.begin<TargetT>(),
fixed_point_unary_cast<SourceT, TargetT>{scale});
return output;
}
template <
typename TargetT,
typename SourceT = _SourceT,
std::enable_if_t<cudf::is_numeric<SourceT>() && cudf::is_fixed_point<TargetT>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const size = input.size();
auto output =
std::make_unique<column>(type,
size,
rmm::device_buffer{size * cudf::size_of(type), stream, mr},
detail::copy_bitmask(input, stream, mr),
input.null_count());
mutable_column_view output_mutable = *output;
using DeviceT = device_storage_type_t<TargetT>;
auto const scale = numeric::scale_type{type.scale()};
thrust::transform(rmm::exec_policy(stream),
input.begin<SourceT>(),
input.end<SourceT>(),
output_mutable.begin<DeviceT>(),
fixed_point_unary_cast<SourceT, TargetT>{scale});
return output;
}
template <typename TargetT,
typename SourceT = _SourceT,
std::enable_if_t<cudf::is_fixed_point<SourceT>() && cudf::is_fixed_point<TargetT>() &&
std::is_same_v<SourceT, TargetT>>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.type() == type) {
return std::make_unique<column>(input, stream, mr); // TODO add test for this
}
return detail::rescale<TargetT>(input, numeric::scale_type{type.scale()}, stream, mr);
}
template <typename TargetT,
typename SourceT = _SourceT,
std::enable_if_t<cudf::is_fixed_point<SourceT>() && cudf::is_fixed_point<TargetT>() &&
not std::is_same_v<SourceT, TargetT>>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using namespace numeric;
using SourceDeviceT = device_storage_type_t<SourceT>;
using TargetDeviceT = device_storage_type_t<TargetT>;
auto casted = [&]() {
auto const size = input.size();
auto output = std::make_unique<column>(cudf::data_type{type.id(), input.type().scale()},
size,
rmm::device_buffer{size * cudf::size_of(type), stream},
detail::copy_bitmask(input, stream, mr),
input.null_count());
mutable_column_view output_mutable = *output;
thrust::transform(rmm::exec_policy(stream),
input.begin<SourceDeviceT>(),
input.end<SourceDeviceT>(),
output_mutable.begin<TargetDeviceT>(),
device_cast<SourceDeviceT, TargetDeviceT>{});
return output;
};
if (input.type().scale() == type.scale()) return casted();
if constexpr (sizeof(SourceDeviceT) < sizeof(TargetDeviceT)) {
// device_cast BEFORE rescale when SourceDeviceT is < TargetDeviceT
auto temporary = casted();
return detail::rescale<TargetT>(*temporary, scale_type{type.scale()}, stream, mr);
} else {
// device_cast AFTER rescale when SourceDeviceT is > TargetDeviceT to avoid overflow
auto temporary = detail::rescale<SourceT>(input, scale_type{type.scale()}, stream, mr);
return detail::cast(*temporary, type, stream, mr);
}
}
template <typename TargetT,
typename SourceT = _SourceT,
std::enable_if_t<not is_supported_cast<SourceT, TargetT>()>* = nullptr>
std::unique_ptr<column> operator()(data_type,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
if (!cudf::is_fixed_width<TargetT>())
CUDF_FAIL("Column type must be numeric or chrono or decimal32/64/128");
else if (cudf::is_fixed_point<SourceT>())
CUDF_FAIL("Currently only decimal32/64/128 to floating point/integral is supported");
else if (cudf::is_timestamp<SourceT>() && is_numeric<TargetT>())
CUDF_FAIL("Timestamps can be created only from duration");
else
CUDF_FAIL("Timestamps cannot be converted to numeric without converting it to a duration");
}
};
struct dispatch_unary_cast_from {
column_view input;
dispatch_unary_cast_from(column_view inp) : input(inp) {}
template <typename T, std::enable_if_t<cudf::is_fixed_width<T>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return type_dispatcher(type, dispatch_unary_cast_to<T>{input}, type, stream, mr);
}
template <typename T, typename... Args>
std::enable_if_t<!cudf::is_fixed_width<T>(), std::unique_ptr<column>> operator()(Args&&...)
{
CUDF_FAIL("Column type must be numeric or chrono or decimal32/64/128");
}
};
} // anonymous namespace
std::unique_ptr<column> cast(column_view const& input,
data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(is_fixed_width(type), "Unary cast type must be fixed-width.");
return type_dispatcher(input.type(), detail::dispatch_unary_cast_from{input}, type, stream, mr);
}
} // namespace detail
std::unique_ptr<column> cast(column_view const& input,
data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::cast(input, type, stream, mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/quantiles/quantiles_util.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cmath>
#include <cudf/detail/utilities/assert.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
namespace cudf {
namespace detail {
template <typename Result, typename T>
CUDF_HOST_DEVICE inline Result get_array_value(T const* devarr, size_type location)
{
T result;
#if defined(__CUDA_ARCH__)
result = devarr[location];
#else
CUDF_CUDA_TRY(cudaMemcpy(&result, devarr + location, sizeof(T), cudaMemcpyDefault));
#endif
return static_cast<Result>(result);
}
namespace interpolate {
template <typename Result, typename T>
CUDF_HOST_DEVICE inline Result linear(T lhs, T rhs, double frac)
{
// TODO: safe operation to avoid overflow/underflow
// double can fully represent int8-32 value range.
// Since the fraction part of double is 52 bits,
// double cannot fully represent int64.
// Precision loss may occur when converting int64 to double
// detail: https://github.com/rapidsai/cudf/issues/1417
auto dlhs = static_cast<double>(lhs);
auto drhs = static_cast<double>(rhs);
double one_minus_frac = 1.0 - frac;
return static_cast<Result>(one_minus_frac * dlhs + frac * drhs);
}
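// Illustrative arithmetic (hypothetical values, not from the source): linear<double>(10, 20, 0.25)
// evaluates 0.75 * 10 + 0.25 * 20 = 12.5, i.e. the value one quarter of the way from lhs to rhs.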
template <typename Result, typename T>
CUDF_HOST_DEVICE inline Result midpoint(T lhs, T rhs)
{
// TODO: try std::midpoint (C++20) if available
auto dlhs = static_cast<double>(lhs);
auto drhs = static_cast<double>(rhs);
return static_cast<Result>(dlhs / 2 + drhs / 2);
}
template <typename Result>
CUDF_HOST_DEVICE inline Result midpoint(int64_t lhs, int64_t rhs)
{
// caring to avoid integer overflow and underflow between int64_t and Result( double )
int64_t half = lhs / 2 + rhs / 2;
int64_t rest = lhs % 2 + rhs % 2;
return static_cast<Result>(static_cast<Result>(half) + static_cast<Result>(rest) * 0.5);
}
template <>
CUDF_HOST_DEVICE inline int64_t midpoint(int64_t lhs, int64_t rhs)
{
// caring to avoid integer overflow
int64_t half = lhs / 2 + rhs / 2;
int64_t rest = lhs % 2 + rhs % 2;
int64_t result = half;
// rounding toward zero
result += (half >= 0 && rest != 0) ? rest / 2 : 0;
result += (half < 0 && rest != 0) ? 1 : 0;
return result;
}
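// Illustrative sketch of why the half/rest split matters (hypothetical values): for
// midpoint<int64_t>(INT64_MAX, INT64_MAX - 1) a naive (lhs + rhs) / 2 would overflow, whereas
// half = 4611686018427387903 + 4611686018427387903 and rest = 1 + 0 give 9223372036854775806,
// the exact midpoint truncated toward zero, with no intermediate overflow.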
} // namespace interpolate
struct quantile_index {
size_type lower;
size_type higher;
size_type nearest;
double fraction;
CUDF_HOST_DEVICE inline quantile_index(size_type count, double quantile)
{
quantile = std::min(std::max(quantile, 0.0), 1.0);
double val = quantile * (count - 1);
lower = std::floor(val);
higher = static_cast<size_type>(std::ceil(val));
nearest = static_cast<size_type>(std::nearbyint(val));
fraction = val - lower;
}
};
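// Worked example (hypothetical values): quantile_index(5, 0.3) computes val = 0.3 * 4 = 1.2,
// giving lower = 1, higher = 2, nearest = 1 and fraction = 0.2.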
#ifdef __CUDACC__
#pragma nv_exec_check_disable
#endif
/** @brief Computes a quantile value.
*
* Computes a value for a quantile by interpolating between two values on either
* side of the desired quantile.
*
* `get_value` must have signature: `T <T>(size_type)` where T can be
* `static_cast` to `Result`.
*
* @param[in] get_value Gets the value at a given index in range [0, size).
* @param[in] size Number of values indexed by `get_value`.
* @param[in] q Desired quantile in range [0, 1].
* @param[in] interp Strategy used to interpolate between the two values
* on either side of the desired quantile.
*
* @returns Value of the desired quantile.
*/
template <typename Result, typename ValueAccessor>
CUDF_HOST_DEVICE inline Result select_quantile(ValueAccessor get_value,
size_type size,
double q,
interpolation interp)
{
if (size < 2) { return get_value(0); }
quantile_index idx(size, q);
switch (interp) {
case interpolation::LINEAR:
return interpolate::linear<Result>(get_value(idx.lower), get_value(idx.higher), idx.fraction);
case interpolation::MIDPOINT:
return interpolate::midpoint<Result>(get_value(idx.lower), get_value(idx.higher));
case interpolation::LOWER: return static_cast<Result>(get_value(idx.lower));
case interpolation::HIGHER: return static_cast<Result>(get_value(idx.higher));
case interpolation::NEAREST: return static_cast<Result>(get_value(idx.nearest));
default: {
#ifndef __CUDA_ARCH__
CUDF_FAIL("Invalid interpolation operation for quantiles.");
#else
CUDF_UNREACHABLE("Invalid interpolation operation for quantiles");
#endif
}
}
}
template <typename Result, typename Iterator>
CUDF_HOST_DEVICE inline Result select_quantile_data(Iterator begin,
size_type size,
double q,
interpolation interp)
{
if (size == 0) return static_cast<Result>(*begin);
quantile_index idx(size, q);
switch (interp) {
case interpolation::LOWER: return static_cast<Result>(*(begin + idx.lower));
case interpolation::HIGHER: return static_cast<Result>(*(begin + idx.higher));
case interpolation::NEAREST: return static_cast<Result>(*(begin + idx.nearest));
case interpolation::LINEAR:
return interpolate::linear<Result>(*(begin + idx.lower), *(begin + idx.higher), idx.fraction);
case interpolation::MIDPOINT:
return interpolate::midpoint<Result>(*(begin + idx.lower), *(begin + idx.higher));
default: {
#ifndef __CUDA_ARCH__
CUDF_FAIL("Invalid interpolation operation for quantiles.");
#else
CUDF_UNREACHABLE("Invalid interpolation operation for quantiles");
#endif
}
}
}
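// Illustrative use (hypothetical data): for a sorted range {1, 2, 4, 8},
// select_quantile_data<double>(begin, 4, 0.5, interpolation::LINEAR) resolves idx.lower = 1,
// idx.higher = 2, fraction = 0.5 and returns 0.5 * 2 + 0.5 * 4 = 3.0.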
template <typename Iterator>
CUDF_HOST_DEVICE inline bool select_quantile_validity(Iterator begin,
size_type size,
double q,
interpolation interp)
{
quantile_index idx(size, q);
switch (interp) {
case interpolation::HIGHER: return *(begin + idx.higher);
case interpolation::LOWER: return *(begin + idx.lower);
case interpolation::NEAREST: return *(begin + idx.nearest);
case interpolation::LINEAR:
case interpolation::MIDPOINT: return *(begin + idx.lower) and *(begin + idx.higher);
default: {
#ifndef __CUDA_ARCH__
CUDF_FAIL("Invalid interpolation operation for quantiles.");
#else
CUDF_UNREACHABLE("Invalid interpolation operation for quantiles");
#endif
}
}
}
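// Illustrative behaviour (hypothetical validity mask {1, 1, 0, 1}, size = 4, q = 0.5):
// interpolation::LOWER only inspects index 1 (valid, so true), while LINEAR and MIDPOINT
// require both neighbours and index 2 is null, so they return false.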
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/quantiles/quantile.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <quantiles/quantiles_util.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/detail/valid_if.cuh>
#include <cudf/dictionary/detail/iterator.cuh>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/transform.h>
#include <memory>
#include <vector>
namespace cudf {
namespace detail {
template <bool exact, typename SortMapIterator>
struct quantile_functor {
SortMapIterator ordered_indices;
size_type size;
std::vector<double> const& q;
interpolation interp;
bool retain_types;
rmm::cuda_stream_view stream;
rmm::mr::device_memory_resource* mr;
template <typename T>
std::enable_if_t<not std::is_arithmetic_v<T> and not cudf::is_fixed_point<T>(),
std::unique_ptr<column>>
operator()(column_view const& input)
{
CUDF_FAIL("quantile does not support non-numeric types");
}
template <typename T>
std::enable_if_t<std::is_arithmetic_v<T> or cudf::is_fixed_point<T>(), std::unique_ptr<column>>
operator()(column_view const& input)
{
using StorageType = cudf::device_storage_type_t<T>;
using ExactResult = std::conditional_t<exact and not cudf::is_fixed_point<T>(), double, T>;
using StorageResult = cudf::device_storage_type_t<ExactResult>;
auto const type =
is_fixed_point(input.type()) ? input.type() : data_type{type_to_id<StorageResult>()};
auto output = make_fixed_width_column(type, q.size(), mask_state::UNALLOCATED, stream, mr);
if (output->size() == 0) { return output; }
if (input.is_empty()) {
auto mask = cudf::detail::create_null_mask(output->size(), mask_state::ALL_NULL, stream, mr);
output->set_null_mask(std::move(mask), output->size());
return output;
}
auto d_input = column_device_view::create(input, stream);
auto d_output = mutable_column_device_view::create(output->mutable_view(), stream);
auto q_device =
cudf::detail::make_device_uvector_sync(q, stream, rmm::mr::get_current_device_resource());
if (!cudf::is_dictionary(input.type())) {
auto sorted_data =
thrust::make_permutation_iterator(input.data<StorageType>(), ordered_indices);
thrust::transform(rmm::exec_policy(stream),
q_device.begin(),
q_device.end(),
d_output->template begin<StorageResult>(),
[sorted_data, interp = interp, size = size] __device__(double q) {
return select_quantile_data<StorageResult>(sorted_data, size, q, interp);
});
} else {
auto sorted_data = thrust::make_permutation_iterator(
dictionary::detail::make_dictionary_iterator<T>(*d_input), ordered_indices);
thrust::transform(rmm::exec_policy(stream),
q_device.begin(),
q_device.end(),
d_output->template begin<StorageResult>(),
[sorted_data, interp = interp, size = size] __device__(double q) {
return select_quantile_data<StorageResult>(sorted_data, size, q, interp);
});
}
if (input.nullable()) {
auto sorted_validity = thrust::make_transform_iterator(
ordered_indices,
[input = *d_input] __device__(size_type idx) { return input.is_valid_nocheck(idx); });
auto [mask, null_count] = valid_if(
q_device.begin(),
q_device.end(),
[sorted_validity, interp = interp, size = size] __device__(double q) {
return select_quantile_validity(sorted_validity, size, q, interp);
},
stream,
mr);
output->set_null_mask(std::move(mask), null_count);
}
return output;
}
};
template <bool exact, typename SortMapIterator>
std::unique_ptr<column> quantile(column_view const& input,
SortMapIterator ordered_indices,
size_type size,
std::vector<double> const& q,
interpolation interp,
bool retain_types,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto functor = quantile_functor<exact, SortMapIterator>{
ordered_indices, size, q, interp, retain_types, stream, mr};
auto input_type = cudf::is_dictionary(input.type()) && !input.is_empty()
? dictionary_column_view(input).keys().type()
: input.type();
return type_dispatcher(input_type, functor, input);
}
std::unique_ptr<column> quantile(column_view const& input,
std::vector<double> const& q,
interpolation interp,
column_view const& indices,
bool exact,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (indices.is_empty()) {
auto begin = thrust::make_counting_iterator<size_type>(0);
if (exact) {
return quantile<true>(input, begin, input.size(), q, interp, exact, stream, mr);
} else {
return quantile<false>(input, begin, input.size(), q, interp, exact, stream, mr);
}
} else {
CUDF_EXPECTS(indices.type() == data_type{type_to_id<size_type>()},
"`indices` type must be `INT32`.");
if (exact) {
return quantile<true>(
input, indices.begin<size_type>(), indices.size(), q, interp, exact, stream, mr);
} else {
return quantile<false>(
input, indices.begin<size_type>(), indices.size(), q, interp, exact, stream, mr);
}
}
}
} // namespace detail
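// Illustrative call of the public API defined below (column and values are hypothetical):
//   auto q_col = cudf::quantile(sorted_col, {0.25, 0.5, 0.75}, cudf::interpolation::LINEAR,
//                               cudf::column_view{},  // empty indices: values read in existing (sorted) order
//                               true, rmm::mr::get_current_device_resource());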
std::unique_ptr<column> quantile(column_view const& input,
std::vector<double> const& q,
interpolation interp,
column_view const& ordered_indices,
bool exact,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::quantile(input, q, interp, ordered_indices, exact, cudf::get_default_stream(), mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src
|
rapidsai_public_repos/cudf/cpp/src/quantiles/quantiles.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <quantiles/quantiles_util.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <memory>
#include <vector>
namespace cudf {
namespace detail {
template <typename SortMapIterator>
std::unique_ptr<table> quantiles(table_view const& input,
SortMapIterator sortmap,
std::vector<double> const& q,
interpolation interp,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto quantile_idx_lookup = [sortmap, interp, size = input.num_rows()] __device__(double q) {
auto selector = [sortmap] __device__(auto idx) { return sortmap[idx]; };
return detail::select_quantile<size_type>(selector, size, q, interp);
};
auto const q_device =
cudf::detail::make_device_uvector_async(q, stream, rmm::mr::get_current_device_resource());
auto quantile_idx_iter = thrust::make_transform_iterator(q_device.begin(), quantile_idx_lookup);
return detail::gather(input,
quantile_idx_iter,
quantile_idx_iter + q.size(),
out_of_bounds_policy::DONT_CHECK,
stream,
mr);
}
std::unique_ptr<table> quantiles(table_view const& input,
std::vector<double> const& q,
interpolation interp,
cudf::sorted is_input_sorted,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (q.empty()) { return empty_like(input); }
CUDF_EXPECTS(interp == interpolation::HIGHER || interp == interpolation::LOWER ||
interp == interpolation::NEAREST,
"multi-column quantiles require a non-arithmetic interpolation strategy.");
CUDF_EXPECTS(input.num_rows() > 0, "multi-column quantiles require at least one input row.");
if (is_input_sorted == sorted::YES) {
return detail::quantiles(
input, thrust::make_counting_iterator<size_type>(0), q, interp, stream, mr);
} else {
auto sorted_idx = detail::sorted_order(
input, column_order, null_precedence, stream, rmm::mr::get_current_device_resource());
return detail::quantiles(input, sorted_idx->view().data<size_type>(), q, interp, stream, mr);
}
}
} // namespace detail
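// Illustrative call of the public API defined below (single-column table tbl and orderings are
// hypothetical):
//   auto rows = cudf::quantiles(tbl, {0.25, 0.5, 0.75}, cudf::interpolation::NEAREST,
//                               cudf::sorted::NO, {cudf::order::ASCENDING},
//                               {cudf::null_order::AFTER},
//                               rmm::mr::get_current_device_resource());
// With an unsorted input this sorts internally and gathers the rows nearest each quartile.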
std::unique_ptr<table> quantiles(table_view const& input,
std::vector<double> const& q,
interpolation interp,
cudf::sorted is_input_sorted,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::quantiles(input,
q,
interp,
is_input_sorted,
column_order,
null_precedence,
cudf::get_default_stream(),
mr);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/quantiles
|
rapidsai_public_repos/cudf/cpp/src/quantiles/tdigest/tdigest_aggregation.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <quantiles/tdigest/tdigest_util.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/concatenate.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/merge.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/detail/tdigest/tdigest.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/advance.h>
#include <thrust/binary_search.h>
#include <thrust/distance.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/merge.h>
#include <thrust/pair.h>
#include <thrust/reduce.h>
#include <thrust/remove.h>
#include <thrust/replace.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
namespace cudf {
namespace tdigest {
namespace detail {
namespace {
// the most representative point within a cluster of similar
// values. {mean, weight, validity}
// NOTE: Using a tuple here instead of a struct to take advantage of
// thrust zip iterators for output.
using centroid = thrust::tuple<double, double, bool>;
// make a centroid from a scalar with a weight of 1.
template <typename T>
struct make_centroid {
column_device_view const col;
centroid operator() __device__(size_type index) const
{
auto const is_valid = col.is_valid(index);
auto const mean = is_valid ? static_cast<double>(col.element<T>(index)) : 0.0;
auto const weight = is_valid ? 1.0 : 0.0;
return {mean, weight, is_valid};
}
};
// make a centroid from a scalar with a weight of 1. this functor
// assumes any value index it is passed is not null
template <typename T>
struct make_centroid_no_nulls {
column_device_view const col;
centroid operator() __device__(size_type index) const
{
return {static_cast<double>(col.element<T>(index)), 1.0, true};
}
};
// make a centroid from an input stream of mean/weight values.
struct make_weighted_centroid {
double const* mean;
double const* weight;
centroid operator() __device__(size_type index) { return {mean[index], weight[index], true}; }
};
// merge two centroids
struct merge_centroids {
centroid operator() __device__(centroid const& lhs, centroid const& rhs) const
{
bool const lhs_valid = thrust::get<2>(lhs);
bool const rhs_valid = thrust::get<2>(rhs);
if (!lhs_valid && !rhs_valid) { return {0, 0, false}; }
if (!lhs_valid) { return rhs; }
if (!rhs_valid) { return lhs; }
double const lhs_mean = thrust::get<0>(lhs);
double const rhs_mean = thrust::get<0>(rhs);
double const lhs_weight = thrust::get<1>(lhs);
double const rhs_weight = thrust::get<1>(rhs);
double const new_weight = lhs_weight + rhs_weight;
return {(lhs_mean * lhs_weight + rhs_mean * rhs_weight) / new_weight, new_weight, true};
}
};
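// Illustrative reduction step (hypothetical centroids): merging {mean = 2.0, weight = 3.0, valid}
// with {mean = 4.0, weight = 1.0, valid} yields weight 4.0 and mean
// (2.0 * 3.0 + 4.0 * 1.0) / 4.0 = 2.5, i.e. the weighted average of the two means.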
/**
* @brief A functor which returns the nearest cumulative weight in the grouped input stream prior to
* the specified next weight limit.
*
* This functor assumes the weight for all scalars is simply 1. Under this assumption,
* the nearest weight that will be <= the next limit is simply the nearest integer < the limit,
* which we can get by just taking floor(next_limit). For example if our next limit is 3.56, the
* nearest whole number <= it is floor(3.56) == 3.
*/
struct nearest_value_scalar_weights_grouped {
size_type const* group_offsets;
thrust::pair<double, int> operator() __device__(double next_limit, size_type group_index) const
{
double const f = floor(next_limit);
auto const relative_weight_index = max(0, static_cast<int>(next_limit) - 1);
auto const group_size = group_offsets[group_index + 1] - group_offsets[group_index];
return {f, relative_weight_index < group_size ? relative_weight_index : group_size - 1};
}
};
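// Illustrative lookup (hypothetical group): with group_offsets = {0, 5, 9}, group_index = 0 and
// next_limit = 3.56, the functor returns {floor(3.56) = 3.0, 2}: a cumulative weight of 3
// reached at group-relative value index 2.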
/**
* @brief A functor which returns the nearest cumulative weight in the input stream prior to the
* specified next weight limit.
*
* This functor assumes the weight for all scalars is simply 1. Under this assumption,
* the nearest weight that will be <= the next limit is simply the nearest integer < the limit,
* which we can get by just taking floor(next_limit). For example if our next limit is 3.56, the
* nearest whole number <= it is floor(3.56) == 3.
*/
struct nearest_value_scalar_weights {
size_type const input_size;
thrust::pair<double, int> operator() __device__(double next_limit, size_type) const
{
double const f = floor(next_limit);
auto const relative_weight_index = max(0, static_cast<int>(next_limit) - 1);
return {f, relative_weight_index < input_size ? relative_weight_index : input_size - 1};
}
};
/**
* @brief A functor which returns the nearest cumulative weight in the input stream prior to the
* specified next weight limit.
*
* This functor assumes we are dealing with grouped, sorted, weighted centroids.
*/
template <typename GroupOffsetsIter>
struct nearest_value_centroid_weights {
double const* cumulative_weights;
GroupOffsetsIter outer_offsets; // groups
size_type const* inner_offsets; // tdigests within a group
thrust::pair<double, int> operator() __device__(double next_limit, size_type group_index) const
{
auto const tdigest_begin = outer_offsets[group_index];
auto const tdigest_end = outer_offsets[group_index + 1];
auto const num_weights = inner_offsets[tdigest_end] - inner_offsets[tdigest_begin];
// NOTE: as it is today, this functor will never be called for any digests that are empty, but
// I'll leave this check here for safety.
if (num_weights == 0) { return thrust::pair<double, int>{0, 0}; }
double const* group_cumulative_weights = cumulative_weights + inner_offsets[tdigest_begin];
auto const index = ((thrust::lower_bound(thrust::seq,
group_cumulative_weights,
group_cumulative_weights + num_weights,
next_limit)) -
group_cumulative_weights);
return index == 0 ? thrust::pair<double, int>{0, 0}
: thrust::pair<double, int>{group_cumulative_weights[index - 1],
static_cast<int>(index) - 1};
}
};
/**
* @brief A functor which returns the cumulative input weight for a given index in a
* set of grouped input values.
*
* This functor assumes the weight for all scalars is simply 1. Under this assumption,
* the cumulative weight for a given value index I is simply I+1.
*/
struct cumulative_scalar_weight_grouped {
cudf::device_span<size_type const> group_offsets;
cudf::device_span<size_type const> group_labels;
std::tuple<size_type, size_type, double> operator() __device__(size_type value_index) const
{
auto const group_index = group_labels[value_index];
auto const relative_value_index = value_index - group_offsets[group_index];
return {group_index, relative_value_index, relative_value_index + 1};
}
};
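// Illustrative mapping (hypothetical grouping): with group_offsets = {0, 3, 7} and
// group_labels = {0, 0, 0, 1, 1, 1, 1}, value_index = 4 falls in group 1, is group-relative
// value 1, and carries a cumulative weight of 2.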
/**
* @brief A functor which returns the cumulative input weight for a given index in a
* set of input values.
*
* This functor assumes the weight for all scalars is simply 1. Under this assumption,
* the cumulative weight for a given value index I is simply I+1.
*/
struct cumulative_scalar_weight {
std::tuple<size_type, size_type, double> operator() __device__(size_type value_index) const
{
return {0, value_index, value_index + 1};
}
};
/**
* @brief A functor which returns the cumulative input weight for a given index in a
* set of grouped input centroids.
*
* This functor assumes we are dealing with grouped, weighted centroids.
*/
template <typename GroupLabelsIter, typename GroupOffsetsIter>
struct cumulative_centroid_weight {
double const* cumulative_weights;
GroupLabelsIter group_labels;
GroupOffsetsIter outer_offsets; // groups
cudf::device_span<size_type const> inner_offsets; // tdigests within a group
std::tuple<size_type, size_type, double> operator() __device__(size_type value_index) const
{
auto const tdigest_index =
static_cast<size_type>(
thrust::upper_bound(thrust::seq, inner_offsets.begin(), inner_offsets.end(), value_index) -
inner_offsets.begin()) -
1;
auto const group_index = group_labels[tdigest_index];
auto const first_tdigest_index = outer_offsets[group_index];
auto const first_weight_index = inner_offsets[first_tdigest_index];
auto const relative_value_index = value_index - first_weight_index;
double const* group_cumulative_weights = cumulative_weights + first_weight_index;
return {group_index, relative_value_index, group_cumulative_weights[relative_value_index]};
}
};
// retrieve group info (total weight, size, start offset) of scalar inputs by group index.
struct scalar_group_info_grouped {
size_type const* group_valid_counts;
size_type const* group_offsets;
__device__ thrust::tuple<double, size_type, size_type> operator()(size_type group_index) const
{
return {static_cast<double>(group_valid_counts[group_index]),
group_offsets[group_index + 1] - group_offsets[group_index],
group_offsets[group_index]};
}
};
// retrieve group info (total weight, size, start offset) of scalar inputs
struct scalar_group_info {
double const total_weight;
size_type const size;
__device__ thrust::tuple<double, size_type, size_type> operator()(size_type) const
{
return {total_weight, size, 0};
}
};
// retrieve group info of centroid inputs by group index
template <typename GroupOffsetsIter>
struct centroid_group_info {
double const* cumulative_weights;
GroupOffsetsIter outer_offsets;
size_type const* inner_offsets;
__device__ thrust::tuple<double, size_type, size_type> operator()(size_type group_index) const
{
// if there's no weights in this group of digests at all, return 0.
auto const group_start = inner_offsets[outer_offsets[group_index]];
auto const group_end = inner_offsets[outer_offsets[group_index + 1]];
auto const num_weights = group_end - group_start;
auto const last_weight_index = group_end - 1;
return num_weights == 0
? thrust::tuple<double, size_type, size_type>{0, num_weights, group_start}
: thrust::tuple<double, size_type, size_type>{
cumulative_weights[last_weight_index], num_weights, group_start};
}
};
struct tdigest_min {
__device__ double operator()(thrust::tuple<double, size_type> const& t) const
{
auto const min = thrust::get<0>(t);
auto const size = thrust::get<1>(t);
return size > 0 ? min : std::numeric_limits<double>::max();
}
};
struct tdigest_max {
__device__ double operator()(thrust::tuple<double, size_type> const& t) const
{
auto const max = thrust::get<0>(t);
auto const size = thrust::get<1>(t);
return size > 0 ? max : std::numeric_limits<double>::lowest();
}
};
// a monotonically increasing scale function which produces a distribution
// of centroids that is more densely packed at the tails of the input
// than in the middle, preserving accuracy for extreme quantiles.
__device__ double scale_func_k1(double quantile, double delta_norm)
{
double k = delta_norm * asin(2.0 * quantile - 1.0);
k += 1.0;
double const q = (sin(k / delta_norm) + 1.0) / 2.0;
return q;
}
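// Illustrative arithmetic (hypothetical delta = 100, so delta_norm = 100 / (2 * pi) ~= 15.92):
// at quantile = 0.5, k = 15.92 * asin(0) + 1 = 1 and the function returns
// (sin(1 / 15.92) + 1) / 2 ~= 0.5314, meaning the cluster that opens at the median closes
// after roughly 3.1% more of the total weight.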
// convert a single-row tdigest column to a scalar.
std::unique_ptr<scalar> to_tdigest_scalar(std::unique_ptr<column>&& tdigest,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(tdigest->size() == 1,
"Encountered invalid tdigest column when converting to scalar");
auto contents = tdigest->release();
return std::make_unique<struct_scalar>(table(std::move(contents.children)), true, stream, mr);
}
/**
* @brief Compute a set of cluster limits (brackets, essentially) for a
* given tdigest based on the specified delta and the total weight of values
* to be added.
*
* The number of clusters generated will always be <= delta_, where delta_ is
* a reasonably small number likely << 10000.
*
* Each input group gets an independent set of clusters generated. 1 thread
* per group.
*
* This kernel is called in a two-pass style. Once to compute the per-group
* cluster sizes and total # of clusters, and once to compute the actual
* weight limits per cluster.
*
* @param delta tdigest compression level
* @param num_groups The number of input groups
* @param nearest_weight A functor which returns the nearest weight in the input
* stream that falls before our current cluster limit
* @param group_info A functor which returns the info for the specified group (total
* weight, size and start offset)
* @param group_cluster_wl Output. The set of cluster weight limits for each group.
* @param group_num_clusters Output. The number of output clusters for each input group.
* @param group_cluster_offsets Offsets per-group to the start of its clusters
* @param has_nulls Whether or not the input contains nulls
*
*/
template <typename GroupInfo, typename NearestWeightFunc, typename CumulativeWeight>
__global__ void generate_cluster_limits_kernel(int delta,
size_type num_groups,
NearestWeightFunc nearest_weight,
GroupInfo group_info,
CumulativeWeight cumulative_weight,
double* group_cluster_wl,
size_type* group_num_clusters,
size_type const* group_cluster_offsets,
bool has_nulls)
{
int const tid = threadIdx.x + blockIdx.x * blockDim.x;
auto const group_index = tid;
if (group_index >= num_groups) { return; }
// we will generate at most delta clusters.
double const delta_norm = static_cast<double>(delta) / (2.0 * M_PI);
double total_weight;
size_type group_size, group_start;
thrust::tie(total_weight, group_size, group_start) = group_info(group_index);
// start at the correct place based on our cluster offset.
double* cluster_wl =
group_cluster_wl ? group_cluster_wl + group_cluster_offsets[group_index] : nullptr;
// a group with nothing in it.
group_num_clusters[group_index] = 0;
if (total_weight <= 0) {
// if the input contains nulls we can potentially have a group that generates no
// clusters because -all- of the input values are null. in that case, the reduce_by_key call
// in the tdigest generation step will need a location to store the unused reduction value for
// that group of nulls. these "stubs" will be postprocessed out afterwards.
if (has_nulls) { group_num_clusters[group_index] = 1; }
return;
}
double cur_limit = 0.0;
double cur_weight = 0.0;
double next_limit = -1.0;
int last_inserted_index = -1; // group-relative index into the input stream
// compute the first cluster limit
double nearest_w;
int nearest_w_index; // group-relative index into the input stream
while (true) {
cur_weight = next_limit < 0 ? 0 : max(cur_weight + 1, nearest_w);
if (cur_weight >= total_weight) { break; }
// based on where we are closing the cluster off (not including the incoming weight),
// compute the next cluster limit
double const quantile = cur_weight / total_weight;
next_limit = total_weight * scale_func_k1(quantile, delta_norm);
// if the next limit is < the cur limit, we're past the end of the distribution, so we're done.
if (next_limit <= cur_limit) {
if (cluster_wl) { cluster_wl[group_num_clusters[group_index]] = total_weight; }
group_num_clusters[group_index]++;
break;
}
// compute the weight we will be at in the input values just before closing off the current
// cluster (because adding the next value will cross the current limit).
// NOTE: can't use structured bindings here.
thrust::tie(nearest_w, nearest_w_index) = nearest_weight(next_limit, group_index);
// because of the way the scale functions work, it is possible to generate clusters
// in such a way that we end up with "gaps" where there are no input values that
// fall into a given cluster. An example would be this:
//
// cluster weight limits = 0.00003, 1.008, 3.008
//
// input values(weight) = A(1), B(2), C(3)
//
// naively inserting these values into the clusters simply by taking a lower_bound,
// we would get the following distribution of input values into those 3 clusters.
// (), (A), (B,C)
//
// whereas what we really want is:
//
// (A), (B), (C)
//
// to fix this, we will artificially adjust the output cluster limits to guarantee
// at least 1 input value will be put in each cluster during the reduction step.
// this does not affect final centroid results as we still use the "real" weight limits
// to compute subsequent clusters - the purpose is only to allow cluster selection
// during the reduction step to be trivial.
//
double adjusted_next_limit = next_limit;
int adjusted_w_index = nearest_w_index;
if ((last_inserted_index < 0) || // if we haven't inserted anything yet
(nearest_w_index ==
last_inserted_index)) { // if we land in the same bucket as the previous cap
// force the value into this bucket
adjusted_w_index = (last_inserted_index == group_size - 1)
? last_inserted_index
: max(adjusted_w_index, last_inserted_index + 1);
// the "adjusted" cluster limit must be high enough so that this value will fall in the
// bucket. NOTE: cumulative_weight expects an absolute index into the input value stream, not
// a group-relative index
[[maybe_unused]] auto [r, i, adjusted_w] = cumulative_weight(adjusted_w_index + group_start);
adjusted_next_limit = max(next_limit, adjusted_w);
// update the weight with our adjusted value.
nearest_w = adjusted_w;
}
if (cluster_wl) { cluster_wl[group_num_clusters[group_index]] = adjusted_next_limit; }
last_inserted_index = adjusted_w_index;
group_num_clusters[group_index]++;
cur_limit = next_limit;
}
}
/**
* @brief Compute a set of cluster limits (brackets, essentially) for a
* given tdigest based on the specified delta and the total weight of values
* to be added.
*
* The number of clusters generated will always be <= delta_, where delta_ is
* a reasonably small number likely << 10000.
*
* Each input group gets an independent set of clusters generated.
*
* @param delta_ tdigest compression level
* @param num_groups The number of input groups
* @param nearest_weight A functor which returns the nearest weight in the input
* stream that falls before our current cluster limit
* @param group_info A functor which returns the info for the specified group (total weight,
* size and start offset)
* @param has_nulls Whether or not the input data contains nulls
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory
*
* @returns A tuple containing the set of cluster weight limits for each group, a set of
* list-style offsets indicating group sizes, and the total number of clusters
*/
template <typename GroupInfo, typename NearestWeight, typename CumulativeWeight>
std::tuple<rmm::device_uvector<double>, std::unique_ptr<column>, size_type>
generate_group_cluster_info(int delta,
size_type num_groups,
NearestWeight nearest_weight,
GroupInfo group_info,
CumulativeWeight cumulative_weight,
bool has_nulls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
constexpr size_type block_size = 256;
cudf::detail::grid_1d const grid(num_groups, block_size);
// compute number of clusters per group
// each thread computes 1 set of clusters (# of cluster sets == # of groups)
rmm::device_uvector<size_type> group_num_clusters(num_groups, stream);
generate_cluster_limits_kernel<<<grid.num_blocks, block_size, 0, stream.value()>>>(
delta,
num_groups,
nearest_weight,
group_info,
cumulative_weight,
nullptr,
group_num_clusters.begin(),
nullptr,
has_nulls);
// generate group cluster offsets (where the clusters for a given group start and end)
auto group_cluster_offsets = cudf::make_numeric_column(
data_type{type_id::INT32}, num_groups + 1, mask_state::UNALLOCATED, stream, mr);
auto cluster_size = cudf::detail::make_counting_transform_iterator(
0, [group_num_clusters = group_num_clusters.begin(), num_groups] __device__(size_type index) {
return index == num_groups ? 0 : group_num_clusters[index];
});
thrust::exclusive_scan(rmm::exec_policy(stream),
cluster_size,
cluster_size + num_groups + 1,
group_cluster_offsets->mutable_view().begin<size_type>(),
0);
// total # of clusters
size_type total_clusters =
cudf::detail::get_value<size_type>(group_cluster_offsets->view(), num_groups, stream);
// fill in the actual cluster weight limits
rmm::device_uvector<double> group_cluster_wl(total_clusters, stream);
generate_cluster_limits_kernel<<<grid.num_blocks, block_size, 0, stream.value()>>>(
delta,
num_groups,
nearest_weight,
group_info,
cumulative_weight,
group_cluster_wl.begin(),
group_num_clusters.begin(),
group_cluster_offsets->view().begin<size_type>(),
has_nulls);
return {std::move(group_cluster_wl),
std::move(group_cluster_offsets),
static_cast<size_type>(total_clusters)};
}
std::unique_ptr<column> build_output_column(size_type num_rows,
std::unique_ptr<column>&& means,
std::unique_ptr<column>&& weights,
std::unique_ptr<column>&& offsets,
std::unique_ptr<column>&& min_col,
std::unique_ptr<column>&& max_col,
bool has_nulls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// whether or not this weight is a stub
auto is_stub_weight = [weights = weights->view().begin<double>()] __device__(size_type i) {
return weights[i] == 0;
};
// whether or not this particular tdigest is a stub
auto is_stub_digest = [offsets = offsets->view().begin<size_type>(), is_stub_weight] __device__(
size_type i) { return is_stub_weight(offsets[i]) ? 1 : 0; };
size_type const num_stubs = [&]() {
if (!has_nulls) { return 0; }
auto iter = cudf::detail::make_counting_transform_iterator(0, is_stub_digest);
return thrust::reduce(rmm::exec_policy(stream), iter, iter + num_rows);
}();
// if there are no stub tdigests, we can return immediately.
if (num_stubs == 0) {
return cudf::tdigest::detail::make_tdigest_column(num_rows,
std::move(means),
std::move(weights),
std::move(offsets),
std::move(min_col),
std::move(max_col),
stream,
mr);
}
// otherwise we need to strip out the stubs.
auto remove_stubs = [&](column_view const& col, size_type num_stubs) {
auto result = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, col.size() - num_stubs, mask_state::UNALLOCATED, stream, mr);
thrust::remove_copy_if(rmm::exec_policy(stream),
col.begin<double>(),
col.end<double>(),
thrust::make_counting_iterator(0),
result->mutable_view().begin<double>(),
is_stub_weight);
return result;
};
// remove from the means and weights column
auto _means = remove_stubs(*means, num_stubs);
auto _weights = remove_stubs(*weights, num_stubs);
// adjust offsets.
rmm::device_uvector<size_type> sizes(num_rows, stream);
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + num_rows,
sizes.begin(),
[offsets = offsets->view().begin<size_type>()] __device__(size_type i) {
return offsets[i + 1] - offsets[i];
});
auto iter = cudf::detail::make_counting_transform_iterator(
0, [sizes = sizes.begin(), is_stub_digest, num_rows] __device__(size_type i) {
return i == num_rows || is_stub_digest(i) ? 0 : sizes[i];
});
thrust::exclusive_scan(rmm::exec_policy(stream),
iter,
iter + num_rows + 1,
offsets->mutable_view().begin<size_type>(),
0);
// assemble final column
return cudf::tdigest::detail::make_tdigest_column(num_rows,
std::move(_means),
std::move(_weights),
std::move(offsets),
std::move(min_col),
std::move(max_col),
stream,
mr);
}
/**
* @brief Compute a column of tdigests.
*
* Assembles the output tdigest column based on the specified delta, a stream of
* input values (either scalar or centroids), and an assortment of per-group
* clustering information.
*
* This function is effectively just a reduce_by_key that performs a reduction
* from input values -> centroid clusters as defined by the cluster weight
* boundaries.
*
* @param delta tdigest compression level
* @param values_begin Beginning of the range of input values.
* @param values_end End of the range of input values.
* @param cumulative_weight Functor which returns cumulative weight and group information for
* an absolute input value index.
* @param min_col Column containing the minimum value per group.
* @param max_col Column containing the maximum value per group.
* @param group_cluster_wl Cluster weight limits for each group.
* @param group_cluster_offsets R-value reference of offsets into the cluster weight limits.
* @param total_clusters Total number of clusters in all groups.
* @param has_nulls Whether or not the input contains nulls
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory
*
* @returns A tdigest column with 1 row per output tdigest.
*/
template <typename CentroidIter, typename CumulativeWeight>
std::unique_ptr<column> compute_tdigests(int delta,
CentroidIter centroids_begin,
CentroidIter centroids_end,
CumulativeWeight group_cumulative_weight,
std::unique_ptr<column>&& min_col,
std::unique_ptr<column>&& max_col,
rmm::device_uvector<double> const& group_cluster_wl,
std::unique_ptr<column>&& group_cluster_offsets,
size_type total_clusters,
bool has_nulls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// the output for each group is a column of data that represents the tdigest. since we want 1 row
// per group, each row will be a list the length of the tdigest for that group. so our output
// column is of the form:
// struct {
// centroids for the digest
// list {
// struct {
// double // mean
// double // weight
// }
// }
// double // min
// double // max
// }
//
if (total_clusters == 0) { return cudf::tdigest::detail::make_empty_tdigest_column(stream, mr); }
// each input group represents an individual tdigest. within each tdigest, we want the keys
// to represent cluster indices (for example, if a tdigest had 100 clusters, the keys should fall
// into the range 0-99). But since we have multiple tdigests, we need to keep the keys unique
// between the groups, so we add our group start offset.
auto keys = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
[delta,
group_cluster_wl = group_cluster_wl.data(),
group_cluster_offsets = group_cluster_offsets->view().begin<size_type>(),
group_cumulative_weight] __device__(size_type value_index) -> size_type {
// get group index, relative value index within the group and cumulative weight.
[[maybe_unused]] auto [group_index, relative_value_index, cumulative_weight] =
group_cumulative_weight(value_index);
auto const num_clusters =
group_cluster_offsets[group_index + 1] - group_cluster_offsets[group_index];
if (num_clusters == 0) { return group_cluster_offsets[group_index]; }
// compute start of cluster weight limits for this group
double const* weight_limits = group_cluster_wl + group_cluster_offsets[group_index];
// local cluster index
size_type const group_cluster_index =
min(num_clusters - 1,
static_cast<size_type>(
thrust::lower_bound(
thrust::seq, weight_limits, weight_limits + num_clusters, cumulative_weight) -
weight_limits));
// add the cluster offset to generate a globally unique key
return group_cluster_index + group_cluster_offsets[group_index];
});
// mean and weight data
auto centroid_means = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, total_clusters, mask_state::UNALLOCATED, stream, mr);
auto centroid_weights = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, total_clusters, mask_state::UNALLOCATED, stream, mr);
// reduce the centroids down by key.
cudf::mutable_column_view mean_col(*centroid_means);
cudf::mutable_column_view weight_col(*centroid_weights);
// reduce the centroids into the clusters
auto output = thrust::make_zip_iterator(thrust::make_tuple(
mean_col.begin<double>(), weight_col.begin<double>(), thrust::make_discard_iterator()));
auto const num_values = std::distance(centroids_begin, centroids_end);
thrust::reduce_by_key(rmm::exec_policy(stream),
keys,
keys + num_values, // keys
centroids_begin, // values
thrust::make_discard_iterator(), // key output
output, // output
thrust::equal_to{}, // key equality check
merge_centroids{});
// create final tdigest column
return build_output_column(group_cluster_offsets->size() - 1,
std::move(centroid_means),
std::move(centroid_weights),
std::move(group_cluster_offsets),
std::move(min_col),
std::move(max_col),
has_nulls,
stream,
mr);
}
// return the min/max value of scalar inputs by group index
template <typename T>
struct get_scalar_minmax_grouped {
column_device_view const col;
device_span<size_type const> group_offsets;
size_type const* group_valid_counts;
__device__ thrust::tuple<double, double> operator()(size_type group_index)
{
auto const valid_count = group_valid_counts[group_index];
return valid_count > 0
? thrust::make_tuple(
static_cast<double>(col.element<T>(group_offsets[group_index])),
static_cast<double>(col.element<T>(group_offsets[group_index] + valid_count - 1)))
: thrust::make_tuple(0.0, 0.0);
}
};
// return the min/max value of scalar inputs
template <typename T>
struct get_scalar_minmax {
column_device_view const col;
size_type const valid_count;
__device__ thrust::tuple<double, double> operator()(size_type)
{
return valid_count > 0
? thrust::make_tuple(static_cast<double>(col.element<T>(0)),
static_cast<double>(col.element<T>(valid_count - 1)))
: thrust::make_tuple(0.0, 0.0);
}
};
struct typed_group_tdigest {
template <typename T,
std::enable_if_t<cudf::is_numeric<T>() || cudf::is_fixed_point<T>()>* = nullptr>
std::unique_ptr<column> operator()(column_view const& col,
cudf::device_span<size_type const> group_offsets,
cudf::device_span<size_type const> group_labels,
cudf::device_span<size_type const> group_valid_counts,
size_type num_groups,
int delta,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// first, generate cluster weight information for each input group
auto [group_cluster_wl, group_cluster_offsets, total_clusters] = generate_group_cluster_info(
delta,
num_groups,
nearest_value_scalar_weights_grouped{group_offsets.begin()},
scalar_group_info_grouped{group_valid_counts.begin(), group_offsets.begin()},
cumulative_scalar_weight_grouped{group_offsets, group_labels},
col.null_count() > 0,
stream,
mr);
// device column view. handy because the .element() function
// automatically handles fixed-point conversions for us
auto d_col = cudf::column_device_view::create(col, stream);
// compute min and max columns
auto min_col = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, num_groups, mask_state::UNALLOCATED, stream, mr);
auto max_col = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, num_groups, mask_state::UNALLOCATED, stream, mr);
thrust::transform(
rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + num_groups,
thrust::make_zip_iterator(thrust::make_tuple(min_col->mutable_view().begin<double>(),
max_col->mutable_view().begin<double>())),
get_scalar_minmax_grouped<T>{*d_col, group_offsets, group_valid_counts.begin()});
// for simple input values, the "centroids" all have a weight of 1.
auto scalar_to_centroid =
cudf::detail::make_counting_transform_iterator(0, make_centroid<T>{*d_col});
// generate the final tdigest
return compute_tdigests(delta,
scalar_to_centroid,
scalar_to_centroid + col.size(),
cumulative_scalar_weight_grouped{group_offsets, group_labels},
std::move(min_col),
std::move(max_col),
group_cluster_wl,
std::move(group_cluster_offsets),
total_clusters,
col.null_count() > 0,
stream,
mr);
}
template <typename T,
typename... Args,
std::enable_if_t<!cudf::is_numeric<T>() && !cudf::is_fixed_point<T>()>* = nullptr>
std::unique_ptr<column> operator()(Args&&...)
{
CUDF_FAIL("Non-numeric type in group_tdigest");
}
};
struct typed_reduce_tdigest {
// this function assumes col is sorted in ascending order with nulls at the end
template <
typename T,
typename std::enable_if_t<cudf::is_numeric<T>() || cudf::is_fixed_point<T>()>* = nullptr>
std::unique_ptr<scalar> operator()(column_view const& col,
int delta,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// treat this the same as the groupby path with a single group. Note: even though
// there is only 1 group there are still multiple keys within the group that represent
// the clustering of (N input values) -> (1 output centroid), so the final computation
// remains a reduce_by_key() and not a reduce().
//
// additionally we get a few optimizations.
// - since we only ever have 1 "group" that is sorted with nulls at the end,
// we can simply process just the non-null values and act as if the column
// is non-nullable, allowing us to process fewer values than if we were doing a groupby.
//
// - several of the functors used during the reduction are cheaper than during a groupby.
auto const valid_count = col.size() - col.null_count();
// first, generate cluster weight information for each input group
auto [cluster_wl, cluster_offsets, total_clusters] =
generate_group_cluster_info(delta,
1,
nearest_value_scalar_weights{valid_count},
scalar_group_info{static_cast<double>(valid_count), valid_count},
cumulative_scalar_weight{},
false,
stream,
mr);
// device column view. handy because the .element() function
// automatically handles fixed-point conversions for us
auto d_col = cudf::column_device_view::create(col, stream);
// compute min and max columns
auto min_col = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, 1, mask_state::UNALLOCATED, stream, mr);
auto max_col = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, 1, mask_state::UNALLOCATED, stream, mr);
thrust::transform(
rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + 1,
thrust::make_zip_iterator(thrust::make_tuple(min_col->mutable_view().begin<double>(),
max_col->mutable_view().begin<double>())),
get_scalar_minmax<T>{*d_col, valid_count});
// for simple input values, the "centroids" all have a weight of 1.
auto scalar_to_centroid =
cudf::detail::make_counting_transform_iterator(0, make_centroid_no_nulls<T>{*d_col});
// generate the final tdigest and wrap it in a struct_scalar
return to_tdigest_scalar(compute_tdigests(delta,
scalar_to_centroid,
scalar_to_centroid + valid_count,
cumulative_scalar_weight{},
std::move(min_col),
std::move(max_col),
cluster_wl,
std::move(cluster_offsets),
total_clusters,
false,
stream,
mr),
stream,
mr);
}
template <
typename T,
typename... Args,
typename std::enable_if_t<!cudf::is_numeric<T>() && !cudf::is_fixed_point<T>()>* = nullptr>
std::unique_ptr<scalar> operator()(Args&&...)
{
CUDF_FAIL("Non-numeric type in group_tdigest");
}
};
// utility for merge_tdigests.
template <typename GroupOffsetsIter>
struct group_num_weights_func {
GroupOffsetsIter outer_offsets;
size_type const* inner_offsets;
__device__ size_type operator()(size_type group_index)
{
auto const tdigest_begin = outer_offsets[group_index];
auto const tdigest_end = outer_offsets[group_index + 1];
return inner_offsets[tdigest_end] - inner_offsets[tdigest_begin];
}
};
// utility for merge_tdigests.
struct group_is_empty {
__device__ bool operator()(size_type group_size) { return group_size == 0; }
};
// utility for merge_tdigests.
template <typename GroupLabelsIter>
struct group_key_func {
GroupLabelsIter group_labels;
size_type const* inner_offsets;
size_type num_inner_offsets;
__device__ size_type operator()(size_type index)
{
// what -original- tdigest index this absolute index corresponds to
auto const iter = thrust::prev(
thrust::upper_bound(thrust::seq, inner_offsets, inner_offsets + num_inner_offsets, index));
auto const tdigest_index = thrust::distance(inner_offsets, iter);
// what group index the original tdigest belongs to
return group_labels[tdigest_index];
}
};
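// Illustrative lookup (hypothetical offsets): with inner_offsets = {0, 3, 5, 9} and index = 6,
// upper_bound points at 9 and prev lands on 5 (position 2), so the centroid belongs to
// original tdigest 2 and the functor returns group_labels[2].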
template <typename HGroupOffsetIter, typename GroupOffsetIter, typename GroupLabelIter>
std::unique_ptr<column> merge_tdigests(tdigest_column_view const& tdv,
HGroupOffsetIter h_outer_offsets,
GroupOffsetIter group_offsets,
GroupLabelIter group_labels,
size_t num_group_labels,
size_type num_groups,
int max_centroids,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// thrust::merge and thrust::merge_by_key don't provide what we need. What we would need is an
// algorithm like a super-merge that takes two layers of keys: one which identifies the outer
// grouping of tdigests, and one which identifies the inner groupings of the tdigests within the
// outer groups.
// TODO: investigate replacing the iterative merge with a single stable_sort_by_key.
// bring tdigest offsets back to the host
auto tdigest_offsets = tdv.centroids().offsets();
std::vector<size_type> h_inner_offsets(tdigest_offsets.size());
cudaMemcpyAsync(h_inner_offsets.data(),
tdigest_offsets.begin<size_type>(),
sizeof(size_type) * tdigest_offsets.size(),
cudaMemcpyDefault,
stream);
stream.synchronize();
// extract all means and weights into a table
cudf::table_view tdigests_unsliced({tdv.means(), tdv.weights()});
// generate the merged (but not yet compressed) tdigests for each group.
std::vector<std::unique_ptr<table>> tdigests;
tdigests.reserve(num_groups);
std::transform(h_outer_offsets,
h_outer_offsets + num_groups,
std::next(h_outer_offsets),
std::back_inserter(tdigests),
[&](auto tdigest_start, auto tdigest_end) {
// the range of tdigests in this group
auto const num_tdigests = tdigest_end - tdigest_start;
// slice each tdigest from the input
std::vector<table_view> unmerged_tdigests;
unmerged_tdigests.reserve(num_tdigests);
auto offset_iter = std::next(h_inner_offsets.begin(), tdigest_start);
std::transform(
offset_iter,
offset_iter + num_tdigests,
std::next(offset_iter),
std::back_inserter(unmerged_tdigests),
[&](size_type start, size_type end) {
return cudf::detail::slice(tdigests_unsliced, {start, end}, stream);
});
// merge
return cudf::detail::merge(unmerged_tdigests,
{0},
{order::ASCENDING},
{},
stream,
rmm::mr::get_current_device_resource());
});
// generate min and max values
auto merged_min_col = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, num_groups, mask_state::UNALLOCATED, stream, mr);
auto min_iter =
thrust::make_transform_iterator(thrust::make_zip_iterator(thrust::make_tuple(
tdv.min_begin(), cudf::tdigest::detail::size_begin(tdv))),
tdigest_min{});
thrust::reduce_by_key(rmm::exec_policy(stream),
group_labels,
group_labels + num_group_labels,
min_iter,
thrust::make_discard_iterator(),
merged_min_col->mutable_view().begin<double>(),
thrust::equal_to{}, // key equality check
thrust::minimum{});
auto merged_max_col = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, num_groups, mask_state::UNALLOCATED, stream, mr);
auto max_iter =
thrust::make_transform_iterator(thrust::make_zip_iterator(thrust::make_tuple(
tdv.max_begin(), cudf::tdigest::detail::size_begin(tdv))),
tdigest_max{});
thrust::reduce_by_key(rmm::exec_policy(stream),
group_labels,
group_labels + num_group_labels,
max_iter,
thrust::make_discard_iterator(),
merged_max_col->mutable_view().begin<double>(),
thrust::equal_to{}, // key equality check
thrust::maximum{});
// for any empty groups, set the min and max to be 0. not technically necessary but it makes
// testing simpler.
auto group_num_weights = cudf::detail::make_counting_transform_iterator(
0,
group_num_weights_func<decltype(group_offsets)>{group_offsets,
tdigest_offsets.begin<size_type>()});
thrust::replace_if(rmm::exec_policy(stream),
merged_min_col->mutable_view().begin<double>(),
merged_min_col->mutable_view().end<double>(),
group_num_weights,
group_is_empty{},
0);
thrust::replace_if(rmm::exec_policy(stream),
merged_max_col->mutable_view().begin<double>(),
merged_max_col->mutable_view().end<double>(),
group_num_weights,
group_is_empty{},
0);
// concatenate all the merged tdigests back into one table.
std::vector<table_view> tdigest_views;
tdigest_views.reserve(num_groups);
std::transform(tdigests.begin(),
tdigests.end(),
std::back_inserter(tdigest_views),
[](std::unique_ptr<table> const& t) { return t->view(); });
auto merged =
cudf::detail::concatenate(tdigest_views, stream, rmm::mr::get_current_device_resource());
// generate cumulative weights
auto merged_weights = merged->get_column(1).view();
auto cumulative_weights = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, merged_weights.size(), mask_state::UNALLOCATED, stream);
auto keys = cudf::detail::make_counting_transform_iterator(
0,
group_key_func<decltype(group_labels)>{
group_labels, tdigest_offsets.begin<size_type>(), tdigest_offsets.size()});
thrust::inclusive_scan_by_key(rmm::exec_policy(stream),
keys,
keys + cumulative_weights->size(),
merged_weights.begin<double>(),
cumulative_weights->mutable_view().begin<double>());
auto const delta = max_centroids;
// generate cluster info
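// per group, compute the weight limits of the clusters the merged centroids will be compressed
// into; total_clusters is the total cluster count across all groups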
auto [group_cluster_wl, group_cluster_offsets, total_clusters] = generate_group_cluster_info(
delta,
num_groups,
nearest_value_centroid_weights<decltype(group_offsets)>{
cumulative_weights->view().begin<double>(),
group_offsets,
tdigest_offsets.begin<size_type>()},
centroid_group_info<decltype(group_offsets)>{cumulative_weights->view().begin<double>(),
group_offsets,
tdigest_offsets.begin<size_type>()},
cumulative_centroid_weight<decltype(group_labels), decltype(group_offsets)>{
cumulative_weights->view().begin<double>(),
group_labels,
group_offsets,
{tdigest_offsets.begin<size_type>(), static_cast<size_t>(tdigest_offsets.size())}},
false,
stream,
mr);
// input centroid values
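// wrap each merged (mean, weight) pair into the centroid representation consumed by
// compute_tdigests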
auto centroids = cudf::detail::make_counting_transform_iterator(
0,
make_weighted_centroid{merged->get_column(0).view().begin<double>(),
merged_weights.begin<double>()});
// compute the tdigest
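// final compression step: assign each merged centroid to its cluster by cumulative weight and
// emit one compressed tdigest per group, attaching the merged min/max columns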
return compute_tdigests(
delta,
centroids,
centroids + merged->num_rows(),
cumulative_centroid_weight<decltype(group_labels), decltype(group_offsets)>{
cumulative_weights->view().begin<double>(),
group_labels,
group_offsets,
{tdigest_offsets.begin<size_type>(), static_cast<size_t>(tdigest_offsets.size())}},
std::move(merged_min_col),
std::move(merged_max_col),
group_cluster_wl,
std::move(group_cluster_offsets),
total_clusters,
false,
stream,
mr);
}
} // anonymous namespace
std::unique_ptr<scalar> reduce_tdigest(column_view const& col,
int max_centroids,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (col.size() == 0) { return cudf::tdigest::detail::make_empty_tdigest_scalar(stream, mr); }
// since this isn't coming out of a groupby, we need to sort the inputs in ascending
// order with nulls at the end.
table_view t({col});
auto sorted = cudf::detail::sort(
t, {order::ASCENDING}, {null_order::AFTER}, stream, rmm::mr::get_current_device_resource());
auto const delta = max_centroids;
return cudf::type_dispatcher(
col.type(), typed_reduce_tdigest{}, sorted->get_column(0), delta, stream, mr);
}
std::unique_ptr<scalar> reduce_merge_tdigest(column_view const& input,
int max_centroids,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
tdigest_column_view tdv(input);
if (input.size() == 0) { return cudf::tdigest::detail::make_empty_tdigest_scalar(stream, mr); }
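// treat the entire input column as a single group: offsets are {0, size} and every row gets
// group label 0, so merge_tdigests produces exactly one merged tdigest, which is then
// converted to a scalar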
auto h_group_offsets = cudf::detail::make_counting_transform_iterator(
0, [size = input.size()](size_type i) { return i == 0 ? 0 : size; });
auto group_offsets = cudf::detail::make_counting_transform_iterator(
0, [size = input.size()] __device__(size_type i) { return i == 0 ? 0 : size; });
auto group_labels = thrust::make_constant_iterator(0);
return to_tdigest_scalar(merge_tdigests(tdv,
h_group_offsets,
group_offsets,
group_labels,
input.size(),
1,
max_centroids,
stream,
mr),
stream,
mr);
}
std::unique_ptr<column> group_tdigest(column_view const& col,
cudf::device_span<size_type const> group_offsets,
cudf::device_span<size_type const> group_labels,
cudf::device_span<size_type const> group_valid_counts,
size_type num_groups,
int max_centroids,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (col.size() == 0) { return cudf::tdigest::detail::make_empty_tdigest_column(stream, mr); }
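// delta is the t-digest compression parameter: larger values retain more centroids, giving
// higher accuracy at the cost of a larger digest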
auto const delta = max_centroids;
return cudf::type_dispatcher(col.type(),
typed_group_tdigest{},
col,
group_offsets,
group_labels,
group_valid_counts,
num_groups,
delta,
stream,
mr);
}
std::unique_ptr<column> group_merge_tdigest(column_view const& input,
cudf::device_span<size_type const> group_offsets,
cudf::device_span<size_type const> group_labels,
size_type num_groups,
int max_centroids,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
tdigest_column_view tdv(input);
if (num_groups == 0 || input.size() == 0) {
return cudf::tdigest::detail::make_empty_tdigest_column(stream, mr);
}
// bring group offsets back to the host
std::vector<size_type> h_group_offsets(group_offsets.size());
cudaMemcpyAsync(h_group_offsets.data(),
group_offsets.begin(),
sizeof(size_type) * group_offsets.size(),
cudaMemcpyDefault,
stream);
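// no explicit synchronize is needed here: merge_tdigests synchronizes this stream before it
// reads the host offsets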
return merge_tdigests(tdv,
h_group_offsets.begin(),
group_offsets.data(),
group_labels.data(),
group_labels.size(),
num_groups,
max_centroids,
stream,
mr);
}
} // namespace detail
} // namespace tdigest
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/src/quantiles
|
rapidsai_public_repos/cudf/cpp/src/quantiles/tdigest/tdigest_util.cuh
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/detail/iterator.cuh>
#include <cudf/tdigest/tdigest_column_view.hpp>
namespace cudf {
namespace tdigest {
namespace detail {
/**
* @brief Functor to compute the size of each tdigest of a column
*/
struct tdigest_size_fn {
size_type const* offsets; ///< Offsets of the t-digest column
/**
 * @brief Returns the size of each tdigest in the column
*
* @param tdigest_index Index of the tdigest in the column
* @return Size of the tdigest
*/
__device__ size_type operator()(size_type tdigest_index)
{
return offsets[tdigest_index + 1] - offsets[tdigest_index];
}
};
/**
* @brief Returns an iterator that returns the size of each tdigest
* in the column (each row is 1 digest)
 *
 * @param tdv t-digest column view whose per-tdigest sizes are returned
 * @return An iterator that returns the size of each tdigest in the column
*/
inline auto size_begin(tdigest_column_view const& tdv)
{
return cudf::detail::make_counting_transform_iterator(
0, tdigest_size_fn{tdv.centroids().offsets_begin()});
}
} // namespace detail
} // namespace tdigest
} // namespace cudf
| 0 |