Dataset columns:
repo_id: string (length 21 to 96)
file_path: string (length 31 to 155)
content: string (length 1 to 92.9M)
__index_level_0__: int64 (0 to 0)
rapidsai_public_repos/cudf/cpp/include/cudf
rapidsai_public_repos/cudf/cpp/include/cudf/utilities/span.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <rmm/device_buffer.hpp> #include <rmm/device_uvector.hpp> #include <rmm/device_vector.hpp> #include <thrust/detail/raw_pointer_cast.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/memory.h> #include <cstddef> #include <limits> #include <type_traits> namespace cudf { /// A constant used to differentiate std::span of static and dynamic extent constexpr std::size_t dynamic_extent = std::numeric_limits<std::size_t>::max(); namespace detail { /** * @brief C++20 std::span with reduced feature set. * */ template <typename T, std::size_t Extent, typename Derived> class span_base { static_assert(Extent == dynamic_extent, "Only dynamic extent is supported"); public: using element_type = T; ///< The type of the elements in the span using value_type = std::remove_cv<T>; ///< Stored value type using size_type = std::size_t; ///< The type used for the size of the span using difference_type = std::ptrdiff_t; ///< std::ptrdiff_t using pointer = T*; ///< The type of the pointer returned by data() using iterator = T*; ///< The type of the iterator returned by begin() using const_pointer = T const*; ///< The type of the pointer returned by data() const using reference = T&; ///< The type of the reference returned by operator[](size_type) using const_reference = T const&; ///< The type of the reference returned by operator[](size_type) const static constexpr std::size_t extent = Extent; ///< The extent of the span constexpr span_base() noexcept {} /** * @brief Constructs a span from a pointer and a size. * * @param data Pointer to the first element in the span. * @param size The number of elements in the span. */ constexpr span_base(pointer data, size_type size) : _data(data), _size(size) {} // constexpr span_base(pointer begin, pointer end) : _data(begin), _size(end - begin) {} constexpr span_base(span_base const&) noexcept = default; ///< Copy constructor /** * @brief Copy assignment operator. * * @return Reference to this span. */ constexpr span_base& operator=(span_base const&) noexcept = default; // not noexcept due to undefined behavior when size = 0 /** * @brief Returns a reference to the first element in the span. * * Calling front on an empty span results in undefined behavior. * * @return Reference to the first element in the span */ constexpr reference front() const { return _data[0]; } // not noexcept due to undefined behavior when size = 0 /** * @brief Returns a reference to the last element in the span. * * Calling last on an empty span results in undefined behavior. * * @return Reference to the last element in the span */ constexpr reference back() const { return _data[_size - 1]; } // not noexcept due to undefined behavior when idx < 0 || idx >= size /** * @brief Returns a reference to the idx-th element of the sequence. * * The behavior is undefined if idx is out of range (i.e., if it is greater than or equal to * size()). 
* * @param idx the index of the element to access * @return A reference to the idx-th element of the sequence, i.e., `data()[idx]` */ constexpr reference operator[](size_type idx) const { return _data[idx]; } /** * @brief Returns an iterator to the first element of the span. * * If the span is empty, the returned iterator will be equal to end(). * * @return An iterator to the first element of the span */ constexpr iterator begin() const noexcept { return _data; } /** * @brief Returns an iterator to the element following the last element of the span. * * This element acts as a placeholder; attempting to access it results in undefined behavior. * * @return An iterator to the element following the last element of the span */ constexpr iterator end() const noexcept { return _data + _size; } /** * @brief Returns a pointer to the beginning of the sequence. * * @return A pointer to the first element of the span */ constexpr pointer data() const noexcept { return _data; } /** * @brief Returns the number of elements in the span. * * @return The number of elements in the span */ [[nodiscard]] constexpr size_type size() const noexcept { return _size; } /** * @brief Returns the size of the sequence in bytes. * * @return The size of the sequence in bytes */ [[nodiscard]] constexpr size_type size_bytes() const noexcept { return sizeof(T) * _size; } /** * @brief Checks if the span is empty. * * @return True if the span is empty, false otherwise */ [[nodiscard]] constexpr bool empty() const noexcept { return _size == 0; } /** * @brief Obtains a subspan consisting of the first N elements of the sequence * * @param count Number of elements from the beginning of this span to put in the subspan. * @return A subspan of the first N elements of the sequence */ constexpr Derived first(size_type count) const noexcept { return Derived(_data, count); } /** * @brief Obtains a subspan consisting of the last N elements of the sequence * * @param count Number of elements from the end of this span to put in the subspan * @return A subspan of the last N elements of the sequence */ constexpr Derived last(size_type count) const noexcept { return Derived(_data + _size - count, count); } /** * @brief Obtains a span that is a view over the `count` elements of this span starting at offset * * @param offset The offset of the first element in the subspan * @param count The number of elements in the subspan * @return A subspan of the sequence, of requested count and offset */ constexpr Derived subspan(size_type offset, size_type count) const noexcept { return Derived(_data + offset, count); } private: pointer _data{nullptr}; size_type _size{0}; }; } // namespace detail // ===== host_span ================================================================================= template <typename T> struct is_host_span_supported_container : std::false_type {}; template <typename T, typename Alloc> struct is_host_span_supported_container< // std::vector<T, Alloc>> : std::true_type {}; template <typename T, typename Alloc> struct is_host_span_supported_container< // thrust::host_vector<T, Alloc>> : std::true_type {}; template <typename T, typename Alloc> struct is_host_span_supported_container< // std::basic_string<T, std::char_traits<T>, Alloc>> : std::true_type {}; /** * @brief C++20 std::span with reduced feature set. 
* */ template <typename T, std::size_t Extent = cudf::dynamic_extent> struct host_span : public cudf::detail::span_base<T, Extent, host_span<T, Extent>> { using base = cudf::detail::span_base<T, Extent, host_span<T, Extent>>; ///< Base type using base::base; constexpr host_span() noexcept : base() {} // required to compile on centos /// Constructor from container /// @param in The container to construct the span from template < typename C, // Only supported containers of types convertible to T std::enable_if_t<is_host_span_supported_container<C>::value && std::is_convertible_v<std::remove_pointer_t<decltype(thrust::raw_pointer_cast( std::declval<C&>().data()))> (*)[], T (*)[]>>* = nullptr> constexpr host_span(C& in) : base(thrust::raw_pointer_cast(in.data()), in.size()) { } /// Constructor from const container /// @param in The container to construct the span from template < typename C, // Only supported containers of types convertible to T std::enable_if_t<is_host_span_supported_container<C>::value && std::is_convertible_v<std::remove_pointer_t<decltype(thrust::raw_pointer_cast( std::declval<C&>().data()))> (*)[], T (*)[]>>* = nullptr> constexpr host_span(C const& in) : base(thrust::raw_pointer_cast(in.data()), in.size()) { } // Copy construction to support const conversion /// @param other The span to copy template <typename OtherT, std::size_t OtherExtent, std::enable_if_t<(Extent == OtherExtent || Extent == dynamic_extent) && std::is_convertible_v<OtherT (*)[], T (*)[]>, void>* = nullptr> constexpr host_span(host_span<OtherT, OtherExtent> const& other) noexcept : base(other.data(), other.size()) { } }; // ===== device_span =============================================================================== template <typename T> struct is_device_span_supported_container : std::false_type {}; template <typename T, typename Alloc> struct is_device_span_supported_container< // thrust::device_vector<T, Alloc>> : std::true_type {}; template <typename T> struct is_device_span_supported_container< // rmm::device_vector<T>> : std::true_type {}; template <typename T> struct is_device_span_supported_container< // rmm::device_uvector<T>> : std::true_type {}; /** * @brief Device version of C++20 std::span with reduced feature set. 
* */ template <typename T, std::size_t Extent = cudf::dynamic_extent> struct device_span : public cudf::detail::span_base<T, Extent, device_span<T, Extent>> { using base = cudf::detail::span_base<T, Extent, device_span<T, Extent>>; ///< Base type using base::base; constexpr device_span() noexcept : base() {} // required to compile on centos /// Constructor from container /// @param in The container to construct the span from template < typename C, // Only supported containers of types convertible to T std::enable_if_t<is_device_span_supported_container<C>::value && std::is_convertible_v<std::remove_pointer_t<decltype(thrust::raw_pointer_cast( std::declval<C&>().data()))> (*)[], T (*)[]>>* = nullptr> constexpr device_span(C& in) : base(thrust::raw_pointer_cast(in.data()), in.size()) { } /// Constructor from const container /// @param in The container to construct the span from template < typename C, // Only supported containers of types convertible to T std::enable_if_t<is_device_span_supported_container<C>::value && std::is_convertible_v<std::remove_pointer_t<decltype(thrust::raw_pointer_cast( std::declval<C&>().data()))> (*)[], T (*)[]>>* = nullptr> constexpr device_span(C const& in) : base(thrust::raw_pointer_cast(in.data()), in.size()) { } // Copy construction to support const conversion /// @param other The span to copy template <typename OtherT, std::size_t OtherExtent, std::enable_if_t<(Extent == OtherExtent || Extent == dynamic_extent) && std::is_convertible_v<OtherT (*)[], T (*)[]>, void>* = nullptr> constexpr device_span(device_span<OtherT, OtherExtent> const& other) noexcept : base(other.data(), other.size()) { } }; namespace detail { /** * @brief Generic class for row-major 2D spans. Not compliant with STL container semantics/syntax. * * The index operator returns the corresponding row. */ template <typename T, template <typename, std::size_t> typename RowType> class base_2dspan { public: using size_type = std::pair<size_t, size_t>; ///< Type used to represent the dimension of the span constexpr base_2dspan() noexcept = default; /** * @brief Constructor a 2D span * * @param data Pointer to the data * @param rows Number of rows * @param columns Number of columns */ constexpr base_2dspan(T* data, size_t rows, size_t columns) noexcept : _data{data}, _size{rows, columns} { } /** * @brief Constructor a 2D span * * @param data Pointer to the data * @param size Size of the 2D span as pair */ base_2dspan(T* data, size_type size) noexcept : _data{data}, _size{size} {} /** * @brief Returns a pointer to the beginning of the sequence. * * @return A pointer to the first element of the span */ constexpr auto data() const noexcept { return _data; } /** * @brief Returns the size in the span as pair. * * @return pair representing rows and columns size of the span */ constexpr auto size() const noexcept { return _size; } /** * @brief Returns the number of elements in the span. * * @return Number of elements in the span */ constexpr auto count() const noexcept { return size().first * size().second; } /** * @brief Checks if the span is empty. * * @return True if the span is empty, false otherwise */ [[nodiscard]] constexpr bool is_empty() const noexcept { return count() == 0; } /** * @brief Returns flattened index of the element at the specified 2D position. 
* * @param row The row index * @param column The column index * @param size The size of the 2D span as pair * @return The flattened index of the element at the specified 2D position */ static constexpr size_t flatten_index(size_t row, size_t column, size_type size) noexcept { return row * size.second + column; } /** * @brief Returns a reference to the row-th element of the sequence. * * The behavior is undefined if row is out of range (i.e., if it is greater than or equal to * size()). * * @param row the index of the element to access * @return A reference to the row-th element of the sequence, i.e., `data()[row]` */ constexpr RowType<T, dynamic_extent> operator[](size_t row) const { return {this->data() + flatten_index(row, 0, this->size()), this->size().second}; } /** * @brief Returns a reference to the first element in the span. * * Calling front() on an empty span results in undefined behavior. * * @return Reference to the first element in the span */ [[nodiscard]] constexpr RowType<T, dynamic_extent> front() const { return (*this)[0]; } /** * @brief Returns a reference to the last element in the span. * * Calling back() on an empty span results in undefined behavior. * * @return Reference to the last element in the span */ [[nodiscard]] constexpr RowType<T, dynamic_extent> back() const { return (*this)[size().first - 1]; } /** * @brief Obtains a 2D span that is a view over the `num_rows` rows of this span starting at * `first_row` * * @param first_row The first row in the subspan * @param num_rows The number of rows in the subspan * @return A subspan of the sequence, of requested starting `first_row` and `num_rows` */ constexpr base_2dspan subspan(size_t first_row, size_t num_rows) const noexcept { return base_2dspan( _data + flatten_index(first_row, 0, this->size()), num_rows, this->size().second); } /** * @brief Returns a flattened span of the 2D span. * * @return A flattened span of the 2D span */ constexpr RowType<T, dynamic_extent> flat_view() { return {this->data(), this->size().first * this->size().second}; } /** * @brief Construct a 2D span from another 2D span of convertible type * * @tparam OtherT Type of the other 2D span * @tparam OtherRowType Type of the row of the other 2D span * @param other The other 2D span */ template <typename OtherT, template <typename, size_t> typename OtherRowType, std::enable_if_t<std::is_convertible_v<OtherRowType<OtherT, dynamic_extent>, RowType<T, dynamic_extent>>, void>* = nullptr> constexpr base_2dspan(base_2dspan<OtherT, OtherRowType> const& other) noexcept : _data{other.data()}, _size{other.size()} { } protected: T* _data = nullptr; ///< pointer to the first element size_type _size{0, 0}; ///< rows, columns }; /** * @brief Alias for the 2D span for host data. * * Index operator returns rows as `host_span`. */ template <class T> using host_2dspan = base_2dspan<T, host_span>; /** * @brief Alias for the 2D span for device data. * * Index operator returns rows as `device_span`. */ template <class T> using device_2dspan = base_2dspan<T, device_span>; } // namespace detail } // namespace cudf
0
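The span.hpp header above defines the non-owning host_span and device_span views. Below is a minimal usage sketch (not taken from the repository), assuming a libcudf build environment where these headers are on the include path; it constructs a host_span over a std::vector and slices it with subspan().

#include <cudf/utilities/span.hpp>

#include <cassert>
#include <vector>

int main()
{
  std::vector<int> values{1, 2, 3, 4, 5, 6};

  // host_span is a non-owning view; the vector must outlive the span.
  cudf::host_span<int const> all{values};

  // View four elements starting at offset 1, without copying.
  auto middle = all.subspan(1, 4);
  assert(middle.size() == 4);
  assert(middle.front() == 2 && middle.back() == 5);

  return 0;
}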
rapidsai_public_repos/cudf/cpp/include/cudf
rapidsai_public_repos/cudf/cpp/include/cudf/utilities/traits.cuh
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/types.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <cuda/std/atomic> namespace cudf { /** * @addtogroup utility_types * @{ * @file */ /** * @brief Indicates whether the type `T` has support for atomics * * @tparam T The type to verify * @return true `T` has support for atomics * @return false `T` no support for atomics */ template <typename T> constexpr inline bool has_atomic_support() { return cuda::std::atomic<T>::is_always_lock_free; } struct has_atomic_support_impl { template <typename T> constexpr bool operator()() { return has_atomic_support<T>(); } }; /** * @brief Indicates whether `type` has support for atomics * * @param type The `data_type` to verify * @return true `type` has support for atomics * @return false `type` no support for atomics */ constexpr inline bool has_atomic_support(data_type type) { return cudf::type_dispatcher(type, has_atomic_support_impl{}); } /** @} */ } // namespace cudf
0
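As a usage sketch for the has_atomic_support utilities above (assuming compilation with NVCC or another toolchain that provides cuda::std::atomic; not taken from the repository):

#include <cudf/types.hpp>
#include <cudf/utilities/traits.cuh>

#include <cstdint>
#include <iostream>

int main()
{
  // Compile-time query on a concrete type.
  static_assert(cudf::has_atomic_support<std::int32_t>(), "int32_t is expected to be lock-free");

  // Runtime query on a cudf::data_type, dispatched through the type dispatcher.
  cudf::data_type t{cudf::type_id::FLOAT64};
  std::cout << std::boolalpha << cudf::has_atomic_support(t) << "\n";
  return 0;
}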
rapidsai_public_repos/cudf/cpp/include/cudf
rapidsai_public_repos/cudf/cpp/include/cudf/utilities/logger.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <spdlog/spdlog.h> namespace cudf { /** * @brief Returns the global logger. * * This is a global instance of a spdlog logger. It can be used to configure logging behavior in * libcudf. * * Examples: * @code{.cpp} * // Turn off logging at runtime * cudf::logger().set_level(spdlog::level::off); * // Add a stdout sink to the logger * cudf::logger().sinks().push_back(std::make_shared<spdlog::sinks::stdout_sink_mt>()); * // Replace the default sink * cudf::logger().sinks() ={std::make_shared<spdlog::sinks::stderr_sink_mt>()}; * @endcode * * Note: Changes to the sinks are not thread safe and should only be done during global * initialization. * * @return spdlog::logger& The logger. */ spdlog::logger& logger(); } // namespace cudf
0
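Since cudf::logger() returns a spdlog::logger&, the standard spdlog APIs apply to it. A small sketch (assuming spdlog and the libcudf headers are available):

#include <cudf/utilities/logger.hpp>

int main()
{
  // Only emit warnings and above for this process.
  cudf::logger().set_level(spdlog::level::warn);

  // Route a message through the global libcudf logger.
  cudf::logger().warn("example warning from application code");
  return 0;
}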
rapidsai_public_repos/cudf/cpp/include/cudf
rapidsai_public_repos/cudf/cpp/include/cudf/utilities/default_stream.hpp
/* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/detail/utilities/default_stream.hpp> #include <rmm/cuda_stream.hpp> #include <rmm/cuda_stream_view.hpp> namespace cudf { /** * @brief Get the current default stream * * @return The current default stream. */ rmm::cuda_stream_view const get_default_stream(); /** * @brief Check if per-thread default stream is enabled. * * @return true if PTDS is enabled, false otherwise. */ bool is_ptds_enabled(); } // namespace cudf
0
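A usage sketch for the default-stream utilities above (assuming a CUDA-enabled libcudf build; not from the repository):

#include <cudf/utilities/default_stream.hpp>

#include <iostream>

int main()
{
  // The stream libcudf APIs use when no stream is passed explicitly.
  rmm::cuda_stream_view stream = cudf::get_default_stream();

  std::cout << "per-thread default stream enabled: " << std::boolalpha
            << cudf::is_ptds_enabled() << "\n";

  // Wait for any work previously submitted on the default stream.
  stream.synchronize();
  return 0;
}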
rapidsai_public_repos/cudf/cpp/include/cudf
rapidsai_public_repos/cudf/cpp/include/cudf/utilities/error.hpp
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/detail/utilities/stacktrace.hpp> #include <cuda.h> #include <cuda_runtime_api.h> #include <stdexcept> #include <string> #include <type_traits> namespace cudf { /** * @addtogroup utility_error * @{ * @file */ /** * @brief The struct to store the current stacktrace upon its construction. */ struct stacktrace_recorder { stacktrace_recorder() // Exclude the current stackframe, as it is this constructor. : _stacktrace{cudf::detail::get_stacktrace(cudf::detail::capture_last_stackframe::NO)} { } public: /** * @brief Get the stored stacktrace captured during object construction. * * @return The pointer to a null-terminated string storing the output stacktrace */ char const* stacktrace() const { return _stacktrace.c_str(); } protected: std::string const _stacktrace; //!< The whole stacktrace stored as one string. }; /** * @brief Exception thrown when logical precondition is violated. * * This exception should not be thrown directly and is instead thrown by the * CUDF_EXPECTS macro. */ struct logic_error : public std::logic_error, public stacktrace_recorder { /** * @brief Constructs a logic_error with the error message. * * @param message Message to be associated with the exception */ logic_error(char const* const message) : std::logic_error(message) {} /** * @brief Construct a new logic error object with error message * * @param message Message to be associated with the exception */ logic_error(std::string const& message) : std::logic_error(message) {} // TODO Add an error code member? This would be useful for translating an // exception to an error code in a pure-C API }; /** * @brief Exception thrown when a CUDA error is encountered. * */ struct cuda_error : public std::runtime_error, public stacktrace_recorder { /** * @brief Construct a new cuda error object with error message and code. * * @param message Error message * @param error CUDA error code */ cuda_error(std::string const& message, cudaError_t const& error) : std::runtime_error(message), _cudaError(error) { } public: /** * @brief Returns the CUDA error code associated with the exception. * * @return CUDA error code */ cudaError_t error_code() const { return _cudaError; } protected: cudaError_t _cudaError; //!< CUDA error code }; struct fatal_cuda_error : public cuda_error { using cuda_error::cuda_error; // Inherit constructors }; /** * @brief Exception thrown when an operation is attempted on an unsupported dtype. * * This exception should be thrown when an operation is attempted on an * unsupported data_type. This exception should not be thrown directly and is * instead thrown by the CUDF_EXPECTS or CUDF_FAIL macros. */ struct data_type_error : public std::invalid_argument, public stacktrace_recorder { /** * @brief Constructs a data_type_error with the error message. 
* * @param message Message to be associated with the exception */ data_type_error(char const* const message) : std::invalid_argument(message) {} /** * @brief Construct a new data_type_error object with error message * * @param message Message to be associated with the exception */ data_type_error(std::string const& message) : std::invalid_argument(message) {} }; /** @} */ } // namespace cudf #define STRINGIFY_DETAIL(x) #x ///< Stringify a macro argument #define CUDF_STRINGIFY(x) STRINGIFY_DETAIL(x) ///< Stringify a macro argument /** * @addtogroup utility_error * @{ */ /** * @brief Macro for checking (pre-)conditions that throws an exception when * a condition is violated. * * Defaults to throwing `cudf::logic_error`, but a custom exception may also be * specified. * * Example usage: * ``` * // throws cudf::logic_error * CUDF_EXPECTS(p != nullptr, "Unexpected null pointer"); * * // throws std::runtime_error * CUDF_EXPECTS(p != nullptr, "Unexpected nullptr", std::runtime_error); * ``` * @param ... This macro accepts either two or three arguments: * - The first argument must be an expression that evaluates to true or * false, and is the condition being checked. * - The second argument is a string literal used to construct the `what` of * the exception. * - When given, the third argument is the exception to be thrown. When not * specified, defaults to `cudf::logic_error`. * @throw `_exception_type` if the condition evaluates to 0 (false). */ #define CUDF_EXPECTS(...) \ GET_CUDF_EXPECTS_MACRO(__VA_ARGS__, CUDF_EXPECTS_3, CUDF_EXPECTS_2) \ (__VA_ARGS__) /// @cond #define GET_CUDF_EXPECTS_MACRO(_1, _2, _3, NAME, ...) NAME #define CUDF_EXPECTS_3(_condition, _reason, _exception_type) \ do { \ static_assert(std::is_base_of_v<std::exception, _exception_type>); \ (_condition) ? static_cast<void>(0) \ : throw _exception_type /*NOLINT(bugprone-macro-parentheses)*/ \ {"CUDF failure at: " __FILE__ ":" CUDF_STRINGIFY(__LINE__) ": " _reason}; \ } while (0) #define CUDF_EXPECTS_2(_condition, _reason) CUDF_EXPECTS_3(_condition, _reason, cudf::logic_error) /// @endcond /** * @brief Indicates that an erroneous code path has been taken. * * Example usage: * ```c++ * // Throws `cudf::logic_error` * CUDF_FAIL("Unsupported code path"); * * // Throws `std::runtime_error` * CUDF_FAIL("Unsupported code path", std::runtime_error); * ``` * * @param ... This macro accepts either one or two arguments: * - The first argument is a string literal used to construct the `what` of * the exception. * - When given, the second argument is the exception to be thrown. When not * specified, defaults to `cudf::logic_error`. * @throw `_exception_type` if the condition evaluates to 0 (false). */ #define CUDF_FAIL(...) \ GET_CUDF_FAIL_MACRO(__VA_ARGS__, CUDF_FAIL_2, CUDF_FAIL_1) \ (__VA_ARGS__) /// @cond #define GET_CUDF_FAIL_MACRO(_1, _2, NAME, ...) NAME #define CUDF_FAIL_2(_what, _exception_type) \ /*NOLINTNEXTLINE(bugprone-macro-parentheses)*/ \ throw _exception_type { "CUDF failure at:" __FILE__ ":" CUDF_STRINGIFY(__LINE__) ": " _what } #define CUDF_FAIL_1(_what) CUDF_FAIL_2(_what, cudf::logic_error) /// @endcond namespace cudf { namespace detail { // @cond inline void throw_cuda_error(cudaError_t error, char const* file, unsigned int line) { // Calls cudaGetLastError to clear the error status. It is nearly certain that a fatal error // occurred if it still returns the same error after a cleanup. 
cudaGetLastError(); auto const last = cudaFree(0); auto const msg = std::string{"CUDA error encountered at: " + std::string{file} + ":" + std::to_string(line) + ": " + std::to_string(error) + " " + cudaGetErrorName(error) + " " + cudaGetErrorString(error)}; // Call cudaDeviceSynchronize to ensure `last` did not result from an asynchronous error. // between two calls. if (error == last && last == cudaDeviceSynchronize()) { throw fatal_cuda_error{"Fatal " + msg, error}; } else { throw cuda_error{msg, error}; } } // @endcond } // namespace detail } // namespace cudf /** * @brief Error checking macro for CUDA runtime API functions. * * Invokes a CUDA runtime API function call, if the call does not return * cudaSuccess, invokes cudaGetLastError() to clear the error and throws an * exception detailing the CUDA error that occurred */ #define CUDF_CUDA_TRY(call) \ do { \ cudaError_t const status = (call); \ if (cudaSuccess != status) { cudf::detail::throw_cuda_error(status, __FILE__, __LINE__); } \ } while (0); /** * @brief Debug macro to check for CUDA errors * * In a non-release build, this macro will synchronize the specified stream * before error checking. In both release and non-release builds, this macro * checks for any pending CUDA errors from previous calls. If an error is * reported, an exception is thrown detailing the CUDA error that occurred. * * The intent of this macro is to provide a mechanism for synchronous and * deterministic execution for debugging asynchronous CUDA execution. It should * be used after any asynchronous CUDA call, e.g., cudaMemcpyAsync, or an * asynchronous kernel launch. */ #ifndef NDEBUG #define CUDF_CHECK_CUDA(stream) \ do { \ CUDF_CUDA_TRY(cudaStreamSynchronize(stream)); \ CUDF_CUDA_TRY(cudaPeekAtLastError()); \ } while (0); #else #define CUDF_CHECK_CUDA(stream) CUDF_CUDA_TRY(cudaPeekAtLastError()); #endif /** @} */
0
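The macros above are used as follows; this is an illustrative sketch (the helper function and its arguments are hypothetical), assuming a CUDA-enabled libcudf build:

#include <cudf/utilities/error.hpp>

#include <cuda_runtime_api.h>

#include <cstddef>
#include <stdexcept>

// Hypothetical helper demonstrating the error macros.
void copy_to_device(void* dst, void const* src, std::size_t bytes)
{
  // Throws cudf::logic_error when the condition is false.
  CUDF_EXPECTS(dst != nullptr && src != nullptr, "Unexpected null pointer");

  // Throws the given exception type instead of the default.
  if (bytes == 0) { CUDF_FAIL("Zero-byte copies are not supported", std::invalid_argument); }

  // Wraps a CUDA runtime call; throws cudf::cuda_error (or cudf::fatal_cuda_error) on failure.
  CUDF_CUDA_TRY(cudaMemcpy(dst, src, bytes, cudaMemcpyHostToDevice));
}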
rapidsai_public_repos/cudf/cpp/include/cudf
rapidsai_public_repos/cudf/cpp/include/cudf/utilities/type_checks.hpp
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/column/column_view.hpp> namespace cudf { /** * @brief Compares the type of two `column_view`s * * This function returns true if the type of `lhs` equals that of `rhs`. * - For fixed point types, the scale is compared. * - For dictionary types, the type of the keys are compared if both are * non-empty columns. * - For lists types, the type of child columns are compared recursively. * - For struct types, the type of each field are compared in order. * - For all other types, the `id` of `data_type` is compared. * * @param lhs The first `column_view` to compare * @param rhs The second `column_view` to compare * @return true if column types match */ bool column_types_equal(column_view const& lhs, column_view const& rhs); /** * @brief Compare the type IDs of two `column_view`s * This function returns true if the type of `lhs` equals that of `rhs`. * - For fixed point types, the scale is ignored. * * @param lhs The first `column_view` to compare * @param rhs The second `column_view` to compare * @return true if column types match */ bool column_types_equivalent(column_view const& lhs, column_view const& rhs); } // namespace cudf
0
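A sketch of column_types_equal in use (assuming a CUDA-enabled libcudf build; cudf::make_numeric_column from column_factories.hpp is assumed to be available):

#include <cudf/column/column_factories.hpp>
#include <cudf/utilities/type_checks.hpp>

#include <iostream>

int main()
{
  auto a = cudf::make_numeric_column(cudf::data_type{cudf::type_id::INT32}, 10);
  auto b = cudf::make_numeric_column(cudf::data_type{cudf::type_id::INT64}, 10);

  // Different type ids, so the column types do not match.
  std::cout << std::boolalpha << cudf::column_types_equal(a->view(), b->view()) << "\n";
  return 0;
}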
rapidsai_public_repos/cudf/cpp/include/cudf
rapidsai_public_repos/cudf/cpp/include/cudf/utilities/bit.hpp
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cassert> #include <cuda/std/climits> #include <cudf/types.hpp> /** * @file bit.hpp * @brief Utilities for bit and bitmask operations. */ namespace cudf { namespace detail { // @cond // Work around a bug in NVRTC that fails to compile assert() in constexpr // functions (fixed after CUDA 11.0) #if defined __GNUC__ #define LIKELY(EXPR) __builtin_expect(!!(EXPR), 1) #else #define LIKELY(EXPR) (!!(EXPR)) #endif #ifdef NDEBUG #define constexpr_assert(CHECK) static_cast<void>(0) #else #define constexpr_assert(CHECK) (LIKELY(CHECK) ? void(0) : [] { assert(!#CHECK); }()) #endif // @endcond /** * @brief Returns the number of bits the given type can hold. * * @tparam T The type to query * @return `sizeof(T)` in bits */ template <typename T> constexpr CUDF_HOST_DEVICE inline std::size_t size_in_bits() { static_assert(CHAR_BIT == 8, "Size of a byte must be 8 bits."); return sizeof(T) * CHAR_BIT; } } // namespace detail /** * @addtogroup utility_bitmask * @{ * @file */ /** * @brief Returns the index of the word containing the specified bit. * * @param bit_index The index of the bit to query * @return The index of the word containing the specified bit */ constexpr CUDF_HOST_DEVICE inline size_type word_index(size_type bit_index) { return bit_index / detail::size_in_bits<bitmask_type>(); } /** * @brief Returns the position within a word of the specified bit. * * @param bit_index The index of the bit to query * @return The position within a word of the specified bit */ constexpr CUDF_HOST_DEVICE inline size_type intra_word_index(size_type bit_index) { return bit_index % detail::size_in_bits<bitmask_type>(); } /** * @brief Sets the specified bit to `1` * * This function is not thread-safe, i.e., attempting to update bits within the * same word concurrently from multiple threads results in undefined behavior. * * @param bitmask The bitmask containing the bit to set * @param bit_index Index of the bit to set */ CUDF_HOST_DEVICE inline void set_bit_unsafe(bitmask_type* bitmask, size_type bit_index) { assert(nullptr != bitmask); bitmask[word_index(bit_index)] |= (bitmask_type{1} << intra_word_index(bit_index)); } /** * @brief Sets the specified bit to `0` * * This function is not thread-safe, i.e., attempting to update bits within the * same word concurrently from multiple threads results in undefined behavior. 
* * @param bitmask The bitmask containing the bit to clear * @param bit_index The index of the bit to clear */ CUDF_HOST_DEVICE inline void clear_bit_unsafe(bitmask_type* bitmask, size_type bit_index) { assert(nullptr != bitmask); bitmask[word_index(bit_index)] &= ~(bitmask_type{1} << intra_word_index(bit_index)); } /** * @brief Indicates whether the specified bit is set to `1` * * @param bitmask The bitmask containing the bit to clear * @param bit_index Index of the bit to test * @return true The specified bit is `1` * @return false The specified bit is `0` */ CUDF_HOST_DEVICE inline bool bit_is_set(bitmask_type const* bitmask, size_type bit_index) { assert(nullptr != bitmask); return bitmask[word_index(bit_index)] & (bitmask_type{1} << intra_word_index(bit_index)); } /** * @brief optional-like interface to check if a specified bit of a bitmask is set. * * @param bitmask The bitmask containing the bit to clear * @param bit_index Index of the bit to test * @param default_value Value to return if `bitmask` is nullptr * @return true The specified bit is `1` * @return false The specified bit is `0` * @return `default_value` if `bitmask` is nullptr */ CUDF_HOST_DEVICE inline bool bit_value_or(bitmask_type const* bitmask, size_type bit_index, bool default_value) { return bitmask != nullptr ? bit_is_set(bitmask, bit_index) : default_value; } /** * @brief Returns a bitmask word with the `n` least significant bits set. * * Behavior is undefined if `n < 0` or if `n >= size_in_bits<bitmask_type>()` * * @param n The number of least significant bits to set * @return A bitmask word with `n` least significant bits set */ constexpr CUDF_HOST_DEVICE inline bitmask_type set_least_significant_bits(size_type n) { constexpr_assert(0 <= n && n < static_cast<size_type>(detail::size_in_bits<bitmask_type>())); return ((bitmask_type{1} << n) - 1); } /** * @brief Returns a bitmask word with the `n` most significant bits set. * * Behavior is undefined if `n < 0` or if `n >= size_in_bits<bitmask_type>()` * * @param n The number of most significant bits to set * @return A bitmask word with `n` most significant bits set */ constexpr CUDF_HOST_DEVICE inline bitmask_type set_most_significant_bits(size_type n) { constexpr size_type word_size{detail::size_in_bits<bitmask_type>()}; constexpr_assert(0 <= n && n < word_size); return ~((bitmask_type{1} << (word_size - n)) - 1); } #ifdef __CUDACC__ /** * @brief Sets the specified bit to `1` * * @note This operation requires a global atomic operation. Therefore, it is * not recommended to use this function in performance critical regions. When * possible, it is more efficient to compute and update an entire word at * once using `set_word`. * * This function is thread-safe. * * @param bitmask The bitmask containing the bit to set * @param bit_index Index of the bit to set */ __device__ inline void set_bit(bitmask_type* bitmask, size_type bit_index) { assert(nullptr != bitmask); atomicOr(&bitmask[word_index(bit_index)], (bitmask_type{1} << intra_word_index(bit_index))); } /** * @brief Sets the specified bit to `0` * * @note This operation requires a global atomic operation. Therefore, it is * not recommended to use this function in performance critical regions. When * possible, it is more efficient to compute and update an entire element at * once using `set_element`. * This function is thread-safe. 
* * @param bit_index Index of the bit to clear */ __device__ inline void clear_bit(bitmask_type* bitmask, size_type bit_index) { assert(nullptr != bitmask); atomicAnd(&bitmask[word_index(bit_index)], ~(bitmask_type{1} << intra_word_index(bit_index))); } #endif /** @} */ // end of group } // namespace cudf
0
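The bitmask helpers above are CUDF_HOST_DEVICE, so they can also be exercised from host code. A minimal sketch (not from the repository):

#include <cudf/utilities/bit.hpp>

#include <cassert>
#include <vector>

int main()
{
  // Two 32-bit bitmask words cover 64 bit positions; all bits start cleared.
  std::vector<cudf::bitmask_type> mask(2, 0);

  cudf::set_bit_unsafe(mask.data(), 3);   // set bit 3
  cudf::set_bit_unsafe(mask.data(), 40);  // bit 40 lives in the second word

  assert(cudf::bit_is_set(mask.data(), 3));
  assert(!cudf::bit_is_set(mask.data(), 4));
  assert(cudf::word_index(40) == 1);
  assert(cudf::intra_word_index(40) == 8);

  cudf::clear_bit_unsafe(mask.data(), 3);

  // With a null bitmask pointer, bit_value_or falls back to the provided default.
  assert(cudf::bit_value_or(nullptr, 3, true));
  return 0;
}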
rapidsai_public_repos/cudf/cpp/include/cudf
rapidsai_public_repos/cudf/cpp/include/cudf/utilities/type_dispatcher.hpp
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/detail/utilities/assert.cuh> #include <cudf/fixed_point/fixed_point.hpp> #include <cudf/types.hpp> #include <cudf/utilities/error.hpp> #include <cudf/wrappers/dictionary.hpp> #include <cudf/wrappers/durations.hpp> #include <cudf/wrappers/timestamps.hpp> #include <string> /** * @file * @brief Defines the mapping between `cudf::type_id` runtime type information * and concrete C++ types. */ namespace cudf { /** * @addtogroup utility_dispatcher * @{ * @file */ /** * @brief Maps a C++ type to its corresponding `cudf::type_id` * * When explicitly passed a template argument of a given type, returns the * appropriate `type_id` enum for the specified C++ type. * * For example: * * ``` * return cudf::type_to_id<int32_t>(); // Returns INT32 * ``` * * @tparam T The type to map to a `cudf::type_id` * @return The `cudf::type_id` corresponding to the specified type */ template <typename T> inline constexpr type_id type_to_id() { return type_id::EMPTY; }; /** * @brief Maps a `cudf::type_id` types to its corresponding C++ type name string * */ struct type_to_name_impl { /** * @brief Maps a `cudf::type_id` types to its corresponding C++ type name string * * @return The C++ type name as string */ template <typename T> inline std::string operator()() { return "void"; } }; template <cudf::type_id t> struct id_to_type_impl { using type = void; }; /** * @brief Maps a `cudf::type_id` to its corresponding concrete C++ type * * Example: * ``` * static_assert(std::is_same<int32_t, id_to_type<id_type::INT32>); * ``` * @tparam t The `cudf::type_id` to map */ template <cudf::type_id Id> using id_to_type = typename id_to_type_impl<Id>::type; /** * @brief "Returns" the corresponding type that is stored on the device when using `cudf::column` * * For `decimal32`, the storage type is an `int32_t`. * For `decimal64`, the storage type is an `int64_t`. * For `decimal128`, the storage type is an `__int128_t`. 
* * Use this "type function" with the `using` type alias: * @code * using Type = device_storage_type_t<Element>; * @endcode * * @tparam T The literal type that is stored on the host */ // clang-format off template <typename T> using device_storage_type_t = std::conditional_t<std::is_same_v<numeric::decimal32, T>, int32_t, std::conditional_t<std::is_same_v<numeric::decimal64, T>, int64_t, std::conditional_t<std::is_same_v<numeric::decimal128, T>, __int128_t, T>>>; // clang-format on /** * @brief Checks if `fixed_point`-like types have template type `T` matching the column's * stored type id * * @tparam T The type that is stored on the device * @param id The `data_type::id` of the column * @return `true` If T matches the stored column `type_id` * @return `false` If T does not match the stored column `type_id` */ template <typename T> constexpr bool type_id_matches_device_storage_type(type_id id) { return (id == type_id::DECIMAL32 && std::is_same_v<T, int32_t>) || (id == type_id::DECIMAL64 && std::is_same_v<T, int64_t>) || (id == type_id::DECIMAL128 && std::is_same_v<T, __int128_t>) || id == type_to_id<T>(); } /** * @brief Macro used to define a mapping between a concrete C++ type and a *`cudf::type_id` enum. * @param Type The concrete C++ type * @param Id The `cudf::type_id` enum */ #ifndef CUDF_TYPE_MAPPING #define CUDF_TYPE_MAPPING(Type, Id) \ template <> \ constexpr inline type_id type_to_id<Type>() \ { \ return Id; \ } \ template <> \ inline std::string type_to_name_impl::operator()<Type>() \ { \ return CUDF_STRINGIFY(Type); \ } \ template <> \ struct id_to_type_impl<Id> { \ using type = Type; \ }; #endif // Defines all of the mappings between C++ types and their corresponding `cudf::type_id` values. CUDF_TYPE_MAPPING(int8_t, type_id::INT8) CUDF_TYPE_MAPPING(int16_t, type_id::INT16) CUDF_TYPE_MAPPING(int32_t, type_id::INT32) CUDF_TYPE_MAPPING(int64_t, type_id::INT64) CUDF_TYPE_MAPPING(uint8_t, type_id::UINT8) CUDF_TYPE_MAPPING(uint16_t, type_id::UINT16) CUDF_TYPE_MAPPING(uint32_t, type_id::UINT32) CUDF_TYPE_MAPPING(uint64_t, type_id::UINT64) CUDF_TYPE_MAPPING(float, type_id::FLOAT32) CUDF_TYPE_MAPPING(double, type_id::FLOAT64) CUDF_TYPE_MAPPING(bool, type_id::BOOL8) CUDF_TYPE_MAPPING(cudf::timestamp_D, type_id::TIMESTAMP_DAYS) CUDF_TYPE_MAPPING(cudf::timestamp_s, type_id::TIMESTAMP_SECONDS) CUDF_TYPE_MAPPING(cudf::timestamp_ms, type_id::TIMESTAMP_MILLISECONDS) CUDF_TYPE_MAPPING(cudf::timestamp_us, type_id::TIMESTAMP_MICROSECONDS) CUDF_TYPE_MAPPING(cudf::timestamp_ns, type_id::TIMESTAMP_NANOSECONDS) CUDF_TYPE_MAPPING(cudf::duration_D, type_id::DURATION_DAYS) CUDF_TYPE_MAPPING(cudf::duration_s, type_id::DURATION_SECONDS) CUDF_TYPE_MAPPING(cudf::duration_ms, type_id::DURATION_MILLISECONDS) CUDF_TYPE_MAPPING(cudf::duration_us, type_id::DURATION_MICROSECONDS) CUDF_TYPE_MAPPING(cudf::duration_ns, type_id::DURATION_NANOSECONDS) CUDF_TYPE_MAPPING(cudf::dictionary32, type_id::DICTIONARY32) CUDF_TYPE_MAPPING(cudf::string_view, type_id::STRING) CUDF_TYPE_MAPPING(cudf::list_view, type_id::LIST) CUDF_TYPE_MAPPING(numeric::decimal32, type_id::DECIMAL32) CUDF_TYPE_MAPPING(numeric::decimal64, type_id::DECIMAL64) CUDF_TYPE_MAPPING(numeric::decimal128, type_id::DECIMAL128) CUDF_TYPE_MAPPING(cudf::struct_view, type_id::STRUCT) /** * @brief Use this specialization on `type_dispatcher` whenever you only need to operate on the * underlying stored type. * * For example, `cudf::sort` in sort.cu uses `cudf::type_dispatcher<dispatch_storage_type>(...)`. 
* `cudf::gather` in gather.cuh also uses `cudf::type_dispatcher<dispatch_storage_type>(...)`. * However, reductions needs both `data_type` and underlying type, so cannot use this. */ template <cudf::type_id Id> struct dispatch_storage_type { using type = device_storage_type_t<id_to_type<Id>>; ///< The underlying type }; template <typename T> struct type_to_scalar_type_impl { using ScalarType = cudf::scalar; }; /** * @brief Macro used to define scalar type and scalar device type for * `cudf::numeric_scalar` template class for numeric C++ types. * * @param Type The numeric C++ type */ #ifndef MAP_NUMERIC_SCALAR #define MAP_NUMERIC_SCALAR(Type) \ template <> \ struct type_to_scalar_type_impl<Type> { \ using ScalarType = cudf::numeric_scalar<Type>; \ using ScalarDeviceType = cudf::numeric_scalar_device_view<Type>; \ }; #endif MAP_NUMERIC_SCALAR(int8_t) MAP_NUMERIC_SCALAR(int16_t) MAP_NUMERIC_SCALAR(int32_t) MAP_NUMERIC_SCALAR(int64_t) MAP_NUMERIC_SCALAR(__int128_t) MAP_NUMERIC_SCALAR(uint8_t) MAP_NUMERIC_SCALAR(uint16_t) MAP_NUMERIC_SCALAR(uint32_t) MAP_NUMERIC_SCALAR(uint64_t) MAP_NUMERIC_SCALAR(float) MAP_NUMERIC_SCALAR(double) MAP_NUMERIC_SCALAR(bool) template <> struct type_to_scalar_type_impl<std::string> { using ScalarType = cudf::string_scalar; using ScalarDeviceType = cudf::string_scalar_device_view; }; template <> struct type_to_scalar_type_impl<cudf::string_view> { using ScalarType = cudf::string_scalar; using ScalarDeviceType = cudf::string_scalar_device_view; }; template <> struct type_to_scalar_type_impl<numeric::decimal32> { using ScalarType = cudf::fixed_point_scalar<numeric::decimal32>; using ScalarDeviceType = cudf::fixed_point_scalar_device_view<numeric::decimal32>; }; template <> struct type_to_scalar_type_impl<numeric::decimal64> { using ScalarType = cudf::fixed_point_scalar<numeric::decimal64>; using ScalarDeviceType = cudf::fixed_point_scalar_device_view<numeric::decimal64>; }; template <> struct type_to_scalar_type_impl<numeric::decimal128> { using ScalarType = cudf::fixed_point_scalar<numeric::decimal128>; using ScalarDeviceType = cudf::fixed_point_scalar_device_view<numeric::decimal128>; }; template <> // TODO: this is a temporary solution for make_pair_iterator struct type_to_scalar_type_impl<cudf::dictionary32> { using ScalarType = cudf::numeric_scalar<int32_t>; using ScalarDeviceType = cudf::numeric_scalar_device_view<int32_t>; }; template <> // TODO: this is to get compilation working. list scalars will be implemented at a // later time. struct type_to_scalar_type_impl<cudf::list_view> { using ScalarType = cudf::list_scalar; // using ScalarDeviceType = cudf::list_scalar_device_view; }; template <> // TODO: Ditto, likewise. struct type_to_scalar_type_impl<cudf::struct_view> { using ScalarType = cudf::struct_scalar; // using ScalarDeviceType = cudf::struct_scalar_device_view; // CALEB: TODO! }; /** * @brief Macro used to define scalar type and scalar device type for * `cudf::timestamp_scalar` template class for timestamp C++ types. 
* * @param Type The timestamp C++ type */ #ifndef MAP_TIMESTAMP_SCALAR #define MAP_TIMESTAMP_SCALAR(Type) \ template <> \ struct type_to_scalar_type_impl<Type> { \ using ScalarType = cudf::timestamp_scalar<Type>; \ using ScalarDeviceType = cudf::timestamp_scalar_device_view<Type>; \ }; #endif MAP_TIMESTAMP_SCALAR(timestamp_D) MAP_TIMESTAMP_SCALAR(timestamp_s) MAP_TIMESTAMP_SCALAR(timestamp_ms) MAP_TIMESTAMP_SCALAR(timestamp_us) MAP_TIMESTAMP_SCALAR(timestamp_ns) /** * @brief Macro used to define scalar type and scalar device type for * `cudf::duration_scalar` template class for duration C++ types. * * @param Type The duration C++ type */ #ifndef MAP_DURATION_SCALAR #define MAP_DURATION_SCALAR(Type) \ template <> \ struct type_to_scalar_type_impl<Type> { \ using ScalarType = cudf::duration_scalar<Type>; \ using ScalarDeviceType = cudf::duration_scalar_device_view<Type>; \ }; #endif MAP_DURATION_SCALAR(duration_D) MAP_DURATION_SCALAR(duration_s) MAP_DURATION_SCALAR(duration_ms) MAP_DURATION_SCALAR(duration_us) MAP_DURATION_SCALAR(duration_ns) /** * @brief Maps a C++ type to the scalar type required to hold its value * * @tparam T The concrete C++ type to map */ template <typename T> using scalar_type_t = typename type_to_scalar_type_impl<T>::ScalarType; /** * @brief Maps a C++ type to the scalar device type required to hold its value * * @tparam T The concrete C++ type to map */ template <typename T> using scalar_device_type_t = typename type_to_scalar_type_impl<T>::ScalarDeviceType; /** * @brief Invokes an `operator()` template with the type instantiation based on * the specified `cudf::data_type`'s `id()`. * * Example usage with a functor that returns the size of the dispatched type: * * @code * struct size_of_functor{ * template <typename T> * int operator()(){ * return sizeof(T); * } * }; * cudf::data_type t{INT32}; * cudf::type_dispatcher(t, size_of_functor{}); // returns 4 * @endcode * * The `type_dispatcher` uses `cudf::type_to_id<t>` to provide a default mapping * of `cudf::type_id`s to dispatched C++ types. However, this mapping may be * customized by explicitly specifying a user-defined trait struct for the * `IdTypeMap`. For example, to always dispatch `int32_t` * * @code * template<cudf::type_id t> struct always_int{ using type = int32_t; } * * // This will always invoke operator()<int32_t> * cudf::type_dispatcher<always_int>(data_type, f); * @endcode * * It is sometimes necessary to customize the dispatched functor's * `operator()` for different types. This can be done in several ways. * * The first method is to use explicit template specialization. This is useful * for specializing behavior for single types. For example, a functor that * prints `int32_t` or `double` when invoked with either of those types, else it * prints `unhandled type`: * * @code * struct type_printer { * template <typename ColumnType> * void operator()() { std::cout << "unhandled type\n"; } * }; * * // Due to a bug in g++, explicit member function specializations need to be * // defined outside of the class definition * template <> * void type_printer::operator()<int32_t>() { std::cout << "int32_t\n"; } * * template <> * void type_printer::operator()<double>() { std::cout << "double\n"; } * @endcode * * A second method is to use SFINAE with `std::enable_if_t`. This is useful for * specializing for a set of types that share some property. 
For example, a * functor that prints `integral` or `floating point` for integral or floating * point types: * * @code * struct integral_or_floating_point { * template <typename ColumnType, * std::enable_if_t<not std::is_integral_v<ColumnType> and * not std::is_floating_point_v<ColumnType> >* = nullptr> * void operator()() { * std::cout << "neither integral nor floating point\n "; } * * template <typename ColumnType, * std::enable_if_t<std::is_integral_v<ColumnType> >* = nullptr> * void operator()() { std::cout << "integral\n"; } * * template <typename ColumnType, * std::enable_if_t<std::is_floating_point_v<ColumnType> >* = nullptr> * void operator()() { std::cout << "floating point\n"; } * }; * @endcode * * For more info on SFINAE and `std::enable_if`, see * https://eli.thegreenplace.net/2014/sfinae-and-enable_if/ * * The return type for all template instantiations of the functor's "operator()" * lambda must be the same, else there will be a compiler error as you would be * trying to return different types from the same function. * * @tparam id_to_type_impl Maps a `cudf::type_id` its dispatched C++ type * @tparam Functor The callable object's type * @tparam Ts Variadic parameter pack type * @param dtype The `cudf::data_type` whose `id()` determines which template * instantiation is invoked * @param f The callable whose `operator()` template is invoked * @param args Parameter pack of arguments forwarded to the `operator()` * invocation * @return Whatever is returned by the callable's `operator()` */ // This pragma disables a compiler warning that complains about the valid usage // of calling a __host__ functor from this function which is __host__ __device__ #ifdef __CUDACC__ #pragma nv_exec_check_disable #endif template <template <cudf::type_id> typename IdTypeMap = id_to_type_impl, typename Functor, typename... Ts> CUDF_HOST_DEVICE __forceinline__ constexpr decltype(auto) type_dispatcher(cudf::data_type dtype, Functor f, Ts&&... 
args) { switch (dtype.id()) { case type_id::INT8: return f.template operator()<typename IdTypeMap<type_id::INT8>::type>( std::forward<Ts>(args)...); case type_id::INT16: return f.template operator()<typename IdTypeMap<type_id::INT16>::type>( std::forward<Ts>(args)...); case type_id::INT32: return f.template operator()<typename IdTypeMap<type_id::INT32>::type>( std::forward<Ts>(args)...); case type_id::INT64: return f.template operator()<typename IdTypeMap<type_id::INT64>::type>( std::forward<Ts>(args)...); case type_id::UINT8: return f.template operator()<typename IdTypeMap<type_id::UINT8>::type>( std::forward<Ts>(args)...); case type_id::UINT16: return f.template operator()<typename IdTypeMap<type_id::UINT16>::type>( std::forward<Ts>(args)...); case type_id::UINT32: return f.template operator()<typename IdTypeMap<type_id::UINT32>::type>( std::forward<Ts>(args)...); case type_id::UINT64: return f.template operator()<typename IdTypeMap<type_id::UINT64>::type>( std::forward<Ts>(args)...); case type_id::FLOAT32: return f.template operator()<typename IdTypeMap<type_id::FLOAT32>::type>( std::forward<Ts>(args)...); case type_id::FLOAT64: return f.template operator()<typename IdTypeMap<type_id::FLOAT64>::type>( std::forward<Ts>(args)...); case type_id::BOOL8: return f.template operator()<typename IdTypeMap<type_id::BOOL8>::type>( std::forward<Ts>(args)...); case type_id::TIMESTAMP_DAYS: return f.template operator()<typename IdTypeMap<type_id::TIMESTAMP_DAYS>::type>( std::forward<Ts>(args)...); case type_id::TIMESTAMP_SECONDS: return f.template operator()<typename IdTypeMap<type_id::TIMESTAMP_SECONDS>::type>( std::forward<Ts>(args)...); case type_id::TIMESTAMP_MILLISECONDS: return f.template operator()<typename IdTypeMap<type_id::TIMESTAMP_MILLISECONDS>::type>( std::forward<Ts>(args)...); case type_id::TIMESTAMP_MICROSECONDS: return f.template operator()<typename IdTypeMap<type_id::TIMESTAMP_MICROSECONDS>::type>( std::forward<Ts>(args)...); case type_id::TIMESTAMP_NANOSECONDS: return f.template operator()<typename IdTypeMap<type_id::TIMESTAMP_NANOSECONDS>::type>( std::forward<Ts>(args)...); case type_id::DURATION_DAYS: return f.template operator()<typename IdTypeMap<type_id::DURATION_DAYS>::type>( std::forward<Ts>(args)...); case type_id::DURATION_SECONDS: return f.template operator()<typename IdTypeMap<type_id::DURATION_SECONDS>::type>( std::forward<Ts>(args)...); case type_id::DURATION_MILLISECONDS: return f.template operator()<typename IdTypeMap<type_id::DURATION_MILLISECONDS>::type>( std::forward<Ts>(args)...); case type_id::DURATION_MICROSECONDS: return f.template operator()<typename IdTypeMap<type_id::DURATION_MICROSECONDS>::type>( std::forward<Ts>(args)...); case type_id::DURATION_NANOSECONDS: return f.template operator()<typename IdTypeMap<type_id::DURATION_NANOSECONDS>::type>( std::forward<Ts>(args)...); case type_id::DICTIONARY32: return f.template operator()<typename IdTypeMap<type_id::DICTIONARY32>::type>( std::forward<Ts>(args)...); case type_id::STRING: return f.template operator()<typename IdTypeMap<type_id::STRING>::type>( std::forward<Ts>(args)...); case type_id::LIST: return f.template operator()<typename IdTypeMap<type_id::LIST>::type>( std::forward<Ts>(args)...); case type_id::DECIMAL32: return f.template operator()<typename IdTypeMap<type_id::DECIMAL32>::type>( std::forward<Ts>(args)...); case type_id::DECIMAL64: return f.template operator()<typename IdTypeMap<type_id::DECIMAL64>::type>( std::forward<Ts>(args)...); case type_id::DECIMAL128: return f.template 
operator()<typename IdTypeMap<type_id::DECIMAL128>::type>( std::forward<Ts>(args)...); case type_id::STRUCT: return f.template operator()<typename IdTypeMap<type_id::STRUCT>::type>( std::forward<Ts>(args)...); default: { #ifndef __CUDA_ARCH__ CUDF_FAIL("Invalid type_id."); #else CUDF_UNREACHABLE("Invalid type_id."); #endif } } } // @cond namespace detail { template <typename T1> struct double_type_dispatcher_second_type { #ifdef __CUDACC__ #pragma nv_exec_check_disable #endif template <typename T2, typename F, typename... Ts> CUDF_HOST_DEVICE __forceinline__ decltype(auto) operator()(F&& f, Ts&&... args) const { return f.template operator()<T1, T2>(std::forward<Ts>(args)...); } }; template <template <cudf::type_id> typename IdTypeMap> struct double_type_dispatcher_first_type { #ifdef __CUDACC__ #pragma nv_exec_check_disable #endif template <typename T1, typename F, typename... Ts> CUDF_HOST_DEVICE __forceinline__ decltype(auto) operator()(cudf::data_type type2, F&& f, Ts&&... args) const { return type_dispatcher<IdTypeMap>(type2, detail::double_type_dispatcher_second_type<T1>{}, std::forward<F>(f), std::forward<Ts>(args)...); } }; } // namespace detail // @endcond /** * @brief Dispatches two type template parameters to a callable. * * This function expects a callable `f` with an `operator()` template accepting * two typename template parameters `T1` and `T2`. * * @param type1 The `data_type` used to dispatch a type for the first template * parameter of the callable `F` * @param type2 The `data_type` used to dispatch a type for the second template * parameter of the callable `F` * @param f The callable whose `operator()` template is invoked * @param args Parameter pack forwarded to the `operator()` invocation `F`. * * @return The result of invoking `f.template operator<T1, T2>(args)` */ #ifdef __CUDACC__ #pragma nv_exec_check_disable #endif template <template <cudf::type_id> typename IdTypeMap = id_to_type_impl, typename F, typename... Ts> CUDF_HOST_DEVICE __forceinline__ constexpr decltype(auto) double_type_dispatcher( cudf::data_type type1, cudf::data_type type2, F&& f, Ts&&... args) { return type_dispatcher<IdTypeMap>(type1, detail::double_type_dispatcher_first_type<IdTypeMap>{}, type2, std::forward<F>(f), std::forward<Ts>(args)...); } /** * @brief Return a name for a given type. * * The returned type names are intended for error messages and are not * guaranteed to be stable. * * @param type The `data_type` * @return Name of the type */ std::string type_to_name(data_type type); /** @} */ // end of group } // namespace cudf
0
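The double_type_dispatcher above maps two runtime data_type values onto the two typename template parameters of a callable. A minimal host-side sketch of that contract follows; the functor name same_storage_size is hypothetical and not part of the header.

// Hypothetical functor: its operator() template receives both dispatched types.
struct same_storage_size {
  template <typename T1, typename T2>
  bool operator()() const
  {
    return sizeof(T1) == sizeof(T2);
  }
};

// Resolves to same_storage_size::operator()<int32_t, float>() at runtime.
bool equal_sizes = cudf::double_type_dispatcher(cudf::data_type{cudf::type_id::INT32},
                                                cudf::data_type{cudf::type_id::FLOAT32},
                                                same_storage_size{});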
rapidsai_public_repos/cudf/cpp/include/cudf/reduction
rapidsai_public_repos/cudf/cpp/include/cudf/reduction/detail/segmented_reduction_functions.hpp
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/column/column.hpp> #include <cudf/column/column_view.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/utilities/default_stream.hpp> #include <rmm/cuda_stream_view.hpp> #include <optional> namespace cudf { namespace reduction { namespace detail { /** * @brief Compute sum of each segment in the input column * * If an input segment is empty, the segment result is null. * * If `null_handling==null_policy::INCLUDE`, all elements in a segment must be valid * for the reduced value to be valid. * If `null_handling==null_policy::EXCLUDE`, the reduced value is valid if any element * in the segment is valid. * * @throw cudf::logic_error if input column type is not convertible to `output_dtype`. * @throw cudf::logic_error if `output_dtype` is not an arithmetic type. * * @param col Input column data * @param offsets Indices to identify segment boundaries within input `col` * @param output_dtype Data type of the output column * @param null_handling Specifies how null elements are processed for each segment * @param init Initial value of each segment * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return Sums of segments as type `output_dtype` */ std::unique_ptr<column> segmented_sum(column_view const& col, device_span<size_type const> offsets, data_type const output_dtype, null_policy null_handling, std::optional<std::reference_wrapper<scalar const>> init, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Computes product of each segment in the input column * * If an input segment is empty, the segment result is null. * * If `null_handling==null_policy::INCLUDE`, all elements in a segment must be valid * for the reduced value to be valid. * If `null_handling==null_policy::EXCLUDE`, the reduced value is valid if any element * in the segment is valid. * * @throw cudf::logic_error if input column type is not convertible to `output_dtype`. * @throw cudf::logic_error if `output_dtype` is not an arithmetic type. 
* * @param col Input column data * @param offsets Indices to identify segment boundaries within input `col` * @param output_dtype Data type of the output column * @param null_handling Specifies how null elements are processed for each segment * @param init Initial value of each segment * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return Product of segments as type `output_dtype` */ std::unique_ptr<column> segmented_product(column_view const& col, device_span<size_type const> offsets, data_type const output_dtype, null_policy null_handling, std::optional<std::reference_wrapper<scalar const>> init, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Compute minimum of each segment in the input column * * If an input segment is empty, the segment result is null. * * If `null_handling==null_policy::INCLUDE`, all elements in a segment must be valid * for the reduced value to be valid. * If `null_handling==null_policy::EXCLUDE`, the reduced value is valid if any element * in the segment is valid. * * @throw cudf::logic_error if input column type is not convertible to `output_dtype`. * * @param col Input column data * @param offsets Indices to identify segment boundaries within input `col` * @param output_dtype Data type of the output column * @param null_handling Specifies how null elements are processed for each segment * @param init Initial value of each segment * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return Minimums of segments as type `output_dtype` */ std::unique_ptr<column> segmented_min(column_view const& col, device_span<size_type const> offsets, data_type const output_dtype, null_policy null_handling, std::optional<std::reference_wrapper<scalar const>> init, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Compute maximum of each segment in the input column * * If an input segment is empty, the segment result is null. * * If `null_handling==null_policy::INCLUDE`, all elements in a segment must be valid * for the reduced value to be valid. * If `null_handling==null_policy::EXCLUDE`, the reduced value is valid if any element * in the segment is valid. * * @throw cudf::logic_error if input column type is not convertible to `output_dtype`. * * @param col Input column data * @param offsets Indices to identify segment boundaries within input `col` * @param output_dtype Data type of the output column * @param null_handling Specifies how null elements are processed for each segment * @param init Initial value of each segment * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return Maximums of segments as type `output_dtype` */ std::unique_ptr<column> segmented_max(column_view const& col, device_span<size_type const> offsets, data_type const output_dtype, null_policy null_handling, std::optional<std::reference_wrapper<scalar const>> init, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Compute if any of the values in the segment are true when typecasted to bool * * If an input segment is empty, the segment result is null. 
* * If `null_handling==null_policy::INCLUDE`, all elements in a segment must be valid * for the reduced value to be valid. * If `null_handling==null_policy::EXCLUDE`, the reduced value is valid if any element * in the segment is valid. * * @throw cudf::logic_error if input column type is not convertible to bool. * @throw cudf::logic_error if `output_dtype` is not BOOL8. * * @param col Input column data * @param offsets Indices to identify segment boundaries within input `col` * @param output_dtype Data type of the output column * @param null_handling Specifies how null elements are processed for each segment * @param init Initial value of each segment * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return Column of type BOOL8 for the results of the segments */ std::unique_ptr<column> segmented_any(column_view const& col, device_span<size_type const> offsets, data_type const output_dtype, null_policy null_handling, std::optional<std::reference_wrapper<scalar const>> init, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Compute if all of the values in the segment are true when typecasted to bool * * If an input segment is empty, the segment result is null. * * If `null_handling==null_policy::INCLUDE`, all elements in a segment must be valid * for the reduced value to be valid. * If `null_handling==null_policy::EXCLUDE`, the reduced value is valid if any element * in the segment is valid. * * @throw cudf::logic_error if input column type is not convertible to bool. * @throw cudf::logic_error if `output_dtype` is not BOOL8. * * @param col Input column data * @param offsets Indices to identify segment boundaries within input `col` * @param output_dtype Data type of the output column * @param null_handling Specifies how null elements are processed for each segment * @param init Initial value of each segment * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return Column of BOOL8 for the results of the segments */ std::unique_ptr<column> segmented_all(column_view const& col, device_span<size_type const> offsets, data_type const output_dtype, null_policy null_handling, std::optional<std::reference_wrapper<scalar const>> init, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Computes mean of elements of segments in the input column * * If input segment is empty, the segment result is null. * * If `null_handling==null_policy::INCLUDE`, all elements in a segment must be valid * for the reduced value to be valid. * If `null_handling==null_policy::EXCLUDE`, the reduced value is valid if any element * in the segment is valid. 
* * @throw cudf::logic_error if input column type is not arithmetic type * @throw cudf::logic_error if `output_dtype` is not floating point type * * @param col Input column data * @param offsets Indices to identify segment boundaries within input `col` * @param output_dtype Data type of the output column * @param null_handling Specifies how null elements are processed for each segment * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return Column of `output_dtype` for the reduction results of the segments */ std::unique_ptr<column> segmented_mean(column_view const& col, device_span<size_type const> offsets, data_type const output_dtype, null_policy null_handling, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Computes sum of squares of elements of segments in the input column * * If input segment is empty, the segment result is null. * * If `null_handling==null_policy::INCLUDE`, all elements in a segment must be valid * for the reduced value to be valid. * If `null_handling==null_policy::EXCLUDE`, the reduced value is valid if any element * in the segment is valid. * * @throw cudf::logic_error if input column type is not arithmetic type * @throw cudf::logic_error if `output_dtype` is not an arithmetic type * * @param col Input column data * @param offsets Indices to identify segment boundaries within input `col` * @param output_dtype Data type of the output column * @param null_handling Specifies how null elements are processed for each segment * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return Column of `output_dtype` for the reduction results of the segments */ std::unique_ptr<column> segmented_sum_of_squares(column_view const& col, device_span<size_type const> offsets, data_type const output_dtype, null_policy null_handling, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Computes the standard deviation of elements of segments in the input column * * If input segment is empty, the segment result is null. * * If `null_handling==null_policy::INCLUDE`, all elements in a segment must be valid * for the reduced value to be valid. * If `null_handling==null_policy::EXCLUDE`, the reduced value is valid if any element * in the segment is valid. * * @throw cudf::logic_error if input column type is not arithmetic type * @throw cudf::logic_error if `output_dtype` is not floating point type * * @param col Input column data * @param offsets Indices to identify segment boundaries within input `col` * @param output_dtype Data type of the output column * @param null_handling Specifies how null elements are processed for each segment * @param ddof Delta degrees of freedom. 
* The divisor used is N - ddof, where N the number of elements in each segment * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return Column of `output_dtype` for the reduction results of the segments */ std::unique_ptr<column> segmented_standard_deviation(column_view const& col, device_span<size_type const> offsets, data_type const output_dtype, null_policy null_handling, size_type ddof, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Computes the variance of elements of segments in the input column * * If input segment is empty, the segment result is null. * * If `null_handling==null_policy::INCLUDE`, all elements in a segment must be valid * for the reduced value to be valid. * If `null_handling==null_policy::EXCLUDE`, the reduced value is valid if any element * in the segment is valid. * * @throw cudf::logic_error if input column type is not arithmetic type * @throw cudf::logic_error if `output_dtype` is not floating point type * * @param col Input column data * @param offsets Indices to identify segment boundaries within input `col` * @param output_dtype Data type of the output column * @param null_handling Specifies how null elements are processed for each segment * @param ddof Delta degrees of freedom. * The divisor used is N - ddof, where N the number of elements in each segment * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return Column of `output_dtype` for the reduction results of the segments */ std::unique_ptr<column> segmented_variance(column_view const& col, device_span<size_type const> offsets, data_type const output_dtype, null_policy null_handling, size_type ddof, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Counts the number of unique values within each segment of a column * * Unique entries are counted by comparing adjacent values so the column segments * are expected to be sorted before calling this function otherwise the results * are undefined. * * If any input segment is empty, that segment's result is null. * * If `null_handling==null_policy::INCLUDE`, the segment count is the number of * unique values +1 which includes all the null entries in that segment. * If `null_handling==null_policy::EXCLUDE`, the segment count does not include nulls. * * @throw cudf::logic_error if input column type is a nested type * * @param col Input column data * @param offsets Indices to identify segment boundaries within input `col` * @param null_handling Specifies how null elements are processed for each segment * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return Column of unique counts per segment */ std::unique_ptr<column> segmented_nunique(column_view const& col, device_span<size_type const> offsets, null_policy null_handling, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); } // namespace detail } // namespace reduction } // namespace cudf
0
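The segmented reduction declarations above all share the same calling pattern: a values column, a device span of segment offsets with one more entry than there are segments, an output type, and a null policy. A hedged usage sketch for segmented_sum follows; vals is a placeholder column_view with 6 rows and the offsets describe segments [0,3) and [3,6).

auto stream = cudf::get_default_stream();
auto mr     = rmm::mr::get_current_device_resource();

std::vector<cudf::size_type> h_offsets{0, 3, 6};
rmm::device_uvector<cudf::size_type> offsets(h_offsets.size(), stream);
cudaMemcpyAsync(offsets.data(), h_offsets.data(),
                h_offsets.size() * sizeof(cudf::size_type),
                cudaMemcpyHostToDevice, stream.value());

auto sums = cudf::reduction::detail::segmented_sum(
  vals,
  cudf::device_span<cudf::size_type const>{offsets.data(), offsets.size()},
  cudf::data_type{cudf::type_id::INT64},
  cudf::null_policy::EXCLUDE,
  std::nullopt,  // no initial value for the segments
  stream,
  mr);
// `sums` holds one INT64 row per segment; an empty segment would yield a null row.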
rapidsai_public_repos/cudf/cpp/include/cudf/reduction
rapidsai_public_repos/cudf/cpp/include/cudf/reduction/detail/reduction_functions.hpp
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/column/column_view.hpp> #include <cudf/lists/lists_column_view.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/utilities/default_stream.hpp> #include <rmm/cuda_stream_view.hpp> #include <optional> namespace cudf { namespace reduction { namespace detail { /** * @brief Computes sum of elements in input column * * If all elements in input column are null, output scalar is null. * * @throw cudf::logic_error if input column type is not convertible to `output_dtype` * @throw cudf::logic_error if `output_dtype` is not an arithmetic type * * @param col input column to compute sum * @param output_dtype data type of return type and typecast elements of input column * @param init initial value of the sum * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned scalar's device memory * @return Sum as scalar of type `output_dtype` */ std::unique_ptr<scalar> sum(column_view const& col, data_type const output_dtype, std::optional<std::reference_wrapper<scalar const>> init, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Computes minimum of elements in input column * * If all elements in input column are null, output scalar is null. * * @throw cudf::logic_error if input column type is convertible to `output_dtype` * * @param col input column to compute minimum * @param output_dtype data type of return type and typecast elements of input column * @param init initial value of the minimum * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned scalar's device memory * @return Minimum element as scalar of type `output_dtype` */ std::unique_ptr<scalar> min(column_view const& col, data_type const output_dtype, std::optional<std::reference_wrapper<scalar const>> init, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Computes maximum of elements in input column * * If all elements in input column are null, output scalar is null. 
* * @throw cudf::logic_error if input column type is convertible to `output_dtype` * * @param col input column to compute maximum * @param output_dtype data type of return type and typecast elements of input column * @param init initial value of the maximum * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned scalar's device memory * @return Maximum element as scalar of type `output_dtype` */ std::unique_ptr<scalar> max(column_view const& col, data_type const output_dtype, std::optional<std::reference_wrapper<scalar const>> init, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Computes any of elements in input column is true when typecasted to bool * * If all elements in input column are null, output scalar is null. * * @throw cudf::logic_error if input column type is not convertible to bool * @throw cudf::logic_error if `output_dtype` is not bool * * @param col input column to compute any * @param output_dtype data type of return type and typecast elements of input column * @param init initial value of the any * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned scalar's device memory * @return bool scalar if any of elements is true when typecasted to bool */ std::unique_ptr<scalar> any(column_view const& col, data_type const output_dtype, std::optional<std::reference_wrapper<scalar const>> init, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Computes all of elements in input column is true when typecasted to bool * * If all elements in input column are null, output scalar is null. * * @throw cudf::logic_error if input column type is not convertible to bool * @throw cudf::logic_error if `output_dtype` is not bool * * @param col input column to compute all * @param output_dtype data type of return type and typecast elements of input column * @param init initial value of the all * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned scalar's device memory * @return bool scalar if all of elements is true when typecasted to bool */ std::unique_ptr<scalar> all(column_view const& col, data_type const output_dtype, std::optional<std::reference_wrapper<scalar const>> init, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Compute frequency for each unique element in the input column. * * The result histogram is stored in structs column having two children. The first child contains * unique elements from the input, and the second child contains their corresponding frequencies. * * @param input The column to compute histogram * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned scalar's device memory * @return A list_scalar storing a structs column as the result histogram */ std::unique_ptr<scalar> histogram(column_view const& input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Merge multiple histograms together. 
* * @param input The input given as multiple histograms concatenated together * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned scalar's device memory * @return A list_scalar storing the result histogram */ std::unique_ptr<scalar> merge_histogram(column_view const& input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Computes product of elements in input column * * If all elements in input column are null, output scalar is null. * * @throw cudf::logic_error if input column type is not convertible to `output_dtype` * @throw cudf::logic_error if `output_dtype` is not an arithmetic type * * @param col input column to compute product * @param output_dtype data type of return type and typecast elements of input column * @param init initial value of the product * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned scalar's device memory * @return Product as scalar of type `output_dtype` */ std::unique_ptr<scalar> product(column_view const& col, data_type const output_dtype, std::optional<std::reference_wrapper<scalar const>> init, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Computes sum of squares of elements in input column * * If all elements in input column are null, output scalar is null. * * @throw cudf::logic_error if input column type is not convertible to `output_dtype` * @throw cudf::logic_error if `output_dtype` is not an arithmetic type * * @param col input column to compute sum of squares * @param output_dtype data type of return type and typecast elements of input column * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned scalar's device memory * @return Sum of squares as scalar of type `output_dtype` */ std::unique_ptr<scalar> sum_of_squares(column_view const& col, data_type const output_dtype, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Computes mean of elements in input column * * If all elements in input column are null, output scalar is null. * * @throw cudf::logic_error if input column type is not arithmetic type * @throw cudf::logic_error if `output_dtype` is not floating point type * * @param col input column to compute mean * @param output_dtype data type of return type and typecast elements of input column * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned scalar's device memory * @return Mean as scalar of type `output_dtype` */ std::unique_ptr<scalar> mean(column_view const& col, data_type const output_dtype, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Computes variance of elements in input column * * If all elements in input column are null, output scalar is null. * * @throw cudf::logic_error if input column type is not arithmetic type * @throw cudf::logic_error if `output_dtype` is not floating point type * * @param col input column to compute variance * @param output_dtype data type of return type and typecast elements of input column * @param ddof Delta degrees of freedom. The divisor used is N - ddof, where N represents the number * of elements. 
* @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned scalar's device memory * @return Variance as scalar of type `output_dtype` */ std::unique_ptr<scalar> variance(column_view const& col, data_type const output_dtype, size_type ddof, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Computes standard deviation of elements in input column * * If all elements in input column are null, output scalar is null. * * @throw cudf::logic_error if input column type is not arithmetic type * @throw cudf::logic_error if `output_dtype` is not floating point type * * @param col input column to compute standard deviation * @param output_dtype data type of return type and typecast elements of input column * @param ddof Delta degrees of freedom. The divisor used is N - ddof, where N represents the number * of elements. * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned scalar's device memory * @return Standard deviation as scalar of type `output_dtype` */ std::unique_ptr<scalar> standard_deviation(column_view const& col, data_type const output_dtype, size_type ddof, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Returns nth element in input column * * A negative value `n` is interpreted as `n+count`, where `count` is the number of valid * elements in the input column if `null_handling` is `null_policy::EXCLUDE`, else `col.size()`. * * If all elements in input column are null, output scalar is null. * * @warning This function is expensive (invokes a kernel launch). So, it is not * recommended to be used in performance sensitive code or inside a loop. * It takes O(`col.size()`) time and space complexity for nullable column with * `null_policy::EXCLUDE` as input. * * @throw cudf::logic_error if n falls outside the range `[-count, count)` where `count` is the * number of valid * elements in the input column if `null_handling` is `null_policy::EXCLUDE`, * else `col.size()`. 
* * @param col input column to get nth element from * @param n index of element to get * @param null_handling Indicates if null values will be counted while indexing * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned scalar's device memory * @return nth element as scalar */ std::unique_ptr<scalar> nth_element(column_view const& col, size_type n, null_policy null_handling, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Collect input column into a (list) scalar * * @param col input column to collect from * @param null_handling Indicates if null values will be counted while collecting * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned scalar's device memory * @return collected list as scalar */ std::unique_ptr<scalar> collect_list(column_view const& col, null_policy null_handling, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Merge a bunch of list scalars into single list scalar * * @param col input list column representing numbers of list scalars to be merged * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned scalar's device memory * @return merged list as scalar */ std::unique_ptr<scalar> merge_lists(lists_column_view const& col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Collect input column into a (list) scalar without duplicated elements * * @param col input column to collect from * @param null_handling Indicates if null values will be counted while collecting * @param nulls_equal Indicates if null values will be considered as equal values * @param nans_equal Indicates if nan values will be considered as equal values * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned scalar's device memory * @return collected list with unique elements as scalar */ std::unique_ptr<scalar> collect_set(column_view const& col, null_policy null_handling, null_equality nulls_equal, nan_equality nans_equal, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Merge a bunch of list scalars into single list scalar then drop duplicated elements * * @param col input list column representing numbers of list scalars to be merged * @param nulls_equal Indicates if null values will be considered as equal values * @param nans_equal Indicates if nan values will be considered as equal values * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned scalar's device memory * @return collected list with unique elements as scalar */ std::unique_ptr<scalar> merge_sets(lists_column_view const& col, null_equality nulls_equal, nan_equality nans_equal, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); } // namespace detail } // namespace reduction } // namespace cudf
0
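These are internal (detail) declarations, but the call shape is simple: pass the column, the requested output type, an optional initial value, a stream, and a memory resource. A hedged sketch using sum and min follows; vals is a placeholder INT32 column_view.

auto stream = cudf::get_default_stream();
auto mr     = rmm::mr::get_current_device_resource();

// Sum widened to INT64 to reduce overflow risk; the result scalar is null only when
// every element of `vals` is null.
auto total = cudf::reduction::detail::sum(
  vals, cudf::data_type{cudf::type_id::INT64}, std::nullopt, stream, mr);

// Minimum keeps the input type.
auto smallest =
  cudf::reduction::detail::min(vals, vals.type(), std::nullopt, stream, mr);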
rapidsai_public_repos/cudf/cpp/include/cudf/reduction
rapidsai_public_repos/cudf/cpp/include/cudf/reduction/detail/reduction.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/aggregation.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/types.hpp> #include <optional> namespace cudf::reduction::detail { /** * @copydoc cudf::reduce(column_view const&, reduce_aggregation const&, data_type, * std::optional<std::reference_wrapper<scalar const>>, rmm::mr::device_memory_resource*) * * @param stream CUDA stream used for device memory operations and kernel launches. */ std::unique_ptr<scalar> reduce(column_view const& col, reduce_aggregation const& agg, data_type output_dtype, std::optional<std::reference_wrapper<scalar const>> init, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); } // namespace cudf::reduction::detail
0
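This detail function backs the public cudf::reduce entry point. A brief sketch of the public counterpart, which is what most callers use, follows; col is a placeholder column_view.

auto agg    = cudf::make_sum_aggregation<cudf::reduce_aggregation>();
auto result = cudf::reduce(col, *agg, cudf::data_type{cudf::type_id::INT64});
// `result` is an INT64 scalar; it is null if all input elements are null.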
rapidsai_public_repos/cudf/cpp/include/cudf/reduction
rapidsai_public_repos/cudf/cpp/include/cudf/reduction/detail/segmented_reduction.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "reduction_operators.cuh" #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <rmm/exec_policy.hpp> #include <cub/device/device_segmented_reduce.cuh> #include <thrust/iterator/iterator_traits.h> #include <thrust/transform.h> namespace cudf { namespace reduction { namespace detail { /** * @brief Compute the specified simple reduction over each of the segments in the * input range of elements * * @tparam InputIterator Input iterator type * @tparam OffsetIterator Offset iterator type * @tparam OutputIterator Output iterator type * @tparam BinaryOp Binary operator used for reduce * @tparam OutputType The output type derived from the OutputIterator * * @param d_in Input data iterator * @param d_offset_begin Begin iterator to segment indices * @param d_offset_end End iterator to segment indices * @param d_out Output data iterator * @param binary_op The reduction operator * @param initial_value Initial value of the reduction * @param stream CUDA stream used for device memory operations and kernel launches * */ template <typename InputIterator, typename OffsetIterator, typename OutputIterator, typename BinaryOp, typename OutputType = typename thrust::iterator_value<OutputIterator>::type, typename std::enable_if_t<is_fixed_width<OutputType>() && !cudf::is_fixed_point<OutputType>()>* = nullptr> void segmented_reduce(InputIterator d_in, OffsetIterator d_offset_begin, OffsetIterator d_offset_end, OutputIterator d_out, BinaryOp binary_op, OutputType initial_value, rmm::cuda_stream_view stream) { auto const num_segments = static_cast<size_type>(std::distance(d_offset_begin, d_offset_end)) - 1; // Allocate temporary storage size_t temp_storage_bytes = 0; cub::DeviceSegmentedReduce::Reduce(nullptr, temp_storage_bytes, d_in, d_out, num_segments, d_offset_begin, d_offset_begin + 1, binary_op, initial_value, stream.value()); auto d_temp_storage = rmm::device_buffer{temp_storage_bytes, stream}; // Run reduction cub::DeviceSegmentedReduce::Reduce(d_temp_storage.data(), temp_storage_bytes, d_in, d_out, num_segments, d_offset_begin, d_offset_begin + 1, binary_op, initial_value, stream.value()); } template <typename InputIterator, typename OffsetIterator, typename OutputIterator, typename BinaryOp, typename OutputType = typename thrust::iterator_value<OutputIterator>::type, typename std::enable_if_t<!(is_fixed_width<OutputType>() && !cudf::is_fixed_point<OutputType>())>* = nullptr> void segmented_reduce(InputIterator, OffsetIterator, OffsetIterator, OutputIterator, BinaryOp, OutputType, rmm::cuda_stream_view) { CUDF_FAIL( "Unsupported data types called on segmented_reduce. Only numeric and chrono types are " "supported."); } /** * @brief Compute reduction by the compound operator (reduce and transform) * * The reduction operator must have an `intermediate::compute_result()` method. 
* This method performs reduction using binary operator `Op::Op` and calculates the * result to `OutputType` using `compute_result()` through the transform method. * * @tparam Op Reduction operator * @tparam InputIterator Input iterator type * @tparam OffsetIterator Offsets iterator type * @tparam OutputIterator Output iterator type * * @param d_in Input data iterator * @param d_offset_begin Begin iterator to segment indices * @param d_offset_end End iterator to segment indices * @param d_out Output data iterator * @param op The reduction operator * @param ddof Delta degrees of freedom used for standard deviation and variance * @param d_valid_counts Number of valid values per segment * @param stream CUDA stream used for device memory operations and kernel launches */ template <typename Op, typename InputIterator, typename OffsetIterator, typename OutputIterator> void segmented_reduce(InputIterator d_in, OffsetIterator d_offset_begin, OffsetIterator d_offset_end, OutputIterator d_out, op::compound_op<Op> op, size_type ddof, size_type* d_valid_counts, rmm::cuda_stream_view stream) { using OutputType = typename thrust::iterator_value<OutputIterator>::type; using IntermediateType = typename thrust::iterator_value<InputIterator>::type; auto num_segments = static_cast<size_type>(std::distance(d_offset_begin, d_offset_end)) - 1; auto const binary_op = op.get_binary_op(); auto const initial_value = op.template get_identity<IntermediateType>(); rmm::device_uvector<IntermediateType> intermediate_result{static_cast<std::size_t>(num_segments), stream}; // Allocate temporary storage size_t temp_storage_bytes = 0; cub::DeviceSegmentedReduce::Reduce(nullptr, temp_storage_bytes, d_in, intermediate_result.data(), num_segments, d_offset_begin, d_offset_begin + 1, binary_op, initial_value, stream.value()); auto d_temp_storage = rmm::device_buffer{temp_storage_bytes, stream}; // Run reduction cub::DeviceSegmentedReduce::Reduce(d_temp_storage.data(), temp_storage_bytes, d_in, intermediate_result.data(), num_segments, d_offset_begin, d_offset_begin + 1, binary_op, initial_value, stream.value()); // compute the result value from intermediate value in device thrust::transform( rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(num_segments), d_out, [ir = intermediate_result.data(), op, d_valid_counts, ddof] __device__(auto idx) { auto const count = d_valid_counts[idx]; return count > 0 ? op.template compute_result<OutputType>(ir[idx], count, ddof) : OutputType{0}; }); } } // namespace detail } // namespace reduction } // namespace cudf
0
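The segmented_reduce helpers above follow the standard two-phase CUB convention: a first call with a null temporary-storage pointer only reports the required scratch size, and the second call performs the reduction; the end-offsets iterator is simply d_offset_begin + 1 because the offsets array has one more entry than there are segments. A standalone sketch of that pattern with cub::DeviceSegmentedReduce::Sum follows; d_in, d_out, d_offsets, num_segments, and stream are placeholders.

size_t temp_storage_bytes = 0;
cub::DeviceSegmentedReduce::Sum(nullptr, temp_storage_bytes,
                                d_in, d_out, num_segments,
                                d_offsets, d_offsets + 1, stream.value());
auto d_temp_storage = rmm::device_buffer{temp_storage_bytes, stream};
cub::DeviceSegmentedReduce::Sum(d_temp_storage.data(), temp_storage_bytes,
                                d_in, d_out, num_segments,
                                d_offsets, d_offsets + 1, stream.value());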
rapidsai_public_repos/cudf/cpp/include/cudf/reduction
rapidsai_public_repos/cudf/cpp/include/cudf/reduction/detail/reduction_operators.cuh
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/detail/iterator.cuh> #include <cudf/detail/utilities/device_operators.cuh> #include <cudf/detail/utilities/transform_unary_functions.cuh> #include <cudf/types.hpp> //for CUDF_HOST_DEVICE #include <cmath> #include <thrust/functional.h> namespace cudf { namespace reduction { namespace detail { // intermediate data structure to compute `var`, `std` template <typename ResultType> struct var_std { ResultType value; /// the value ResultType value_squared; /// the value of squared CUDF_HOST_DEVICE inline var_std(ResultType _value = 0, ResultType _value_squared = 0) : value(_value), value_squared(_value_squared){}; using this_t = var_std<ResultType>; CUDF_HOST_DEVICE inline this_t operator+(this_t const& rhs) const { return this_t((this->value + rhs.value), (this->value_squared + rhs.value_squared)); }; }; // transformer for `struct var_std` in order to compute `var`, `std` template <typename ResultType> struct transformer_var_std { using OutputType = var_std<ResultType>; CUDF_HOST_DEVICE inline OutputType operator()(ResultType const& value) { return OutputType(value, value * value); }; }; // ------------------------------------------------------------------------ // Definitions of device struct for reduction operation // all `op::xxx` must have `op` and `transformer` // `op` is used to compute the reduction at device // `transformer` is used to convert elements for computing the reduction at device. // By default `transformer` is static type conversion to ResultType. 
// In some cases, it could be square or abs or complex operations namespace op { /** * @brief Simple reduction operator CRTP Base class * * @tparam Derived operator with simple_op interface */ template <typename Derived> struct simple_op { /** * @brief Get binary operator functor for reduction * * @return binary operator functor object */ auto get_binary_op() { using binary_op = typename Derived::op; return binary_op{}; } /** * @brief Get transformer functor for transforming input column * which inturn is used by reduction binary operator * * @tparam ResultType output type for element transformer * * @return element transformer functor object */ template <typename ResultType> auto get_element_transformer() { using element_transformer = typename Derived::transformer<ResultType>; return element_transformer{}; } /** * @brief Get transformer functor for transforming input column pair iterator * which is used by reduction binary operator * * @tparam ResultType output type for element transformer * * @return element transformer functor object */ template <typename ResultType> auto get_null_replacing_element_transformer() { using element_transformer = typename Derived::transformer<ResultType>; return null_replacing_transformer<ResultType, element_transformer>{get_identity<ResultType>(), element_transformer{}}; } /** * @brief get identity value of type `T` for binary reduction operator * * @tparam T data type of identity value * * @return identity value */ template <typename T> constexpr T get_identity() { return Derived::op::template identity<T>(); } }; // `sum`, `product`, `sum_of_squares`, `min`, `max` are used at simple_reduction // interface is defined by CRTP class simple_op // operator for `sum` struct sum : public simple_op<sum> { using op = cudf::DeviceSum; template <typename ResultType> using transformer = thrust::identity<ResultType>; }; // operator for `product` struct product : public simple_op<product> { using op = cudf::DeviceProduct; template <typename ResultType> using transformer = thrust::identity<ResultType>; }; // operator for `sum_of_squares` struct sum_of_squares : public simple_op<sum_of_squares> { using op = cudf::DeviceSum; template <typename ResultType> using transformer = cudf::transformer_squared<ResultType>; }; // operator for `min` struct min : public simple_op<min> { using op = cudf::DeviceMin; template <typename ResultType> using transformer = thrust::identity<ResultType>; }; // operator for `max` struct max : public simple_op<max> { using op = cudf::DeviceMax; template <typename ResultType> using transformer = thrust::identity<ResultType>; }; /** * @brief Compound reduction operator CRTP Base class * This template class defines the interface for compound operators * In addition to interface defined by simple_op CRTP, this class defines * interface for final result transformation. 
* * @tparam Derived compound operators derived from compound_op */ template <typename Derived> struct compound_op : public simple_op<Derived> { /** * @copydoc simple_op<Derived>::template get_null_replacing_element_transformer<ResultType>() */ template <typename ResultType> auto get_null_replacing_element_transformer() { using element_transformer = typename Derived::transformer<ResultType>; using OutputType = typename Derived::intermediate<ResultType>::IntermediateType; return null_replacing_transformer<OutputType, element_transformer>{ simple_op<Derived>::template get_identity<OutputType>(), element_transformer{}}; } /** * @brief computes the transformed result from result of simple operator. * * @tparam ResultType output type of compound reduction operator * @tparam IntermediateType output type of simple reduction operator * @param input output of simple reduction as input for result transformation * @param count validity count * @param ddof `ddof` parameter used by variance and standard deviation * * @return transformed output result of compound operator */ template <typename ResultType, typename IntermediateType> CUDF_HOST_DEVICE inline static ResultType compute_result(IntermediateType const& input, cudf::size_type const& count, cudf::size_type const& ddof) { // Enforced interface return Derived::template intermediate<ResultType>::compute_result(input, count, ddof); } }; // `mean`, `variance`, `standard_deviation` are used at compound_reduction // compound_reduction requires intermediate::IntermediateType and // intermediate::compute_result IntermediateType is the intermediate data // structure type of a single reduction call, it is also used as OutputType of // cudf::reduction::detail::reduce at compound_reduction. compute_result // computes the final ResultType from the IntermediateType. 
// intermediate::compute_result method is enforced by CRTP base class compound_op // operator for `mean` struct mean : public compound_op<mean> { using op = cudf::DeviceSum; template <typename ResultType> using transformer = thrust::identity<ResultType>; template <typename ResultType> struct intermediate { using IntermediateType = ResultType; // sum value // compute `mean` from intermediate type `IntermediateType` CUDF_HOST_DEVICE inline static ResultType compute_result(IntermediateType const& input, cudf::size_type const& count, cudf::size_type const& ddof) { return (input / count); }; }; }; // operator for `variance` struct variance : public compound_op<variance> { using op = cudf::DeviceSum; template <typename ResultType> using transformer = cudf::reduction::detail::transformer_var_std<ResultType>; template <typename ResultType> struct intermediate { using IntermediateType = var_std<ResultType>; // with sum of value, and sum of squared value // compute `variance` from intermediate type `IntermediateType` CUDF_HOST_DEVICE inline static ResultType compute_result(IntermediateType const& input, cudf::size_type const& count, cudf::size_type const& ddof) { ResultType mean = input.value / count; ResultType asum = input.value_squared; cudf::size_type div = count - ddof; ResultType var = asum / div - ((mean * mean) * count) / div; return var; }; }; }; // operator for `standard deviation` struct standard_deviation : public compound_op<standard_deviation> { using op = cudf::DeviceSum; template <typename ResultType> using transformer = cudf::reduction::detail::transformer_var_std<ResultType>; template <typename ResultType> struct intermediate { using IntermediateType = var_std<ResultType>; // with sum of value, and sum of squared value // compute `standard deviation` from intermediate type `IntermediateType` CUDF_HOST_DEVICE inline static ResultType compute_result(IntermediateType const& input, cudf::size_type const& count, cudf::size_type const& ddof) { using intermediateOp = variance::template intermediate<ResultType>; ResultType var = intermediateOp::compute_result(input, count, ddof); return static_cast<ResultType>(std::sqrt(var)); }; }; }; } // namespace op } // namespace detail } // namespace reduction } // namespace cudf
0
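Because compute_result is CUDF_HOST_DEVICE, the variance arithmetic above can be checked on the host. The sketch below feeds in the accumulated sum and sum of squares for the placeholder values {1, 2, 4} and compares the result against the textbook definition sum((x - mean)^2) / (count - ddof).

using var_op = cudf::reduction::detail::op::variance;

// transformer_var_std maps each value v to (v, v*v); DeviceSum then adds the pairs component-wise.
cudf::reduction::detail::var_std<double> acc{7.0, 21.0};  // sum and sum of squares of {1, 2, 4}

double var = var_op::intermediate<double>::compute_result(acc, /*count=*/3, /*ddof=*/1);
// mean = 7/3, var = 21/2 - (mean*mean*3)/2 = 2.333..., which equals
// ((1-mean)^2 + (2-mean)^2 + (4-mean)^2) / (3 - 1).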
rapidsai_public_repos/cudf/cpp/include/cudf/reduction
rapidsai_public_repos/cudf/cpp/include/cudf/reduction/detail/histogram.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/column/column_view.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/table/table_view.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <memory> #include <optional> namespace cudf::reduction::detail { /** * @brief Compute the frequency for each distinct row in the input table. * * @param input The input table to compute histogram * @param partial_counts An optional column containing count for each row * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate memory of the returned objects * @return A pair of array contains the (stable-order) indices of the distinct rows in the input * table, and their corresponding distinct counts */ [[nodiscard]] std::pair<std::unique_ptr<rmm::device_uvector<size_type>>, std::unique_ptr<column>> compute_row_frequencies(table_view const& input, std::optional<column_view> const& partial_counts, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Create an empty histogram column. * * A histogram column is a structs column `STRUCT<T, int64_t>` where T is type of the input * values. * * @returns An empty histogram column */ [[nodiscard]] std::unique_ptr<column> make_empty_histogram_like(column_view const& values); } // namespace cudf::reduction::detail
0
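A hedged sketch of compute_row_frequencies on a single-column table tbl holding [5, 5, 7, 5]; all names and values (tbl, stream, mr, the expected outputs) are placeholders inferred from the documented behavior, where the indices point at the first occurrence of each distinct row in input order and the counts column holds the matching frequencies.

auto [indices, counts] =
  cudf::reduction::detail::compute_row_frequencies(tbl, std::nullopt, stream, mr);
// Expected shape for this input: `indices` -> {0, 2}, `counts` -> {3, 1}.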
rapidsai_public_repos/cudf/cpp/include/cudf/reduction
rapidsai_public_repos/cudf/cpp/include/cudf/reduction/detail/reduction.cuh
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "reduction_operators.cuh" #include <cudf/column/column_factories.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_scalar.hpp> #include <rmm/exec_policy.hpp> #include <cub/device/device_reduce.cuh> #include <thrust/for_each.h> #include <thrust/iterator/iterator_traits.h> #include <optional> namespace cudf { namespace reduction { namespace detail { /** * @brief Compute the specified simple reduction over the input range of elements. * * @param[in] d_in the begin iterator * @param[in] num_items the number of items * @param[in] op the reduction operator * @param[in] init Optional initial value of the reduction * @param[in] stream CUDA stream used for device memory operations and kernel launches * @param[in] mr Device memory resource used to allocate the returned scalar's device * memory * @returns Output scalar in device memory * * @tparam Op the reduction operator with device binary operator * @tparam InputIterator the input column iterator * @tparam OutputType the output type of reduction */ template <typename Op, typename InputIterator, typename OutputType = typename thrust::iterator_value<InputIterator>::type, std::enable_if_t<is_fixed_width<OutputType>() && not cudf::is_fixed_point<OutputType>()>* = nullptr> std::unique_ptr<scalar> reduce(InputIterator d_in, cudf::size_type num_items, op::simple_op<Op> op, std::optional<OutputType> init, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto const binary_op = op.get_binary_op(); auto const initial_value = init.value_or(op.template get_identity<OutputType>()); auto dev_result = rmm::device_scalar<OutputType>{initial_value, stream, mr}; // Allocate temporary storage rmm::device_buffer d_temp_storage; size_t temp_storage_bytes = 0; cub::DeviceReduce::Reduce(d_temp_storage.data(), temp_storage_bytes, d_in, dev_result.data(), num_items, binary_op, initial_value, stream.value()); d_temp_storage = rmm::device_buffer{temp_storage_bytes, stream}; // Run reduction cub::DeviceReduce::Reduce(d_temp_storage.data(), temp_storage_bytes, d_in, dev_result.data(), num_items, binary_op, initial_value, stream.value()); // only for string_view, data is copied auto s = new cudf::scalar_type_t<OutputType>(std::move(dev_result), true, stream, mr); return std::unique_ptr<scalar>(s); } template <typename Op, typename InputIterator, typename OutputType = typename thrust::iterator_value<InputIterator>::type, std::enable_if_t<is_fixed_point<OutputType>()>* = nullptr> std::unique_ptr<scalar> reduce(InputIterator d_in, cudf::size_type num_items, op::simple_op<Op> op, std::optional<OutputType> init, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FAIL( "This function should never be called. 
fixed_point reduce should always go through the reduce " "for the corresponding device_storage_type_t"); } // @brief string_view specialization of simple reduction template <typename Op, typename InputIterator, typename OutputType = typename thrust::iterator_value<InputIterator>::type, std::enable_if_t<std::is_same_v<OutputType, string_view>>* = nullptr> std::unique_ptr<scalar> reduce(InputIterator d_in, cudf::size_type num_items, op::simple_op<Op> op, std::optional<OutputType> init, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto const binary_op = op.get_binary_op(); auto const initial_value = init.value_or(op.template get_identity<OutputType>()); auto dev_result = rmm::device_scalar<OutputType>{initial_value, stream}; // Allocate temporary storage rmm::device_buffer d_temp_storage; size_t temp_storage_bytes = 0; cub::DeviceReduce::Reduce(d_temp_storage.data(), temp_storage_bytes, d_in, dev_result.data(), num_items, binary_op, initial_value, stream.value()); d_temp_storage = rmm::device_buffer{temp_storage_bytes, stream}; // Run reduction cub::DeviceReduce::Reduce(d_temp_storage.data(), temp_storage_bytes, d_in, dev_result.data(), num_items, binary_op, initial_value, stream.value()); using ScalarType = cudf::scalar_type_t<OutputType>; auto s = new ScalarType(dev_result, true, stream, mr); // only for string_view, data is copied return std::unique_ptr<scalar>(s); } /** * @brief compute reduction by the compound operator (reduce and transform) * * @param[in] d_in the begin iterator * @param[in] num_items the number of items * @param[in] op the reduction operator * @param[in] valid_count Number of valid items * @param[in] ddof Delta degrees of freedom used for standard deviation and variance * @param[in] init Optional initial value of the reduction * @param[in] stream CUDA stream used for device memory operations and kernel launches * @param[in] mr Device memory resource used to allocate the returned scalar's device * memory * @returns Output scalar in device memory * * The reduction operator must have `intermediate::compute_result()` method. * This method performs reduction using binary operator `Op::Op` and transforms the * result to `OutputType` using `compute_result()` transform method. 
* * @tparam Op the reduction operator with device binary operator * @tparam InputIterator the input column iterator * @tparam OutputType the output type of reduction */ template <typename Op, typename InputIterator, typename OutputType, typename IntermediateType = typename thrust::iterator_value<InputIterator>::type> std::unique_ptr<scalar> reduce(InputIterator d_in, cudf::size_type num_items, op::compound_op<Op> op, cudf::size_type valid_count, cudf::size_type ddof, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto const binary_op = op.get_binary_op(); auto const initial_value = op.template get_identity<IntermediateType>(); rmm::device_scalar<IntermediateType> intermediate_result{initial_value, stream}; // Allocate temporary storage rmm::device_buffer d_temp_storage; size_t temp_storage_bytes = 0; cub::DeviceReduce::Reduce(d_temp_storage.data(), temp_storage_bytes, d_in, intermediate_result.data(), num_items, binary_op, initial_value, stream.value()); d_temp_storage = rmm::device_buffer{temp_storage_bytes, stream}; // Run reduction cub::DeviceReduce::Reduce(d_temp_storage.data(), temp_storage_bytes, d_in, intermediate_result.data(), num_items, binary_op, initial_value, stream.value()); // compute the result value from intermediate value in device using ScalarType = cudf::scalar_type_t<OutputType>; auto result = new ScalarType(OutputType{0}, true, stream, mr); thrust::for_each_n(rmm::exec_policy(stream), intermediate_result.data(), 1, [dres = result->data(), op, valid_count, ddof] __device__(auto i) { *dres = op.template compute_result<OutputType>(i, valid_count, ddof); }); return std::unique_ptr<scalar>(result); } } // namespace detail } // namespace reduction } // namespace cudf
0
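In the simple path above, the optional init is folded in by seeding CUB with init.value_or(identity), so a caller-supplied initial value simply participates as one more operand of the reduction. A small illustrative sketch with placeholder values:

std::optional<int64_t> init = 100;
int64_t initial_value = init.value_or(int64_t{0});  // 0 is DeviceSum's identity for int64_t
// Reducing {1, 2, 3} with DeviceSum and this seed yields 106 rather than 6;
// with init == std::nullopt the identity is used and the result is 6.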
rapidsai_public_repos/cudf/cpp/include/cudf
rapidsai_public_repos/cudf/cpp/include/cudf/io/parquet_metadata.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file parquet_metadata.hpp * @brief cuDF-IO freeform API */ #pragma once #include <cudf/io/types.hpp> #include <optional> #include <string_view> #include <variant> #include <vector> namespace cudf { namespace io { namespace parquet { /** * @brief Basic data types in Parquet, determines how data is physically stored */ enum class TypeKind : int8_t { UNDEFINED_TYPE = -1, // Undefined for non-leaf nodes BOOLEAN = 0, INT32 = 1, INT64 = 2, INT96 = 3, // Deprecated FLOAT = 4, DOUBLE = 5, BYTE_ARRAY = 6, FIXED_LEN_BYTE_ARRAY = 7, }; } // namespace parquet /** * @brief Schema of a parquet column, including the nested columns. */ struct parquet_column_schema { public: /** * @brief constructor * * @param name column name * @param type parquet type * @param children child columns (empty for non-nested types) */ parquet_column_schema(std::string_view name, parquet::TypeKind type, std::vector<parquet_column_schema> children) : _name{name}, _type_kind{type}, _children{std::move(children)} { } /** * @brief Returns parquet column name; can be empty. * * @return Column name */ [[nodiscard]] auto name() const { return _name; } /** * @brief Returns parquet type of the column. * * @return Column parquet type */ [[nodiscard]] auto type_kind() const { return _type_kind; } /** * @brief Returns schemas of all child columns. * * @return Children schemas */ [[nodiscard]] auto const& children() const& { return _children; } /** @copydoc children * Children array is moved out of the object (rvalues only). * */ [[nodiscard]] auto children() && { return std::move(_children); } /** * @brief Returns schema of the child with the given index. * * @param idx child index * * @return Child schema */ [[nodiscard]] auto const& child(int idx) const& { return children().at(idx); } /** @copydoc child * Child is moved out of the object (rvalues only). * */ [[nodiscard]] auto child(int idx) && { return std::move(children().at(idx)); } /** * @brief Returns the number of child columns. * * @return Children count */ [[nodiscard]] auto num_children() const { return children().size(); } private: std::string _name; // 3 types available: Physical, Converted, Logical. parquet::TypeKind _type_kind; // Physical std::vector<parquet_column_schema> _children; }; /** * @brief Schema of a parquet file. */ struct parquet_schema { public: /** * @brief constructor * * @param root_column_schema root column */ parquet_schema(parquet_column_schema root_column_schema) : _root{std::move(root_column_schema)} {} /** * @brief Returns the schema of the struct column that contains all columns as fields. * * @return Root column schema */ [[nodiscard]] auto const& root() const& { return _root; } /** @copydoc root * Root column schema is moved out of the object (rvalues only). * */ [[nodiscard]] auto root() && { return std::move(_root); } private: parquet_column_schema _root; }; /** * @brief Information about content of a parquet file. 
*/ class parquet_metadata { public: /// Key-value metadata in the file footer. using key_value_metadata = std::unordered_map<std::string, std::string>; /** * @brief constructor * * @param schema parquet schema * @param num_rows number of rows * @param num_rowgroups number of row groups * @param file_metadata key-value metadata in the file footer */ parquet_metadata(parquet_schema schema, int64_t num_rows, size_type num_rowgroups, key_value_metadata file_metadata) : _schema{std::move(schema)}, _num_rows{num_rows}, _num_rowgroups{num_rowgroups}, _file_metadata{std::move(file_metadata)} { } /** * @brief Returns the parquet schema. * * @return parquet schema */ [[nodiscard]] auto const& schema() const { return _schema; } /** * @brief Returns the number of rows of the root column. * * If a file contains list columns, nested columns can have a different number of rows. * * @return Number of rows */ [[nodiscard]] auto num_rows() const { return _num_rows; } /** * @brief Returns the number of rowgroups in the file. * * @return Number of row groups */ [[nodiscard]] auto num_rowgroups() const { return _num_rowgroups; } /** * @brief Returns the Key value metadata in the file footer. * * @return Key value metadata as a map */ [[nodiscard]] auto const& metadata() const { return _file_metadata; } private: parquet_schema _schema; int64_t _num_rows; size_type _num_rowgroups; key_value_metadata _file_metadata; }; /** * @brief Reads metadata of parquet dataset. * * @ingroup io_readers * * @param src_info Dataset source * * @return parquet_metadata with parquet schema, number of rows, number of row groups and key-value * metadata. */ parquet_metadata read_parquet_metadata(source_info const& src_info); } // namespace io } // namespace cudf
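A minimal usage sketch for the metadata reader declared above, assuming a Parquet file at the placeholder path "example.parquet"; the printed formatting is illustrative only and not part of the header.

#include <cudf/io/parquet_metadata.hpp>

#include <iostream>

int main()
{
  // "example.parquet" is a placeholder path.
  auto const meta = cudf::io::read_parquet_metadata(cudf::io::source_info{"example.parquet"});

  std::cout << "rows: " << meta.num_rows() << ", row groups: " << meta.num_rowgroups() << '\n';

  // The schema root is a struct column whose fields are the file's top-level columns.
  auto const& root = meta.schema().root();
  for (int i = 0; i < static_cast<int>(root.num_children()); ++i) {
    auto const& col = root.child(i);
    std::cout << col.name() << " (physical type " << static_cast<int>(col.type_kind()) << ")\n";
  }

  // Key-value metadata from the file footer.
  for (auto const& [key, value] : meta.metadata()) {
    std::cout << key << " = " << value << '\n';
  }
  return 0;
}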
0
rapidsai_public_repos/cudf/cpp/include/cudf
rapidsai_public_repos/cudf/cpp/include/cudf/io/orc_metadata.hpp
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file orc_metadata.hpp * @brief cuDF-IO freeform API */ #pragma once #include <cudf/io/orc_types.hpp> #include <cudf/io/types.hpp> #include <optional> #include <variant> #include <vector> namespace cudf { namespace io { /** * @brief Holds column names and buffers containing raw file-level and stripe-level statistics. * * The buffers can be parsed using a Protobuf parser. Alternatively, use `parsed_orc_statistics` to * get the statistics parsed into a libcudf representation. * * The `column_names` and `file_stats` members contain one element per column. The `stripes_stats` * contains one element per stripe, where each element contains column statistics for each column. */ struct raw_orc_statistics { std::vector<std::string> column_names; ///< Column names std::vector<std::string> file_stats; ///< File-level statistics for each column std::vector<std::vector<std::string>> stripes_stats; ///< Stripe-level statistics for each column }; /** * @brief Reads file-level and stripe-level statistics of ORC dataset. * * @ingroup io_readers * * The following code snippet demonstrates how to read statistics of a dataset * from a file: * @code * auto result = cudf::read_raw_orc_statistics(cudf::source_info("dataset.orc")); * @endcode * * @param src_info Dataset source * * @return Column names and encoded ORC statistics */ raw_orc_statistics read_raw_orc_statistics(source_info const& src_info); /** * @brief Monostate type alias for the statistics variant. */ using no_statistics = std::monostate; /** * @brief Base class for column statistics that include optional minimum and maximum. * * Includes accessors for the minimum and maximum values. */ template <typename T> struct minmax_statistics { std::optional<T> minimum; ///< Minimum value std::optional<T> maximum; ///< Maximum value }; /** * @brief Base class for column statistics that include an optional sum. * * Includes accessors for the sum value. */ template <typename T> struct sum_statistics { std::optional<T> sum; ///< Sum of values in column }; /** * @brief Statistics for integral columns. */ struct integer_statistics : minmax_statistics<int64_t>, sum_statistics<int64_t> {}; /** * @brief Statistics for floating point columns. */ struct double_statistics : minmax_statistics<double>, sum_statistics<double> {}; /** * @brief Statistics for string columns. * * The `minimum` and `maximum` are the first and last elements, respectively, in lexicographical * order. The `sum` is the total length of elements in the column. * Note: According to ORC specs, the sum should be signed, but pyarrow uses unsigned value */ struct string_statistics : minmax_statistics<std::string>, sum_statistics<int64_t> {}; /** * @brief Statistics for boolean columns. * * The `count` array contains the count of `true` values. */ struct bucket_statistics { std::vector<uint64_t> count; ///< count of `true` values }; /** * @brief Statistics for decimal columns. 
*/ struct decimal_statistics : minmax_statistics<std::string>, sum_statistics<std::string> {}; /** * @brief Statistics for date(time) columns. */ using date_statistics = minmax_statistics<int32_t>; /** * @brief Statistics for binary columns. * * The `sum` is the total number of bytes across all elements. */ using binary_statistics = sum_statistics<int64_t>; /** * @brief Statistics for timestamp columns. * * The `minimum` and `maximum` min/max elements in the column, as the number of milliseconds since * the UNIX epoch. The `minimum_utc` and `maximum_utc` are the same values adjusted to UTC. */ struct timestamp_statistics : minmax_statistics<int64_t> { std::optional<int64_t> minimum_utc; ///< minimum in milliseconds std::optional<int64_t> maximum_utc; ///< maximum in milliseconds std::optional<uint32_t> minimum_nanos; ///< nanoseconds part of the minimum std::optional<uint32_t> maximum_nanos; ///< nanoseconds part of the maximum }; namespace orc { // forward declare the type that ProtobufReader uses. The `cudf::io::column_statistics` objects, // returned from `read_parsed_orc_statistics`, are constructed from // `cudf::io::orc::column_statistics` objects that `ProtobufReader` initializes. struct column_statistics; } // namespace orc /** * @brief Contains per-column ORC statistics. * * All columns can have the `number_of_values` statistics. Depending on the data type, a column can * have additional statistics, accessible through `type_specific_stats` accessor. */ struct column_statistics { std::optional<uint64_t> number_of_values; ///< number of statistics std::optional<bool> has_null; ///< column has any nulls std::variant<no_statistics, integer_statistics, double_statistics, string_statistics, bucket_statistics, decimal_statistics, date_statistics, binary_statistics, timestamp_statistics> type_specific_stats; ///< type-specific statistics /** * @brief Construct a new column statistics object * * @param detail_statistics The statistics to initialize the object with */ column_statistics(orc::column_statistics&& detail_statistics); }; /** * @brief Holds column names and parsed file-level and stripe-level statistics. * * The `column_names` and `file_stats` members contain one element per column. The `stripes_stats` * member contains one element per stripe, where each element contains column statistics for each * column. */ struct parsed_orc_statistics { std::vector<std::string> column_names; ///< column names std::vector<column_statistics> file_stats; ///< file-level statistics std::vector<std::vector<column_statistics>> stripes_stats; ///< stripe-level statistics }; /** * @brief Reads file-level and stripe-level statistics of ORC dataset. * * @ingroup io_readers * * @param src_info Dataset source * * @return Column names and decoded ORC statistics */ parsed_orc_statistics read_parsed_orc_statistics(source_info const& src_info); /** * @brief Schema of an ORC column, including the nested columns. */ struct orc_column_schema { public: /** * @brief constructor * * @param name column name * @param type ORC type * @param children child columns (empty for non-nested types) */ orc_column_schema(std::string_view name, orc::TypeKind type, std::vector<orc_column_schema> children) : _name{name}, _type_kind{type}, _children{std::move(children)} { } /** * @brief Returns ORC column name; can be empty. * * @return Column name */ [[nodiscard]] auto name() const { return _name; } /** * @brief Returns ORC type of the column. 
* * @return Column ORC type */ [[nodiscard]] auto type_kind() const { return _type_kind; } /** * @brief Returns schemas of all child columns. * * @return Children schemas */ [[nodiscard]] auto const& children() const& { return _children; } /** @copydoc children * Children array is moved out of the object (rvalues only). * */ [[nodiscard]] auto children() && { return std::move(_children); } /** * @brief Returns schema of the child with the given index. * * @param idx child index * * @return Child schema */ [[nodiscard]] auto const& child(int idx) const& { return children().at(idx); } /** @copydoc child * Child is moved out of the object (rvalues only). * */ [[nodiscard]] auto child(int idx) && { return std::move(children().at(idx)); } /** * @brief Returns the number of child columns. * * @return Children count */ [[nodiscard]] auto num_children() const { return children().size(); } private: std::string _name; orc::TypeKind _type_kind; std::vector<orc_column_schema> _children; }; /** * @brief Schema of an ORC file. */ struct orc_schema { public: /** * @brief constructor * * @param root_column_schema root column */ orc_schema(orc_column_schema root_column_schema) : _root{std::move(root_column_schema)} {} /** * @brief Returns the schema of the struct column that contains all columns as fields. * * @return Root column schema */ [[nodiscard]] auto const& root() const& { return _root; } /** @copydoc root * Root column schema is moved out of the object (rvalues only). * */ [[nodiscard]] auto root() && { return std::move(_root); } private: orc_column_schema _root; }; /** * @brief Information about content of an ORC file. */ class orc_metadata { public: /** * @brief constructor * * @param schema ORC schema * @param num_rows number of rows * @param num_stripes number of stripes */ orc_metadata(orc_schema schema, size_type num_rows, size_type num_stripes) : _schema{std::move(schema)}, _num_rows{num_rows}, _num_stripes{num_stripes} { } /** * @brief Returns the ORC schema. * * @return ORC schema */ [[nodiscard]] auto const& schema() const { return _schema; } ///< Number of rows in the root column; can vary for nested columns /** * @brief Returns the number of rows of the root column. * * If a file contains list columns, nested columns can have a different number of rows. * * @return Number of rows */ [[nodiscard]] auto num_rows() const { return _num_rows; } /** * @brief Returns the number of stripes in the file. * * @return Number of stripes */ [[nodiscard]] auto num_stripes() const { return _num_stripes; } private: orc_schema _schema; size_type _num_rows; size_type _num_stripes; }; /** * @brief Reads metadata of ORC dataset. * * @ingroup io_readers * * @param src_info Dataset source * * @return orc_metadata with ORC schema, number of rows and number of stripes. */ orc_metadata read_orc_metadata(source_info const& src_info); } // namespace io } // namespace cudf
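A short sketch of the two ORC entry points declared above, assuming a file at the placeholder path "example.orc". The variant returned in `type_specific_stats` is inspected with `std::get_if`, which yields a null pointer for columns whose statistics are of a different kind.

#include <cudf/io/orc_metadata.hpp>

#include <cstddef>
#include <iostream>
#include <variant>

int main()
{
  cudf::io::source_info const src{"example.orc"};  // placeholder path

  // File-level structure: row count, stripe count, and top-level column names.
  auto const meta = cudf::io::read_orc_metadata(src);
  std::cout << "rows: " << meta.num_rows() << ", stripes: " << meta.num_stripes() << '\n';
  auto const& root = meta.schema().root();
  for (int i = 0; i < static_cast<int>(root.num_children()); ++i) {
    std::cout << root.child(i).name() << '\n';
  }

  // Parsed file-level statistics; integer columns expose optional min/max/sum.
  auto const stats = cudf::io::read_parsed_orc_statistics(src);
  for (std::size_t i = 0; i < stats.column_names.size(); ++i) {
    auto const& col_stats = stats.file_stats[i];
    if (auto const* int_stats =
          std::get_if<cudf::io::integer_statistics>(&col_stats.type_specific_stats)) {
      if (int_stats->minimum and int_stats->maximum) {
        std::cout << stats.column_names[i] << ": [" << *int_stats->minimum << ", "
                  << *int_stats->maximum << "]\n";
      }
    }
  }
  return 0;
}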
0
rapidsai_public_repos/cudf/cpp/include/cudf
rapidsai_public_repos/cudf/cpp/include/cudf/io/types.hpp
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file types.hpp * @brief cuDF-IO API type definitions */ #pragma once #include <cudf/table/table.hpp> #include <cudf/types.hpp> #include <cudf/utilities/span.hpp> #include <map> #include <memory> #include <optional> #include <string> #include <unordered_map> #include <vector> namespace cudf { //! IO interfaces namespace io { class data_sink; class datasource; } // namespace io } // namespace cudf //! cuDF interfaces namespace cudf { //! IO interfaces namespace io { /** * @brief Compression algorithms */ enum class compression_type { NONE, ///< No compression AUTO, ///< Automatically detect or select compression format SNAPPY, ///< Snappy format, using byte-oriented LZ77 GZIP, ///< GZIP format, using DEFLATE algorithm BZIP2, ///< BZIP2 format, using Burrows-Wheeler transform BROTLI, ///< BROTLI format, using LZ77 + Huffman + 2nd order context modeling ZIP, ///< ZIP format, using DEFLATE algorithm XZ, ///< XZ format, using LZMA(2) algorithm ZLIB, ///< ZLIB format, using DEFLATE algorithm LZ4, ///< LZ4 format, using LZ77 LZO, ///< Lempel–Ziv–Oberhumer format ZSTD ///< Zstandard format }; /** * @brief Data source or destination types */ enum class io_type { FILEPATH, ///< Input/output is a file path HOST_BUFFER, ///< Input/output is a buffer in host memory DEVICE_BUFFER, ///< Input/output is a buffer in device memory VOID, ///< Input/output is nothing. No work is done. Useful for benchmarking USER_IMPLEMENTED, ///< Input/output is handled by a custom user class }; /** * @brief Behavior when handling quotations in field data */ enum class quote_style { MINIMAL, ///< Quote only fields which contain special characters ALL, ///< Quote all fields NONNUMERIC, ///< Quote all non-numeric fields NONE ///< Never quote fields; disable quotation parsing }; /** * @brief Column statistics granularity type for parquet/orc writers */ enum statistics_freq { STATISTICS_NONE = 0, ///< No column statistics STATISTICS_ROWGROUP = 1, ///< Per-Rowgroup column statistics STATISTICS_PAGE = 2, ///< Per-page column statistics STATISTICS_COLUMN = 3, ///< Full column and offset indices. Implies STATISTICS_ROWGROUP }; /** * @brief Statistics about compression performed by a writer. */ class writer_compression_statistics { public: /** * @brief Default constructor */ writer_compression_statistics() = default; /** * @brief Constructor with initial values. 
* * @param num_compressed_bytes The number of bytes that were successfully compressed * @param num_failed_bytes The number of bytes that failed to compress * @param num_skipped_bytes The number of bytes that were skipped during compression * @param num_compressed_output_bytes The number of bytes in the compressed output */ writer_compression_statistics(size_t num_compressed_bytes, size_t num_failed_bytes, size_t num_skipped_bytes, size_t num_compressed_output_bytes) : _num_compressed_bytes(num_compressed_bytes), _num_failed_bytes(num_failed_bytes), _num_skipped_bytes(num_skipped_bytes), _num_compressed_output_bytes(num_compressed_output_bytes) { } /** * @brief Adds the values from another `writer_compression_statistics` object. * * @param other The other writer_compression_statistics object * @return writer_compression_statistics& Reference to this object */ writer_compression_statistics& operator+=(writer_compression_statistics const& other) noexcept { _num_compressed_bytes += other._num_compressed_bytes; _num_failed_bytes += other._num_failed_bytes; _num_skipped_bytes += other._num_skipped_bytes; _num_compressed_output_bytes += other._num_compressed_output_bytes; return *this; } /** * @brief Returns the number of bytes in blocks that were successfully compressed. * * This is the number of bytes that were actually compressed, not the size of the compressed * output. * * @return size_t The number of bytes that were successfully compressed */ [[nodiscard]] auto num_compressed_bytes() const noexcept { return _num_compressed_bytes; } /** * @brief Returns the number of bytes in blocks that failed to compress. * * @return size_t The number of bytes that failed to compress */ [[nodiscard]] auto num_failed_bytes() const noexcept { return _num_failed_bytes; } /** * @brief Returns the number of bytes in blocks that were skipped during compression. * * @return size_t The number of bytes that were skipped during compression */ [[nodiscard]] auto num_skipped_bytes() const noexcept { return _num_skipped_bytes; } /** * @brief Returns the total size of compression inputs. * * @return size_t The total size of compression inputs */ [[nodiscard]] auto num_total_input_bytes() const noexcept { return num_compressed_bytes() + num_failed_bytes() + num_skipped_bytes(); } /** * @brief Returns the compression ratio for the successfully compressed blocks. * * Returns nan if there were no successfully compressed blocks. * * @return double The ratio between the size of the compression inputs and the size of the * compressed output. */ [[nodiscard]] auto compression_ratio() const noexcept { return static_cast<double>(num_compressed_bytes()) / _num_compressed_output_bytes; } private: std::size_t _num_compressed_bytes = 0; ///< The number of bytes that were successfully compressed std::size_t _num_failed_bytes = 0; ///< The number of bytes that failed to compress std::size_t _num_skipped_bytes = 0; ///< The number of bytes that were skipped during compression std::size_t _num_compressed_output_bytes = 0; ///< The number of bytes in the compressed output }; /** * @brief Control use of dictionary encoding for parquet writer */ enum dictionary_policy { NEVER = 0, ///< Never use dictionary encoding ADAPTIVE = 1, ///< Use dictionary when it will not impact compression ALWAYS = 2 ///< Use dictionary regardless of impact on compression }; /** * @brief Detailed name (and optionally nullability) information for output columns. * * The hierarchy of children matches the hierarchy of children in the output * cudf columns. 
*/ struct column_name_info { std::string name; ///< Column name std::optional<bool> is_nullable; ///< Column nullability std::vector<column_name_info> children; ///< Child column names /** * @brief Construct a column name info with a name, optional nullabilty, and no children * * @param _name Column name * @param _is_nullable True if column is nullable */ column_name_info(std::string const& _name, std::optional<bool> _is_nullable = std::nullopt) : name(_name), is_nullable(_is_nullable) { } column_name_info() = default; }; /** * @brief Table metadata returned by IO readers. */ struct table_metadata { std::vector<column_name_info> schema_info; //!< Detailed name information for the entire output hierarchy std::map<std::string, std::string> user_data; //!< Format-dependent metadata of the first input //!< file as key-values pairs (deprecated) std::vector<std::unordered_map<std::string, std::string>> per_file_user_data; //!< Per file format-dependent metadata as key-values pairs }; /** * @brief Table with table metadata used by io readers to return the metadata by value */ struct table_with_metadata { std::unique_ptr<table> tbl; //!< Table table_metadata metadata; //!< Table metadata }; /** * @brief Non-owning view of a host memory buffer * * @deprecated Since 23.04 * * Used to describe buffer input in `source_info` objects. */ struct host_buffer { // TODO: to be replaced by `host_span` char const* data = nullptr; //!< Pointer to the buffer size_t size = 0; //!< Size of the buffer host_buffer() = default; /** * @brief Construct a new host buffer object * * @param data Pointer to the buffer * @param size Size of the buffer */ host_buffer(char const* data, size_t size) : data(data), size(size) {} }; /** * @brief Returns `true` if the type is byte-like, meaning it is reasonable to pass as a pointer to * bytes. 
* * @tparam T The representation type * @return `true` if the type is considered a byte-like type */ template <typename T> constexpr inline auto is_byte_like_type() { using non_cv_T = std::remove_cv_t<T>; return std::is_same_v<non_cv_T, int8_t> || std::is_same_v<non_cv_T, char> || std::is_same_v<non_cv_T, uint8_t> || std::is_same_v<non_cv_T, unsigned char> || std::is_same_v<non_cv_T, std::byte>; } /** * @brief Source information for read interfaces */ struct source_info { source_info() = default; /** * @brief Construct a new source info object for multiple files * * @param file_paths Input files paths */ explicit source_info(std::vector<std::string> const& file_paths) : _type(io_type::FILEPATH), _filepaths(file_paths) { } /** * @brief Construct a new source info object for a single file * * @param file_path Single input file */ explicit source_info(std::string const& file_path) : _type(io_type::FILEPATH), _filepaths({file_path}) { } /** * @brief Construct a new source info object for multiple buffers in host memory * * @deprecated Since 23.04 * * @param host_buffers Input buffers in host memory */ explicit source_info(std::vector<host_buffer> const& host_buffers) : _type(io_type::HOST_BUFFER) { _host_buffers.reserve(host_buffers.size()); std::transform(host_buffers.begin(), host_buffers.end(), std::back_inserter(_host_buffers), [](auto const hb) { return cudf::host_span<std::byte const>{ reinterpret_cast<std::byte const*>(hb.data), hb.size}; }); } /** * @brief Construct a new source info object for a single buffer * * @deprecated Since 23.04 * * @param host_data Input buffer in host memory * @param size Size of the buffer */ explicit source_info(char const* host_data, size_t size) : _type(io_type::HOST_BUFFER), _host_buffers( {cudf::host_span<std::byte const>(reinterpret_cast<std::byte const*>(host_data), size)}) { } /** * @brief Construct a new source info object for multiple buffers in host memory * * @param host_buffers Input buffers in host memory */ template <typename T, CUDF_ENABLE_IF(is_byte_like_type<std::remove_cv_t<T>>())> explicit source_info(cudf::host_span<cudf::host_span<T>> const host_buffers) : _type(io_type::HOST_BUFFER) { if constexpr (not std::is_same_v<std::remove_cv_t<T>, std::byte>) { _host_buffers.reserve(host_buffers.size()); std::transform(host_buffers.begin(), host_buffers.end(), std::back_inserter(_host_buffers), [](auto const s) { return cudf::host_span<std::byte const>{ reinterpret_cast<std::byte const*>(s.data()), s.size()}; }); } else { _host_buffers.assign(host_buffers.begin(), host_buffers.end()); } } /** * @brief Construct a new source info object for a single buffer * * @param host_data Input buffer in host memory */ template <typename T, CUDF_ENABLE_IF(is_byte_like_type<std::remove_cv_t<T>>())> explicit source_info(cudf::host_span<T> host_data) : _type(io_type::HOST_BUFFER), _host_buffers{cudf::host_span<std::byte const>( reinterpret_cast<std::byte const*>(host_data.data()), host_data.size())} { } /** * @brief Construct a new source info object for multiple buffers in device memory * * @param device_buffers Input buffers in device memory */ explicit source_info(cudf::host_span<cudf::device_span<std::byte const>> device_buffers) : _type(io_type::DEVICE_BUFFER), _device_buffers(device_buffers.begin(), device_buffers.end()) { } /** * @brief Construct a new source info object from a device buffer * * @param d_buffer Input buffer in device memory */ explicit source_info(cudf::device_span<std::byte const> d_buffer) : _type(io_type::DEVICE_BUFFER), 
_device_buffers({{d_buffer}}) { } /** * @brief Construct a new source info object for multiple user-implemented sources * * @param sources User-implemented input sources */ explicit source_info(std::vector<cudf::io::datasource*> const& sources) : _type(io_type::USER_IMPLEMENTED), _user_sources(sources) { } /** * @brief Construct a new source info object for a single user-implemented source * * @param source Single user-implemented Input source */ explicit source_info(cudf::io::datasource* source) : _type(io_type::USER_IMPLEMENTED), _user_sources({source}) { } /** * @brief Get the type of the input * * @return The type of the input */ [[nodiscard]] auto type() const { return _type; } /** * @brief Get the filepaths of the input * * @return The filepaths of the input */ [[nodiscard]] auto const& filepaths() const { return _filepaths; } /** * @brief Get the host buffers of the input * * @return The host buffers of the input */ [[nodiscard]] auto const& host_buffers() const { return _host_buffers; } /** * @brief Get the device buffers of the input * * @return The device buffers of the input */ [[nodiscard]] auto const& device_buffers() const { return _device_buffers; } /** * @brief Get the user sources of the input * * @return The user sources of the input */ [[nodiscard]] auto const& user_sources() const { return _user_sources; } private: io_type _type = io_type::VOID; std::vector<std::string> _filepaths; std::vector<cudf::host_span<std::byte const>> _host_buffers; std::vector<cudf::device_span<std::byte const>> _device_buffers; std::vector<cudf::io::datasource*> _user_sources; }; /** * @brief Destination information for write interfaces */ struct sink_info { sink_info() = default; /** * @brief Construct a new sink info object * * @param num_sinks Number of sinks */ sink_info(size_t num_sinks) : _num_sinks(num_sinks) {} /** * @brief Construct a new sink info object for multiple files * * @param file_paths Output files paths */ explicit sink_info(std::vector<std::string> const& file_paths) : _type(io_type::FILEPATH), _num_sinks(file_paths.size()), _filepaths(file_paths) { } /** * @brief Construct a new sink info object for a single file * * @param file_path Single output file path */ explicit sink_info(std::string const& file_path) : _type(io_type::FILEPATH), _filepaths({file_path}) { } /** * @brief Construct a new sink info object for multiple host buffers * * @param buffers Output host buffers */ explicit sink_info(std::vector<std::vector<char>*> const& buffers) : _type(io_type::HOST_BUFFER), _num_sinks(buffers.size()), _buffers(buffers) { } /** * @brief Construct a new sink info object for a single host buffer * * @param buffer Single output host buffer */ explicit sink_info(std::vector<char>* buffer) : _type(io_type::HOST_BUFFER), _buffers({buffer}) {} /** * @brief Construct a new sink info object for multiple user-implemented sinks * * @param user_sinks Output user-implemented sinks */ explicit sink_info(std::vector<cudf::io::data_sink*> const& user_sinks) : _type(io_type::USER_IMPLEMENTED), _num_sinks(user_sinks.size()), _user_sinks(user_sinks) { } /** * @brief Construct a new sink info object for a single user-implemented sink * * @param user_sink Single output user-implemented sink */ explicit sink_info(class cudf::io::data_sink* user_sink) : _type(io_type::USER_IMPLEMENTED), _user_sinks({user_sink}) { } /** * @brief Get the type of the input * * @return The type of the input */ [[nodiscard]] auto type() const { return _type; } /** * @brief Get the number of sinks * * @return The number 
of sinks */ [[nodiscard]] auto num_sinks() const { return _num_sinks; } /** * @brief Get the filepaths of the input * * @return The filepaths of the input */ [[nodiscard]] auto const& filepaths() const { return _filepaths; } /** * @brief Get the host buffers of the input * * @return The host buffers of the input */ [[nodiscard]] auto const& buffers() const { return _buffers; } /** * @brief Get the user sinks of the input * * @return The user sinks of the input */ [[nodiscard]] auto const& user_sinks() const { return _user_sinks; } private: io_type _type = io_type::VOID; size_t _num_sinks = 1; std::vector<std::string> _filepaths; std::vector<std::vector<char>*> _buffers; std::vector<cudf::io::data_sink*> _user_sinks; }; class table_input_metadata; /** * @brief Metadata for a column */ class column_in_metadata { friend table_input_metadata; std::string _name = ""; std::optional<bool> _nullable; bool _list_column_is_map = false; bool _use_int96_timestamp = false; bool _output_as_binary = false; std::optional<uint8_t> _decimal_precision; std::optional<int32_t> _parquet_field_id; std::vector<column_in_metadata> children; public: column_in_metadata() = default; /** * @brief Construct a new column in metadata object * * @param name Column name */ column_in_metadata(std::string_view name) : _name{name} {} /** * @brief Add the children metadata of this column * * @param child The children metadata of this column to add * @return this for chaining */ column_in_metadata& add_child(column_in_metadata const& child) { children.push_back(child); return *this; } /** * @brief Set the name of this column * * @param name Name of the column * @return this for chaining */ column_in_metadata& set_name(std::string const& name) noexcept { _name = name; return *this; } /** * @brief Set the nullability of this column * * @param nullable Whether this column is nullable * @return this for chaining */ column_in_metadata& set_nullability(bool nullable) noexcept { _nullable = nullable; return *this; } /** * @brief Specify that this list column should be encoded as a map in the written file * * The column must have the structure list<struct<key, value>>. This option is invalid otherwise * * @return this for chaining */ column_in_metadata& set_list_column_as_map() noexcept { _list_column_is_map = true; return *this; } /** * @brief Specifies whether this timestamp column should be encoded using the deprecated int96 * physical type. Only valid for the following column types: * timestamp_s, timestamp_ms, timestamp_us, timestamp_ns * * @param req True = use int96 physical type. False = use int64 physical type * @return this for chaining */ column_in_metadata& set_int96_timestamps(bool req) noexcept { _use_int96_timestamp = req; return *this; } /** * @brief Set the decimal precision of this column. Only valid if this column is a decimal * (fixed-point) type * * @param precision The integer precision to set for this decimal column * @return this for chaining */ column_in_metadata& set_decimal_precision(uint8_t precision) noexcept { _decimal_precision = precision; return *this; } /** * @brief Set the parquet field id of this column. * * @param field_id The parquet field id to set * @return this for chaining */ column_in_metadata& set_parquet_field_id(int32_t field_id) noexcept { _parquet_field_id = field_id; return *this; } /** * @brief Specifies whether this column should be written as binary or string data * Only valid for the following column types: * string * * @param binary True = use binary data type. 
False = use string data type * @return this for chaining */ column_in_metadata& set_output_as_binary(bool binary) noexcept { _output_as_binary = binary; return *this; } /** * @brief Get reference to a child of this column * * @param i Index of the child to get * @return this for chaining */ column_in_metadata& child(size_type i) noexcept { return children[i]; } /** * @brief Get const reference to a child of this column * * @param i Index of the child to get * @return this for chaining */ [[nodiscard]] column_in_metadata const& child(size_type i) const noexcept { return children[i]; } /** * @brief Get the name of this column * * @return The name of this column */ [[nodiscard]] std::string get_name() const noexcept { return _name; } /** * @brief Get whether nullability has been explicitly set for this column. * * @return Boolean indicating whether nullability has been explicitly set for this column */ [[nodiscard]] bool is_nullability_defined() const noexcept { return _nullable.has_value(); } /** * @brief Gets the explicitly set nullability for this column. * * @throws If nullability is not explicitly defined for this column. * Check using `is_nullability_defined()` first. * @return Boolean indicating whether this column is nullable */ [[nodiscard]] bool nullable() const { return _nullable.value(); } /** * @brief If this is the metadata of a list column, returns whether it is to be encoded as a map. * * @return Boolean indicating whether this column is to be encoded as a map */ [[nodiscard]] bool is_map() const noexcept { return _list_column_is_map; } /** * @brief Get whether to encode this timestamp column using deprecated int96 physical type * * @return Boolean indicating whether to encode this timestamp column using deprecated int96 * physical type */ [[nodiscard]] bool is_enabled_int96_timestamps() const noexcept { return _use_int96_timestamp; } /** * @brief Get whether precision has been set for this decimal column * * @return Boolean indicating whether precision has been set for this decimal column */ [[nodiscard]] bool is_decimal_precision_set() const noexcept { return _decimal_precision.has_value(); } /** * @brief Get the decimal precision that was set for this column. * * @throws If decimal precision was not set for this column. * Check using `is_decimal_precision_set()` first. * @return The decimal precision that was set for this column */ [[nodiscard]] uint8_t get_decimal_precision() const { return _decimal_precision.value(); } /** * @brief Get whether parquet field id has been set for this column. * * @return Boolean indicating whether parquet field id has been set for this column */ [[nodiscard]] bool is_parquet_field_id_set() const noexcept { return _parquet_field_id.has_value(); } /** * @brief Get the parquet field id that was set for this column. * * @throws If parquet field id was not set for this column. * Check using `is_parquet_field_id_set()` first. 
* @return The parquet field id that was set for this column */ [[nodiscard]] int32_t get_parquet_field_id() const { return _parquet_field_id.value(); } /** * @brief Get the number of children of this column * * @return The number of children of this column */ [[nodiscard]] size_type num_children() const noexcept { return children.size(); } /** * @brief Get whether to encode this column as binary or string data * * @return Boolean indicating whether to encode this column as binary data */ [[nodiscard]] bool is_enabled_output_as_binary() const noexcept { return _output_as_binary; } }; /** * @brief Metadata for a table */ class table_input_metadata { public: table_input_metadata() = default; // Required by cython /** * @brief Construct a new table_input_metadata from a table_view. * * The constructed table_input_metadata has the same structure as the passed table_view * * @param table The table_view to construct metadata for */ explicit table_input_metadata(table_view const& table); /** * @brief Construct a new table_input_metadata from a table_metadata object. * * The constructed table_input_metadata has the same structure, column names and nullability as * the passed table_metadata. * * @param metadata The table_metadata to construct table_intput_metadata for */ explicit table_input_metadata(table_metadata const& metadata); std::vector<column_in_metadata> column_metadata; //!< List of column metadata }; /** * @brief Information used while writing partitioned datasets * * This information defines the slice of an input table to write to file. In partitioned dataset * writing, one partition_info struct defines one partition and corresponds to one output file */ struct partition_info { size_type start_row; //!< The start row of the partition size_type num_rows; //!< The number of rows in the partition partition_info() = default; /** * @brief Construct a new partition_info * * @param start_row The start row of the partition * @param num_rows The number of rows in the partition */ partition_info(size_type start_row, size_type num_rows) : start_row(start_row), num_rows(num_rows) { } }; /** * @brief schema element for reader * */ class reader_column_schema { // Whether to read binary data as a string column bool _convert_binary_to_strings{true}; std::vector<reader_column_schema> children; public: reader_column_schema() = default; /** * @brief Construct a new reader column schema object * * @param number_of_children number of child schema objects to default construct */ reader_column_schema(size_type number_of_children) { children.resize(number_of_children); } /** * @brief Construct a new reader column schema object with a span defining the children * * @param child_span span of child schema objects */ reader_column_schema(host_span<reader_column_schema> const& child_span) { children.assign(child_span.begin(), child_span.end()); } /** * @brief Add the children metadata of this column * * @param child The children metadata of this column to add * @return this for chaining */ reader_column_schema& add_child(reader_column_schema const& child) { children.push_back(child); return *this; } /** * @brief Get reference to a child of this column * * @param i Index of the child to get * @return this for chaining */ [[nodiscard]] reader_column_schema& child(size_type i) { return children[i]; } /** * @brief Get const reference to a child of this column * * @param i Index of the child to get * @return this for chaining */ [[nodiscard]] reader_column_schema const& child(size_type i) const { return children[i]; } 
/** * @brief Specifies whether this column should be written as binary or string data * Only valid for the following column types: * string, list<int8> * * @param convert_to_string True = convert binary to strings False = return binary * @return this for chaining */ reader_column_schema& set_convert_binary_to_strings(bool convert_to_string) { _convert_binary_to_strings = convert_to_string; return *this; } /** * @brief Get whether to encode this column as binary or string data * * @return Boolean indicating whether to encode this column as binary data */ [[nodiscard]] bool is_enabled_convert_binary_to_strings() const { return _convert_binary_to_strings; } /** * @brief Get the number of child objects * * @return number of children */ [[nodiscard]] size_t get_num_children() const { return children.size(); } }; } // namespace io } // namespace cudf
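A sketch of the in-memory source/sink types and the chained column metadata setters declared above. It assumes `cudf::host_span` can be built from a pointer and size (as in cudf/utilities/span.hpp); the buffer contents, column name, and precision are placeholders.

#include <cudf/io/types.hpp>
#include <cudf/utilities/span.hpp>

#include <vector>

int main()
{
  // Read from a buffer already resident in host memory instead of a file path.
  std::vector<char> const input{};  // placeholder: encoded file bytes would go here
  auto const src =
    cudf::io::source_info{cudf::host_span<char const>{input.data(), input.size()}};

  // Direct writer output into a caller-owned host buffer.
  std::vector<char> output;
  auto const dst = cudf::io::sink_info{&output};

  // Per-column writer metadata built with the chained setters; the decimal precision
  // is only meaningful for a fixed-point column.
  auto col_meta = cudf::io::column_in_metadata{"amount"}
                    .set_nullability(false)
                    .set_decimal_precision(9);

  // src, dst, and col_meta would normally be handed to reader/writer options.
  (void)src;
  (void)dst;
  (void)col_meta;
  return 0;
}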
0
rapidsai_public_repos/cudf/cpp/include/cudf
rapidsai_public_repos/cudf/cpp/include/cudf/io/json.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "types.hpp" #include <cudf/table/table_view.hpp> #include <cudf/types.hpp> #include <rmm/mr/device/per_device_resource.hpp> #include <map> #include <string> #include <variant> #include <vector> namespace cudf { namespace io { /** * @addtogroup io_readers * @{ * @file */ class json_reader_options_builder; /** * @brief Allows specifying the target types for nested JSON data via json_reader_options' * `set_dtypes` method. */ struct schema_element { /** * @brief The type that this column should be converted to */ data_type type; /** * @brief Allows specifying this column's child columns target type */ std::map<std::string, schema_element> child_types; }; /** * @brief Control the error recovery behavior of the json parser */ enum class json_recovery_mode_t { FAIL, ///< Does not recover from an error when encountering an invalid format RECOVER_WITH_NULL ///< Recovers from an error, replacing invalid records with null }; /** * @brief Input arguments to the `read_json` interface. * * Available parameters are closely patterned after PANDAS' `read_json` API. * Not all parameters are supported. If the matching PANDAS' parameter * has a default value of `None`, then a default value of `-1` or `0` may be * used as the equivalent. * * Parameters in PANDAS that are unavailable or in cudf: * * | Name | Description | * | -------------------- | ------------------------------------------------ | * | `orient` | currently fixed-format | * | `typ` | data is always returned as a cudf::table | * | `convert_axes` | use column functions for axes operations instead | * | `convert_dates` | dates are detected automatically | * | `keep_default_dates` | dates are detected automatically | * | `numpy` | data is always returned as a cudf::table | * | `precise_float` | there is only one converter | * | `date_unit` | only millisecond units are supported | * | `encoding` | only ASCII-encoded data is supported | * | `chunksize` | use `byte_range_xxx` for chunking instead | */ class json_reader_options { source_info _source; // Data types of the column; empty to infer dtypes std::variant<std::vector<data_type>, std::map<std::string, data_type>, std::map<std::string, schema_element>> _dtypes; // Specify the compression format of the source or infer from file extension compression_type _compression = compression_type::AUTO; // Read the file as a json object per line bool _lines = false; // Bytes to skip from the start size_t _byte_range_offset = 0; // Bytes to read; always reads complete rows size_t _byte_range_size = 0; // Whether to parse dates as DD/MM versus MM/DD bool _dayfirst = false; // Whether to use the legacy reader bool _legacy = false; // Whether to keep the quote characters of string values bool _keep_quotes = false; // Whether to recover after an invalid JSON line json_recovery_mode_t _recovery_mode = json_recovery_mode_t::FAIL; /** * @brief Constructor from source info. 
* * @param src source information used to read parquet file */ explicit json_reader_options(source_info src) : _source{std::move(src)} {} friend json_reader_options_builder; public: /** * @brief Default constructor. * * This has been added since Cython requires a default constructor to create objects on stack. */ json_reader_options() = default; /** * @brief create json_reader_options_builder which will build json_reader_options. * * @param src source information used to read json file * @returns builder to build the options */ static json_reader_options_builder builder(source_info src); /** * @brief Returns source info. * * @returns Source info */ [[nodiscard]] source_info const& get_source() const { return _source; } /** * @brief Returns data types of the columns. * * @returns Data types of the columns */ std::variant<std::vector<data_type>, std::map<std::string, data_type>, std::map<std::string, schema_element>> const& get_dtypes() const { return _dtypes; } /** * @brief Returns compression format of the source. * * @return Compression format of the source */ compression_type get_compression() const { return _compression; } /** * @brief Returns number of bytes to skip from source start. * * @return Number of bytes to skip from source start */ size_t get_byte_range_offset() const { return _byte_range_offset; } /** * @brief Returns number of bytes to read. * * @return Number of bytes to read */ size_t get_byte_range_size() const { return _byte_range_size; } /** * @brief Returns number of bytes to read with padding. * * @return Number of bytes to read with padding */ size_t get_byte_range_size_with_padding() const { if (_byte_range_size == 0) { return 0; } else { return _byte_range_size + get_byte_range_padding(); } } /** * @brief Returns number of bytes to pad when reading. * * @return Number of bytes to pad */ size_t get_byte_range_padding() const { auto const num_columns = std::visit([](auto const& dtypes) { return dtypes.size(); }, _dtypes); auto const max_row_bytes = 16 * 1024; // 16KB auto const column_bytes = 64; auto const base_padding = 1024; // 1KB if (num_columns == 0) { // Use flat size if the number of columns is not known return max_row_bytes; } // Expand the size based on the number of columns, if available return base_padding + num_columns * column_bytes; } /** * @brief Whether to read the file as a json object per line. * * @return `true` if reading the file as a json object per line */ bool is_enabled_lines() const { return _lines; } /** * @brief Whether to parse dates as DD/MM versus MM/DD. * * @returns true if dates are parsed as DD/MM, false if MM/DD */ bool is_enabled_dayfirst() const { return _dayfirst; } /** * @brief Whether the legacy reader should be used. * * @returns true if the legacy reader will be used, false otherwise */ bool is_enabled_legacy() const { return _legacy; } /** * @brief Whether the reader should keep quotes of string values. * * @returns true if the reader should keep quotes, false otherwise */ bool is_enabled_keep_quotes() const { return _keep_quotes; } /** * @brief Queries the JSON reader's behavior on invalid JSON lines. * * @returns An enum that specifies the JSON reader's behavior on invalid JSON lines. */ json_recovery_mode_t recovery_mode() const { return _recovery_mode; } /** * @brief Set data types for columns to be read. * * @param types Vector of dtypes */ void set_dtypes(std::vector<data_type> types) { _dtypes = std::move(types); } /** * @brief Set data types for columns to be read. 
* * @param types Vector dtypes in string format */ void set_dtypes(std::map<std::string, data_type> types) { _dtypes = std::move(types); } /** * @brief Set data types for a potentially nested column hierarchy. * * @param types Map of column names to schema_element to support arbitrary nesting of data types */ void set_dtypes(std::map<std::string, schema_element> types) { _dtypes = std::move(types); } /** * @brief Set the compression type. * * @param comp_type The compression type used */ void set_compression(compression_type comp_type) { _compression = comp_type; } /** * @brief Set number of bytes to skip from source start. * * @param offset Number of bytes of offset */ void set_byte_range_offset(size_type offset) { _byte_range_offset = offset; } /** * @brief Set number of bytes to read. * * @param size Number of bytes to read */ void set_byte_range_size(size_type size) { _byte_range_size = size; } /** * @brief Set whether to read the file as a json object per line. * * @param val Boolean value to enable/disable the option to read each line as a json object */ void enable_lines(bool val) { _lines = val; } /** * @brief Set whether to parse dates as DD/MM versus MM/DD. * * @param val Boolean value to enable/disable day first parsing format */ void enable_dayfirst(bool val) { _dayfirst = val; } /** * @brief Set whether to use the legacy reader. * * @param val Boolean value to enable/disable the legacy reader */ void enable_legacy(bool val) { _legacy = val; } /** * @brief Set whether the reader should keep quotes of string values. * * @param val Boolean value to indicate whether the reader should keep quotes * of string values */ void enable_keep_quotes(bool val) { _keep_quotes = val; } /** * @brief Specifies the JSON reader's behavior on invalid JSON lines. * * @param val An enum value to indicate the JSON reader's behavior on invalid JSON lines. */ void set_recovery_mode(json_recovery_mode_t val) { _recovery_mode = val; } }; /** * @brief Builds settings to use for `read_json()`. */ class json_reader_options_builder { json_reader_options options; public: /** * @brief Default constructor. * * This has been added since Cython requires a default constructor to create objects on stack. */ explicit json_reader_options_builder() = default; /** * @brief Constructor from source info. * * @param src The source information used to read avro file */ explicit json_reader_options_builder(source_info src) : options{std::move(src)} {} /** * @brief Set data types for columns to be read. * * @param types Vector of dtypes * @return this for chaining */ json_reader_options_builder& dtypes(std::vector<data_type> types) { options._dtypes = std::move(types); return *this; } /** * @brief Set data types for columns to be read. * * @param types Column name -> dtype map * @return this for chaining */ json_reader_options_builder& dtypes(std::map<std::string, data_type> types) { options._dtypes = std::move(types); return *this; } /** * @brief Set data types for columns to be read. * * @param types Column name -> schema_element map * @return this for chaining */ json_reader_options_builder& dtypes(std::map<std::string, schema_element> types) { options._dtypes = std::move(types); return *this; } /** * @brief Set the compression type. * * @param comp_type The compression type used * @return this for chaining */ json_reader_options_builder& compression(compression_type comp_type) { options._compression = comp_type; return *this; } /** * @brief Set number of bytes to skip from source start. 
* * @param offset Number of bytes of offset * @return this for chaining */ json_reader_options_builder& byte_range_offset(size_type offset) { options._byte_range_offset = offset; return *this; } /** * @brief Set number of bytes to read. * * @param size Number of bytes to read * @return this for chaining */ json_reader_options_builder& byte_range_size(size_type size) { options._byte_range_size = size; return *this; } /** * @brief Set whether to read the file as a json object per line. * * @param val Boolean value to enable/disable the option to read each line as a json object * @return this for chaining */ json_reader_options_builder& lines(bool val) { options._lines = val; return *this; } /** * @brief Set whether to parse dates as DD/MM versus MM/DD. * * @param val Boolean value to enable/disable day first parsing format * @return this for chaining */ json_reader_options_builder& dayfirst(bool val) { options._dayfirst = val; return *this; } /** * @brief Set whether to use the legacy reader. * * @param val Boolean value to enable/disable legacy parsing * @return this for chaining */ json_reader_options_builder& legacy(bool val) { options._legacy = val; return *this; } /** * @brief Set whether the reader should keep quotes of string values. * * @param val Boolean value to indicate whether the reader should keep quotes * of string values * @return this for chaining */ json_reader_options_builder& keep_quotes(bool val) { options._keep_quotes = val; return *this; } /** * @brief Specifies the JSON reader's behavior on invalid JSON lines. * * @param val An enum value to indicate the JSON reader's behavior on invalid JSON lines. * @return this for chaining */ json_reader_options_builder& recovery_mode(json_recovery_mode_t val) { options._recovery_mode = val; return *this; } /** * @brief move json_reader_options member once it's built. */ operator json_reader_options&&() { return std::move(options); } /** * @brief move json_reader_options member once it's built. * * This has been added since Cython does not support overloading of conversion operators. * * @return Built `json_reader_options` object r-value reference */ json_reader_options&& build() { return std::move(options); } }; /** * @brief Reads a JSON dataset into a set of columns. * * The following code snippet demonstrates how to read a dataset from a file: * @code * auto source = cudf::io::source_info("dataset.json"); * auto options = cudf::io::read_json_options::builder(source); * auto result = cudf::io::read_json(options); * @endcode * * @param options Settings for controlling reading behavior * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate device memory of the table in the returned * table_with_metadata. * * @return The set of columns along with metadata */ table_with_metadata read_json( json_reader_options options, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group /** * @addtogroup io_writers * @{ * @file */ /** *@brief Builder to build options for `writer_json()`. */ class json_writer_options_builder; /** * @brief Settings to use for `write_json()`. 
*/ class json_writer_options { // Specify the sink to use for writer output sink_info _sink; // Set of columns to output table_view _table; // string to use for null entries std::string _na_rep = ""; // Indicates whether to output nulls as 'null' or exclude the field bool _include_nulls = false; // Indicates whether to use JSON lines for records format bool _lines = false; // maximum number of rows to write in each chunk (limits memory use) size_type _rows_per_chunk = std::numeric_limits<size_type>::max(); // string to use for values != 0 in INT8 types (default 'true') std::string _true_value = std::string{"true"}; // string to use for values == 0 in INT8 types (default 'false') std::string _false_value = std::string{"false"}; // Names of all columns; if empty, writer will generate column names std::optional<table_metadata> _metadata; // Optional column names /** * @brief Constructor from sink and table. * * @param sink The sink used for writer output * @param table Table to be written to output */ explicit json_writer_options(sink_info const& sink, table_view const& table) : _sink(sink), _table(table), _rows_per_chunk(table.num_rows()) { } friend json_writer_options_builder; public: /** * @brief Default constructor. * * This has been added since Cython requires a default constructor to create objects on stack. */ explicit json_writer_options() = default; /** * @brief Create builder to create `json_writer_options`. * * @param sink The sink used for writer output * @param table Table to be written to output * * @return Builder to build json_writer_options */ static json_writer_options_builder builder(sink_info const& sink, table_view const& table); /** * @brief Returns sink used for writer output. * * @return sink used for writer output */ [[nodiscard]] sink_info const& get_sink() const { return _sink; } /** * @brief Returns table that would be written to output. * * @return Table that would be written to output */ [[nodiscard]] table_view const& get_table() const { return _table; } /** * @brief Returns metadata information. * * @return Metadata information */ [[nodiscard]] std::optional<table_metadata> const& get_metadata() const { return _metadata; } /** * @brief Returns string to used for null entries. * * @return string to used for null entries */ [[nodiscard]] std::string const& get_na_rep() const { return _na_rep; } /** * @brief Whether to output nulls as 'null'. * * @return `true` if nulls are output as 'null' */ [[nodiscard]] bool is_enabled_include_nulls() const { return _include_nulls; } /** * @brief Whether to use JSON lines for records format. * * @return `true` if JSON lines is used for records format */ [[nodiscard]] bool is_enabled_lines() const { return _lines; } /** * @brief Returns maximum number of rows to process for each file write. * * @return Maximum number of rows to process for each file write */ [[nodiscard]] size_type get_rows_per_chunk() const { return _rows_per_chunk; } /** * @brief Returns string used for values != 0 in INT8 types. * * @return string used for values != 0 in INT8 types */ [[nodiscard]] std::string const& get_true_value() const { return _true_value; } /** * @brief Returns string used for values == 0 in INT8 types. * * @return string used for values == 0 in INT8 types */ [[nodiscard]] std::string const& get_false_value() const { return _false_value; } // Setter /** * @brief Sets table to be written to output. * * @param tbl Table for the output */ void set_table(table_view tbl) { _table = tbl; } /** * @brief Sets metadata. 
* * @param metadata Associated metadata */ void set_metadata(table_metadata metadata) { _metadata = std::move(metadata); } /** * @brief Sets string to used for null entries. * * @param val String to represent null value */ void set_na_rep(std::string val) { _na_rep = std::move(val); } /** * @brief Enables/Disables output of nulls as 'null'. * * @param val Boolean value to enable/disable */ void enable_include_nulls(bool val) { _include_nulls = val; } /** * @brief Enables/Disables JSON lines for records format. * * @param val Boolean value to enable/disable JSON lines */ void enable_lines(bool val) { _lines = val; } /** * @brief Sets maximum number of rows to process for each file write. * * @param val Number of rows per chunk */ void set_rows_per_chunk(size_type val) { _rows_per_chunk = val; } /** * @brief Sets string used for values != 0 in INT8 types. * * @param val String to represent values != 0 in INT8 types */ void set_true_value(std::string val) { _true_value = std::move(val); } /** * @brief Sets string used for values == 0 in INT8 types. * * @param val String to represent values == 0 in INT8 types */ void set_false_value(std::string val) { _false_value = std::move(val); } }; /** * @brief Builder to build options for `writer_json()` */ class json_writer_options_builder { json_writer_options options; ///< Options to be built. public: /** * @brief Default constructor. * * This has been added since Cython requires a default constructor to create objects on stack. */ explicit json_writer_options_builder() = default; /** * @brief Constructor from sink and table. * * @param sink The sink used for writer output * @param table Table to be written to output */ explicit json_writer_options_builder(sink_info const& sink, table_view const& table) : options{sink, table} { } /** * @brief Sets table to be written to output. * * @param tbl Table for the output * @return this for chaining */ json_writer_options_builder& table(table_view tbl) { options._table = tbl; return *this; } /** * @brief Sets optional metadata (with column names). * * @param metadata metadata (with column names) * @return this for chaining */ json_writer_options_builder& metadata(table_metadata metadata) { options._metadata = std::move(metadata); return *this; } /** * @brief Sets string to used for null entries. * * @param val String to represent null value * @return this for chaining */ json_writer_options_builder& na_rep(std::string val) { options._na_rep = std::move(val); return *this; }; /** * @brief Enables/Disables output of nulls as 'null'. * * @param val Boolean value to enable/disable * @return this for chaining */ json_writer_options_builder& include_nulls(bool val) { options._include_nulls = val; return *this; } /** * @brief Enables/Disables JSON lines for records format. * * @param val Boolean value to enable/disable * @return this for chaining */ json_writer_options_builder& lines(bool val) { options._lines = val; return *this; } /** * @brief Sets maximum number of rows to process for each file write. * * @param val Number of rows per chunk * @return this for chaining */ json_writer_options_builder& rows_per_chunk(int val) { options._rows_per_chunk = val; return *this; } /** * @brief Sets string used for values != 0 in INT8 types. * * @param val String to represent values != 0 in INT8 types * @return this for chaining */ json_writer_options_builder& true_value(std::string val) { options._true_value = std::move(val); return *this; } /** * @brief Sets string used for values == 0 in INT8 types. 
* * @param val String to represent values == 0 in INT8 types * @return this for chaining */ json_writer_options_builder& false_value(std::string val) { options._false_value = std::move(val); return *this; } /** * @brief move `json_writer_options` member once it's built. */ operator json_writer_options&&() { return std::move(options); } /** * @brief move `json_writer_options` member once it's built. * * This has been added since Cython does not support overloading of conversion operators. * * @return Built `json_writer_options` object's r-value reference */ json_writer_options&& build() { return std::move(options); } }; /** * @brief Writes a set of columns to JSON format. * * The following code snippet demonstrates how to write columns to a file: * @code * auto destination = cudf::io::sink_info("dataset.json"); * auto options = cudf::io::json_writer_options(destination, table->view()) * .na_rep(na) * .lines(lines) * .rows_per_chunk(rows_per_chunk); * * cudf::io::write_json(options); * @endcode * * @param options Settings for controlling writing behavior * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource to use for device memory allocation */ void write_json(json_writer_options const& options, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group } // namespace io } // namespace cudf
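// Illustrative sketch (not part of the header above): building JSON writer options through the
// builder interface documented above and writing a table as JSON lines. The sink path
// "out.jsonl", the table variable `tbl`, and the metadata argument are assumptions invented for
// the example; only the builder/setter calls themselves come from the json_writer_options API.
#include <cudf/io/json.hpp>

void write_json_example(cudf::table_view const& tbl, cudf::io::table_metadata meta)
{
  auto const dest = cudf::io::sink_info{"out.jsonl"};
  auto options    = cudf::io::json_writer_options::builder(dest, tbl)
                      .metadata(std::move(meta))  // optional column names
                      .lines(true)                // one JSON record per line
                      .include_nulls(true)        // emit nulls as 'null' instead of dropping fields
                      .na_rep("null")             // string used for null entries
                      .rows_per_chunk(100'000)    // bound peak memory per write
                      .build();
  cudf::io::write_json(options);
}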
0
rapidsai_public_repos/cudf/cpp/include/cudf
rapidsai_public_repos/cudf/cpp/include/cudf/io/csv.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/io/types.hpp> #include <cudf/table/table_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/error.hpp> #include <rmm/mr/device/per_device_resource.hpp> #include <memory> #include <string> #include <unordered_map> #include <variant> #include <vector> namespace cudf { namespace io { /** * @addtogroup io_readers * @{ * @file */ /** *@brief Builder to build options for `read_csv()`. */ class csv_reader_options_builder; /** * @brief Settings to use for `read_csv()`. */ class csv_reader_options { source_info _source; // Read settings // Specify the compression format of the source or infer from file extension compression_type _compression = compression_type::AUTO; // Bytes to skip from the source start std::size_t _byte_range_offset = 0; // Bytes to read; always reads complete rows std::size_t _byte_range_size = 0; // Names of all the columns; if empty then names are auto-generated std::vector<std::string> _names; // If there is no header or names, prepend this to the column ID as the name std::string _prefix; // Whether to rename duplicate column names bool _mangle_dupe_cols = true; // Filter settings // Names of columns to read; empty is all columns std::vector<std::string> _use_cols_names; // Indexes of columns to read; empty is all columns std::vector<int> _use_cols_indexes; // Rows to read; -1 is all size_type _nrows = -1; // Rows to skip from the start size_type _skiprows = 0; // Rows to skip from the end size_type _skipfooter = 0; // Header row index size_type _header = 0; // Parsing settings // Line terminator char _lineterminator = '\n'; // Field delimiter char _delimiter = ','; // Numeric data thousands separator; cannot match delimiter char _thousands = '\0'; // Decimal point character; cannot match delimiter char _decimal = '.'; // Comment line start character char _comment = '\0'; bool _windowslinetermination = false; // Treat whitespace as field delimiter; overrides character delimiter bool _delim_whitespace = false; // Skip whitespace after the delimiter bool _skipinitialspace = false; // Ignore empty lines or parse line values as invalid bool _skip_blank_lines = true; // Treatment of quoting behavior quote_style _quoting = quote_style::MINIMAL; // Quoting character (if `quoting` is true) char _quotechar = '"'; // Whether a quote inside a value is double-quoted bool _doublequote = true; // Names of columns to read as datetime std::vector<std::string> _parse_dates_names; // Indexes of columns to read as datetime std::vector<int> _parse_dates_indexes; // Names of columns to parse as hexadecimal std::vector<std::string> _parse_hex_names; // Indexes of columns to parse as hexadecimal std::vector<int> _parse_hex_indexes; // Conversion settings // Per-column types; disables type inference on those columns std::variant<std::vector<data_type>, std::map<std::string, data_type>> _dtypes; // Additional values to recognize as boolean true values std::vector<std::string> 
_true_values{"True", "TRUE", "true"}; // Additional values to recognize as boolean false values std::vector<std::string> _false_values{"False", "FALSE", "false"}; // Additional values to recognize as null values std::vector<std::string> _na_values; // Whether to keep the built-in default NA values bool _keep_default_na = true; // Whether to disable null filter; disabling can improve performance bool _na_filter = true; // Whether to parse dates as DD/MM versus MM/DD bool _dayfirst = false; // Cast timestamp columns to a specific type data_type _timestamp_type{type_id::EMPTY}; /** * @brief Constructor from source info. * * @param src source information used to read csv file */ explicit csv_reader_options(source_info src) : _source{std::move(src)} {} friend csv_reader_options_builder; public: /** * @brief Default constructor. * * This has been added since Cython requires a default constructor to create objects on stack. */ csv_reader_options() = default; /** * @brief Creates a `csv_reader_options_builder` which will build `csv_reader_options`. * * @param src Source information to read csv file * @return Builder to build reader options */ static csv_reader_options_builder builder(source_info src); /** * @brief Returns source info. * * @return Source info */ [[nodiscard]] source_info const& get_source() const { return _source; } /** * @brief Returns compression format of the source. * * @return Compression format of the source */ [[nodiscard]] compression_type get_compression() const { return _compression; } /** * @brief Returns number of bytes to skip from source start. * * @return Number of bytes to skip from source start */ [[nodiscard]] std::size_t get_byte_range_offset() const { return _byte_range_offset; } /** * @brief Returns number of bytes to read. * * @return Number of bytes to read */ [[nodiscard]] std::size_t get_byte_range_size() const { return _byte_range_size; } /** * @brief Returns number of bytes to read with padding. * * @return Number of bytes to read with padding */ [[nodiscard]] std::size_t get_byte_range_size_with_padding() const { if (_byte_range_size == 0) { return 0; } else { return _byte_range_size + get_byte_range_padding(); } } /** * @brief Returns number of bytes to pad when reading. * * @return Number of bytes to pad when reading */ [[nodiscard]] std::size_t get_byte_range_padding() const { auto const num_names = _names.size(); auto const num_dtypes = std::visit([](auto const& dtypes) { return dtypes.size(); }, _dtypes); auto const num_columns = std::max(num_dtypes, num_names); auto const max_row_bytes = 16 * 1024; // 16KB auto const column_bytes = 64; auto const base_padding = 1024; // 1KB if (num_columns == 0) { // Use flat size if the number of columns is not known return max_row_bytes; } // Expand the size based on the number of columns, if available return base_padding + num_columns * column_bytes; } /** * @brief Returns names of the columns. * * @return Names of the columns */ [[nodiscard]] std::vector<std::string> const& get_names() const { return _names; } /** * @brief Returns prefix to be used for column ID. * * @return Prefix to be used for column ID */ [[nodiscard]] std::string get_prefix() const { return _prefix; } /** * @brief Whether to rename duplicate column names. * * @return `true` if duplicate column names are renamed */ [[nodiscard]] bool is_enabled_mangle_dupe_cols() const { return _mangle_dupe_cols; } /** * @brief Returns names of the columns to be read. 
* * @return Names of the columns to be read */ [[nodiscard]] std::vector<std::string> const& get_use_cols_names() const { return _use_cols_names; } /** * @brief Returns indexes of columns to read. * * @return Indexes of columns to read */ [[nodiscard]] std::vector<int> const& get_use_cols_indexes() const { return _use_cols_indexes; } /** * @brief Returns number of rows to read. * * @return Number of rows to read */ [[nodiscard]] size_type get_nrows() const { return _nrows; } /** * @brief Returns number of rows to skip from start. * * @return Number of rows to skip from start */ [[nodiscard]] size_type get_skiprows() const { return _skiprows; } /** * @brief Returns number of rows to skip from end. * * @return Number of rows to skip from end */ [[nodiscard]] size_type get_skipfooter() const { return _skipfooter; } /** * @brief Returns header row index. * * @return Header row index */ [[nodiscard]] size_type get_header() const { return _header; } /** * @brief Returns line terminator. * * @return Line terminator */ [[nodiscard]] char get_lineterminator() const { return _lineterminator; } /** * @brief Returns field delimiter. * * @return Field delimiter */ [[nodiscard]] char get_delimiter() const { return _delimiter; } /** * @brief Returns numeric data thousands separator. * * @return Numeric data thousands separator */ [[nodiscard]] char get_thousands() const { return _thousands; } /** * @brief Returns decimal point character. * * @return Decimal point character */ [[nodiscard]] char get_decimal() const { return _decimal; } /** * @brief Returns comment line start character. * * @return Comment line start character */ [[nodiscard]] char get_comment() const { return _comment; } /** * @brief Whether to treat `\r\n` as line terminator. * * @return `true` if `\r\n` is treated as line terminator */ [[nodiscard]] bool is_enabled_windowslinetermination() const { return _windowslinetermination; } /** * @brief Whether to treat whitespace as field delimiter. * * @return `true` if whitespace is treated as field delimiter */ [[nodiscard]] bool is_enabled_delim_whitespace() const { return _delim_whitespace; } /** * @brief Whether to skip whitespace after the delimiter. * * @return `true` if whitespace is skipped after the delimiter */ [[nodiscard]] bool is_enabled_skipinitialspace() const { return _skipinitialspace; } /** * @brief Whether to ignore empty lines or parse line values as invalid. * * @return `true` if empty lines or parse line values are ignored as invalid */ [[nodiscard]] bool is_enabled_skip_blank_lines() const { return _skip_blank_lines; } /** * @brief Returns quoting style. * * @return Quoting style */ [[nodiscard]] quote_style get_quoting() const { return _quoting; } /** * @brief Returns quoting character. * * @return Quoting character */ [[nodiscard]] char get_quotechar() const { return _quotechar; } /** * @brief Whether a quote inside a value is double-quoted. * * @return `true` if a quote inside a value is double-quoted */ [[nodiscard]] bool is_enabled_doublequote() const { return _doublequote; } /** * @brief Returns names of columns to read as datetime. * * @return Names of columns to read as datetime */ [[nodiscard]] std::vector<std::string> const& get_parse_dates_names() const { return _parse_dates_names; } /** * @brief Returns indexes of columns to read as datetime. * * @return Indexes of columns to read as datetime */ [[nodiscard]] std::vector<int> const& get_parse_dates_indexes() const { return _parse_dates_indexes; } /** * @brief Returns names of columns to read as hexadecimal. 
* * @return Names of columns to read as hexadecimal */ [[nodiscard]] std::vector<std::string> const& get_parse_hex_names() const { return _parse_hex_names; } /** * @brief Returns indexes of columns to read as hexadecimal. * * @return Indexes of columns to read as hexadecimal */ [[nodiscard]] std::vector<int> const& get_parse_hex_indexes() const { return _parse_hex_indexes; } /** * @brief Returns per-column types. * * @return Per-column types */ std::variant<std::vector<data_type>, std::map<std::string, data_type>> const& get_dtypes() const { return _dtypes; } /** * @brief Returns additional values to recognize as boolean true values. * * @return Additional values to recognize as boolean true values */ std::vector<std::string> const& get_true_values() const { return _true_values; } /** * @brief Returns additional values to recognize as boolean false values. * * @return Additional values to recognize as boolean false values */ std::vector<std::string> const& get_false_values() const { return _false_values; } /** * @brief Returns additional values to recognize as null values. * * @return Additional values to recognize as null values */ std::vector<std::string> const& get_na_values() const { return _na_values; } /** * @brief Whether to keep the built-in default NA values. * * @return `true` if the built-in default NA values are kept */ bool is_enabled_keep_default_na() const { return _keep_default_na; } /** * @brief Whether to disable null filter. * * @return `true` if null filter is enabled */ bool is_enabled_na_filter() const { return _na_filter; } /** * @brief Whether to parse dates as DD/MM versus MM/DD. * * @return True if dates are parsed as DD/MM, false if MM/DD */ bool is_enabled_dayfirst() const { return _dayfirst; } /** * @brief Returns timestamp_type to which all timestamp columns will be cast. * * @return timestamp_type to which all timestamp columns will be cast */ data_type get_timestamp_type() const { return _timestamp_type; } /** * @brief Sets compression format of the source. * * @param comp Compression type */ void set_compression(compression_type comp) { _compression = comp; } /** * @brief Sets number of bytes to skip from source start. * * @param offset Number of bytes of offset */ void set_byte_range_offset(std::size_t offset) { if ((offset != 0) and ((_skiprows != 0) or (_skipfooter != 0) or (_nrows != -1))) { CUDF_FAIL( "When there is valid value in skiprows or skipfooter or nrows, offset can't have non-zero " "value"); } _byte_range_offset = offset; } /** * @brief Sets number of bytes to read. * * @param size Number of bytes to read */ void set_byte_range_size(std::size_t size) { if ((size != 0) and ((_skiprows != 0) or (_skipfooter != 0) or (_nrows != -1))) { CUDF_FAIL( "If the value of any of skiprows, skipfooter or nrows is valid, range size cannot be " "non-zero."); } _byte_range_size = size; } /** * @brief Sets names of the column. * * @param col_names Vector of column names */ void set_names(std::vector<std::string> col_names) { _names = std::move(col_names); } /** * @brief Sets prefix to be used for column ID. * * @param pfx String used as prefix in for each column name */ void set_prefix(std::string pfx) { _prefix = pfx; } /** * @brief Sets whether to rename duplicate column names. * * @param val Boolean value to enable/disable */ void enable_mangle_dupe_cols(bool val) { _mangle_dupe_cols = val; } /** * @brief Sets names of the columns to be read. 
* * @param col_names Vector of column names that are needed */ void set_use_cols_names(std::vector<std::string> col_names) { _use_cols_names = std::move(col_names); } /** * @brief Sets indexes of columns to read. * * @param col_indices Vector of column indices that are needed */ void set_use_cols_indexes(std::vector<int> col_indices) { _use_cols_indexes = std::move(col_indices); } /** * @brief Sets number of rows to read. * * @param nrows Number of rows to read */ void set_nrows(size_type nrows) { CUDF_EXPECTS((nrows == 0) or (_skipfooter == 0), "Cannot use both `nrows` and `skipfooter`"); if ((nrows != -1) and ((_byte_range_offset != 0) or (_byte_range_size != 0))) { CUDF_FAIL( "nrows can't be a non negative value if range offset and/or range size has been set"); } _nrows = nrows; } /** * @brief Sets number of rows to skip from start. * * @param skiprows Number of rows to skip */ void set_skiprows(size_type skiprows) { if ((skiprows != 0) and ((_byte_range_offset != 0) or (_byte_range_size != 0))) { CUDF_FAIL("skiprows must be zero if range offset or range size has been set", std::invalid_argument); } _skiprows = skiprows; } /** * @brief Sets number of rows to skip from end. * * @param skipfooter Number of rows to skip */ void set_skipfooter(size_type skipfooter) { CUDF_EXPECTS((skipfooter == 0) or (_nrows == -1), "Cannot use both `nrows` and `skipfooter`", std::invalid_argument); if ((skipfooter != 0) and ((_byte_range_offset != 0) or (_byte_range_size != 0))) { CUDF_FAIL("skipfooter must be zero if range offset or range size has been set", std::invalid_argument); } _skipfooter = skipfooter; } /** * @brief Sets header row index. * * @param hdr Index where header row is located */ void set_header(size_type hdr) { _header = hdr; } /** * @brief Sets line terminator * * @param term A character to indicate line termination */ void set_lineterminator(char term) { _lineterminator = term; } /** * @brief Sets field delimiter. * * @param delim A character to indicate delimiter */ void set_delimiter(char delim) { _delimiter = delim; } /** * @brief Sets numeric data thousands separator. * * @param val A character that separates thousands */ void set_thousands(char val) { _thousands = val; } /** * @brief Sets decimal point character. * * @param val A character that indicates decimal values */ void set_decimal(char val) { _decimal = val; } /** * @brief Sets comment line start character. * * @param val A character that indicates comment */ void set_comment(char val) { _comment = val; } /** * @brief Sets whether to treat `\r\n` as line terminator. * * @param val Boolean value to enable/disable */ void enable_windowslinetermination(bool val) { _windowslinetermination = val; } /** * @brief Sets whether to treat whitespace as field delimiter. * * @param val Boolean value to enable/disable */ void enable_delim_whitespace(bool val) { _delim_whitespace = val; } /** * @brief Sets whether to skip whitespace after the delimiter. * * @param val Boolean value to enable/disable */ void enable_skipinitialspace(bool val) { _skipinitialspace = val; } /** * @brief Sets whether to ignore empty lines or parse line values as invalid. * * @param val Boolean value to enable/disable */ void enable_skip_blank_lines(bool val) { _skip_blank_lines = val; } /** * @brief Sets the expected quoting style used in the input CSV data. * * Note: Only the following quoting styles are supported: * 1. MINIMAL: String columns containing special characters like row-delimiters/ * field-delimiter/quotes will be quoted. * 2. 
NONE: No quoting is done for any columns. * * @param quoting Quoting style used */ void set_quoting(quote_style quoting) { CUDF_EXPECTS(quoting == quote_style::MINIMAL || quoting == quote_style::NONE, "Only MINIMAL and NONE are supported for quoting."); _quoting = quoting; } /** * @brief Sets quoting character. * * @param ch A character to indicate quoting */ void set_quotechar(char ch) { _quotechar = ch; } /** * @brief Sets a quote inside a value is double-quoted. * * @param val Boolean value to enable/disable */ void enable_doublequote(bool val) { _doublequote = val; } /** * @brief Sets names of columns to read as datetime. * * @param col_names Vector of column names to infer as datetime */ void set_parse_dates(std::vector<std::string> col_names) { _parse_dates_names = std::move(col_names); } /** * @brief Sets indexes of columns to read as datetime. * * @param col_indices Vector of column indices to infer as datetime */ void set_parse_dates(std::vector<int> col_indices) { _parse_dates_indexes = std::move(col_indices); } /** * @brief Sets names of columns to parse as hexadecimal * * @param col_names Vector of column names to parse as hexadecimal */ void set_parse_hex(std::vector<std::string> col_names) { _parse_hex_names = std::move(col_names); } /** * @brief Sets indexes of columns to parse as hexadecimal * * @param col_indices Vector of column indices to parse as hexadecimal */ void set_parse_hex(std::vector<int> col_indices) { _parse_hex_indexes = std::move(col_indices); } /** * @brief Sets per-column types * * @param types Column name -> data type map specifying the columns' target data types */ void set_dtypes(std::map<std::string, data_type> types) { _dtypes = std::move(types); } /** * @brief Sets per-column types * * @param types Vector specifying the columns' target data types */ void set_dtypes(std::vector<data_type> types) { _dtypes = std::move(types); } /** * @brief Sets additional values to recognize as boolean true values. * * @param vals Vector of values to be considered to be `true` */ void set_true_values(std::vector<std::string> vals) { _true_values.insert(_true_values.end(), vals.begin(), vals.end()); } /** * @brief Sets additional values to recognize as boolean false values. * * @param vals Vector of values to be considered to be `false` */ void set_false_values(std::vector<std::string> vals) { _false_values.insert(_false_values.end(), vals.begin(), vals.end()); } /** * @brief Sets additional values to recognize as null values. * * @param vals Vector of values to be considered to be null */ void set_na_values(std::vector<std::string> vals) { if ((!vals.empty()) and (!_na_filter)) { CUDF_FAIL("Can't set na_values when na_filtering is disabled"); } _na_values = std::move(vals); } /** * @brief Sets whether to keep the built-in default NA values. * * @param val Boolean value to enable/disable */ void enable_keep_default_na(bool val) { _keep_default_na = val; } /** * @brief Sets whether to disable null filter. * * @param val Boolean value to enable/disable */ void enable_na_filter(bool val) { if (!val) { _na_values.clear(); } _na_filter = val; } /** * @brief Sets whether to parse dates as DD/MM versus MM/DD. * * @param val Boolean value to enable/disable */ void enable_dayfirst(bool val) { _dayfirst = val; } /** * @brief Sets timestamp_type to which all timestamp columns will be cast. 
* * @param type Dtype to which all timestamp column will be cast */ void set_timestamp_type(data_type type) { _timestamp_type = type; } }; /** * @brief Builder to build options for `read_csv()`. * */ class csv_reader_options_builder { csv_reader_options options; ///< Options to be built. public: /** * @brief Default constructor. * * This has been added since Cython requires a default constructor to create objects on stack. */ csv_reader_options_builder() = default; /** * @brief Constructor from source info. * * @param src The source information used to read csv file */ csv_reader_options_builder(source_info src) : options{std::move(src)} {} /** * @brief Sets compression format of the source. * * @param comp Compression type * @return this for chaining */ csv_reader_options_builder& compression(compression_type comp) { options._compression = comp; return *this; } /** * @brief Sets number of bytes to skip from source start. * * @param offset Number of bytes of offset * @return this for chaining */ csv_reader_options_builder& byte_range_offset(std::size_t offset) { options.set_byte_range_offset(offset); return *this; } /** * @brief Sets number of bytes to read. * * @param size Number of bytes to read * @return this for chaining */ csv_reader_options_builder& byte_range_size(std::size_t size) { options.set_byte_range_size(size); return *this; } /** * @brief Sets names of the column. * * @param col_names Vector of column names * @return this for chaining */ csv_reader_options_builder& names(std::vector<std::string> col_names) { options._names = std::move(col_names); return *this; } /** * @brief Sets prefix to be used for column ID. * * @param pfx String used as prefix in for each column name * @return this for chaining */ csv_reader_options_builder& prefix(std::string pfx) { options._prefix = pfx; return *this; } /** * @brief Sets whether to rename duplicate column names. * * @param val Boolean value to enable/disable * @return this for chaining */ csv_reader_options_builder& mangle_dupe_cols(bool val) { options._mangle_dupe_cols = val; return *this; } /** * @brief Sets names of the columns to be read. * * @param col_names Vector of column names that are needed * @return this for chaining */ csv_reader_options_builder& use_cols_names(std::vector<std::string> col_names) { options._use_cols_names = std::move(col_names); return *this; } /** * @brief Sets indexes of columns to read. * * @param col_indices Vector of column indices that are needed * @return this for chaining */ csv_reader_options_builder& use_cols_indexes(std::vector<int> col_indices) { options._use_cols_indexes = std::move(col_indices); return *this; } /** * @brief Sets number of rows to read. * * @param rows Number of rows to read * @return this for chaining */ csv_reader_options_builder& nrows(size_type rows) { options.set_nrows(rows); return *this; } /** * @brief Sets number of rows to skip from start. * * @param skip Number of rows to skip * @return this for chaining */ csv_reader_options_builder& skiprows(size_type skip) { options.set_skiprows(skip); return *this; } /** * @brief Sets number of rows to skip from end. * * @param skip Number of rows to skip * @return this for chaining */ csv_reader_options_builder& skipfooter(size_type skip) { options.set_skipfooter(skip); return *this; } /** * @brief Sets header row index. * * @param hdr Index where header row is located * @return this for chaining */ csv_reader_options_builder& header(size_type hdr) { options._header = hdr; return *this; } /** * @brief Sets line terminator. 
* * @param term A character to indicate line termination * @return this for chaining */ csv_reader_options_builder& lineterminator(char term) { options._lineterminator = term; return *this; } /** * @brief Sets field delimiter * * @param delim A character to indicate delimiter * @return this for chaining */ csv_reader_options_builder& delimiter(char delim) { options._delimiter = delim; return *this; } /** * @brief Sets numeric data thousands separator. * * @param val A character that separates thousands * @return this for chaining */ csv_reader_options_builder& thousands(char val) { options._thousands = val; return *this; } /** * @brief Sets decimal point character. * * @param val A character that indicates decimal values * @return this for chaining */ csv_reader_options_builder& decimal(char val) { options._decimal = val; return *this; } /** * @brief Sets comment line start character. * * @param val A character that indicates comment * @return this for chaining */ csv_reader_options_builder& comment(char val) { options._comment = val; return *this; } /** * @brief Sets whether to treat `\r\n` as line terminator. * * @param val Boolean value to enable/disable * @return this for chaining */ csv_reader_options_builder& windowslinetermination(bool val) { options._windowslinetermination = val; return *this; } /** * @brief Sets whether to treat whitespace as field delimiter. * * @param val Boolean value to enable/disable * @return this for chaining */ csv_reader_options_builder& delim_whitespace(bool val) { options._delim_whitespace = val; return *this; } /** * @brief Sets whether to skip whitespace after the delimiter. * * @param val Boolean value to enable/disable * @return this for chaining */ csv_reader_options_builder& skipinitialspace(bool val) { options._skipinitialspace = val; return *this; } /** * @brief Sets whether to ignore empty lines or parse line values as invalid. * * @param val Boolean value to enable/disable * @return this for chaining */ csv_reader_options_builder& skip_blank_lines(bool val) { options._skip_blank_lines = val; return *this; } /** * @brief Sets quoting style. * * @param style Quoting style used * @return this for chaining */ csv_reader_options_builder& quoting(quote_style style) { options._quoting = style; return *this; } /** * @brief Sets quoting character. * * @param ch A character to indicate quoting * @return this for chaining */ csv_reader_options_builder& quotechar(char ch) { options._quotechar = ch; return *this; } /** * @brief Sets a quote inside a value is double-quoted. * * @param val Boolean value to enable/disable * @return this for chaining */ csv_reader_options_builder& doublequote(bool val) { options._doublequote = val; return *this; } /** * @brief Sets names of columns to read as datetime. * * @param col_names Vector of column names to read as datetime * @return this for chaining */ csv_reader_options_builder& parse_dates(std::vector<std::string> col_names) { options._parse_dates_names = std::move(col_names); return *this; } /** * @brief Sets indexes of columns to read as datetime. * * @param col_indices Vector of column indices to read as datetime * @return this for chaining */ csv_reader_options_builder& parse_dates(std::vector<int> col_indices) { options._parse_dates_indexes = std::move(col_indices); return *this; } /** * @brief Sets names of columns to parse as hexadecimal. 
* * @param col_names Vector of column names to parse as hexadecimal * @return this for chaining */ csv_reader_options_builder& parse_hex(std::vector<std::string> col_names) { options._parse_hex_names = std::move(col_names); return *this; } /** * @brief Sets indexes of columns to parse as hexadecimal. * * @param col_indices Vector of column indices to parse as hexadecimal * @return this for chaining */ csv_reader_options_builder& parse_hex(std::vector<int> col_indices) { options._parse_hex_indexes = std::move(col_indices); return *this; } /** * @brief Sets per-column types. * * @param types Column name -> data type map specifying the columns' target data types * @return this for chaining */ csv_reader_options_builder& dtypes(std::map<std::string, data_type> types) { options._dtypes = std::move(types); return *this; } /** * @brief Sets per-column types. * * @param types Vector of data types in which the column needs to be read * @return this for chaining */ csv_reader_options_builder& dtypes(std::vector<data_type> types) { options._dtypes = std::move(types); return *this; } /** * @brief Sets additional values to recognize as boolean true values. * * @param vals Vector of values to be considered to be `true` * @return this for chaining */ csv_reader_options_builder& true_values(std::vector<std::string> vals) { options._true_values.insert(options._true_values.end(), vals.begin(), vals.end()); return *this; } /** * @brief Sets additional values to recognize as boolean false values. * * @param vals Vector of values to be considered to be `false` * @return this for chaining */ csv_reader_options_builder& false_values(std::vector<std::string> vals) { options._false_values.insert(options._false_values.end(), vals.begin(), vals.end()); return *this; } /** * @brief Sets additional values to recognize as null values. * * @param vals Vector of values to be considered to be null * @return this for chaining */ csv_reader_options_builder& na_values(std::vector<std::string> vals) { options.set_na_values(std::move(vals)); return *this; } /** * @brief Sets whether to keep the built-in default NA values. * * @param val Boolean value to enable/disable * @return this for chaining */ csv_reader_options_builder& keep_default_na(bool val) { options.enable_keep_default_na(val); return *this; } /** * @brief Sets whether to disable null filter. * * @param val Boolean value to enable/disable * @return this for chaining */ csv_reader_options_builder& na_filter(bool val) { options.enable_na_filter(val); return *this; } /** * @brief Sets whether to parse dates as DD/MM versus MM/DD. * * @param val Boolean value to enable/disable * @return this for chaining */ csv_reader_options_builder& dayfirst(bool val) { options._dayfirst = val; return *this; } /** * @brief Sets timestamp_type to which all timestamp columns will be cast. * * @param type Dtype to which all timestamp column will be cast * @return this for chaining */ csv_reader_options_builder& timestamp_type(data_type type) { options._timestamp_type = type; return *this; } /** * @brief move csv_reader_options member once it's built. */ operator csv_reader_options&&() { return std::move(options); } /** * @brief move csv_reader_options member once it's built. * * This has been added since Cython does not support overloading of conversion operators. * * @return Built `csv_reader_options` object's r-value reference */ csv_reader_options&& build() { return std::move(options); } }; /** * @brief Reads a CSV dataset into a set of columns. 
* * The following code snippet demonstrates how to read a dataset from a file: * @code * auto source = cudf::io::source_info("dataset.csv"); * auto options = cudf::io::csv_reader_options::builder(source); * auto result = cudf::io::read_csv(options); * @endcode * * @param options Settings for controlling reading behavior * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate device memory of the table in the returned * table_with_metadata * * @return The set of columns along with metadata */ table_with_metadata read_csv( csv_reader_options options, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group /** * @addtogroup io_writers * @{ * @file */ /** *@brief Builder to build options for `writer_csv()`. */ class csv_writer_options_builder; /** * @brief Settings to use for `write_csv()`. */ class csv_writer_options { // Specify the sink to use for writer output sink_info _sink; // Set of columns to output table_view _table; // string to use for null entries std::string _na_rep = ""; // Indicates whether to write headers to csv bool _include_header = true; // maximum number of rows to write in each chunk (limits memory use) size_type _rows_per_chunk = std::numeric_limits<size_type>::max(); // character to use for separating lines (default "\n") std::string _line_terminator = "\n"; // character to use for separating column values (default ",") char _inter_column_delimiter = ','; // string to use for values != 0 in INT8 types (default 'true') std::string _true_value = std::string{"true"}; // string to use for values == 0 in INT8 types (default 'false') std::string _false_value = std::string{"false"}; // Names of all columns; if empty, writer will generate column names std::vector<std::string> _names; // Quote style. Currently only MINIMAL and NONE are supported. quote_style _quoting = quote_style::MINIMAL; /** * @brief Constructor from sink and table. * * @param sink The sink used for writer output * @param table Table to be written to output */ explicit csv_writer_options(sink_info const& sink, table_view const& table) : _sink(sink), _table(table), _rows_per_chunk(table.num_rows()) { } friend csv_writer_options_builder; public: /** * @brief Default constructor. * * This has been added since Cython requires a default constructor to create objects on stack. */ explicit csv_writer_options() = default; /** * @brief Create builder to create `csv_writer_options`. * * @param sink The sink used for writer output * @param table Table to be written to output * * @return Builder to build csv_writer_options */ static csv_writer_options_builder builder(sink_info const& sink, table_view const& table); /** * @brief Returns sink used for writer output. * * @return sink used for writer output */ [[nodiscard]] sink_info const& get_sink() const { return _sink; } /** * @brief Returns table that would be written to output. * * @return Table that would be written to output */ [[nodiscard]] table_view const& get_table() const { return _table; } /** * @brief Returns names of the columns. * * @return Names of the columns in the output file */ [[nodiscard]] std::vector<std::string> const& get_names() const { return _names; } /** * @brief Returns string to used for null entries. * * @return string to used for null entries */ [[nodiscard]] std::string get_na_rep() const { return _na_rep; } /** * @brief Whether to write headers to csv. 
* * @return `true` if writing headers to csv */ [[nodiscard]] bool is_enabled_include_header() const { return _include_header; } /** * @brief Returns maximum number of rows to process for each file write. * * @return Maximum number of rows to process for each file write */ [[nodiscard]] size_type get_rows_per_chunk() const { return _rows_per_chunk; } /** * @brief Returns character used for separating lines. * * @return Character used for separating lines */ [[nodiscard]] std::string get_line_terminator() const { return _line_terminator; } /** * @brief Returns character used for separating column values. * * @return Character used for separating column values. */ [[nodiscard]] char get_inter_column_delimiter() const { return _inter_column_delimiter; } /** * @brief Returns string used for values != 0 in INT8 types. * * @return string used for values != 0 in INT8 types */ [[nodiscard]] std::string get_true_value() const { return _true_value; } /** * @brief Returns string used for values == 0 in INT8 types. * * @return string used for values == 0 in INT8 types */ [[nodiscard]] std::string get_false_value() const { return _false_value; } /** * @brief Returns the quote style for the writer. * * Note: Only MINIMAL and NONE are supported. * 1. MINIMAL: String columns containing special characters like row-delimiters * field-delimiter/quotes will be quoted. * 2. NONE: No quoting is done for any columns. * * @return quote_style The quote style for the writer */ [[nodiscard]] quote_style get_quoting() const { return _quoting; } // Setter /** * @brief Sets optional associated column names. * @param names Associated column names */ void set_names(std::vector<std::string> names) { _names = std::move(names); } /** * @brief Sets string to used for null entries. * * @param val String to represent null value */ void set_na_rep(std::string val) { _na_rep = val; } /** * @brief Enables/Disables headers being written to csv. * * @param val Boolean value to enable/disable */ void enable_include_header(bool val) { _include_header = val; } /** * @brief Sets maximum number of rows to process for each file write. * * @param val Number of rows per chunk */ void set_rows_per_chunk(size_type val) { _rows_per_chunk = val; } /** * @brief Sets character used for separating lines. * * @param term Character to represent line termination */ void set_line_terminator(std::string term) { _line_terminator = term; } /** * @brief Sets character used for separating column values. * * @param delim Character to delimit column values */ void set_inter_column_delimiter(char delim) { _inter_column_delimiter = delim; } /** * @brief Sets string used for values != 0 in INT8 types. * * @param val String to represent values != 0 in INT8 types */ void set_true_value(std::string val) { _true_value = val; } /** * @brief Sets string used for values == 0 in INT8 types. * * @param val String to represent values == 0 in INT8 types */ void set_false_value(std::string val) { _false_value = val; } /** * @brief (Re)sets the table being written. * * @param table Table to be written */ void set_table(table_view const& table) { _table = table; } /** * @brief Sets the quote style for the writer. * * Note: Only the following quote styles are supported: * 1. MINIMAL: String columns containing special characters like row-delimiters/ * field-delimiter/quotes will be quoted. * 2. NONE: No quoting is done for any columns. * * @param quoting The new quote_style for the writer. 
*/ void set_quoting(quote_style quoting) { CUDF_EXPECTS(quoting == quote_style::MINIMAL || quoting == quote_style::NONE, "Only MINIMAL and NONE are supported for quoting."); _quoting = quoting; } }; /** * @brief Builder to build options for `writer_csv()` */ class csv_writer_options_builder { csv_writer_options options; ///< Options to be built. public: /** * @brief Default constructor. * * This has been added since Cython requires a default constructor to create objects on stack. */ explicit csv_writer_options_builder() = default; /** * @brief Constructor from sink and table. * * @param sink The sink used for writer output * @param table Table to be written to output */ explicit csv_writer_options_builder(sink_info const& sink, table_view const& table) : options{sink, table} { } /** * @brief Sets optional column names. * * @param names Column names * @return this for chaining */ csv_writer_options_builder& names(std::vector<std::string> names) { options._names = names; return *this; } /** * @brief Sets string to used for null entries. * * @param val String to represent null value * @return this for chaining */ csv_writer_options_builder& na_rep(std::string val) { options._na_rep = val; return *this; }; /** * @brief Enables/Disables headers being written to csv. * * @param val Boolean value to enable/disable * @return this for chaining */ csv_writer_options_builder& include_header(bool val) { options._include_header = val; return *this; } /** * @brief Sets maximum number of rows to process for each file write. * * @param val Number of rows per chunk * @return this for chaining */ csv_writer_options_builder& rows_per_chunk(int val) { options._rows_per_chunk = val; return *this; } /** * @brief Sets character used for separating lines. * * @param term Character to represent line termination * @return this for chaining */ csv_writer_options_builder& line_terminator(std::string term) { options._line_terminator = term; return *this; } /** * @brief Sets character used for separating column values. * * @param delim Character to delimit column values * @return this for chaining */ csv_writer_options_builder& inter_column_delimiter(char delim) { options._inter_column_delimiter = delim; return *this; } /** * @brief Sets string used for values != 0 in INT8 types. * * @param val String to represent values != 0 in INT8 types * @return this for chaining */ csv_writer_options_builder& true_value(std::string val) { options._true_value = val; return *this; } /** * @brief Sets string used for values == 0 in INT8 types. * * @param val String to represent values == 0 in INT8 types * @return this for chaining */ csv_writer_options_builder& false_value(std::string val) { options._false_value = val; return *this; } /** * @brief Sets the quote style for the writer. * * Only MINIMAL and NONE are supported. * * @param quoting The new quote style for the writer. * @return this for chaining */ csv_writer_options_builder& quoting(quote_style quoting) { options.set_quoting(quoting); return *this; } /** * @brief move `csv_writer_options` member once it's built. */ operator csv_writer_options&&() { return std::move(options); } /** * @brief move `csv_writer_options` member once it's built. * * This has been added since Cython does not support overloading of conversion operators. * * @return Built `csv_writer_options` object's r-value reference */ csv_writer_options&& build() { return std::move(options); } }; /** * @brief Writes a set of columns to CSV format. 
* * The following code snippet demonstrates how to write columns to a file: * @code * auto destination = cudf::io::sink_info("dataset.csv"); * auto options = cudf::io::csv_writer_options(destination, table->view()) * .na_rep(na) * .include_header(include_header) * .rows_per_chunk(rows_per_chunk); * * cudf::io::write_csv(options); * @endcode * * @param options Settings for controlling writing behavior * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource to use for device memory allocation */ void write_csv(csv_writer_options const& options, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group } // namespace io } // namespace cudf
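// Illustrative sketch (not part of the header above): a CSV round trip using the
// csv_reader_options and csv_writer_options builders documented above. The file names, the "ts"
// column name, and the "NA"/"n/a" null markers are assumptions made up for the example; the
// builder calls themselves are taken from the interfaces in this header.
#include <cudf/io/csv.hpp>

void csv_round_trip()
{
  // Read: parse column "ts" as datetime and treat "NA"/"n/a" as additional null markers.
  auto const src = cudf::io::source_info{"input.csv"};
  auto read_opts = cudf::io::csv_reader_options::builder(src)
                     .delimiter(',')
                     .parse_dates(std::vector<std::string>{"ts"})
                     .na_values({"NA", "n/a"})
                     .build();
  auto result = cudf::io::read_csv(read_opts);

  // Write the same table back out: no header row, '|' as the field delimiter.
  auto const dst  = cudf::io::sink_info{"output.csv"};
  auto write_opts = cudf::io::csv_writer_options::builder(dst, result.tbl->view())
                      .include_header(false)
                      .inter_column_delimiter('|')
                      .build();
  cudf::io::write_csv(write_opts);
}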
0
rapidsai_public_repos/cudf/cpp/include/cudf
rapidsai_public_repos/cudf/cpp/include/cudf/io/datasource.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/io/types.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/span.hpp> #include <rmm/cuda_stream_view.hpp> #include <future> #include <memory> namespace cudf { //! IO interfaces namespace io { /** * @addtogroup io_datasources * @{ * @file */ /** * @brief Interface class for providing input data to the readers. */ class datasource { public: template <typename Container> class owning_buffer; // forward declaration /** * @brief Interface class for buffers that the datasource returns to the caller. * * Provides a basic interface to return the data address and size. */ class buffer { public: /** * @pure @brief Returns the buffer size in bytes. * * @return Buffer size in bytes */ [[nodiscard]] virtual size_t size() const = 0; /** * @pure @brief Returns the address of the data in the buffer. * * @return Address of the data in the buffer */ [[nodiscard]] virtual uint8_t const* data() const = 0; /** * @brief Base class destructor */ virtual ~buffer() {} /** * @brief Factory to construct a datasource buffer object from a container. * * @tparam Container Type of the container to construct the buffer from * @param data_owner The container to construct the buffer from (ownership is transferred) * @return Constructed buffer object */ template <typename Container> static std::unique_ptr<buffer> create(Container&& data_owner) { return std::make_unique<owning_buffer<Container>>(std::move(data_owner)); } }; /** * @brief Creates a source from a file path. * * @param[in] filepath Path to the file to use * @param[in] offset Bytes from the start of the file (the default is zero) * @param[in] size Bytes from the offset; use zero for entire file (the default is zero) * @return Constructed datasource object */ static std::unique_ptr<datasource> create(std::string const& filepath, size_t offset = 0, size_t size = 0); /** * @brief Creates a source from a host memory buffer. * # @deprecated Since 23.04 * * @param[in] buffer Host buffer object * @return Constructed datasource object */ static std::unique_ptr<datasource> create(host_buffer const& buffer); /** * @brief Creates a source from a host memory buffer. * * @param[in] buffer Host buffer object * @return Constructed datasource object */ static std::unique_ptr<datasource> create(cudf::host_span<std::byte const> buffer); /** * @brief Creates a source from a device memory buffer. * * @param buffer Device buffer object * @return Constructed datasource object */ static std::unique_ptr<datasource> create(cudf::device_span<std::byte const> buffer); /** * @brief Creates a source from an user implemented datasource object. * * @param[in] source Non-owning pointer to the datasource object * @return Constructed datasource object */ static std::unique_ptr<datasource> create(datasource* source); /** * @brief Creates a vector of datasources, one per element in the input vector. 
* * @param[in] args vector of parameters * @return Constructed vector of datasource objects */ template <typename T> static std::vector<std::unique_ptr<datasource>> create(std::vector<T> const& args) { std::vector<std::unique_ptr<datasource>> sources; sources.reserve(args.size()); std::transform(args.cbegin(), args.cend(), std::back_inserter(sources), [](auto const& arg) { return datasource::create(arg); }); return sources; } /** * @brief Base class destructor */ virtual ~datasource(){}; /** * @brief Returns a buffer with a subset of data from the source. * * @param[in] offset Bytes from the start * @param[in] size Bytes to read * * @return The data buffer (can be smaller than size) */ virtual std::unique_ptr<datasource::buffer> host_read(size_t offset, size_t size) = 0; /** * @brief Reads a selected range into a preallocated buffer. * * @param[in] offset Bytes from the start * @param[in] size Bytes to read * @param[in] dst Address of the existing host memory * * @return The number of bytes read (can be smaller than size) */ virtual size_t host_read(size_t offset, size_t size, uint8_t* dst) = 0; /** * @brief Whether or not this source supports reading directly into device memory. * * If this function returns true, the datasource will receive calls to device_read() instead of * host_read() when the reader processes the data on the device. Most readers will still make * host_read() calls, for the parts of input that are processed on the host (e.g. metadata). * * Data source implementations that don't support direct device reads don't need to override this * function. The implementations that do should override it to return false. * * @return bool Whether this source supports device_read() calls */ [[nodiscard]] virtual bool supports_device_read() const { return false; } /** * @brief Estimates whether a direct device read would be more optimal for the given size. * * @param size Number of bytes to read * @return whether the device read is expected to be more performant for the given size */ [[nodiscard]] virtual bool is_device_read_preferred(size_t size) const { return supports_device_read(); } /** * @brief Returns a device buffer with a subset of data from the source. * * For optimal performance, should only be called when `is_device_read_preferred` returns `true`. * Data source implementations that don't support direct device reads don't need to override this * function. * * @throws cudf::logic_error the object does not support direct device reads, i.e. * `supports_device_read` returns `false`. * * @param offset Number of bytes from the start * @param size Number of bytes to read * @param stream CUDA stream to use * * @return The data buffer in the device memory */ virtual std::unique_ptr<datasource::buffer> device_read(size_t offset, size_t size, rmm::cuda_stream_view stream) { CUDF_FAIL("datasource classes that support device_read must override it."); } /** * @brief Reads a selected range into a preallocated device buffer * * For optimal performance, should only be called when `is_device_read_preferred` returns `true`. * Data source implementations that don't support direct device reads don't need to override this * function. * * @throws cudf::logic_error when the object does not support direct device reads, i.e. * `supports_device_read` returns `false`. 
* * @param offset Number of bytes from the start * @param size Number of bytes to read * @param dst Address of the existing device memory * @param stream CUDA stream to use * * @return The number of bytes read (can be smaller than size) */ virtual size_t device_read(size_t offset, size_t size, uint8_t* dst, rmm::cuda_stream_view stream) { CUDF_FAIL("datasource classes that support device_read must override it."); } /** * @brief Asynchronously reads a selected range into a preallocated device buffer * * Returns a future value that contains the number of bytes read. Calling `get()` method of the * return value synchronizes this function. * * For optimal performance, should only be called when `is_device_read_preferred` returns `true`. * Data source implementations that don't support direct device reads don't need to override this * function. * * @throws cudf::logic_error when the object does not support direct device reads, i.e. * `supports_device_read` returns `false`. * * @param offset Number of bytes from the start * @param size Number of bytes to read * @param dst Address of the existing device memory * @param stream CUDA stream to use * * @return The number of bytes read as a future value (can be smaller than size) */ virtual std::future<size_t> device_read_async(size_t offset, size_t size, uint8_t* dst, rmm::cuda_stream_view stream) { CUDF_FAIL("datasource classes that support device_read_async must override it."); } /** * @brief Returns the size of the data in the source. * * @return The size of the source data in bytes */ [[nodiscard]] virtual size_t size() const = 0; /** * @brief Returns whether the source contains any data. * * @return True if there is data, False otherwise */ [[nodiscard]] virtual bool is_empty() const { return size() == 0; } /** * @brief Implementation for non owning buffer where datasource holds buffer until destruction. */ class non_owning_buffer : public buffer { public: non_owning_buffer() {} /** * @brief Construct a new non owning buffer object * * @param data The data buffer * @param size The size of the data buffer */ non_owning_buffer(uint8_t const* data, size_t size) : _data(data), _size(size) {} /** * @brief Returns the size of the buffer. * * @return The size of the buffer in bytes */ [[nodiscard]] size_t size() const override { return _size; } /** * @brief Returns the pointer to the buffer. * * @return Pointer to the buffer */ [[nodiscard]] uint8_t const* data() const override { return _data; } private: uint8_t const* _data{nullptr}; size_t _size{0}; }; /** * @brief Derived implementation of `buffer` that owns the data. * * Can use different container types to hold the data buffer. * * @tparam Container Type of the container object that owns the data */ template <typename Container> class owning_buffer : public buffer { public: /** * @brief Moves the input container into the newly created object. * * @param data_owner The container to construct the buffer from (ownership is transferred) */ owning_buffer(Container&& data_owner) : _data(std::move(data_owner)), _data_ptr(_data.data()), _size(_data.size()) { } /** * @brief Moves the input container into the newly created object, and exposes a subspan of the * buffer. 
* * @param data_owner The container to construct the buffer from (ownership is transferred) * @param data_ptr Pointer to the start of the subspan * @param size The size of the subspan */ owning_buffer(Container&& data_owner, uint8_t const* data_ptr, size_t size) : _data(std::move(data_owner)), _data_ptr(data_ptr), _size(size) { } /** * @brief Returns the size of the buffer. * * @return The size of the buffer in bytes */ [[nodiscard]] size_t size() const override { return _size; } /** * @brief Returns the pointer to the data in the buffer. * * @return Pointer to the data in the buffer */ [[nodiscard]] uint8_t const* data() const override { return static_cast<uint8_t const*>(_data_ptr); } private: Container _data; void const* _data_ptr; size_t _size; }; }; /** @} */ // end of group } // namespace io } // namespace cudf
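// Illustrative sketch (not part of the header above): a minimal user-defined datasource that
// serves reads from a host-memory vector. Only the pure-virtual members (the two host_read
// overloads and size()) are overridden; device-read support is left at the base-class default
// (unsupported). The class name and member layout are invented for the example.
#include <cudf/io/datasource.hpp>

#include <algorithm>
#include <cstring>
#include <vector>

class vector_datasource : public cudf::io::datasource {
 public:
  explicit vector_datasource(std::vector<uint8_t> data) : _data(std::move(data)) {}

  std::unique_ptr<buffer> host_read(size_t offset, size_t size) override
  {
    auto const available = offset < _data.size() ? _data.size() - offset : 0;
    auto const read_size = std::min(size, available);
    // Non-owning view into the vector; valid only as long as this datasource is alive.
    return std::make_unique<non_owning_buffer>(_data.data() + offset, read_size);
  }

  size_t host_read(size_t offset, size_t size, uint8_t* dst) override
  {
    auto const available = offset < _data.size() ? _data.size() - offset : 0;
    auto const read_size = std::min(size, available);
    std::memcpy(dst, _data.data() + offset, read_size);
    return read_size;
  }

  [[nodiscard]] size_t size() const override { return _data.size(); }

 private:
  std::vector<uint8_t> _data;
};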
0
rapidsai_public_repos/cudf/cpp/include/cudf
rapidsai_public_repos/cudf/cpp/include/cudf/io/orc_types.hpp
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstdint> namespace cudf::io::orc { enum CompressionKind : uint8_t { NONE = 0, ZLIB = 1, SNAPPY = 2, LZO = 3, LZ4 = 4, ZSTD = 5, }; enum TypeKind : int8_t { INVALID_TYPE_KIND = -1, BOOLEAN = 0, BYTE = 1, SHORT = 2, INT = 3, LONG = 4, FLOAT = 5, DOUBLE = 6, STRING = 7, BINARY = 8, TIMESTAMP = 9, LIST = 10, MAP = 11, STRUCT = 12, UNION = 13, DECIMAL = 14, DATE = 15, VARCHAR = 16, CHAR = 17, }; enum StreamKind : int8_t { INVALID_STREAM_KIND = -1, PRESENT = 0, // boolean stream of whether the next value is non-null DATA = 1, // the primary data stream LENGTH = 2, // the length of each value for variable length data DICTIONARY_DATA = 3, // the dictionary blob DICTIONARY_COUNT = 4, // deprecated prior to Hive 0.11 SECONDARY = 5, // a secondary data stream ROW_INDEX = 6, // the index for seeking to particular row groups BLOOM_FILTER = 7, // original bloom filters used before ORC-101 BLOOM_FILTER_UTF8 = 8, // bloom filters that consistently use utf8 }; enum ColumnEncodingKind : int8_t { INVALID_ENCODING_KIND = -1, DIRECT = 0, // the encoding is mapped directly to the stream using RLE v1 DICTIONARY = 1, // the encoding uses a dictionary of unique values using RLE v1 DIRECT_V2 = 2, // the encoding is direct using RLE v2 DICTIONARY_V2 = 3, // the encoding is dictionary-based using RLE v2 }; enum ProtofType : uint8_t { VARINT = 0, FIXED64 = 1, FIXEDLEN = 2, START_GROUP = 3, // deprecated END_GROUP = 4, // deprecated FIXED32 = 5, INVALID_6 = 6, INVALID_7 = 7, }; } // namespace cudf::io::orc
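// Illustrative sketch (not part of the header above): one way client code might consume the
// TypeKind enum, mapping a few primitive ORC kinds onto cudf type ids. This is only an example
// of using the enum; the ORC reader's real kind-to-dtype logic lives in libcudf's implementation
// and also handles decimals, timestamps, and nested types, which need extra context.
#include <cudf/io/orc_types.hpp>
#include <cudf/types.hpp>

cudf::type_id primitive_kind_to_type_id(cudf::io::orc::TypeKind kind)
{
  using cudf::io::orc::TypeKind;
  switch (kind) {
    case TypeKind::BOOLEAN: return cudf::type_id::BOOL8;
    case TypeKind::BYTE: return cudf::type_id::INT8;
    case TypeKind::SHORT: return cudf::type_id::INT16;
    case TypeKind::INT: return cudf::type_id::INT32;
    case TypeKind::LONG: return cudf::type_id::INT64;
    case TypeKind::FLOAT: return cudf::type_id::FLOAT32;
    case TypeKind::DOUBLE: return cudf::type_id::FLOAT64;
    case TypeKind::STRING: return cudf::type_id::STRING;
    default: return cudf::type_id::EMPTY;  // decimal/temporal/nested kinds need more context
  }
}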
0
rapidsai_public_repos/cudf/cpp/include/cudf
rapidsai_public_repos/cudf/cpp/include/cudf/io/arrow_io_source.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "datasource.hpp" #include <arrow/filesystem/filesystem.h> #include <arrow/io/interfaces.h> #include <memory> #include <string> namespace cudf::io { /** * @addtogroup io_datasources * @{ * @file */ /** * @brief Implementation class for reading from an Apache Arrow file. The file * could be a memory-mapped file or other implementation supported by Arrow. */ class arrow_io_source : public datasource { public: /** * @brief Constructs an object from an Apache Arrow Filesystem URI * * @param arrow_uri Apache Arrow Filesystem URI */ explicit arrow_io_source(std::string const& arrow_uri); /** * @brief Constructs an object from an `arrow` source object. * * @param file The `arrow` object from which the data is read */ explicit arrow_io_source(std::shared_ptr<arrow::io::RandomAccessFile> file) : arrow_file(file) {} /** * @brief Returns a buffer with a subset of data from the `arrow` source. * * @param offset The offset in bytes from which to read * @param size The number of bytes to read * @return A buffer with the read data */ std::unique_ptr<buffer> host_read(size_t offset, size_t size) override; /** * @brief Reads a selected range from the `arrow` source into a preallocated buffer. * * @param[in] offset The offset in bytes from which to read * @param[in] size The number of bytes to read * @param[out] dst The preallocated buffer to read into * @return The number of bytes read */ size_t host_read(size_t offset, size_t size, uint8_t* dst) override; /** * @brief Returns the size of the data in the `arrow` source. * * @return The size of the data in the `arrow` source */ [[nodiscard]] size_t size() const override; private: std::shared_ptr<arrow::fs::FileSystem> filesystem; std::shared_ptr<arrow::io::RandomAccessFile> arrow_file; }; /** @} */ // end of group } // namespace cudf::io
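// --- Illustrative usage sketch (not part of the header above) ---
// Reading a Parquet file through the Arrow filesystem layer. This assumes that
// cudf::io::source_info can be constructed from a datasource pointer, as the
// other readers in this module use it; the URI is a hypothetical placeholder.
#include <cudf/io/parquet.hpp>

void read_parquet_via_arrow()
{
  cudf::io::arrow_io_source source{"file:///tmp/dataset.parquet"};
  auto options =
    cudf::io::parquet_reader_options::builder(cudf::io::source_info{&source}).build();
  auto result = cudf::io::read_parquet(options);
  // result.tbl now holds the decoded table.
}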
0
rapidsai_public_repos/cudf/cpp/include/cudf
rapidsai_public_repos/cudf/cpp/include/cudf/io/orc.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/io/detail/orc.hpp> #include <cudf/io/types.hpp> #include <cudf/table/table_view.hpp> #include <cudf/types.hpp> #include <rmm/mr/device/per_device_resource.hpp> #include <memory> #include <optional> #include <string> #include <unordered_map> #include <vector> namespace cudf { namespace io { /** * @addtogroup io_readers * @{ * @file */ constexpr size_t default_stripe_size_bytes = 64 * 1024 * 1024; ///< 64MB default orc stripe size constexpr size_type default_stripe_size_rows = 1000000; ///< 1M rows default orc stripe rows constexpr size_type default_row_index_stride = 10000; ///< 10K rows default orc row index stride /** * @brief Builds settings to use for `read_orc()`. */ class orc_reader_options_builder; /** * @brief Settings to use for `read_orc()`. */ class orc_reader_options { source_info _source; // Names of column to read; `nullopt` is all std::optional<std::vector<std::string>> _columns; // List of individual stripes to read (ignored if empty) std::vector<std::vector<size_type>> _stripes; // Rows to skip from the start; ORC stores the number of rows as uint64_t uint64_t _skip_rows = 0; // Rows to read; `nullopt` is all std::optional<size_type> _num_rows; // Whether to use row index to speed-up reading bool _use_index = true; // Whether to use numpy-compatible dtypes bool _use_np_dtypes = true; // Cast timestamp columns to a specific type data_type _timestamp_type{type_id::EMPTY}; // Columns that should be read as Decimal128 std::vector<std::string> _decimal128_columns; friend orc_reader_options_builder; /** * @brief Constructor from source info. * * @param src source information used to read orc file */ explicit orc_reader_options(source_info src) : _source{std::move(src)} {} public: /** * @brief Default constructor. * * This has been added since Cython requires a default constructor to create objects on stack. */ orc_reader_options() = default; /** * @brief Creates `orc_reader_options_builder` which will build `orc_reader_options`. * * @param src Source information to read orc file * @return Builder to build reader options */ static orc_reader_options_builder builder(source_info src); /** * @brief Returns source info. * * @return Source info */ [[nodiscard]] source_info const& get_source() const { return _source; } /** * @brief Returns names of the columns to read, if set. * * @return Names of the columns to read; `nullopt` if the option is not set */ [[nodiscard]] auto const& get_columns() const { return _columns; } /** * @brief Returns vector of vectors, stripes to read for each input source * * @return Vector of vectors, stripes to read for each input source */ [[nodiscard]] auto const& get_stripes() const { return _stripes; } /** * @brief Returns number of rows to skip from the start. * * @return Number of rows to skip from the start */ uint64_t get_skip_rows() const { return _skip_rows; } /** * @brief Returns number of row to read. 
* * @return Number of rows to read; `nullopt` if the option hasn't been set (in which case the file * is read until the end) */ std::optional<size_type> const& get_num_rows() const { return _num_rows; } /** * @brief Whether to use row index to speed-up reading. * * @return `true` if row index is used to speed-up reading */ bool is_enabled_use_index() const { return _use_index; } /** * @brief Whether to use numpy-compatible dtypes. * * @return `true` if numpy-compatible dtypes are used */ bool is_enabled_use_np_dtypes() const { return _use_np_dtypes; } /** * @brief Returns timestamp type to which timestamp column will be cast. * * @return Timestamp type to which timestamp column will be cast */ data_type get_timestamp_type() const { return _timestamp_type; } /** * @brief Returns fully qualified names of columns that should be read as 128-bit Decimal. * * @return Fully qualified names of columns that should be read as 128-bit Decimal */ std::vector<std::string> const& get_decimal128_columns() const { return _decimal128_columns; } // Setters /** * @brief Sets names of the column to read. * * @param col_names Vector of column names */ void set_columns(std::vector<std::string> col_names) { _columns = std::move(col_names); } /** * @brief Sets list of stripes to read for each input source * * @param stripes Vector of vectors, mapping stripes to read to input sources * * @throw cudf::logic_error if a non-empty vector is passed, and `skip_rows` has been previously * set * @throw cudf::logic_error if a non-empty vector is passed, and `num_rows` has been previously * set */ void set_stripes(std::vector<std::vector<size_type>> stripes) { CUDF_EXPECTS(stripes.empty() or (_skip_rows == 0), "Can't set stripes along with skip_rows"); CUDF_EXPECTS(stripes.empty() or not _num_rows.has_value(), "Can't set stripes along with num_rows"); _stripes = std::move(stripes); } /** * @brief Sets number of rows to skip from the start. * * @param rows Number of rows * * @throw cudf::logic_error if a negative value is passed * @throw cudf::logic_error if stripes have been previously set */ void set_skip_rows(uint64_t rows) { CUDF_EXPECTS(rows == 0 or _stripes.empty(), "Can't set both skip_rows along with stripes"); _skip_rows = rows; } /** * @brief Sets number of row to read. * * @param nrows Number of rows * * @throw cudf::logic_error if a negative value is passed * @throw cudf::logic_error if stripes have been previously set */ void set_num_rows(size_type nrows) { CUDF_EXPECTS(nrows >= 0, "num_rows cannot be negative"); CUDF_EXPECTS(_stripes.empty(), "Can't set both num_rows and stripes"); _num_rows = nrows; } /** * @brief Enable/Disable use of row index to speed-up reading. * * @param use Boolean value to enable/disable row index use */ void enable_use_index(bool use) { _use_index = use; } /** * @brief Enable/Disable use of numpy-compatible dtypes * * @param use Boolean value to enable/disable */ void enable_use_np_dtypes(bool use) { _use_np_dtypes = use; } /** * @brief Sets timestamp type to which timestamp column will be cast. * * @param type Type of timestamp */ void set_timestamp_type(data_type type) { _timestamp_type = type; } /** * @brief Set columns that should be read as 128-bit Decimal * * @param val Vector of fully qualified column names */ void set_decimal128_columns(std::vector<std::string> val) { _decimal128_columns = std::move(val); } }; /** * @brief Builds settings to use for `read_orc()`. */ class orc_reader_options_builder { orc_reader_options options; public: /** * @brief Default constructor. 
* * This has been added since Cython requires a default constructor to create objects on stack. */ explicit orc_reader_options_builder() = default; /** * @brief Constructor from source info. * * @param src The source information used to read orc file */ explicit orc_reader_options_builder(source_info src) : options{std::move(src)} {}; /** * @brief Sets names of the column to read. * * @param col_names Vector of column names * @return this for chaining */ orc_reader_options_builder& columns(std::vector<std::string> col_names) { options._columns = std::move(col_names); return *this; } /** * @brief Sets list of individual stripes to read per source * * @param stripes Vector of vectors, mapping stripes to read to input sources * @return this for chaining */ orc_reader_options_builder& stripes(std::vector<std::vector<size_type>> stripes) { options.set_stripes(std::move(stripes)); return *this; } /** * @brief Sets number of rows to skip from the start. * * @param rows Number of rows * @return this for chaining */ orc_reader_options_builder& skip_rows(uint64_t rows) { options.set_skip_rows(rows); return *this; } /** * @brief Sets number of row to read. * * @param nrows Number of rows * @return this for chaining */ orc_reader_options_builder& num_rows(size_type nrows) { options.set_num_rows(nrows); return *this; } /** * @brief Enable/Disable use of row index to speed-up reading. * * @param use Boolean value to enable/disable row index use * @return this for chaining */ orc_reader_options_builder& use_index(bool use) { options._use_index = use; return *this; } /** * @brief Enable/Disable use of numpy-compatible dtypes. * * @param use Boolean value to enable/disable * @return this for chaining */ orc_reader_options_builder& use_np_dtypes(bool use) { options._use_np_dtypes = use; return *this; } /** * @brief Sets timestamp type to which timestamp column will be cast. * * @param type Type of timestamp * @return this for chaining */ orc_reader_options_builder& timestamp_type(data_type type) { options._timestamp_type = type; return *this; } /** * @brief Columns that should be read as 128-bit Decimal * * @param val Vector of column names * @return this for chaining */ orc_reader_options_builder& decimal128_columns(std::vector<std::string> val) { options._decimal128_columns = std::move(val); return *this; } /** * @brief move orc_reader_options member once it's built. */ operator orc_reader_options&&() { return std::move(options); } /** * @brief move orc_reader_options member once it's built. * * This has been added since Cython does not support overloading of conversion operators. * * @return Built `orc_reader_options` object's r-value reference */ orc_reader_options&& build() { return std::move(options); } }; /** * @brief Reads an ORC dataset into a set of columns. * * The following code snippet demonstrates how to read a dataset from a file: * @code * auto source = cudf::io::source_info("dataset.orc"); * auto options = cudf::io::orc_reader_options::builder(source); * auto result = cudf::io::read_orc(options); * @endcode * * @param options Settings for controlling reading behavior * @param mr Device memory resource used to allocate device memory of the table in the returned * table_with_metadata. * * @return The set of columns */ table_with_metadata read_orc( orc_reader_options const& options, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group /** * @addtogroup io_writers * @{ * @file */ /** * @brief Builds settings to use for `write_orc()`. 
*/ class orc_writer_options_builder; /** * @brief Constants to disambiguate statistics terminology for ORC. * * ORC refers to its finest granularity of row-grouping as "row group", * which corresponds to Parquet "pages". * Similarly, ORC's "stripe" corresponds to a Parquet "row group". * The following constants disambiguate the terminology for the statistics * collected at each level. */ static constexpr statistics_freq ORC_STATISTICS_STRIPE = statistics_freq::STATISTICS_ROWGROUP; static constexpr statistics_freq ORC_STATISTICS_ROW_GROUP = statistics_freq::STATISTICS_PAGE; /** * @brief Settings to use for `write_orc()`. */ class orc_writer_options { // Specify the sink to use for writer output sink_info _sink; // Specify the compression format to use compression_type _compression = compression_type::AUTO; // Specify frequency of statistics collection statistics_freq _stats_freq = ORC_STATISTICS_ROW_GROUP; // Maximum size of each stripe (unless smaller than a single row group) size_t _stripe_size_bytes = default_stripe_size_bytes; // Maximum number of rows in stripe (unless smaller than a single row group) size_type _stripe_size_rows = default_stripe_size_rows; // Row index stride (maximum number of rows in each row group) size_type _row_index_stride = default_row_index_stride; // Set of columns to output table_view _table; // Optional associated metadata std::optional<table_input_metadata> _metadata; // Optional footer key_value_metadata std::map<std::string, std::string> _user_data; // Optional compression statistics std::shared_ptr<writer_compression_statistics> _compression_stats; // Specify whether string dictionaries should be alphabetically sorted bool _enable_dictionary_sort = true; friend orc_writer_options_builder; /** * @brief Constructor from sink and table. * * @param sink The sink used for writer output * @param table Table to be written to output */ explicit orc_writer_options(sink_info const& sink, table_view const& table) : _sink(sink), _table(table) { } public: /** * @brief Default constructor. * * This has been added since Cython requires a default constructor to create objects on stack. */ explicit orc_writer_options() = default; /** * @brief Create builder to create `orc_writer_options`. * * @param sink The sink used for writer output * @param table Table to be written to output * * @return Builder to build `orc_writer_options` */ static orc_writer_options_builder builder(sink_info const& sink, table_view const& table); /** * @brief Returns sink info. * * @return Sink info */ [[nodiscard]] sink_info const& get_sink() const { return _sink; } /** * @brief Returns compression type. * * @return Compression type */ [[nodiscard]] compression_type get_compression() const { return _compression; } /** * @brief Whether writing column statistics is enabled/disabled. * * @return `true` if writing column statistics is enabled */ [[nodiscard]] bool is_enabled_statistics() const { return _stats_freq != statistics_freq::STATISTICS_NONE; } /** * @brief Returns frequency of statistics collection. * * @return Frequency of statistics collection */ [[nodiscard]] statistics_freq get_statistics_freq() const { return _stats_freq; } /** * @brief Returns maximum stripe size, in bytes. * * @return Maximum stripe size, in bytes */ [[nodiscard]] auto get_stripe_size_bytes() const { return _stripe_size_bytes; } /** * @brief Returns maximum stripe size, in rows. 
 * * @return Maximum stripe size, in rows */ [[nodiscard]] auto get_stripe_size_rows() const { return _stripe_size_rows; } /** * @brief Returns the row index stride. * * @return Row index stride */ auto get_row_index_stride() const { auto const unaligned_stride = std::min(_row_index_stride, get_stripe_size_rows()); return unaligned_stride - unaligned_stride % 8; } /** * @brief Returns table to be written to output. * * @return Table to be written to output */ [[nodiscard]] table_view get_table() const { return _table; } /** * @brief Returns associated metadata. * * @return Associated metadata */ [[nodiscard]] auto const& get_metadata() const { return _metadata; } /** * @brief Returns Key-Value footer metadata information. * * @return Key-Value footer metadata information */ [[nodiscard]] std::map<std::string, std::string> const& get_key_value_metadata() const { return _user_data; } /** * @brief Returns a shared pointer to the user-provided compression statistics. * * @return Compression statistics */ [[nodiscard]] std::shared_ptr<writer_compression_statistics> get_compression_statistics() const { return _compression_stats; } /** * @brief Returns whether string dictionaries should be sorted. * * @return `true` if string dictionaries should be sorted */ [[nodiscard]] bool get_enable_dictionary_sort() const { return _enable_dictionary_sort; } // Setters /** * @brief Sets compression type. * * @param comp Compression type */ void set_compression(compression_type comp) { _compression = comp; } /** * @brief Choose granularity of statistics collection. * * The granularity can be set to: * - cudf::io::STATISTICS_NONE: No statistics are collected. * - cudf::io::ORC_STATISTICS_STRIPE: Statistics are collected for each ORC stripe. * - cudf::io::ORC_STATISTICS_ROWGROUP: Statistics are collected for each ORC row group. * * @param val Frequency of statistics collection */ void enable_statistics(statistics_freq val) { _stats_freq = val; } /** * @brief Sets the maximum stripe size, in bytes. * * @param size_bytes Maximum stripe size, in bytes to be set * * @throw cudf::logic_error if a value below the minimal size is passed */ void set_stripe_size_bytes(size_t size_bytes) { CUDF_EXPECTS(size_bytes >= 64 << 10, "64KB is the minimum stripe size"); _stripe_size_bytes = size_bytes; } /** * @brief Sets the maximum stripe size, in rows. * * If the stripe size is smaller than the row group size, the row group size will be reduced to match * the stripe size. * * @param size_rows Maximum stripe size, in rows to be set * * @throw cudf::logic_error if a value below the minimal number of rows is passed */ void set_stripe_size_rows(size_type size_rows) { CUDF_EXPECTS(size_rows >= 512, "Maximum stripe size cannot be smaller than 512"); _stripe_size_rows = size_rows; } /** * @brief Sets the row index stride. * * Rounded down to a multiple of 8. * * @param stride Row index stride to be set * * @throw cudf::logic_error if a value below the minimal row index stride is passed */ void set_row_index_stride(size_type stride) { CUDF_EXPECTS(stride >= 512, "Row index stride cannot be smaller than 512"); _row_index_stride = stride; } /** * @brief Sets table to be written to output. * * @param tbl Table for the output */ void set_table(table_view tbl) { _table = tbl; } /** * @brief Sets associated metadata. * * @param meta Associated metadata */ void set_metadata(table_input_metadata meta) { _metadata = std::move(meta); } /** * @brief Sets Key-Value footer metadata. 
* * @param metadata Key-Value footer metadata */ void set_key_value_metadata(std::map<std::string, std::string> metadata) { _user_data = std::move(metadata); } /** * @brief Sets the pointer to the output compression statistics. * * @param comp_stats Pointer to compression statistics to be updated after writing */ void set_compression_statistics(std::shared_ptr<writer_compression_statistics> comp_stats) { _compression_stats = std::move(comp_stats); } /** * @brief Sets whether string dictionaries should be sorted. * * @param val Boolean value to enable/disable */ void set_enable_dictionary_sort(bool val) { _enable_dictionary_sort = val; } }; /** * @brief Builds settings to use for `write_orc()`. */ class orc_writer_options_builder { orc_writer_options options; public: /** * @brief Default constructor. * * This has been added since Cython requires a default constructor to create objects on stack. */ orc_writer_options_builder() = default; /** * @brief Constructor from sink and table. * * @param sink The sink used for writer output * @param table Table to be written to output */ orc_writer_options_builder(sink_info const& sink, table_view const& table) : options{sink, table} { } /** * @brief Sets compression type. * * @param comp The compression type to use * @return this for chaining */ orc_writer_options_builder& compression(compression_type comp) { options._compression = comp; return *this; } /** * @brief Choose granularity of column statistics to be written * * The granularity can be set to: * - cudf::io::STATISTICS_NONE: No statistics are collected. * - cudf::io::ORC_STATISTICS_STRIPE: Statistics are collected for each ORC stripe. * - cudf::io::ORC_STATISTICS_ROWGROUP: Statistics are collected for each ORC row group. * * @param val Level of statistics collection * @return this for chaining */ orc_writer_options_builder& enable_statistics(statistics_freq val) { options._stats_freq = val; return *this; } /** * @brief Sets the maximum stripe size, in bytes. * * @param val maximum stripe size * @return this for chaining */ orc_writer_options_builder& stripe_size_bytes(size_t val) { options.set_stripe_size_bytes(val); return *this; } /** * @brief Sets the maximum number of rows in output stripes. * * @param val maximum number or rows * @return this for chaining */ orc_writer_options_builder& stripe_size_rows(size_type val) { options.set_stripe_size_rows(val); return *this; } /** * @brief Sets the row index stride. * * @param val new row index stride * @return this for chaining */ orc_writer_options_builder& row_index_stride(size_type val) { options.set_row_index_stride(val); return *this; } /** * @brief Sets table to be written to output. * * @param tbl Table for the output * @return this for chaining */ orc_writer_options_builder& table(table_view tbl) { options._table = tbl; return *this; } /** * @brief Sets associated metadata. * * @param meta Associated metadata * @return this for chaining */ orc_writer_options_builder& metadata(table_input_metadata meta) { options._metadata = std::move(meta); return *this; } /** * @brief Sets Key-Value footer metadata. * * @param metadata Key-Value footer metadata * @return this for chaining */ orc_writer_options_builder& key_value_metadata(std::map<std::string, std::string> metadata) { options._user_data = std::move(metadata); return *this; } /** * @brief Sets the pointer to the output compression statistics. 
* * @param comp_stats Pointer to compression statistics to be filled once writer is done * @return this for chaining */ orc_writer_options_builder& compression_statistics( std::shared_ptr<writer_compression_statistics> const& comp_stats) { options._compression_stats = comp_stats; return *this; } /** * @brief Sets whether string dictionaries should be sorted. * * @param val Boolean value to enable/disable * @return this for chaining */ orc_writer_options_builder& enable_dictionary_sort(bool val) { options._enable_dictionary_sort = val; return *this; } /** * @brief move orc_writer_options member once it's built. */ operator orc_writer_options&&() { return std::move(options); } /** * @brief move orc_writer_options member once it's built. * * This has been added since Cython does not support overloading of conversion operators. * * @return Built `orc_writer_options` object's r-value reference */ orc_writer_options&& build() { return std::move(options); } }; /** * @brief Writes a set of columns to ORC format. * * The following code snippet demonstrates how to write columns to a file: * @code * auto destination = cudf::io::sink_info("dataset.orc"); * auto options = cudf::io::orc_writer_options::builder(destination, table->view()); * cudf::io::write_orc(options); * @endcode * * @param options Settings for controlling reading behavior */ void write_orc(orc_writer_options const& options); /** * @brief Builds settings to use for `write_orc_chunked()`. */ class chunked_orc_writer_options_builder; /** * @brief Settings to use for `write_orc_chunked()`. */ class chunked_orc_writer_options { // Specify the sink to use for writer output sink_info _sink; // Specify the compression format to use compression_type _compression = compression_type::AUTO; // Specify granularity of statistics collection statistics_freq _stats_freq = ORC_STATISTICS_ROW_GROUP; // Maximum size of each stripe (unless smaller than a single row group) size_t _stripe_size_bytes = default_stripe_size_bytes; // Maximum number of rows in stripe (unless smaller than a single row group) size_type _stripe_size_rows = default_stripe_size_rows; // Row index stride (maximum number of rows in each row group) size_type _row_index_stride = default_row_index_stride; // Optional associated metadata std::optional<table_input_metadata> _metadata; // Optional footer key_value_metadata std::map<std::string, std::string> _user_data; // Optional compression statistics std::shared_ptr<writer_compression_statistics> _compression_stats; // Specify whether string dictionaries should be alphabetically sorted bool _enable_dictionary_sort = true; friend chunked_orc_writer_options_builder; /** * @brief Constructor from sink and table. * * @param sink The sink used for writer output */ chunked_orc_writer_options(sink_info const& sink) : _sink(sink) {} public: /** * @brief Default constructor. * * This has been added since Cython requires a default constructor to create objects on stack. */ explicit chunked_orc_writer_options() = default; /** * @brief Create builder to create `chunked_orc_writer_options`. * * @param sink The sink used for writer output * * @return Builder to build chunked_orc_writer_options */ static chunked_orc_writer_options_builder builder(sink_info const& sink); /** * @brief Returns sink info. * * @return Sink info */ [[nodiscard]] sink_info const& get_sink() const { return _sink; } /** * @brief Returns compression type. 
 * * @return Compression type */ [[nodiscard]] compression_type get_compression() const { return _compression; } /** * @brief Returns granularity of statistics collection. * * @return Granularity of statistics collection */ [[nodiscard]] statistics_freq get_statistics_freq() const { return _stats_freq; } /** * @brief Returns maximum stripe size, in bytes. * * @return Maximum stripe size, in bytes */ [[nodiscard]] auto get_stripe_size_bytes() const { return _stripe_size_bytes; } /** * @brief Returns maximum stripe size, in rows. * * @return Maximum stripe size, in rows */ [[nodiscard]] auto get_stripe_size_rows() const { return _stripe_size_rows; } /** * @brief Returns the row index stride. * * @return Row index stride */ auto get_row_index_stride() const { auto const unaligned_stride = std::min(_row_index_stride, get_stripe_size_rows()); return unaligned_stride - unaligned_stride % 8; } /** * @brief Returns associated metadata. * * @return Associated metadata */ [[nodiscard]] auto const& get_metadata() const { return _metadata; } /** * @brief Returns Key-Value footer metadata information. * * @return Key-Value footer metadata information */ [[nodiscard]] std::map<std::string, std::string> const& get_key_value_metadata() const { return _user_data; } /** * @brief Returns a shared pointer to the user-provided compression statistics. * * @return Compression statistics */ [[nodiscard]] std::shared_ptr<writer_compression_statistics> get_compression_statistics() const { return _compression_stats; } /** * @brief Returns whether string dictionaries should be sorted. * * @return `true` if string dictionaries should be sorted */ [[nodiscard]] bool get_enable_dictionary_sort() const { return _enable_dictionary_sort; } // Setters /** * @brief Sets compression type. * * @param comp The compression type to use */ void set_compression(compression_type comp) { _compression = comp; } /** * @brief Choose granularity of statistics collection. * * The granularity can be set to: * - cudf::io::STATISTICS_NONE: No statistics are collected. * - cudf::io::ORC_STATISTICS_STRIPE: Statistics are collected for each ORC stripe. * - cudf::io::ORC_STATISTICS_ROWGROUP: Statistics are collected for each ORC row group. * * @param val Frequency of statistics collection */ void enable_statistics(statistics_freq val) { _stats_freq = val; } /** * @brief Sets the maximum stripe size, in bytes. * * @param size_bytes Maximum stripe size, in bytes to be set * * @throw cudf::logic_error if a value below the minimal stripe size is passed */ void set_stripe_size_bytes(size_t size_bytes) { CUDF_EXPECTS(size_bytes >= 64 << 10, "64KB is the minimum stripe size"); _stripe_size_bytes = size_bytes; } /** * @brief Sets the maximum stripe size, in rows. * * If the stripe size is smaller than the row group size, the row group size will be reduced to match * the stripe size. * * @param size_rows Maximum stripe size, in rows to be set * * @throw cudf::logic_error if a value below the minimal number of rows in a stripe is passed */ void set_stripe_size_rows(size_type size_rows) { CUDF_EXPECTS(size_rows >= 512, "maximum stripe size cannot be smaller than 512"); _stripe_size_rows = size_rows; } /** * @brief Sets the row index stride. * * Rounded down to a multiple of 8. 
* * @param stride Row index stride to be set * * @throw cudf::logic_error if a value below the minimal number of rows in a row group is passed */ void set_row_index_stride(size_type stride) { CUDF_EXPECTS(stride >= 512, "Row index stride cannot be smaller than 512"); _row_index_stride = stride; } /** * @brief Sets associated metadata. * * @param meta Associated metadata */ void metadata(table_input_metadata meta) { _metadata = std::move(meta); } /** * @brief Sets Key-Value footer metadata. * * @param metadata Key-Value footer metadata */ void set_key_value_metadata(std::map<std::string, std::string> metadata) { _user_data = std::move(metadata); } /** * @brief Sets the pointer to the output compression statistics. * * @param comp_stats Pointer to compression statistics to be updated after writing */ void set_compression_statistics(std::shared_ptr<writer_compression_statistics> comp_stats) { _compression_stats = std::move(comp_stats); } /** * @brief Sets whether string dictionaries should be sorted. * * @param val Boolean value to enable/disable */ void set_enable_dictionary_sort(bool val) { _enable_dictionary_sort = val; } }; /** * @brief Builds settings to use for `write_orc_chunked()`. */ class chunked_orc_writer_options_builder { chunked_orc_writer_options options; public: /** * @brief Default constructor. * * This has been added since Cython requires a default constructor to create objects on stack. */ chunked_orc_writer_options_builder() = default; /** * @brief Constructor from sink and table. * * @param sink The sink used for writer output */ explicit chunked_orc_writer_options_builder(sink_info const& sink) : options{sink} {} /** * @brief Sets compression type. * * @param comp The compression type to use * @return this for chaining */ chunked_orc_writer_options_builder& compression(compression_type comp) { options._compression = comp; return *this; } /** * @brief Choose granularity of statistics collection * * The granularity can be set to: * - cudf::io::STATISTICS_NONE: No statistics are collected. * - cudf::io::ORC_STATISTICS_STRIPE: Statistics are collected for each ORC stripe. * - cudf::io::ORC_STATISTICS_ROWGROUP: Statistics are collected for each ORC row group. * * @param val Frequency of statistics collection * @return this for chaining */ chunked_orc_writer_options_builder& enable_statistics(statistics_freq val) { options._stats_freq = val; return *this; } /** * @brief Sets the maximum stripe size, in bytes. * * @param val maximum stripe size * @return this for chaining */ chunked_orc_writer_options_builder& stripe_size_bytes(size_t val) { options.set_stripe_size_bytes(val); return *this; } /** * @brief Sets the maximum number of rows in output stripes. * * @param val maximum number or rows * @return this for chaining */ chunked_orc_writer_options_builder& stripe_size_rows(size_type val) { options.set_stripe_size_rows(val); return *this; } /** * @brief Sets the row index stride. * * @param val new row index stride * @return this for chaining */ chunked_orc_writer_options_builder& row_index_stride(size_type val) { options.set_row_index_stride(val); return *this; } /** * @brief Sets associated metadata. * * @param meta Associated metadata * @return this for chaining */ chunked_orc_writer_options_builder& metadata(table_input_metadata meta) { options._metadata = std::move(meta); return *this; } /** * @brief Sets Key-Value footer metadata. 
* * @param metadata Key-Value footer metadata * @return this for chaining */ chunked_orc_writer_options_builder& key_value_metadata( std::map<std::string, std::string> metadata) { options._user_data = std::move(metadata); return *this; } /** * @brief Sets the pointer to the output compression statistics. * * @param comp_stats Pointer to compression statistics to be filled once writer is done * @return this for chaining */ chunked_orc_writer_options_builder& compression_statistics( std::shared_ptr<writer_compression_statistics> const& comp_stats) { options._compression_stats = comp_stats; return *this; } /** * @brief Sets whether string dictionaries should be sorted. * * @param val Boolean value to enable/disable * @return this for chaining */ chunked_orc_writer_options_builder& enable_dictionary_sort(bool val) { options._enable_dictionary_sort = val; return *this; } /** * @brief move chunked_orc_writer_options member once it's built. */ operator chunked_orc_writer_options&&() { return std::move(options); } /** * @brief move chunked_orc_writer_options member once it's built. * * This has been added since Cython does not support overloading of conversion operators. * * @return Built `chunked_orc_writer_options` object's r-value reference */ chunked_orc_writer_options&& build() { return std::move(options); } }; /** * @brief Chunked orc writer class writes an ORC file in a chunked/stream form. * * The intent of the write_orc_chunked_ path is to allow writing of an * arbitrarily large / arbitrary number of rows to an ORC file in multiple passes. * * The following code snippet demonstrates how to write a single ORC file containing * one logical table by writing a series of individual cudf::tables. * @code * ... * std::string filepath = "dataset.orc"; * cudf::io::chunked_orc_writer_options options = cudf::io::chunked_orc_writer_options * options::builder(cudf::sink_info(filepath)); * ... * orc_chunked_writer writer(options) * writer.write(table0) * writer.write(table1) * ... * writer.close(); * @endcode */ class orc_chunked_writer { public: /** * @brief Default constructor, this should never be used. * This is added just to satisfy cython. */ orc_chunked_writer() = default; /** * @brief Constructor with chunked writer options * * @param[in] options options used to write table */ orc_chunked_writer(chunked_orc_writer_options const& options); /** * @brief Writes table to output. * * @param[in] table Table that needs to be written * @return returns reference of the class object */ orc_chunked_writer& write(table_view const& table); /** * @brief Finishes the chunked/streamed write process. */ void close(); /// Unique pointer to impl writer class std::unique_ptr<cudf::io::detail::orc::writer> writer; }; /** @} */ // end of group } // namespace io } // namespace cudf
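// --- Illustrative usage sketch (not part of the header above) ---
// Configuring the writer options discussed above before calling write_orc().
// The output path, compression choice, and sizes are hypothetical placeholders.
void write_orc_with_options(cudf::table_view const& table)
{
  auto sink    = cudf::io::sink_info("dataset.orc");
  auto options = cudf::io::orc_writer_options::builder(sink, table)
                   .compression(cudf::io::compression_type::ZSTD)
                   .enable_statistics(cudf::io::ORC_STATISTICS_ROW_GROUP)
                   .stripe_size_rows(500000)  // must be at least 512 rows
                   .row_index_stride(10000)   // rounded down to a multiple of 8
                   .build();
  cudf::io::write_orc(options);
}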
0
rapidsai_public_repos/cudf/cpp/include/cudf
rapidsai_public_repos/cudf/cpp/include/cudf/io/avro.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "types.hpp" #include <cudf/table/table_view.hpp> #include <cudf/types.hpp> #include <rmm/mr/device/per_device_resource.hpp> #include <memory> #include <string> #include <vector> namespace cudf { namespace io { /** * @addtogroup io_readers * @{ * @file */ class avro_reader_options_builder; /** * @brief Settings to use for `read_avro()`. */ class avro_reader_options { source_info _source; // Names of column to read; empty is all std::vector<std::string> _columns; // Rows to skip from the start; size_type _skip_rows = 0; // Rows to read; -1 is all size_type _num_rows = -1; /** * @brief Constructor from source info. * * @param src source information used to read avro file */ explicit avro_reader_options(source_info src) : _source{std::move(src)} {} friend avro_reader_options_builder; public: /** * @brief Default constructor. * * This has been added since Cython requires a default constructor to create objects on stack. */ avro_reader_options() = default; /** * @brief Returns source info. * * @return Source info */ [[nodiscard]] source_info const& get_source() const { return _source; } /** * @brief Returns names of the columns to be read. * * @return Names of the columns to be read */ [[nodiscard]] std::vector<std::string> get_columns() const { return _columns; } /** * @brief Returns number of rows to skip from the start. * * @return Number of rows to skip from the start */ [[nodiscard]] size_type get_skip_rows() const { return _skip_rows; } /** * @brief Returns number of rows to read. * * @return Number of rows to read */ [[nodiscard]] size_type get_num_rows() const { return _num_rows; } /** * @brief Set names of the column to be read. * * @param col_names Vector of column names */ void set_columns(std::vector<std::string> col_names) { _columns = std::move(col_names); } /** * @brief Sets number of rows to skip. * * @param val Number of rows to skip from start */ void set_skip_rows(size_type val) { _skip_rows = val; } /** * @brief Sets number of rows to read. * * @param val Number of rows to read after skip */ void set_num_rows(size_type val) { _num_rows = val; } /** * @brief create avro_reader_options_builder which will build avro_reader_options. * * @param src source information used to read avro file * @returns builder to build reader options */ static avro_reader_options_builder builder(source_info src); }; /** * @brief Builder to build options for `read_avro()`. */ class avro_reader_options_builder { avro_reader_options options; public: /** * @brief Default constructor. * * This has been added since Cython requires a default constructor to create objects on stack. */ avro_reader_options_builder() = default; /** * @brief Constructor from source info. * * @param src The source information used to read avro file */ explicit avro_reader_options_builder(source_info src) : options{std::move(src)} {} /** * @brief Set names of the column to be read. 
* * @param col_names Vector of column names * @return this for chaining */ avro_reader_options_builder& columns(std::vector<std::string> col_names) { options._columns = std::move(col_names); return *this; } /** * @brief Sets number of rows to skip. * * @param val Number of rows to skip from start * @return this for chaining */ avro_reader_options_builder& skip_rows(size_type val) { options._skip_rows = val; return *this; } /** * @brief Sets number of rows to read. * * @param val Number of rows to read after skip * @return this for chaining */ avro_reader_options_builder& num_rows(size_type val) { options._num_rows = val; return *this; } /** * @brief move avro_reader_options member once it's built. */ operator avro_reader_options&&() { return std::move(options); } /** * @brief move avro_reader_options member once it's built. * * This has been added since Cython does not support overloading of conversion operators. * * @return Built `avro_reader_options` object's r-value reference */ avro_reader_options&& build() { return std::move(options); } }; /** * @brief Reads an Avro dataset into a set of columns. * * The following code snippet demonstrates how to read a dataset from a file: * @code * auto source = cudf::io::source_info("dataset.avro"); * auto options = cudf::io::avro_reader_options::builder(source); * auto result = cudf::io::read_avro(options); * @endcode * * @param options Settings for controlling reading behavior * @param mr Device memory resource used to allocate device memory of the table in the returned * table_with_metadata * * @return The set of columns along with metadata */ table_with_metadata read_avro( avro_reader_options const& options, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group } // namespace io } // namespace cudf
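// --- Illustrative usage sketch (not part of the header above) ---
// Selecting columns and a row window when reading Avro. The file path and
// column names are hypothetical placeholders.
void read_avro_subset()
{
  auto source  = cudf::io::source_info("dataset.avro");
  auto options = cudf::io::avro_reader_options::builder(source)
                   .columns({"id", "value"})
                   .skip_rows(100)
                   .num_rows(1000)
                   .build();
  auto result = cudf::io::read_avro(options);
}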
0
rapidsai_public_repos/cudf/cpp/include/cudf
rapidsai_public_repos/cudf/cpp/include/cudf/io/data_sink.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/types.hpp> #include <cudf/utilities/error.hpp> #include <rmm/cuda_stream_view.hpp> #include <algorithm> #include <future> #include <memory> #include <string> #include <vector> namespace cudf { //! IO interfaces namespace io { /** * @addtogroup io_datasinks * @{ * @file */ /** * @brief Interface class for storing the output data from the writers */ class data_sink { public: /** * @brief Create a sink from a file path * * @param[in] filepath Path to the file to use * @return Constructed data_sink object */ static std::unique_ptr<data_sink> create(std::string const& filepath); /** * @brief Create a sink from a std::vector * * @param[in,out] buffer Pointer to the output vector * @return Constructed data_sink object */ static std::unique_ptr<data_sink> create(std::vector<char>* buffer); /** * @brief Create a void sink (one that does no actual io) * * A useful code path for benchmarking, to eliminate physical * hardware randomness from profiling. * * @return Constructed data_sink object */ static std::unique_ptr<data_sink> create(); /** * @brief Create a wrapped custom user data sink * * @param[in] user_sink User-provided data sink (typically custom class) * * The data sink returned here is not the one passed by the user. It is an internal * class that wraps the user pointer. The principle is to allow the user to declare * a custom sink instance and use it across multiple write() calls. * * @return Constructed data_sink object */ static std::unique_ptr<data_sink> create(cudf::io::data_sink* const user_sink); /** * @brief Creates a vector of data sinks, one per element in the input vector. * * @param[in] args vector of parameters * @return Constructed vector of data sinks */ template <typename T> static std::vector<std::unique_ptr<data_sink>> create(std::vector<T> const& args) { std::vector<std::unique_ptr<data_sink>> sinks; sinks.reserve(args.size()); std::transform(args.cbegin(), args.cend(), std::back_inserter(sinks), [](auto const& arg) { return data_sink::create(arg); }); return sinks; } /** * @brief Base class destructor */ virtual ~data_sink(){}; /** * @pure @brief Append the buffer content to the sink * * @param[in] data Pointer to the buffer to be written into the sink object * @param[in] size Number of bytes to write */ virtual void host_write(void const* data, size_t size) = 0; /** * @brief Whether or not this sink supports writing from gpu memory addresses. * * Internal to some of the file format writers, we have code that does things like * * tmp_buffer = alloc_temp_buffer(); * cudaMemcpy(tmp_buffer, device_buffer, size); * sink->write(tmp_buffer, size); * * In the case where the sink type is itself a memory buffered write, this ends up * being effectively a second memcpy. 
So a useful optimization for a "smart" * custom data_sink is to do its own internal management of the movement * of data between cpu and gpu; turning the internals of the writer into simply * * sink->device_write(device_buffer, size) * * If this function returns true, the data_sink will receive calls to device_write() * instead of host_write() when possible. However, it is still possible to receive * host_write() calls as well. * * @return If this writer supports device_write() calls */ [[nodiscard]] virtual bool supports_device_write() const { return false; } /** * @brief Estimates whether a direct device write would be more optimal for the given size. * * @param size Number of bytes to write * @return whether the device write is expected to be more performant for the given size */ [[nodiscard]] virtual bool is_device_write_preferred(size_t size) const { return supports_device_write(); } /** * @brief Append the buffer content to the sink from a gpu address * * For optimal performance, should only be called when `is_device_write_preferred` returns `true`. * Data sink implementations that don't support direct device writes don't need to override * this function. * * @throws cudf::logic_error if the object does not support direct device writes, i.e. * `supports_device_write` returns `false`. * * @param gpu_data Pointer to the buffer to be written into the sink object * @param size Number of bytes to write * @param stream CUDA stream to use */ virtual void device_write(void const* gpu_data, size_t size, rmm::cuda_stream_view stream) { CUDF_FAIL("data_sink classes that support device_write must override it."); } /** * @brief Asynchronously append the buffer content to the sink from a gpu address * * For optimal performance, should only be called when `is_device_write_preferred` returns `true`. * Data sink implementations that don't support direct device writes don't need to override * this function. * * `gpu_data` must not be freed until this call is synchronized. * @code{.pseudo} * auto result = device_write_async(gpu_data, size, stream); * result.wait(); // OR result.get() * @endcode * * @throws cudf::logic_error if the object does not support direct device writes, i.e. * `supports_device_write` returns `false`. * * @param gpu_data Pointer to the buffer to be written into the sink object * @param size Number of bytes to write * @param stream CUDA stream to use * @return a future that can be used to synchronize the call */ virtual std::future<void> device_write_async(void const* gpu_data, size_t size, rmm::cuda_stream_view stream) { CUDF_FAIL("data_sink classes that support device_write_async must override it."); } /** * @pure @brief Flush the data written into the sink */ virtual void flush() = 0; /** * @pure @brief Returns the total number of bytes written into this sink * * @return Total number of bytes written into this sink */ virtual size_t bytes_written() = 0; }; /** @} */ // end of group } // namespace io } // namespace cudf
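// --- Illustrative usage sketch (not part of the header above) ---
// A minimal host-memory sink implementing the three pure virtual members
// (host_write, flush, bytes_written). Functionally it mirrors the built-in
// create(std::vector<char>*) factory; the class name and <vector> include are
// assumptions made for this example only.
#include <vector>

class vector_sink : public cudf::io::data_sink {
 public:
  void host_write(void const* data, size_t size) override
  {
    auto const* bytes = static_cast<char const*>(data);
    _buffer.insert(_buffer.end(), bytes, bytes + size);
  }

  // Nothing is buffered outside of _buffer, so flush is a no-op.
  void flush() override {}

  size_t bytes_written() override { return _buffer.size(); }

  [[nodiscard]] std::vector<char> const& data() const { return _buffer; }

 private:
  std::vector<char> _buffer;
};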
0
rapidsai_public_repos/cudf/cpp/include/cudf
rapidsai_public_repos/cudf/cpp/include/cudf/io/parquet.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/ast/expressions.hpp> #include <cudf/io/detail/parquet.hpp> #include <cudf/io/types.hpp> #include <cudf/table/table_view.hpp> #include <cudf/types.hpp> #include <rmm/mr/device/per_device_resource.hpp> #include <iostream> #include <memory> #include <optional> #include <string> #include <vector> namespace cudf::io { /** * @addtogroup io_readers * @{ * @file */ constexpr size_t default_row_group_size_bytes = 128 * 1024 * 1024; ///< 128MB per row group constexpr size_type default_row_group_size_rows = 1000000; ///< 1 million rows per row group constexpr size_t default_max_page_size_bytes = 512 * 1024; ///< 512KB per page constexpr size_type default_max_page_size_rows = 20000; ///< 20k rows per page constexpr int32_t default_column_index_truncate_length = 64; ///< truncate to 64 bytes constexpr size_t default_max_dictionary_size = 1024 * 1024; ///< 1MB dictionary size constexpr size_type default_max_page_fragment_size = 5000; ///< 5000 rows per page fragment class parquet_reader_options_builder; /** * @brief Settings for `read_parquet()`. */ class parquet_reader_options { source_info _source; // Path in schema of column to read; `nullopt` is all std::optional<std::vector<std::string>> _columns; // List of individual row groups to read (ignored if empty) std::vector<std::vector<size_type>> _row_groups; // Number of rows to skip from the start; Parquet stores the number of rows as int64_t int64_t _skip_rows = 0; // Number of rows to read; `nullopt` is all std::optional<size_type> _num_rows; // Predicate filter as AST to filter output rows. std::optional<std::reference_wrapper<ast::expression const>> _filter; // Whether to store string data as categorical type bool _convert_strings_to_categories = false; // Whether to use PANDAS metadata to load columns bool _use_pandas_metadata = true; // Cast timestamp columns to a specific type data_type _timestamp_type{type_id::EMPTY}; std::optional<std::vector<reader_column_schema>> _reader_column_schema; /** * @brief Constructor from source info. * * @param src source information used to read parquet file */ explicit parquet_reader_options(source_info src) : _source{std::move(src)} {} friend parquet_reader_options_builder; public: /** * @brief Default constructor. * * This has been added since Cython requires a default constructor to create objects on stack. */ explicit parquet_reader_options() = default; /** * @brief Creates a parquet_reader_options_builder which will build parquet_reader_options. * * @param src Source information to read parquet file * @return Builder to build reader options */ static parquet_reader_options_builder builder(source_info src); /** * @brief Returns source info. * * @return Source info */ [[nodiscard]] source_info const& get_source() const { return _source; } /** * @brief Returns true/false depending on whether strings should be converted to categories or * not. 
* * @return `true` if strings should be converted to categories */ [[nodiscard]] bool is_enabled_convert_strings_to_categories() const { return _convert_strings_to_categories; } /** * @brief Returns true/false depending whether to use pandas metadata or not while reading. * * @return `true` if pandas metadata is used while reading */ [[nodiscard]] bool is_enabled_use_pandas_metadata() const { return _use_pandas_metadata; } /** * @brief Returns optional tree of metadata. * * @return vector of reader_column_schema objects. */ [[nodiscard]] std::optional<std::vector<reader_column_schema>> get_column_schema() const { return _reader_column_schema; } /** * @brief Returns number of rows to skip from the start. * * @return Number of rows to skip from the start */ [[nodiscard]] int64_t get_skip_rows() const { return _skip_rows; } /** * @brief Returns number of rows to read. * * @return Number of rows to read; `nullopt` if the option hasn't been set (in which case the file * is read until the end) */ [[nodiscard]] std::optional<size_type> const& get_num_rows() const { return _num_rows; } /** * @brief Returns names of column to be read, if set. * * @return Names of column to be read; `nullopt` if the option is not set */ [[nodiscard]] auto const& get_columns() const { return _columns; } /** * @brief Returns list of individual row groups to be read. * * @return List of individual row groups to be read */ [[nodiscard]] auto const& get_row_groups() const { return _row_groups; } /** * @brief Returns AST based filter for predicate pushdown. * * @return AST expression to use as filter */ [[nodiscard]] auto const& get_filter() const { return _filter; } /** * @brief Returns timestamp type used to cast timestamp columns. * * @return Timestamp type used to cast timestamp columns */ data_type get_timestamp_type() const { return _timestamp_type; } /** * @brief Sets names of the columns to be read. * * @param col_names Vector of column names */ void set_columns(std::vector<std::string> col_names) { _columns = std::move(col_names); } /** * @brief Sets vector of individual row groups to read. * * @param row_groups Vector of row groups to read */ void set_row_groups(std::vector<std::vector<size_type>> row_groups); /** * @brief Sets AST based filter for predicate pushdown. * * @param filter AST expression to use as filter */ void set_filter(ast::expression const& filter) { _filter = filter; } /** * @brief Sets to enable/disable conversion of strings to categories. * * @param val Boolean value to enable/disable conversion of string columns to categories */ void enable_convert_strings_to_categories(bool val) { _convert_strings_to_categories = val; } /** * @brief Sets to enable/disable use of pandas metadata to read. * * @param val Boolean value whether to use pandas metadata */ void enable_use_pandas_metadata(bool val) { _use_pandas_metadata = val; } /** * @brief Sets reader column schema. * * @param val Tree of schema nodes to enable/disable conversion of binary to string columns. * Note default is to convert to string columns. */ void set_column_schema(std::vector<reader_column_schema> val) { _reader_column_schema = std::move(val); } /** * @brief Sets number of rows to skip. * * @param val Number of rows to skip from start */ void set_skip_rows(int64_t val); /** * @brief Sets number of rows to read. * * @param val Number of rows to read after skip */ void set_num_rows(size_type val); /** * @brief Sets timestamp_type used to cast timestamp columns. 
 * * @param type The timestamp data_type to which all timestamp columns need to be cast */ void set_timestamp_type(data_type type) { _timestamp_type = type; } }; /** * @brief Builds parquet_reader_options to use for `read_parquet()`. */ class parquet_reader_options_builder { parquet_reader_options options; public: /** * @brief Default constructor. * * This has been added since Cython requires a default constructor to create objects on stack. */ parquet_reader_options_builder() = default; /** * @brief Constructor from source info. * * @param src The source information used to read parquet file */ explicit parquet_reader_options_builder(source_info src) : options{std::move(src)} {} /** * @brief Sets names of the columns to be read. * * @param col_names Vector of column names * @return this for chaining */ parquet_reader_options_builder& columns(std::vector<std::string> col_names) { options._columns = std::move(col_names); return *this; } /** * @brief Sets vector of individual row groups to read. * * @param row_groups Vector of row groups to read * @return this for chaining */ parquet_reader_options_builder& row_groups(std::vector<std::vector<size_type>> row_groups) { options.set_row_groups(std::move(row_groups)); return *this; } /** * @brief Sets AST based filter for predicate pushdown. * * @param filter AST expression to use as filter * @return this for chaining */ parquet_reader_options_builder& filter(ast::expression const& filter) { options.set_filter(filter); return *this; } /** * @brief Sets to enable/disable conversion of strings to categories. * * @param val Boolean value to enable/disable conversion of string columns to categories * @return this for chaining */ parquet_reader_options_builder& convert_strings_to_categories(bool val) { options._convert_strings_to_categories = val; return *this; } /** * @brief Sets to enable/disable use of pandas metadata to read. * * @param val Boolean value whether to use pandas metadata * @return this for chaining */ parquet_reader_options_builder& use_pandas_metadata(bool val) { options._use_pandas_metadata = val; return *this; } /** * @brief Sets reader column schema. * * @param val Tree of schema nodes to enable/disable conversion of binary to string columns * @return this for chaining */ parquet_reader_options_builder& set_column_schema(std::vector<reader_column_schema> val) { options._reader_column_schema = std::move(val); return *this; } /** * @brief Sets number of rows to skip. * * @param val Number of rows to skip from start * @return this for chaining */ parquet_reader_options_builder& skip_rows(int64_t val) { options.set_skip_rows(val); return *this; } /** * @brief Sets number of rows to read. * * @param val Number of rows to read after skip * @return this for chaining */ parquet_reader_options_builder& num_rows(size_type val) { options.set_num_rows(val); return *this; } /** * @brief Sets timestamp_type used to cast timestamp columns. * * @param type The timestamp data_type to which all timestamp columns need to be cast * @return this for chaining */ parquet_reader_options_builder& timestamp_type(data_type type) { options._timestamp_type = type; return *this; } /** * @brief move parquet_reader_options member once it's built. */ operator parquet_reader_options&&() { return std::move(options); } /** * @brief move parquet_reader_options member once it's built. * * This has been added since Cython does not support overloading of conversion operators. 
* * @return Built `parquet_reader_options` object's r-value reference */ parquet_reader_options&& build() { return std::move(options); } }; /** * @brief Reads a Parquet dataset into a set of columns. * * The following code snippet demonstrates how to read a dataset from a file: * @code * auto source = cudf::io::source_info("dataset.parquet"); * auto options = cudf::io::parquet_reader_options::builder(source); * auto result = cudf::io::read_parquet(options); * @endcode * * @param options Settings for controlling reading behavior * @param mr Device memory resource used to allocate device memory of the table in the returned * table_with_metadata * * @return The set of columns along with metadata */ table_with_metadata read_parquet( parquet_reader_options const& options, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief The chunked parquet reader class to read a Parquet file iteratively into a series of * tables, chunk by chunk. * * This class is designed to address the problem of reading very large Parquet files whose columns * are larger than the size limit of a single cudf column. By reading the file content in chunks * using this class, each chunk is guaranteed to have its size stay within the given limit. */ class chunked_parquet_reader { public: /** * @brief Default constructor; this should never be used. * * This is added just to satisfy cython. */ chunked_parquet_reader() = default; /** * @brief Constructor for chunked reader. * * This constructor requires the same `parquet_reader_options` parameter as in * `cudf::read_parquet()`, and an additional parameter to specify the byte size limit of the * output table for each read. * * @param chunk_read_limit Limit on total number of bytes to be returned per read, * or `0` if there is no limit * @param options The options used to read the Parquet file * @param mr Device memory resource to use for device memory allocation */ chunked_parquet_reader( std::size_t chunk_read_limit, parquet_reader_options const& options, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Constructor for chunked reader. * * This constructor requires the same `parquet_reader_options` parameter as in * `cudf::read_parquet()`, with additional parameters to specify the byte size limit of the * output table for each read, and a byte limit on the amount of temporary memory to use * when reading. pass_read_limit affects how many row groups we can read at a time by limiting * the amount of memory dedicated to decompression space. pass_read_limit is a hint, not an * absolute limit - if a single row group cannot fit within the limit given, it will still be * loaded. * * @param chunk_read_limit Limit on total number of bytes to be returned per read, * or `0` if there is no limit * @param pass_read_limit Limit on the amount of memory used for reading and decompressing data, or * `0` if there is no limit * @param options The options used to read the Parquet file * @param mr Device memory resource to use for device memory allocation */ chunked_parquet_reader( std::size_t chunk_read_limit, std::size_t pass_read_limit, parquet_reader_options const& options, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Destructor, destroying the internal reader instance.
* * Since the declaration of the internal `reader` object does not exist in this header, this * destructor needs to be defined in a separate source file which can access that object's * declaration. */ ~chunked_parquet_reader(); /** * @brief Check if there is any data in the given file that has not yet been read. * * @return A boolean value indicating if there is any data left to read */ [[nodiscard]] bool has_next() const; /** * @brief Read a chunk of rows in the given Parquet file. * * The sequence of returned tables, if concatenated in order, is guaranteed to form a complete * dataset equivalent to reading the entire given file at once. * * An empty table will be returned if the given file is empty, or if all the data in the file has * already been read and returned by previous calls. * * @return An output `cudf::table` along with its metadata */ [[nodiscard]] table_with_metadata read_chunk() const; private: std::unique_ptr<cudf::io::parquet::detail::chunked_reader> reader; }; /** @} */ // end of group /** * @addtogroup io_writers * @{ * @file */ class parquet_writer_options_builder; /** * @brief Settings for `write_parquet()`. */ class parquet_writer_options { // Specify the sink to use for writer output sink_info _sink; // Specify the compression format to use compression_type _compression = compression_type::SNAPPY; // Specify the level of statistics in the output file statistics_freq _stats_level = statistics_freq::STATISTICS_ROWGROUP; // Set of columns to output table_view _table; // Partitions described as {start_row, num_rows} pairs std::vector<partition_info> _partitions; // Optional associated metadata std::optional<table_input_metadata> _metadata; // Optional footer key_value_metadata std::vector<std::map<std::string, std::string>> _user_data; // Parquet writer can write INT96 or TIMESTAMP_MICROS. Defaults to TIMESTAMP_MICROS. // If true then overrides any per-column setting in _metadata. bool _write_timestamps_as_int96 = false; // Parquet writer can write timestamps as UTC // Defaults to true because libcudf timestamps are implicitly UTC bool _write_timestamps_as_UTC = true; // Column chunks file paths to be set in the raw output metadata. One per output file std::vector<std::string> _column_chunks_file_paths; // Maximum size of each row group (unless smaller than a single page) size_t _row_group_size_bytes = default_row_group_size_bytes; // Maximum number of rows in row group (unless smaller than a single page) size_type _row_group_size_rows = default_row_group_size_rows; // Maximum size of each page (uncompressed) size_t _max_page_size_bytes = default_max_page_size_bytes; // Maximum number of rows in a page size_type _max_page_size_rows = default_max_page_size_rows; // Maximum size of min or max values in column index int32_t _column_index_truncate_length = default_column_index_truncate_length; // When to use dictionary encoding for data dictionary_policy _dictionary_policy = dictionary_policy::ALWAYS; // Maximum size of column chunk dictionary (in bytes) size_t _max_dictionary_size = default_max_dictionary_size; // Maximum number of rows in a page fragment std::optional<size_type> _max_page_fragment_size; // Optional compression statistics std::shared_ptr<writer_compression_statistics> _compression_stats; // write V2 page headers? bool _v2_page_headers = false; /** * @brief Constructor from sink and table.
* * @param sink The sink used for writer output * @param table Table to be written to output */ explicit parquet_writer_options(sink_info const& sink, table_view const& table) : _sink(sink), _table(table) { } friend parquet_writer_options_builder; public: /** * @brief Default constructor. * * This has been added since Cython requires a default constructor to create objects on stack. */ parquet_writer_options() = default; /** * @brief Create builder to create `parquet_writer_options`. * * @param sink The sink used for writer output * @param table Table to be written to output * * @return Builder to build parquet_writer_options */ static parquet_writer_options_builder builder(sink_info const& sink, table_view const& table); /** * @brief Create builder to create `parquet_writer_options`. * * @return parquet_writer_options_builder */ static parquet_writer_options_builder builder(); /** * @brief Returns sink info. * * @return Sink info */ [[nodiscard]] sink_info const& get_sink() const { return _sink; } /** * @brief Returns compression format used. * * @return Compression format */ [[nodiscard]] compression_type get_compression() const { return _compression; } /** * @brief Returns level of statistics requested in output file. * * @return level of statistics requested in output file */ [[nodiscard]] statistics_freq get_stats_level() const { return _stats_level; } /** * @brief Returns table_view. * * @return Table view */ [[nodiscard]] table_view get_table() const { return _table; } /** * @brief Returns partitions. * * @return Partitions */ [[nodiscard]] std::vector<partition_info> const& get_partitions() const { return _partitions; } /** * @brief Returns associated metadata. * * @return Associated metadata */ [[nodiscard]] auto const& get_metadata() const { return _metadata; } /** * @brief Returns Key-Value footer metadata information. * * @return Key-Value footer metadata information */ std::vector<std::map<std::string, std::string>> const& get_key_value_metadata() const { return _user_data; } /** * @brief Returns `true` if timestamps will be written as INT96 * * @return `true` if timestamps will be written as INT96 */ bool is_enabled_int96_timestamps() const { return _write_timestamps_as_int96; } /** * @brief Returns `true` if timestamps will be written as UTC * * @return `true` if timestamps will be written as UTC */ [[nodiscard]] auto is_enabled_utc_timestamps() const { return _write_timestamps_as_UTC; } /** * @brief Returns Column chunks file paths to be set in the raw output metadata. * * @return Column chunks file paths to be set in the raw output metadata */ std::vector<std::string> const& get_column_chunks_file_paths() const { return _column_chunks_file_paths; } /** * @brief Returns maximum row group size, in bytes. * * @return Maximum row group size, in bytes */ auto get_row_group_size_bytes() const { return _row_group_size_bytes; } /** * @brief Returns maximum row group size, in rows. * * @return Maximum row group size, in rows */ auto get_row_group_size_rows() const { return _row_group_size_rows; } /** * @brief Returns the maximum uncompressed page size, in bytes. * * If set larger than the row group size, then this will return the row group size. * * @return Maximum uncompressed page size, in bytes */ auto get_max_page_size_bytes() const { return std::min(_max_page_size_bytes, get_row_group_size_bytes()); } /** * @brief Returns maximum page size, in rows. * * If set larger than the row group size, then this will return the row group size. 
* * @return Maximum page size, in rows */ auto get_max_page_size_rows() const { return std::min(_max_page_size_rows, get_row_group_size_rows()); } /** * @brief Returns maximum length of min or max values in column index, in bytes. * * @return length min/max will be truncated to */ auto get_column_index_truncate_length() const { return _column_index_truncate_length; } /** * @brief Returns policy for dictionary use. * * @return policy for dictionary use */ [[nodiscard]] dictionary_policy get_dictionary_policy() const { return _dictionary_policy; } /** * @brief Returns maximum dictionary size, in bytes. * * @return Maximum dictionary size, in bytes. */ [[nodiscard]] auto get_max_dictionary_size() const { return _max_dictionary_size; } /** * @brief Returns maximum page fragment size, in rows. * * @return Maximum page fragment size, in rows. */ [[nodiscard]] auto get_max_page_fragment_size() const { return _max_page_fragment_size; } /** * @brief Returns a shared pointer to the user-provided compression statistics. * * @return Compression statistics */ [[nodiscard]] std::shared_ptr<writer_compression_statistics> get_compression_statistics() const { return _compression_stats; } /** * @brief Returns `true` if V2 page headers should be written. * * @return `true` if V2 page headers should be written. */ [[nodiscard]] auto is_enabled_write_v2_headers() const { return _v2_page_headers; } /** * @brief Sets partitions. * * @param partitions Partitions of input table in {start_row, num_rows} pairs. If specified, must * be same size as number of sinks in sink_info */ void set_partitions(std::vector<partition_info> partitions); /** * @brief Sets metadata. * * @param metadata Associated metadata */ void set_metadata(table_input_metadata metadata) { _metadata = std::move(metadata); } /** * @brief Sets metadata. * * @param metadata Key-Value footer metadata */ void set_key_value_metadata(std::vector<std::map<std::string, std::string>> metadata); /** * @brief Sets the level of statistics. * * @param sf Level of statistics requested in the output file */ void set_stats_level(statistics_freq sf) { _stats_level = sf; } /** * @brief Sets compression type. * * @param compression The compression type to use */ void set_compression(compression_type compression) { _compression = compression; } /** * @brief Sets timestamp writing preferences. INT96 timestamps will be written * if `true` and TIMESTAMP_MICROS will be written if `false`. * * @param req Boolean value to enable/disable writing of INT96 timestamps */ void enable_int96_timestamps(bool req) { _write_timestamps_as_int96 = req; } /** * @brief Sets preference for writing timestamps as UTC. Write timestamps as UTC if set to `true`. * * @param val Boolean value to enable/disable writing of timestamps as UTC. */ void enable_utc_timestamps(bool val) { _write_timestamps_as_UTC = val; } /** * @brief Sets column chunks file path to be set in the raw output metadata. * * @param file_paths Vector of Strings which indicates file path. Must be same size as number of * data sinks in sink info */ void set_column_chunks_file_paths(std::vector<std::string> file_paths); /** * @brief Sets the maximum row group size, in bytes. * * @param size_bytes Maximum row group size, in bytes to set */ void set_row_group_size_bytes(size_t size_bytes); /** * @brief Sets the maximum row group size, in rows. * * @param size_rows Maximum row group size, in rows to set */ void set_row_group_size_rows(size_type size_rows); /** * @brief Sets the maximum uncompressed page size, in bytes. 
* * @param size_bytes Maximum uncompressed page size, in bytes to set */ void set_max_page_size_bytes(size_t size_bytes); /** * @brief Sets the maximum page size, in rows. * * @param size_rows Maximum page size, in rows to set */ void set_max_page_size_rows(size_type size_rows); /** * @brief Sets the maximum length of min or max values in column index, in bytes. * * @param size_bytes length min/max will be truncated to */ void set_column_index_truncate_length(int32_t size_bytes); /** * @brief Sets the policy for dictionary use. * * @param policy Policy for dictionary use */ void set_dictionary_policy(dictionary_policy policy); /** * @brief Sets the maximum dictionary size, in bytes. * * @param size_bytes Maximum dictionary size, in bytes */ void set_max_dictionary_size(size_t size_bytes); /** * @brief Sets the maximum page fragment size, in rows. * * @param size_rows Maximum page fragment size, in rows. */ void set_max_page_fragment_size(size_type size_rows); /** * @brief Sets the pointer to the output compression statistics. * * @param comp_stats Pointer to compression statistics to be updated after writing */ void set_compression_statistics(std::shared_ptr<writer_compression_statistics> comp_stats) { _compression_stats = std::move(comp_stats); } /** * @brief Sets preference for V2 page headers. Write V2 page headers if set to `true`. * * @param val Boolean value to enable/disable writing of V2 page headers. */ void enable_write_v2_headers(bool val) { _v2_page_headers = val; } }; /** * @brief Class to build `parquet_writer_options`. */ class parquet_writer_options_builder { parquet_writer_options options; public: /** * @brief Default constructor. * * This has been added since Cython requires a default constructor to create objects on stack. */ explicit parquet_writer_options_builder() = default; /** * @brief Constructor from sink and table. * * @param sink The sink used for writer output * @param table Table to be written to output */ explicit parquet_writer_options_builder(sink_info const& sink, table_view const& table) : options(sink, table) { } /** * @brief Sets partitions in parquet_writer_options. * * @param partitions Partitions of input table in {start_row, num_rows} pairs. If specified, must * be same size as number of sinks in sink_info * @return this for chaining */ parquet_writer_options_builder& partitions(std::vector<partition_info> partitions); /** * @brief Sets metadata in parquet_writer_options. * * @param metadata Associated metadata * @return this for chaining */ parquet_writer_options_builder& metadata(table_input_metadata metadata) { options._metadata = std::move(metadata); return *this; } /** * @brief Sets Key-Value footer metadata in parquet_writer_options. * * @param metadata Key-Value footer metadata * @return this for chaining */ parquet_writer_options_builder& key_value_metadata( std::vector<std::map<std::string, std::string>> metadata); /** * @brief Sets the level of statistics in parquet_writer_options. * * @param sf Level of statistics requested in the output file * @return this for chaining */ parquet_writer_options_builder& stats_level(statistics_freq sf) { options._stats_level = sf; return *this; } /** * @brief Sets compression type in parquet_writer_options. * * @param compression The compression type to use * @return this for chaining */ parquet_writer_options_builder& compression(compression_type compression) { options._compression = compression; return *this; } /** * @brief Sets column chunks file path to be set in the raw output metadata. 
* * @param file_paths Vector of Strings which indicates file path. Must be same size as number of * data sinks * @return this for chaining */ parquet_writer_options_builder& column_chunks_file_paths(std::vector<std::string> file_paths); /** * @brief Sets the maximum row group size, in bytes. * * @param val maximum row group size * @return this for chaining */ parquet_writer_options_builder& row_group_size_bytes(size_t val) { options.set_row_group_size_bytes(val); return *this; } /** * @brief Sets the maximum number of rows in output row groups. * * @param val maximum number or rows * @return this for chaining */ parquet_writer_options_builder& row_group_size_rows(size_type val) { options.set_row_group_size_rows(val); return *this; } /** * @brief Sets the maximum uncompressed page size, in bytes. * * Serves as a hint to the writer, and can be exceeded under certain circumstances. * Cannot be larger than the row group size in bytes, and will be adjusted to * match if it is. * * @param val maximum page size * @return this for chaining */ parquet_writer_options_builder& max_page_size_bytes(size_t val) { options.set_max_page_size_bytes(val); return *this; } /** * @brief Sets the maximum page size, in rows. Counts only top-level rows, ignoring any nesting. * Cannot be larger than the row group size in rows, and will be adjusted to match if it is. * * @param val maximum rows per page * @return this for chaining */ parquet_writer_options_builder& max_page_size_rows(size_type val) { options.set_max_page_size_rows(val); return *this; } /** * @brief Sets the desired maximum size in bytes for min and max values in the column index. * * Values exceeding this limit will be truncated, but modified such that they will still * be valid lower and upper bounds. This only applies to variable length types, such as string. * Maximum values will not be truncated if there is no suitable truncation that results in * a valid upper bound. * * Default value is 64. * * @param val length min/max will be truncated to, with 0 indicating no truncation * @return this for chaining */ parquet_writer_options_builder& column_index_truncate_length(int32_t val) { options.set_column_index_truncate_length(val); return *this; } /** * @brief Sets the policy for dictionary use. * * Certain compression algorithms (e.g Zstandard) have limits on how large of a buffer can * be compressed. In some circumstances, the dictionary can grow beyond this limit, which * will prevent the column from being compressed. This setting controls how the writer * should act in these circumstances. A setting of dictionary_policy::ADAPTIVE will disable * dictionary encoding for columns where the dictionary exceeds the limit. A setting of * dictionary_policy::NEVER will disable the use of dictionary encoding globally. A setting of * dictionary_policy::ALWAYS will allow the use of dictionary encoding even if it will result in * the disabling of compression for columns that would otherwise be compressed. * * The default value is dictionary_policy::ALWAYS. * * @param val policy for dictionary use * @return this for chaining */ parquet_writer_options_builder& dictionary_policy(enum dictionary_policy val); /** * @brief Sets the maximum dictionary size, in bytes. * * Disables dictionary encoding for any column chunk where the dictionary will * exceed this limit. Only used when the dictionary_policy is set to 'ADAPTIVE'. * * Default value is 1048576 (1MiB). 
* * @param val maximum dictionary size * @return this for chaining */ parquet_writer_options_builder& max_dictionary_size(size_t val); /** * @brief Sets the maximum page fragment size, in rows. * * Files with nested schemas or very long strings may need a page fragment size * smaller than the default value of 5000 to ensure a single fragment will not * exceed the desired maximum page size in bytes. * * @param val maximum page fragment size * @return this for chaining */ parquet_writer_options_builder& max_page_fragment_size(size_type val); /** * @brief Sets the pointer to the output compression statistics. * * @param comp_stats Pointer to compression statistics to be filled once writer is done * @return this for chaining */ parquet_writer_options_builder& compression_statistics( std::shared_ptr<writer_compression_statistics> const& comp_stats) { options._compression_stats = comp_stats; return *this; } /** * @brief Sets whether int96 timestamps are written or not in parquet_writer_options. * * @param enabled Boolean value to enable/disable int96 timestamps * @return this for chaining */ parquet_writer_options_builder& int96_timestamps(bool enabled) { options._write_timestamps_as_int96 = enabled; return *this; } /** * @brief Set to true if timestamps are to be written as UTC. * * @param enabled Boolean value to enable/disable writing of timestamps as UTC. * @return this for chaining */ parquet_writer_options_builder& utc_timestamps(bool enabled) { options._write_timestamps_as_UTC = enabled; return *this; } /** * @brief Set to true if V2 page headers are to be written. * * @param enabled Boolean value to enable/disable writing of V2 page headers. * @return this for chaining */ parquet_writer_options_builder& write_v2_headers(bool enabled); /** * @brief move parquet_writer_options member once it's built. */ operator parquet_writer_options&&() { return std::move(options); } /** * @brief move parquet_writer_options member once it's built. * * This has been added since Cython does not support overloading of conversion operators. * * @return Built `parquet_writer_options` object's r-value reference */ parquet_writer_options&& build() { return std::move(options); } }; /** * @brief Writes a set of columns to parquet format. * * The following code snippet demonstrates how to write columns to a file: * @code * auto destination = cudf::io::sink_info("dataset.parquet"); * auto options = cudf::io::parquet_writer_options::builder(destination, table->view()); * cudf::io::write_parquet(options); * @endcode * * @param options Settings for controlling writing behavior * @return A blob that contains the file metadata (parquet FileMetadata thrift message) if * requested in parquet_writer_options (empty blob otherwise). */ std::unique_ptr<std::vector<uint8_t>> write_parquet(parquet_writer_options const& options); /** * @brief Merges multiple raw metadata blobs that were previously created by write_parquet * into a single metadata blob. * * @ingroup io_writers * * @param[in] metadata_list List of input file metadata * @return A parquet-compatible blob that contains the data for all row groups in the list */ std::unique_ptr<std::vector<uint8_t>> merge_row_group_metadata( std::vector<std::unique_ptr<std::vector<uint8_t>>> const& metadata_list); class chunked_parquet_writer_options_builder; /** * @brief Settings for `write_parquet_chunked()`. 
*/ class chunked_parquet_writer_options { // Specify the sink to use for writer output sink_info _sink; // Specify the compression format to use compression_type _compression = compression_type::AUTO; // Specify the level of statistics in the output file statistics_freq _stats_level = statistics_freq::STATISTICS_ROWGROUP; // Optional associated metadata. std::optional<table_input_metadata> _metadata; // Optional footer key_value_metadata std::vector<std::map<std::string, std::string>> _user_data; // Parquet writer can write INT96 or TIMESTAMP_MICROS. Defaults to TIMESTAMP_MICROS. // If true then overrides any per-column setting in _metadata. bool _write_timestamps_as_int96 = false; // Parquet writer can write timestamps as UTC. Defaults to true. bool _write_timestamps_as_UTC = true; // Maximum size of each row group (unless smaller than a single page) size_t _row_group_size_bytes = default_row_group_size_bytes; // Maximum number of rows in row group (unless smaller than a single page) size_type _row_group_size_rows = default_row_group_size_rows; // Maximum size of each page (uncompressed) size_t _max_page_size_bytes = default_max_page_size_bytes; // Maximum number of rows in a page size_type _max_page_size_rows = default_max_page_size_rows; // Maximum size of min or max values in column index int32_t _column_index_truncate_length = default_column_index_truncate_length; // When to use dictionary encoding for data dictionary_policy _dictionary_policy = dictionary_policy::ALWAYS; // Maximum size of column chunk dictionary (in bytes) size_t _max_dictionary_size = default_max_dictionary_size; // Maximum number of rows in a page fragment std::optional<size_type> _max_page_fragment_size; // Optional compression statistics std::shared_ptr<writer_compression_statistics> _compression_stats; // write V2 page headers? bool _v2_page_headers = false; /** * @brief Constructor from sink. * * @param sink Sink used for writer output */ explicit chunked_parquet_writer_options(sink_info const& sink) : _sink(sink) {} friend chunked_parquet_writer_options_builder; public: /** * @brief Default constructor. * * This has been added since Cython requires a default constructor to create objects on stack. */ chunked_parquet_writer_options() = default; /** * @brief Returns sink info. * * @return Sink info */ [[nodiscard]] sink_info const& get_sink() const { return _sink; } /** * @brief Returns compression format used. * * @return Compression format */ [[nodiscard]] compression_type get_compression() const { return _compression; } /** * @brief Returns level of statistics requested in output file. * * @return Level of statistics requested in output file */ [[nodiscard]] statistics_freq get_stats_level() const { return _stats_level; } /** * @brief Returns metadata information. * * @return Metadata information */ [[nodiscard]] auto const& get_metadata() const { return _metadata; } /** * @brief Returns Key-Value footer metadata information. 
* * @return Key-Value footer metadata information */ std::vector<std::map<std::string, std::string>> const& get_key_value_metadata() const { return _user_data; } /** * @brief Returns `true` if timestamps will be written as INT96 * * @return `true` if timestamps will be written as INT96 */ bool is_enabled_int96_timestamps() const { return _write_timestamps_as_int96; } /** * @brief Returns `true` if timestamps will be written as UTC * * @return `true` if timestamps will be written as UTC */ [[nodiscard]] auto is_enabled_utc_timestamps() const { return _write_timestamps_as_UTC; } /** * @brief Returns maximum row group size, in bytes. * * @return Maximum row group size, in bytes */ auto get_row_group_size_bytes() const { return _row_group_size_bytes; } /** * @brief Returns maximum row group size, in rows. * * @return Maximum row group size, in rows */ auto get_row_group_size_rows() const { return _row_group_size_rows; } /** * @brief Returns maximum uncompressed page size, in bytes. * * If set larger than the row group size, then this will return the * row group size. * * @return Maximum uncompressed page size, in bytes */ auto get_max_page_size_bytes() const { return std::min(_max_page_size_bytes, get_row_group_size_bytes()); } /** * @brief Returns maximum page size, in rows. * * If set larger than the row group size, then this will return the row group size. * * @return Maximum page size, in rows */ auto get_max_page_size_rows() const { return std::min(_max_page_size_rows, get_row_group_size_rows()); } /** * @brief Returns maximum length of min or max values in column index, in bytes. * * @return length min/max will be truncated to */ auto get_column_index_truncate_length() const { return _column_index_truncate_length; } /** * @brief Returns policy for dictionary use. * * @return policy for dictionary use */ [[nodiscard]] dictionary_policy get_dictionary_policy() const { return _dictionary_policy; } /** * @brief Returns maximum dictionary size, in bytes. * * @return Maximum dictionary size, in bytes. */ [[nodiscard]] auto get_max_dictionary_size() const { return _max_dictionary_size; } /** * @brief Returns maximum page fragment size, in rows. * * @return Maximum page fragment size, in rows. */ [[nodiscard]] auto get_max_page_fragment_size() const { return _max_page_fragment_size; } /** * @brief Returns a shared pointer to the user-provided compression statistics. * * @return Compression statistics */ [[nodiscard]] std::shared_ptr<writer_compression_statistics> get_compression_statistics() const { return _compression_stats; } /** * @brief Returns `true` if V2 page headers should be written. * * @return `true` if V2 page headers should be written. */ [[nodiscard]] auto is_enabled_write_v2_headers() const { return _v2_page_headers; } /** * @brief Sets metadata. * * @param metadata Associated metadata */ void set_metadata(table_input_metadata metadata) { _metadata = std::move(metadata); } /** * @brief Sets Key-Value footer metadata. * * @param metadata Key-Value footer metadata */ void set_key_value_metadata(std::vector<std::map<std::string, std::string>> metadata); /** * @brief Sets the level of statistics in parquet_writer_options. * * @param sf Level of statistics requested in the output file */ void set_stats_level(statistics_freq sf) { _stats_level = sf; } /** * @brief Sets compression type. * * @param compression The compression type to use */ void set_compression(compression_type compression) { _compression = compression; } /** * @brief Sets timestamp writing preferences. 
* * INT96 timestamps will be written if `true` and TIMESTAMP_MICROS will be written if `false`. * * @param req Boolean value to enable/disable writing of INT96 timestamps */ void enable_int96_timestamps(bool req) { _write_timestamps_as_int96 = req; } /** * @brief Sets preference for writing timestamps as UTC. Write timestamps as UTC if set to `true`. * * @param val Boolean value to enable/disable writing of timestamps as UTC. */ void enable_utc_timestamps(bool val) { _write_timestamps_as_UTC = val; } /** * @brief Sets the maximum row group size, in bytes. * * @param size_bytes Maximum row group size, in bytes to set */ void set_row_group_size_bytes(size_t size_bytes); /** * @brief Sets the maximum row group size, in rows. * * @param size_rows The maximum row group size, in rows to set */ void set_row_group_size_rows(size_type size_rows); /** * @brief Sets the maximum uncompressed page size, in bytes. * * @param size_bytes Maximum uncompressed page size, in bytes to set */ void set_max_page_size_bytes(size_t size_bytes); /** * @brief Sets the maximum page size, in rows. * * @param size_rows The maximum page size, in rows to set */ void set_max_page_size_rows(size_type size_rows); /** * @brief Sets the maximum length of min or max values in column index, in bytes. * * @param size_bytes length min/max will be truncated to */ void set_column_index_truncate_length(int32_t size_bytes); /** * @brief Sets the policy for dictionary use. * * @param policy Policy for dictionary use */ void set_dictionary_policy(dictionary_policy policy); /** * @brief Sets the maximum dictionary size, in bytes. * * @param size_bytes Maximum dictionary size, in bytes */ void set_max_dictionary_size(size_t size_bytes); /** * @brief Sets the maximum page fragment size, in rows. * * @param size_rows Maximum page fragment size, in rows. */ void set_max_page_fragment_size(size_type size_rows); /** * @brief Sets the pointer to the output compression statistics. * * @param comp_stats Pointer to compression statistics to be updated after writing */ void set_compression_statistics(std::shared_ptr<writer_compression_statistics> comp_stats) { _compression_stats = std::move(comp_stats); } /** * @brief Sets preference for V2 page headers. Write V2 page headers if set to `true`. * * @param val Boolean value to enable/disable writing of V2 page headers. */ void enable_write_v2_headers(bool val) { _v2_page_headers = val; } /** * @brief creates builder to build chunked_parquet_writer_options. * * @param sink sink to use for writer output * * @return Builder to build `chunked_parquet_writer_options` */ static chunked_parquet_writer_options_builder builder(sink_info const& sink); }; /** * @brief Builds options for chunked_parquet_writer_options. */ class chunked_parquet_writer_options_builder { chunked_parquet_writer_options options; public: /** * @brief Default constructor. * * This has been added since Cython requires a default constructor to create objects on stack. */ chunked_parquet_writer_options_builder() = default; /** * @brief Constructor from sink. * * @param sink The sink used for writer output */ chunked_parquet_writer_options_builder(sink_info const& sink) : options(sink){}; /** * @brief Sets metadata to chunked_parquet_writer_options. * * @param metadata Associated metadata * @return this for chaining */ chunked_parquet_writer_options_builder& metadata(table_input_metadata metadata) { options._metadata = std::move(metadata); return *this; } /** * @brief Sets Key-Value footer metadata in parquet_writer_options. 
* * @param metadata Key-Value footer metadata * @return this for chaining */ chunked_parquet_writer_options_builder& key_value_metadata( std::vector<std::map<std::string, std::string>> metadata); /** * @brief Sets the level of statistics in chunked_parquet_writer_options. * * @param sf Level of statistics requested in the output file * @return this for chaining */ chunked_parquet_writer_options_builder& stats_level(statistics_freq sf) { options._stats_level = sf; return *this; } /** * @brief Sets compression type to chunked_parquet_writer_options. * * @param compression The compression type to use * @return this for chaining */ chunked_parquet_writer_options_builder& compression(compression_type compression) { options._compression = compression; return *this; } /** * @brief Set to true if timestamps should be written as * int96 types instead of int64 types. Even though int96 is deprecated and is * not an internal type for cudf, it needs to be written for backwards * compatibility reasons. * * @param enabled Boolean value to enable/disable int96 timestamps * @return this for chaining */ chunked_parquet_writer_options_builder& int96_timestamps(bool enabled) { options._write_timestamps_as_int96 = enabled; return *this; } /** * @brief Set to true if timestamps are to be written as UTC. * * @param enabled Boolean value to enable/disable writing of timestamps as UTC. * @return this for chaining */ chunked_parquet_writer_options_builder& utc_timestamps(bool enabled) { options._write_timestamps_as_UTC = enabled; return *this; } /** * @brief Set to true if V2 page headers are to be written. * * @param enabled Boolean value to enable/disable writing of V2 page headers. * @return this for chaining */ chunked_parquet_writer_options_builder& write_v2_headers(bool enabled); /** * @brief Sets the maximum row group size, in bytes. * * @param val maximum row group size * @return this for chaining */ chunked_parquet_writer_options_builder& row_group_size_bytes(size_t val) { options.set_row_group_size_bytes(val); return *this; } /** * @brief Sets the maximum number of rows in output row groups. * * @param val maximum number or rows * @return this for chaining */ chunked_parquet_writer_options_builder& row_group_size_rows(size_type val) { options.set_row_group_size_rows(val); return *this; } /** * @brief Sets the maximum uncompressed page size, in bytes. * * Serves as a hint to the writer, and can be exceeded under certain circumstances. Cannot be * larger than the row group size in bytes, and will be adjusted to match if it is. * * @param val maximum page size * @return this for chaining */ chunked_parquet_writer_options_builder& max_page_size_bytes(size_t val) { options.set_max_page_size_bytes(val); return *this; } /** * @brief Sets the maximum page size, in rows. Counts only top-level rows, ignoring any nesting. * Cannot be larger than the row group size in rows, and will be adjusted to match if it is. * * @param val maximum rows per page * @return this for chaining */ chunked_parquet_writer_options_builder& max_page_size_rows(size_type val) { options.set_max_page_size_rows(val); return *this; } /** * @brief Sets the desired maximum size in bytes for min and max values in the column index. * * Values exceeding this limit will be truncated, but modified such that they will still * be valid lower and upper bounds. This only applies to variable length types, such as string. * Maximum values will not be truncated if there is no suitable truncation that results in * a valid upper bound. * * Default value is 64. 
* * @param val length min/max will be truncated to, with 0 indicating no truncation * @return this for chaining */ chunked_parquet_writer_options_builder& column_index_truncate_length(int32_t val) { options.set_column_index_truncate_length(val); return *this; } /** * @brief Sets the policy for dictionary use. * * Certain compression algorithms (e.g. Zstandard) have limits on how large of a buffer can * be compressed. In some circumstances, the dictionary can grow beyond this limit, which * will prevent the column from being compressed. This setting controls how the writer * should act in these circumstances. A setting of dictionary_policy::ADAPTIVE will disable * dictionary encoding for columns where the dictionary exceeds the limit. A setting of * dictionary_policy::NEVER will disable the use of dictionary encoding globally. A setting of * dictionary_policy::ALWAYS will allow the use of dictionary encoding even if it will result in * the disabling of compression for columns that would otherwise be compressed. * * The default value is dictionary_policy::ALWAYS. * * @param val policy for dictionary use * @return this for chaining */ chunked_parquet_writer_options_builder& dictionary_policy(enum dictionary_policy val); /** * @brief Sets the maximum dictionary size, in bytes. * * Disables dictionary encoding for any column chunk where the dictionary will * exceed this limit. Only used when the dictionary_policy is set to 'ADAPTIVE'. * * Default value is 1048576 (1MiB). * * @param val maximum dictionary size * @return this for chaining */ chunked_parquet_writer_options_builder& max_dictionary_size(size_t val); /** * @brief Sets the maximum page fragment size, in rows. * * Files with nested schemas or very long strings may need a page fragment size * smaller than the default value of 5000 to ensure a single fragment will not * exceed the desired maximum page size in bytes. * * @param val maximum page fragment size * @return this for chaining */ chunked_parquet_writer_options_builder& max_page_fragment_size(size_type val); /** * @brief Sets the pointer to the output compression statistics. * * @param comp_stats Pointer to compression statistics to be filled once writer is done * @return this for chaining */ chunked_parquet_writer_options_builder& compression_statistics( std::shared_ptr<writer_compression_statistics> const& comp_stats) { options._compression_stats = comp_stats; return *this; } /** * @brief move chunked_parquet_writer_options member once it's built. */ operator chunked_parquet_writer_options&&() { return std::move(options); } /** * @brief move chunked_parquet_writer_options member once it's built. * * This has been added since Cython does not support overloading of conversion operators. * * @return Built `chunked_parquet_writer_options` object's r-value reference */ chunked_parquet_writer_options&& build() { return std::move(options); } }; /** * @brief Chunked parquet writer class to handle options and write tables in chunks. * * The intent of the parquet_chunked_writer is to allow writing of an * arbitrarily large / arbitrary number of rows to a parquet file in multiple passes. * * The following code snippet demonstrates how to write a single parquet file containing * one logical table by writing a series of individual cudf::tables.
* * @code * auto destination = cudf::io::sink_info("dataset.parquet"); * auto options = cudf::io::chunked_parquet_writer_options::builder(destination); * auto writer = cudf::io::parquet_chunked_writer(options); * * writer.write(table0); * writer.write(table1); * writer.close(); * @endcode */ class parquet_chunked_writer { public: /** * @brief Default constructor; this should never be used. * This is added just to satisfy cython. */ parquet_chunked_writer() = default; /** * @brief Constructor with chunked writer options * * @param[in] options Options used to write table */ parquet_chunked_writer(chunked_parquet_writer_options const& options); /** * @brief Writes table to output. * * @param[in] table Table that needs to be written * @param[in] partitions Optional partitions to divide the table into. If specified, must be same * size as number of sinks. * * @throws cudf::logic_error If the number of partitions is not the same as number of sinks * @throws rmm::bad_alloc if there is insufficient space for temporary buffers * @return Reference to this writer object */ parquet_chunked_writer& write(table_view const& table, std::vector<partition_info> const& partitions = {}); /** * @brief Finishes the chunked/streamed write process. * * @param[in] column_chunks_file_paths Column chunks file path to be set in the raw output * metadata * @return A parquet-compatible blob that contains the data for all row groups in the list only if * `column_chunks_file_paths` is provided, else null. */ std::unique_ptr<std::vector<uint8_t>> close( std::vector<std::string> const& column_chunks_file_paths = {}); /// Unique pointer to impl writer class std::unique_ptr<parquet::detail::writer> writer; }; /** @} */ // end of group } // namespace cudf::io
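// Illustrative usage sketch (not part of the original header): one way the chunked_parquet_reader
// documented above might be driven end to end. The file name "large.parquet" and the
// 512 MiB / 1 GiB limits are assumptions made only for this example.
#include <cudf/concatenate.hpp>
#include <cudf/io/parquet.hpp>
#include <cudf/table/table.hpp>

#include <memory>
#include <vector>

std::unique_ptr<cudf::table> read_parquet_in_chunks_example()
{
  auto const options =
    cudf::io::parquet_reader_options::builder(cudf::io::source_info{"large.parquet"}).build();

  // 512 MiB output chunks; 1 GiB decompression budget (a hint, not a hard limit)
  cudf::io::chunked_parquet_reader reader(512 * 1024 * 1024, 1024 * 1024 * 1024, options);

  std::vector<std::unique_ptr<cudf::table>> chunks;
  do {
    auto chunk = reader.read_chunk();  // table_with_metadata
    chunks.emplace_back(std::move(chunk.tbl));
  } while (reader.has_next());

  // Concatenating the chunks in order reproduces the result of a single read_parquet() call
  std::vector<cudf::table_view> views;
  views.reserve(chunks.size());
  for (auto const& t : chunks) { views.push_back(t->view()); }
  return cudf::concatenate(views);
}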
0
rapidsai_public_repos/cudf/cpp/include/cudf/io
rapidsai_public_repos/cudf/cpp/include/cudf/io/detail/json.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/io/json.hpp> #include <rmm/cuda_stream_view.hpp> namespace cudf::io::json::detail { /** * @brief Reads and returns the entire data set. * * @param sources Input `datasource` objects to read the dataset from * @param options Settings for controlling reading behavior * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource to use for device memory allocation * * @return cudf::table object that contains the array of cudf::column. */ table_with_metadata read_json(host_span<std::unique_ptr<datasource>> sources, json_reader_options const& options, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Write an entire dataset to JSON format. * * @param sink Output sink * @param table The set of columns * @param options Settings for controlling behavior * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device memory resource to use for device memory allocation */ void write_json(data_sink* sink, table_view const& table, json_writer_options const& options, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); } // namespace cudf::io::json::detail
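// Illustrative usage sketch (not part of the original header): how the detail-level read_json
// above might be invoked directly. The file name "data.jsonl" and the use of JSON-lines mode
// are assumptions made only for this example.
#include <cudf/io/datasource.hpp>
#include <cudf/io/json.hpp>
#include <cudf/utilities/default_stream.hpp>

#include <memory>
#include <vector>

cudf::io::table_with_metadata read_json_detail_example()
{
  std::vector<std::unique_ptr<cudf::io::datasource>> sources;
  sources.emplace_back(cudf::io::datasource::create("data.jsonl"));

  auto const options =
    cudf::io::json_reader_options::builder(cudf::io::source_info{"data.jsonl"}).lines(true).build();

  // At the detail layer the datasources and the reader options are passed separately
  return cudf::io::json::detail::read_json(
    sources, options, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
}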
0
rapidsai_public_repos/cudf/cpp/include/cudf/io
rapidsai_public_repos/cudf/cpp/include/cudf/io/detail/csv.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/io/csv.hpp> #include <rmm/cuda_stream_view.hpp> namespace cudf { namespace io { namespace detail { namespace csv { /** * @brief Reads the entire dataset. * * @param source Input `datasource` object to read the dataset from * @param options Settings for controlling reading behavior * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource to use for device memory allocation * * @return The set of columns along with table metadata */ table_with_metadata read_csv(std::unique_ptr<cudf::io::datasource>&& source, csv_reader_options const& options, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Write an entire dataset to CSV format. * * @param sink Output sink * @param table The set of columns * @param column_names Column names for the output CSV * @param options Settings for controlling behavior * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device memory resource to use for device memory allocation */ void write_csv(data_sink* sink, table_view const& table, host_span<std::string const> column_names, csv_writer_options const& options, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); } // namespace csv } // namespace detail } // namespace io } // namespace cudf
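// Illustrative usage sketch (not part of the original header): how the detail-level write_csv
// above might be called. The output path "out.csv" and the column names are assumptions; the
// number of names must match the number of columns in `tbl`.
#include <cudf/io/csv.hpp>
#include <cudf/io/data_sink.hpp>
#include <cudf/utilities/default_stream.hpp>

#include <string>
#include <vector>

void write_csv_detail_example(cudf::table_view const& tbl)
{
  auto sink = cudf::io::data_sink::create("out.csv");
  std::vector<std::string> const names{"a", "b", "c"};

  auto const options =
    cudf::io::csv_writer_options::builder(cudf::io::sink_info{"out.csv"}, tbl).build();

  cudf::io::detail::csv::write_csv(
    sink.get(), tbl, names, options, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
}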
0
rapidsai_public_repos/cudf/cpp/include/cudf/io
rapidsai_public_repos/cudf/cpp/include/cudf/io/detail/tokenize_json.hpp
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/io/json.hpp> #include <cudf/utilities/span.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> namespace cudf::io::json { /// Type used to represent the atomic symbol type used within the finite-state machine using SymbolT = char; /// Type used to represent the stack alphabet (i.e.: empty-stack, struct, list) using StackSymbolT = char; /// Type used to index into the symbols within the JSON input using SymbolOffsetT = uint32_t; /// Type large enough to support indexing up to max nesting level (must be signed) using StackLevelT = int8_t; /// Type used to represent a symbol group id of the input alphabet in the pushdown automaton using PdaInputSymbolGroupIdT = char; /// Type used to represent a symbol group id of the stack alphabet in the pushdown automaton using PdaStackSymbolGroupIdT = char; /// Type used to represent a (input-symbol, stack-symbol)-tuple in stack-symbol-major order using PdaSymbolGroupIdT = char; /// Type being emitted by the pushdown automaton transducer using PdaTokenT = char; /// Type used to represent the class of a node (or a node "category") within the tree representation using NodeT = char; /// Type used to index into the nodes within the tree of structs, lists, field names, and value /// nodes using NodeIndexT = size_type; /// Type large enough to represent tree depth from [0, max-tree-depth); may be an unsigned type using TreeDepthT = StackLevelT; constexpr NodeIndexT parent_node_sentinel = -1; /** * @brief Class of a node (or a node "category") within the tree representation */ enum node_t : NodeT { /// A node representing a struct NC_STRUCT, /// A node representing a list NC_LIST, /// A node representing a field name NC_FN, /// A node representing a string value NC_STR, /// A node representing a numeric or literal value (e.g., true, false, null) NC_VAL, /// A node representing a parser error NC_ERR, /// Total number of node classes NUM_NODE_CLASSES }; /** * @brief Tokens emitted while parsing a JSON input */ enum token_t : PdaTokenT { /// Beginning-of-struct token (on encounter of semantic '{') StructBegin, /// End-of-struct token (on encounter of semantic '}') StructEnd, /// Beginning-of-list token (on encounter of semantic '[') ListBegin, /// End-of-list token (on encounter of semantic ']') ListEnd, // Beginning-of-struct-member token StructMemberBegin, // End-of-struct-member token StructMemberEnd, /// Beginning-of-field-name token (on encounter of first quote) FieldNameBegin, /// End-of-field-name token (on encounter of a field name's second quote) FieldNameEnd, /// Beginning-of-string-value token (on encounter of the string's first quote) StringBegin, /// End-of-string token (on encounter of a string's second quote) StringEnd, /// Beginning-of-value token (first character of literal or numeric) ValueBegin, /// Post-value token (first character after a literal or numeric string) ValueEnd, /// Beginning-of-error token (on 
first encounter of a parsing error) ErrorBegin, /// Delimiting a JSON line for error recovery LineEnd, /// Total number of tokens NUM_TOKENS }; namespace detail { /** * @brief Parses the given JSON string and emits a sequence of tokens that demarcate relevant * sections from the input. * * @param json_in The JSON input * @param options Parsing options specifying the parsing behaviour * @param stream The CUDA stream to which kernels are dispatched * @param mr Optional, resource with which to allocate * @return Pair of device vectors, where the first vector represents the token types and the second * vector represents the index within the input corresponding to each token */ std::pair<rmm::device_uvector<PdaTokenT>, rmm::device_uvector<SymbolOffsetT>> get_token_stream( device_span<SymbolT const> json_in, cudf::io::json_reader_options const& options, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); } // namespace detail } // namespace cudf::io::json
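// Illustrative usage sketch (not part of the original header): obtaining the token stream for a
// buffer of JSON text that is already resident in device memory. All parameters are assumed to
// be supplied by the caller.
void token_stream_example(cudf::device_span<char const> json_chars,
                          cudf::io::json_reader_options const& options,
                          rmm::cuda_stream_view stream)
{
  // `tokens` holds one token_t value per emitted token; `offsets` holds, for each token, the
  // character position within json_chars at which it was emitted. Both live in device memory.
  auto [tokens, offsets] = cudf::io::json::detail::get_token_stream(
    json_chars, options, stream, rmm::mr::get_current_device_resource());
  // ... downstream token post-processing (e.g. tree construction) would consume the two vectors
}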
0
rapidsai_public_repos/cudf/cpp/include/cudf/io
rapidsai_public_repos/cudf/cpp/include/cudf/io/detail/utils.hpp
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once namespace cudf { namespace io { namespace detail { /** * @brief Whether writer writes in chunks or all at once */ enum class single_write_mode : bool { YES, NO }; } // namespace detail } // namespace io } // namespace cudf
0
rapidsai_public_repos/cudf/cpp/include/cudf/io
rapidsai_public_repos/cudf/cpp/include/cudf/io/detail/orc.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/io/detail/utils.hpp> #include <cudf/io/types.hpp> #include <cudf/table/table_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/default_stream.hpp> #include <memory> #include <string> #include <vector> #include <rmm/cuda_stream_view.hpp> namespace cudf { namespace io { // Forward declaration class orc_reader_options; class orc_writer_options; class chunked_orc_writer_options; namespace detail { namespace orc { /** * @brief Class to read ORC dataset data into columns. */ class reader { private: class impl; std::unique_ptr<impl> _impl; public: /** * @brief Constructor from an array of datasources * * @param sources Input `datasource` objects to read the dataset from * @param options Settings for controlling reading behavior * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource to use for device memory allocation */ explicit reader(std::vector<std::unique_ptr<cudf::io::datasource>>&& sources, orc_reader_options const& options, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Destructor explicitly declared to avoid inlining in header */ ~reader(); /** * @brief Reads the entire dataset. * * @param options Settings for controlling reading behavior * @return The set of columns along with table metadata */ table_with_metadata read(orc_reader_options const& options); }; /** * @brief Class to write ORC dataset data into columns. */ class writer { private: class impl; std::unique_ptr<impl> _impl; public: /** * @brief Constructor for output to a file. * * @param sink The data sink to write the data to * @param options Settings for controlling writing behavior * @param mode Option to write at once or in chunks * @param stream CUDA stream used for device memory operations and kernel launches */ explicit writer(std::unique_ptr<cudf::io::data_sink> sink, orc_writer_options const& options, single_write_mode mode, rmm::cuda_stream_view stream); /** * @brief Constructor with chunked writer options. * * @param sink The data sink to write the data to * @param options Settings for controlling writing behavior * @param mode Option to write at once or in chunks * @param stream CUDA stream used for device memory operations and kernel launches */ explicit writer(std::unique_ptr<cudf::io::data_sink> sink, chunked_orc_writer_options const& options, single_write_mode mode, rmm::cuda_stream_view stream); /** * @brief Destructor explicitly declared to avoid inlining in header */ ~writer(); /** * @brief Writes a single subtable as part of a larger ORC file/table write. * * @param[in] table The table information to be written */ void write(table_view const& table); /** * @brief Finishes the chunked/streamed write process. */ void close(); }; } // namespace orc } // namespace detail } // namespace io } // namespace cudf
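// Illustrative usage sketch (not part of the original header): driving the detail-level ORC
// writer above for a one-shot (non-chunked) write. The output path "out.orc" is an assumption
// made only for this example.
#include <cudf/io/data_sink.hpp>
#include <cudf/io/orc.hpp>

void write_orc_detail_example(cudf::table_view const& tbl, rmm::cuda_stream_view stream)
{
  auto sink = cudf::io::data_sink::create("out.orc");
  auto const options =
    cudf::io::orc_writer_options::builder(cudf::io::sink_info{"out.orc"}, tbl).build();

  // single_write_mode::YES signals that the whole dataset is written in a single write() call
  cudf::io::detail::orc::writer writer(
    std::move(sink), options, cudf::io::detail::single_write_mode::YES, stream);
  writer.write(tbl);
  writer.close();
}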
0
rapidsai_public_repos/cudf/cpp/include/cudf/io
rapidsai_public_repos/cudf/cpp/include/cudf/io/detail/avro.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/io/avro.hpp> #include <cudf/utilities/default_stream.hpp> #include <rmm/cuda_stream_view.hpp> namespace cudf { namespace io { namespace detail { namespace avro { /** * @brief Reads the entire dataset. * * @param source Input `datasource` object to read the dataset from * @param options Settings for controlling reading behavior * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource to use for device memory allocation * * @return The set of columns along with table metadata */ table_with_metadata read_avro(std::unique_ptr<cudf::io::datasource>&& source, avro_reader_options const& options, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); } // namespace avro } // namespace detail } // namespace io } // namespace cudf
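// Illustrative usage sketch (not part of the original header): invoking the detail-level Avro
// reader above directly. The file name "data.avro" is an assumption made only for this example.
#include <cudf/io/avro.hpp>
#include <cudf/io/datasource.hpp>

cudf::io::table_with_metadata read_avro_detail_example()
{
  auto source = cudf::io::datasource::create("data.avro");
  auto const options =
    cudf::io::avro_reader_options::builder(cudf::io::source_info{"data.avro"}).build();

  return cudf::io::detail::avro::read_avro(std::move(source),
                                           options,
                                           cudf::get_default_stream(),
                                           rmm::mr::get_current_device_resource());
}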
0
rapidsai_public_repos/cudf/cpp/include/cudf/io
rapidsai_public_repos/cudf/cpp/include/cudf/io/detail/parquet.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file parquet.hpp */ #pragma once #include <cudf/io/detail/utils.hpp> #include <cudf/io/parquet_metadata.hpp> #include <cudf/io/types.hpp> #include <cudf/table/table_view.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/mr/device/per_device_resource.hpp> #include <string> #include <vector> namespace cudf::io { // Forward declaration class parquet_reader_options; class parquet_writer_options; class chunked_parquet_writer_options; namespace parquet::detail { /** * @brief Class to read Parquet dataset data into columns. */ class reader { protected: class impl; std::unique_ptr<impl> _impl; /** * @brief Default constructor, needed for subclassing. */ reader(); public: /** * @brief Constructor from an array of datasources * * @param sources Input `datasource` objects to read the dataset from * @param options Settings for controlling reading behavior * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device memory resource to use for device memory allocation */ explicit reader(std::vector<std::unique_ptr<cudf::io::datasource>>&& sources, parquet_reader_options const& options, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Destructor explicitly-declared to avoid inlined in header */ virtual ~reader(); /** * @brief Reads the dataset as per given options. * * @param options Settings for controlling reading behavior * * @return The set of columns along with table metadata */ table_with_metadata read(parquet_reader_options const& options); }; /** * @brief The reader class that supports iterative reading of a given file. * * This class intentionally subclasses the `reader` class with private inheritance to hide the * `reader::read()` API. As such, only chunked reading APIs are supported. */ class chunked_reader : private reader { public: /** * @brief Constructor from an output size memory limit and an input size memory limit and an array * of data sources with reader options. * * The typical usage should be similar to this: * ``` * do { * auto const chunk = reader.read_chunk(); * // Process chunk * } while (reader.has_next()); * * ``` * * If `chunk_read_limit == 0` (i.e., no output limit), and `pass_read_limit == 0` (no input * temporary memory size limit) a call to `read_chunk()` will read the whole file and return a * table containing all rows. * * The chunk_read_limit parameter controls the size of the output chunks produces. If the user * specifies 100 MB of data, the reader will attempt to return chunks containing tables that have * a total bytes size (over all columns) of 100 MB or less. This is a soft limit and the code * will not fail if it cannot satisfy the limit. It will make a best-effort atttempt only. * * The pass_read_limit parameter controls how much temporary memory is used in the process of * decoding the file. 
The primary contributor to this memory usage is the uncompressed size of * the data read out of the file and the decompressed (but not yet decoded) size of the data. The * granularity of a given pass is at the row group level. It will not attempt to read at the sub * row-group level. * * Combined, the way to visualize passes and chunks is as follows: * * @code{.pseudo} * for(each pass){ * for(each output chunk within a pass){ * return a table that fits within the output chunk limit * } * } * @endcode * * With a pass_read_limit of `0` you are simply saying you have one pass that reads the entire * file as normal. * * @param chunk_read_limit Limit on total number of bytes to be returned per read, * or `0` if there is no limit * @param pass_read_limit Limit on total amount of memory used for temporary computations during * loading, or `0` if there is no limit * @param sources Input `datasource` objects to read the dataset from * @param options Settings for controlling reading behavior * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource to use for device memory allocation */ explicit chunked_reader(std::size_t chunk_read_limit, std::size_t pass_read_limit, std::vector<std::unique_ptr<cudf::io::datasource>>&& sources, parquet_reader_options const& options, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @brief Destructor explicitly-declared to avoid inlined in header. * * Since the declaration of the internal `_impl` object does not exist in this header, this * destructor needs to be defined in a separate source file which can access to that object's * declaration. */ ~chunked_reader(); /** * @copydoc cudf::io::chunked_parquet_reader::has_next */ [[nodiscard]] bool has_next() const; /** * @copydoc cudf::io::chunked_parquet_reader::read_chunk */ [[nodiscard]] table_with_metadata read_chunk() const; }; /** * @brief Class to write parquet dataset data into columns. */ class writer { private: class impl; std::unique_ptr<impl> _impl; public: /** * @brief Constructor for output to a file. * * @param sinks The data sinks to write the data to * @param options Settings for controlling writing behavior * @param mode Option to write at once or in chunks * @param stream CUDA stream used for device memory operations and kernel launches */ explicit writer(std::vector<std::unique_ptr<data_sink>> sinks, parquet_writer_options const& options, cudf::io::detail::single_write_mode mode, rmm::cuda_stream_view stream); /** * @brief Constructor for writer to handle chunked parquet options. * * @param sinks The data sinks to write the data to * @param options Settings for controlling writing behavior for chunked writer * @param mode Option to write at once or in chunks * @param stream CUDA stream used for device memory operations and kernel launches * * @return A parquet-compatible blob that contains the data for all rowgroups in the list */ explicit writer(std::vector<std::unique_ptr<data_sink>> sinks, chunked_parquet_writer_options const& options, cudf::io::detail::single_write_mode mode, rmm::cuda_stream_view stream); /** * @brief Destructor explicitly-declared to avoid inlined in header */ ~writer(); /** * @brief Writes a single subtable as part of a larger parquet file/table write. * * @throws rmm::bad_alloc if there is insufficient space for temporary buffers * * @param[in] table The table information to be written * @param[in] partitions Optional partitions to divide the table into. 
If specified, must be the same * size as the number of sinks. */ void write(table_view const& table, std::vector<partition_info> const& partitions = {}); /** * @brief Finishes the chunked/streamed write process. * * @param[in] column_chunks_file_path Column chunks file path to be set in the raw output metadata * * @return A parquet-compatible blob that contains the data for all rowgroups in the list only if * `column_chunks_file_path` is provided, else null. */ std::unique_ptr<std::vector<uint8_t>> close( std::vector<std::string> const& column_chunks_file_path = {}); /** * @brief Merges multiple metadata blobs returned by write_all into a single metadata blob * * @param[in] metadata_list List of input file metadata * @return A parquet-compatible blob that contains the data for all rowgroups in the list */ static std::unique_ptr<std::vector<uint8_t>> merge_row_group_metadata( std::vector<std::unique_ptr<std::vector<uint8_t>>> const& metadata_list); }; /** * @brief Reads metadata of a parquet dataset. * * @param sources Dataset sources to read from * * @return parquet_metadata with parquet schema, number of rows, number of row groups and key-value * metadata. */ parquet_metadata read_parquet_metadata(host_span<std::unique_ptr<datasource> const> sources); } // namespace parquet::detail } // namespace cudf::io
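The chunked-read loop sketched in the constructor documentation, written out against the declarations above. The options builder and `datasource::create` are assumed from the public Parquet header; the 100 MB limit and file name are illustrative only.
```
// Sketch only: chunked reading with a ~100 MB output limit and no pass limit.
std::vector<std::unique_ptr<cudf::io::datasource>> sources;
sources.emplace_back(cudf::io::datasource::create("example.parquet"));  // placeholder path

auto const options =
  cudf::io::parquet_reader_options::builder(cudf::io::source_info{"example.parquet"}).build();

cudf::io::parquet::detail::chunked_reader reader(100 * 1024 * 1024,  // chunk_read_limit
                                                 0,                  // pass_read_limit
                                                 std::move(sources),
                                                 options,
                                                 cudf::get_default_stream(),
                                                 rmm::mr::get_current_device_resource());
do {
  auto chunk = reader.read_chunk();
  // process chunk.tbl ...
} while (reader.has_next());
```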
0
rapidsai_public_repos/cudf/cpp/include/cudf/io
rapidsai_public_repos/cudf/cpp/include/cudf/io/text/multibyte_split.hpp
/* * Copyright (c) 2021-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/column/column.hpp> #include <cudf/io/text/byte_range_info.hpp> #include <cudf/io/text/data_chunk_source.hpp> #include <rmm/mr/device/device_memory_resource.hpp> #include <rmm/mr/device/per_device_resource.hpp> #include <memory> #include <optional> namespace cudf { namespace io { namespace text { /** * @brief Parsing options for multibyte_split. */ struct parse_options { /** * @brief Only rows starting inside this byte range will be part of the output column. */ byte_range_info byte_range = create_byte_range_info_max(); /** * @brief Whether delimiters at the end of rows should be stripped from the output column */ bool strip_delimiters = false; }; /** * @brief Splits the source text into a strings column using a multiple byte delimiter. * * Providing a byte range allows multibyte_split to read a file partially, only returning the * offsets of delimiters which begin within the range. If thinking in terms of "records", where each * delimiter dictates the end of a record, all records which begin within the byte range provided * will be returned, including any record which may begin in the range but end outside of the * range. Records which begin outside of the range will ignored, even if those records end inside * the range. * * @code{.pseudo} * Examples: * source: "abc..def..ghi..jkl.." * delimiter: ".." * * byte_range: nullopt * return: ["abc..", "def..", "ghi..", jkl..", ""] * * byte_range: [0, 2) * return: ["abc.."] * * byte_range: [2, 9) * return: ["def..", "ghi.."] * * byte_range: [11, 2) * return: [] * * byte_range: [13, 7) * return: ["jkl..", ""] * @endcode * * @param source The source string * @param delimiter UTF-8 encoded string for which to find offsets in the source * @param options the parsing options to use (including byte range) * @param mr Memory resource to use for the device memory allocation * @return The strings found by splitting the source by the delimiter within the relevant byte * range. */ std::unique_ptr<cudf::column> multibyte_split( data_chunk_source const& source, std::string const& delimiter, parse_options options = {}, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); std::unique_ptr<cudf::column> multibyte_split( data_chunk_source const& source, std::string const& delimiter, std::optional<byte_range_info> byte_range, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); std::unique_ptr<cudf::column> multibyte_split(data_chunk_source const& source, std::string const& delimiter, rmm::mr::device_memory_resource* mr); } // namespace text } // namespace io } // namespace cudf
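A short sketch of the byte-range form documented above, mirroring the `[2, 9)` example; `make_source` is declared in `<cudf/io/text/data_chunk_source_factories.hpp>` (shown further below).
```
// Sketch only: split records on ".." within the byte range starting at offset 2, size 9.
std::string const input = "abc..def..ghi..jkl..";
auto source = cudf::io::text::make_source(
  cudf::host_span<char const>{input.data(), input.size()});

cudf::io::text::parse_options options;
options.byte_range = cudf::io::text::byte_range_info{2, 9};  // offset, size

auto records = cudf::io::text::multibyte_split(*source, "..", options);
// records: ["def..", "ghi.."]
```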
0
rapidsai_public_repos/cudf/cpp/include/cudf/io
rapidsai_public_repos/cudf/cpp/include/cudf/io/text/data_chunk_source_factories.hpp
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/io/datasource.hpp> #include <cudf/io/text/data_chunk_source.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/utilities/span.hpp> #include <memory> #include <string> namespace cudf::io::text { /** * @brief Creates a data source capable of producing device-buffered views of a datasource. * @param data the datasource to be exposed as a data chunk source * @return the data chunk source for the provided datasource. It must not outlive the datasource * used to construct it. */ std::unique_ptr<data_chunk_source> make_source(datasource& data); /** * @brief Creates a data source capable of producing device-buffered views of the given string. * @param data the host data to be exposed as a data chunk source. Its lifetime must be at least as * long as the lifetime of the returned data_chunk_source. * @return the data chunk source for the provided host data. It copies data from the host to the * device. */ std::unique_ptr<data_chunk_source> make_source(host_span<char const> data); /** * @brief Creates a data source capable of producing device-buffered views of the file * @param filename the filename of the file to be exposed as a data chunk source. * @return the data chunk source for the provided filename. It reads data from the file and copies * it to the device. */ std::unique_ptr<data_chunk_source> make_source_from_file(std::string_view filename); /** * @brief Creates a data source capable of producing device-buffered views of a BGZIP compressed * file. * @param filename the filename of the BGZIP-compressed file to be exposed as a data chunk source. * @return the data chunk source for the provided filename. It reads data from the file and copies * it to the device, where it will be decompressed. */ std::unique_ptr<data_chunk_source> make_source_from_bgzip_file(std::string_view filename); /** * @brief Creates a data source capable of producing device-buffered views of a BGZIP compressed * file with virtual record offsets. * @param filename the filename of the BGZIP-compressed file to be exposed as a data chunk source. * @param virtual_begin the virtual (Tabix) offset of the first byte to be read. Its upper 48 bits * describe the offset into the compressed file, its lower 16 bits describe the * block-local offset. * @param virtual_end the virtual (Tabix) offset one past the last byte to be read. * @return the data chunk source for the provided filename. It reads data from the file and copies * it to the device, where it will be decompressed. The chunk source only returns data * between the virtual offsets `virtual_begin` and `virtual_end`. */ std::unique_ptr<data_chunk_source> make_source_from_bgzip_file(std::string_view filename, uint64_t virtual_begin, uint64_t virtual_end); /** * @brief Creates a data source capable of producing views of the given device string scalar * @param data the device data to be exposed as a data chunk source. 
Its lifetime must be at least * as long as the lifetime of the returned data_chunk_source. * @return the data chunk source for the provided device data. It does not create any copies. */ std::unique_ptr<data_chunk_source> make_source(cudf::string_scalar& data); } // namespace cudf::io::text
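A sketch contrasting the factory overloads above; the lifetime requirements called out in the documentation still apply (the wrapped data and files must outlive the returned sources), and the file names are placeholders.
```
// Sketch only: three ways to build a data_chunk_source.
std::string const host_data = "hello\nworld\n";

auto from_host = cudf::io::text::make_source(
  cudf::host_span<char const>{host_data.data(), host_data.size()});

auto from_file  = cudf::io::text::make_source_from_file("example.txt");       // placeholder
auto from_bgzip = cudf::io::text::make_source_from_bgzip_file("example.gz");  // placeholder

// each of these can be passed to cudf::io::text::multibyte_split(...)
```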
0
rapidsai_public_repos/cudf/cpp/include/cudf/io
rapidsai_public_repos/cudf/cpp/include/cudf/io/text/data_chunk_source.hpp
/* * Copyright (c) 2021-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/utilities/span.hpp> #include <rmm/cuda_stream_pool.hpp> #include <rmm/device_buffer.hpp> namespace cudf { namespace io { namespace text { /** * @brief A contract guaranteeing stream-ordered memory access to the underlying device data. * * This class guarantees access to the underlying data for the stream on which the data was * allocated. Possible implementations may own the device data, or may only have a view over the * data. Any work enqueued to the stream on which this data was allocated is guaranteed to be * performed prior to the destruction of the underlying data, but otherwise no guarantees are made * regarding if or when the underlying data gets destroyed. */ class device_data_chunk { public: virtual ~device_data_chunk() = default; /** * @pure @brief Returns a pointer to the underlying device data. * * @return A pointer to the underlying device data */ [[nodiscard]] virtual char const* data() const = 0; /** * @pure @brief Returns the size of the underlying device data. * * @return The size of the underlying device data */ [[nodiscard]] virtual std::size_t size() const = 0; /** * @pure @brief Returns a span over the underlying device data. * * @return A span over the underlying device data */ virtual operator device_span<char const>() const = 0; }; /** * @brief a reader capable of producing views over device memory. * * The data chunk reader API encapsulates the idea of statefully traversing and loading a data * source. A data source may be a file, a region of device memory, or a region of host memory. * Reading data from these data sources efficiently requires different strategies depending on the * type of data source, type of compression, capabilities of the host and device, the data's * destination. Whole-file decompression should be hidden behind this interface. */ class data_chunk_reader { public: virtual ~data_chunk_reader() = default; /** * @pure @brief Skips the specified number of bytes in the data source. * * @param size The number of bytes to skip */ virtual void skip_bytes(std::size_t size) = 0; /** * @pure @brief Get the next chunk of bytes from the data source * * Performs any necessary work to read and prepare the underlying data source for consumption as a * view over device memory. Common implementations may read from a file, copy data from host * memory, allocate temporary memory, perform iterative decompression, or even launch device * kernels. * * @param size number of bytes to read * @param stream stream to associate allocations or perform work required to obtain chunk * @return a chunk of data up to @p size bytes. May return less than @p size bytes if * reader reaches end of underlying data source. 
Returned data must be accessed in stream order * relative to the specified @p stream */ virtual std::unique_ptr<device_data_chunk> get_next_chunk(std::size_t size, rmm::cuda_stream_view stream) = 0; }; /** * @brief a data source capable of creating a reader which can produce views of the data source in * device memory. */ class data_chunk_source { public: virtual ~data_chunk_source() = default; /** * @pure @brief Get a reader for the data source. * * @return `data_chunk_reader` object for the data source */ [[nodiscard]] virtual std::unique_ptr<data_chunk_reader> create_reader() const = 0; }; } // namespace text } // namespace io } // namespace cudf
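A hedged sketch of how a consumer might drain a `data_chunk_source` through the reader interface above, using a factory from the header shown earlier. Treating a zero-size chunk as the end-of-input signal is an assumption based on the documented "may return less than size bytes" behavior, not something this header guarantees.
```
// Sketch only: drain a chunk source in ~1 MiB chunks, skipping a 16-byte preamble.
std::string const text(4096, 'x');
auto source = cudf::io::text::make_source(
  cudf::host_span<char const>{text.data(), text.size()});

auto reader = source->create_reader();
reader->skip_bytes(16);

auto stream = cudf::get_default_stream();
while (true) {
  auto chunk = reader->get_next_chunk(1 << 20, stream);
  if (chunk->size() == 0) { break; }  // assumed end-of-input signal (see note above)
  // consume chunk->data() / chunk->size() in order on `stream`
}
```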
0
rapidsai_public_repos/cudf/cpp/include/cudf/io
rapidsai_public_repos/cudf/cpp/include/cudf/io/text/byte_range_info.hpp
/* * Copyright (c) 2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/utilities/error.hpp> #include <cstdint> #include <vector> namespace cudf { namespace io { namespace text { /** * @brief stores offset and size used to indicate a byte range */ class byte_range_info { private: int64_t _offset; ///< offset in bytes int64_t _size; ///< size in bytes public: constexpr byte_range_info() noexcept : _offset(0), _size(0) {} /** * @brief Constructs a byte_range_info object * * @param offset offset in bytes * @param size size in bytes */ constexpr byte_range_info(int64_t offset, int64_t size) : _offset(offset), _size(size) { CUDF_EXPECTS(offset >= 0, "offset must be non-negative"); CUDF_EXPECTS(size >= 0, "size must be non-negative"); } /** * @brief Copy constructor * * @param other byte_range_info object to copy */ constexpr byte_range_info(byte_range_info const& other) noexcept = default; /** * @brief Copy assignment operator * * @param other byte_range_info object to copy * @return this object after copying */ constexpr byte_range_info& operator=(byte_range_info const& other) noexcept = default; /** * @brief Get the offset in bytes * * @return Offset in bytes */ [[nodiscard]] constexpr int64_t offset() { return _offset; } /** * @brief Get the size in bytes * * @return Size in bytes */ [[nodiscard]] constexpr int64_t size() { return _size; } /** * @brief Returns whether the span is empty. * * @return true iff the span is empty, i.e. `size() == 0` */ [[nodiscard]] constexpr bool empty() { return size() == 0; } }; /** * @brief Create a collection of consecutive ranges between [0, total_bytes). * * Each range wil be the same size except if `total_bytes` is not evenly divisible by * `range_count`, in which case the last range size will be the remainder. * * @param total_bytes total number of bytes in all ranges * @param range_count total number of ranges in which to divide bytes * @return Vector of range objects */ std::vector<byte_range_info> create_byte_range_infos_consecutive(int64_t total_bytes, int64_t range_count); /** * @brief Create a byte_range_info which represents as much of a file as possible. Specifically, * `[0, numeric_limit<int64_t>::max())`. * * @return `[0, numeric_limit<int64_t>::max())` */ byte_range_info create_byte_range_info_max(); } // namespace text } // namespace io } // namespace cudf
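A small sketch of splitting an input into consecutive ranges, e.g. to drive parallel `multibyte_split` calls over disjoint byte ranges; everything used here is declared in this header.
```
// Sketch only: divide 1000 bytes into 4 consecutive ranges.
auto ranges = cudf::io::text::create_byte_range_infos_consecutive(1000, 4);

for (auto range : ranges) {
  // range.offset() and range.size(); the last range absorbs any remainder
}

// a range covering "as much of a file as possible":
auto whole_file = cudf::io::text::create_byte_range_info_max();
```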
0
rapidsai_public_repos/cudf/cpp/include/cudf/io/text
rapidsai_public_repos/cudf/cpp/include/cudf/io/text/detail/tile_state.hpp
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cub/block/block_scan.cuh> #include <cuda/atomic> namespace cudf { namespace io { namespace text { namespace detail { enum class scan_tile_status : uint8_t { oob, invalid, partial, inclusive, }; template <typename T> struct scan_tile_state_view { uint64_t num_tiles; cuda::atomic<scan_tile_status, cuda::thread_scope_device>* tile_status; T* tile_partial; T* tile_inclusive; __device__ inline void set_status(cudf::size_type tile_idx, scan_tile_status status) { auto const offset = (tile_idx + num_tiles) % num_tiles; tile_status[offset].store(status, cuda::memory_order_relaxed); } __device__ inline void set_partial_prefix(cudf::size_type tile_idx, T value) { auto const offset = (tile_idx + num_tiles) % num_tiles; cub::ThreadStore<cub::STORE_CG>(tile_partial + offset, value); tile_status[offset].store(scan_tile_status::partial); } __device__ inline void set_inclusive_prefix(cudf::size_type tile_idx, T value) { auto const offset = (tile_idx + num_tiles) % num_tiles; cub::ThreadStore<cub::STORE_CG>(tile_inclusive + offset, value); tile_status[offset].store(scan_tile_status::inclusive); } __device__ inline T get_prefix(cudf::size_type tile_idx, scan_tile_status& status) { auto const offset = (tile_idx + num_tiles) % num_tiles; while ((status = tile_status[offset].load(cuda::memory_order_relaxed)) == scan_tile_status::invalid) {} if (status == scan_tile_status::partial) { return cub::ThreadLoad<cub::LOAD_CG>(tile_partial + offset); } else { return cub::ThreadLoad<cub::LOAD_CG>(tile_inclusive + offset); } } }; template <typename T> struct scan_tile_state { rmm::device_uvector<cuda::atomic<scan_tile_status, cuda::thread_scope_device>> tile_status; rmm::device_uvector<T> tile_state_partial; rmm::device_uvector<T> tile_state_inclusive; scan_tile_state(cudf::size_type num_tiles, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) : tile_status(rmm::device_uvector<cuda::atomic<scan_tile_status, cuda::thread_scope_device>>( num_tiles, stream, mr)), tile_state_partial(rmm::device_uvector<T>(num_tiles, stream, mr)), tile_state_inclusive(rmm::device_uvector<T>(num_tiles, stream, mr)) { } operator scan_tile_state_view<T>() { return scan_tile_state_view<T>{tile_status.size(), tile_status.data(), tile_state_partial.data(), tile_state_inclusive.data()}; } inline T get_inclusive_prefix(cudf::size_type tile_idx, rmm::cuda_stream_view stream) const { auto const offset = (tile_idx + tile_status.size()) % tile_status.size(); return tile_state_inclusive.element(offset, stream); } }; template <typename T> struct scan_tile_state_callback { __device__ inline scan_tile_state_callback(scan_tile_state_view<T>& tile_state, cudf::size_type tile_idx) : _tile_state(tile_state), _tile_idx(tile_idx) { } __device__ inline T operator()(T const& block_aggregate) { T exclusive_prefix; if (threadIdx.x == 0) { _tile_state.set_partial_prefix(_tile_idx, block_aggregate); auto predecessor_idx = _tile_idx - 1; auto 
predecessor_status = scan_tile_status::invalid; // scan partials to form prefix auto window_partial = _tile_state.get_prefix(predecessor_idx, predecessor_status); while (predecessor_status != scan_tile_status::inclusive) { predecessor_idx--; auto predecessor_prefix = _tile_state.get_prefix(predecessor_idx, predecessor_status); window_partial = predecessor_prefix + window_partial; } exclusive_prefix = window_partial; _tile_state.set_inclusive_prefix(_tile_idx, exclusive_prefix + block_aggregate); } return exclusive_prefix; } scan_tile_state_view<T>& _tile_state; cudf::size_type _tile_idx; }; } // namespace detail } // namespace text } // namespace io } // namespace cudf
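A host-side construction sketch for the scan tile state above. Initializing the tile-status array and launching the kernels that consume the `scan_tile_state_view` are omitted, so this only illustrates the ownership and view types, not a complete decoupled look-back scan.
```
// Sketch only: allocate state for a 256-tile device scan over int64_t partials.
auto stream = cudf::get_default_stream();
auto mr     = rmm::mr::get_current_device_resource();

cudf::io::text::detail::scan_tile_state<int64_t> tile_state(256, stream, mr);

// the view is what a kernel would capture by value
cudf::io::text::detail::scan_tile_state_view<int64_t> view = tile_state;

// after the device scan has completed, the final prefix can be read back:
// auto total = tile_state.get_inclusive_prefix(255, stream);
```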
0
rapidsai_public_repos/cudf/cpp/include/cudf/io/text
rapidsai_public_repos/cudf/cpp/include/cudf/io/text/detail/trie.hpp
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/io/text/detail/multistate.hpp> #include <cudf/utilities/span.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/mr/device/device_memory_resource.hpp> #include <algorithm> #include <queue> #include <string> #include <unordered_map> #include <vector> namespace cudf { namespace io { namespace text { namespace detail { struct trie_node { char token; uint8_t match_length; uint8_t child_begin; }; struct trie_device_view { device_span<trie_node const> _nodes; /** * @brief create a multistate which contains all partial path matches for the given token. */ constexpr multistate transition_init(char c) { auto result = multistate(); result.enqueue(0, 0); for (uint8_t curr = 0; curr < _nodes.size() - 1; curr++) { transition_enqueue_all(c, result, curr, curr); } return result; } /** * @brief create a new multistate by transitioning all states in the multistate by the given token * * Eliminates any partial matches that cannot transition using the given token. * * @note always enqueues (0, 0] as the first state of the returned multistate. */ constexpr multistate transition(char c, multistate const& states) { auto result = multistate(); result.enqueue(0, 0); for (uint8_t i = 0; i < states.size(); i++) { transition_enqueue_all(c, result, states.get_head(i), states.get_tail(i)); } return result; } /** * @brief returns true if the given index is associated with a matching state. */ constexpr bool is_match(uint16_t idx) { return static_cast<bool>(get_match_length(idx)); } /** * @brief returns the match length if the given index is associated with a matching state, * otherwise zero. */ constexpr uint8_t get_match_length(uint16_t idx) { return _nodes[idx].match_length; } private: constexpr void transition_enqueue_all( // char c, multistate& states, uint8_t head, uint8_t curr) { for (uint32_t tail = _nodes[curr].child_begin; tail < _nodes[curr + 1].child_begin; tail++) { if (_nodes[tail].token == c) { // states.enqueue(head, tail); } } } }; /** * @brief A flat trie contained in device memory. */ struct trie { private: cudf::size_type _max_duplicate_tokens; rmm::device_uvector<trie_node> _nodes; trie(cudf::size_type max_duplicate_tokens, rmm::device_uvector<trie_node>&& nodes) : _max_duplicate_tokens(max_duplicate_tokens), _nodes(std::move(nodes)) { } /** * @brief Used to build a hierarchical trie which can then be flattened. 
*/ struct trie_builder_node { uint8_t match_length; std::unordered_map<char, std::unique_ptr<trie_builder_node>> children; /** * @brief Insert the string in to the trie tree, growing the trie as necessary */ void insert(std::string s) { insert(s.c_str(), s.size(), 0); } private: trie_builder_node& insert(char const* s, uint16_t size, uint8_t depth) { if (size == 0) { match_length = depth; return *this; } if (children[*s] == nullptr) { children[*s] = std::make_unique<trie_builder_node>(); } return children[*s]->insert(s + 1, size - 1, depth + 1); } }; public: /** * @brief Gets the number of nodes contained in this trie. */ [[nodiscard]] cudf::size_type size() const { return _nodes.size(); } /** * @brief A pessimistic count of duplicate tokens in the trie. Used to determine the maximum * possible stack size required to compute matches of this trie in parallel. */ [[nodiscard]] cudf::size_type max_duplicate_tokens() const { return _max_duplicate_tokens; } /** * @brief Create a trie which represents the given pattern. * * @param pattern The pattern to store in the trie * @param stream The stream to use for allocation and copy * @param mr Memory resource to use for the device memory allocation * @return The trie. */ static trie create(std::string const& pattern, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { return create(std::vector<std::string>{pattern}, stream, mr); } /** * @brief Create a trie which represents the given pattern. * * @param pattern The patterns to store in the trie * @param stream The stream to use for allocation and copy * @param mr Memory resource to use for the device memory allocation * @return The trie. */ static trie create(std::vector<std::string> const& patterns, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { std::vector<char> tokens; std::vector<uint8_t> transitions; std::vector<uint8_t> match_length; // create the trie tree auto root = std::make_unique<trie_builder_node>(); for (auto& pattern : patterns) { root->insert(pattern); } // flatten auto sum = 1; transitions.emplace_back(sum); match_length.emplace_back(root->match_length); auto builder_nodes = std::queue<std::unique_ptr<trie_builder_node>>(); builder_nodes.push(std::move(root)); tokens.emplace_back(0); while (builder_nodes.size()) { auto layer_size = builder_nodes.size(); for (uint32_t i = 0; i < layer_size; i++) { auto node = std::move(builder_nodes.front()); builder_nodes.pop(); sum += node->children.size(); transitions.emplace_back(sum); for (auto& item : node->children) { match_length.emplace_back(item.second->match_length); tokens.emplace_back(item.first); builder_nodes.push(std::move(item.second)); } } } tokens.emplace_back(0); match_length.emplace_back(0); std::vector<trie_node> trie_nodes; auto token_counts = std::unordered_map<cudf::size_type, int32_t>(); for (uint32_t i = 0; i < tokens.size(); i++) { trie_nodes.emplace_back(trie_node{tokens[i], match_length[i], transitions[i]}); token_counts[tokens[i]]++; } auto most_common_token = std::max_element(token_counts.begin(), token_counts.end(), [](auto const& a, auto const& b) { return a.second < b.second; }); auto max_duplicate_tokens = most_common_token->second; return trie{max_duplicate_tokens, cudf::detail::make_device_uvector_sync(trie_nodes, stream, mr)}; } [[nodiscard]] trie_device_view view() const { return trie_device_view{_nodes}; } }; } // namespace detail } // namespace text } // namespace io } // namespace cudf
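A sketch of building the flat device trie from a set of delimiter patterns; the host-side queries below come directly from the declarations above, and the delimiter strings are illustrative.
```
// Sketch only: build a device trie over two delimiter patterns.
auto stream = cudf::get_default_stream();
auto mr     = rmm::mr::get_current_device_resource();

auto delimiters = std::vector<std::string>{"\r\n", "\n"};
auto trie       = cudf::io::text::detail::trie::create(delimiters, stream, mr);

auto const node_count = trie.size();                  // number of flattened nodes
auto const max_dupes  = trie.max_duplicate_tokens();  // bound used for parallel matching
auto device_view      = trie.view();                  // trie_device_view usable in device code
```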
0
rapidsai_public_repos/cudf/cpp/include/cudf/io/text
rapidsai_public_repos/cudf/cpp/include/cudf/io/text/detail/multistate.hpp
/* * Copyright (c) 2021-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstdint> namespace cudf { namespace io { namespace text { namespace detail { /** * @brief Represents up to 7 segments */ struct multistate { public: /** * @brief The maximum state (head or tail) this multistate can represent */ static auto constexpr max_segment_value = 15; /** * @brief The maximum number of segments this multistate can represent */ static auto constexpr max_segment_count = 7; /** * @brief Enqueues a (head, tail] segment to this multistate * * @note: The behavior of this function is undefined if size() => max_segment_count */ constexpr void enqueue(uint8_t head, uint8_t tail) { _heads |= (head & 0xFu) << (_size * 4); _tails |= (tail & 0xFu) << (_size * 4); _size++; } /** * @brief get's the number of segments this multistate represents */ [[nodiscard]] constexpr uint8_t size() const { return _size; } /** * @brief get's the highest (____, tail] value this multistate represents */ [[nodiscard]] constexpr uint8_t max_tail() const { uint8_t maximum = 0; for (uint8_t i = 0; i < _size; i++) { maximum = std::max(maximum, get_tail(i)); } return maximum; } /** * @brief get's the Nth (head, ____] value state this multistate represents */ [[nodiscard]] constexpr uint8_t get_head(uint8_t idx) const { return (_heads >> (idx * 4)) & 0xFu; } /** * @brief get's the Nth (____, tail] value state this multistate represents */ [[nodiscard]] constexpr uint8_t get_tail(uint8_t idx) const { return (_tails >> (idx * 4)) & 0xFu; } private: uint8_t _size = 0; uint32_t _heads{}; uint32_t _tails{}; }; /** * @brief associatively inner-joins transition histories. * * Examples: * <(0, 5]> + <(5, 9]> = <(0, 9]> * <(0, 5]> + <(6, 9]> = <> * <(0, 1], (0, 2]> + <(2, 3], (1, 4]> = <(0, 4], (0, 3]> * <(0, 1], (0, 2]> + <(1, 3]> = <(0, 3]> * * Head and tail value are limited to [0, 1, ..., 16] * * @param lhs past segments * @param rhs future segments * @return full join of past and future segments */ constexpr multistate operator+(multistate const& lhs, multistate const& rhs) { // combine two multistates together by full-joining LHS tails to RHS heads, // and taking the corresponding LHS heads and RHS tails. multistate result; for (uint8_t lhs_idx = 0; lhs_idx < lhs.size(); lhs_idx++) { auto tail = lhs.get_tail(lhs_idx); for (uint8_t rhs_idx = 0; rhs_idx < rhs.size(); rhs_idx++) { auto head = rhs.get_head(rhs_idx); if (tail == head) { result.enqueue(lhs.get_head(lhs_idx), rhs.get_tail(rhs_idx)); } } } return result; } } // namespace detail } // namespace text } // namespace io } // namespace cudf
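The first documented example of `operator+`, written out as code; everything used here is declared in this header.
```
// Sketch only: <(0, 5]> + <(5, 9]> == <(0, 9]>
using cudf::io::text::detail::multistate;

multistate lhs;
lhs.enqueue(0, 5);

multistate rhs;
rhs.enqueue(5, 9);

auto joined = lhs + rhs;
// joined.size() == 1, joined.get_head(0) == 0, joined.get_tail(0) == 9
```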
0
rapidsai_public_repos/cudf/cpp/include/cudf/io/text
rapidsai_public_repos/cudf/cpp/include/cudf/io/text/detail/bgzip_utils.hpp
/* * Copyright (c) 2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <zlib.h> #include <cudf/utilities/error.hpp> #include <cudf/utilities/span.hpp> #include <algorithm> #include <array> #include <fstream> #include <limits> namespace cudf::io::text::detail::bgzip { struct header { int block_size; int extra_length; [[nodiscard]] int data_size() const { return block_size - extra_length - 20; } }; struct footer { uint32_t crc; uint32_t decompressed_size; }; /** * @brief Reads the full BGZIP header from the given input stream. Afterwards, the stream position * is at the first data byte. * * @param input_stream The input stream * @return The header storing the compressed size and extra subfield length */ header read_header(std::istream& input_stream); /** * @brief Reads the full BGZIP footer from the given input stream. Afterwards, the stream position * is after the last footer byte. * * @param input_stream The input stream * @return The footer storing uncompressed size and CRC32 */ footer read_footer(std::istream& input_stream); /** * @brief Writes a header for data of the given compressed size to the given stream. * * @param output_stream The output stream * @param compressed_size The size of the compressed data * @param pre_size_subfields Any GZIP extra subfields (need to be valid) to be placed before the * BGZIP block size subfield * @param post_size_subfields Any subfields to be placed after the BGZIP block size subfield */ void write_header(std::ostream& output_stream, uint16_t compressed_size, host_span<char const> pre_size_subfields, host_span<char const> post_size_subfields); /** * @brief Writes a footer for the given uncompressed data to the given stream. * * @param output_stream The output stream * @param data The data for which uncompressed size and CRC32 will be computed and written */ void write_footer(std::ostream& output_stream, host_span<char const> data); /** * @brief Writes the given data to the given stream as an uncompressed deflate block with BZGIP * header and footer. * * @param output_stream The output stream * @param data The uncompressed data * @param pre_size_subfields Any GZIP extra subfields (need to be valid) to be placed before the * BGZIP block size subfield * @param post_size_subfields Any subfields to be placed after the BGZIP block size subfield */ void write_uncompressed_block(std::ostream& output_stream, host_span<char const> data, host_span<char const> pre_size_subfields = {}, host_span<char const> post_size_subfields = {}); /** * @brief Writes the given data to the given stream as a compressed deflate block with BZGIP * header and footer. 
* * @param output_stream The output stream * @param data The uncompressed data * @param pre_size_subfields Any GZIP extra subfields (need to be valid) to be placed before the * BGZIP block size subfield * @param post_size_subfields Any subfields to be placed after the BGZIP block size subfield */ void write_compressed_block(std::ostream& output_stream, host_span<char const> data, host_span<char const> pre_size_subfields = {}, host_span<char const> post_size_subfields = {}); } // namespace cudf::io::text::detail::bgzip
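A hedged sketch of writing a small BGZIP file with the helpers above. Terminating the file with an empty uncompressed block as an end-of-file marker is an assumption about the BGZIP convention, not something this header states.
```
// Sketch only: write one compressed block followed by an (assumed) empty EOF block.
#include <fstream>

std::ofstream out("example.bgzip", std::ios::binary);  // placeholder path

std::string const payload = "hello bgzip\n";
cudf::io::text::detail::bgzip::write_compressed_block(
  out, cudf::host_span<char const>{payload.data(), payload.size()});

// assumed convention: terminate the file with an empty block
cudf::io::text::detail::bgzip::write_uncompressed_block(out, {});
```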
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/nvtext/ngrams_tokenize.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/column/column.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/strings/strings_column_view.hpp> namespace nvtext { /** * @addtogroup nvtext_ngrams * @{ * @file */ /** * @brief Returns a single column of strings by tokenizing the input strings * column and then producing ngrams of each string. * * An ngram is a grouping of 2 or more tokens with a separator. For example, * generating bigrams groups all adjacent pairs of tokens for a string. * * ``` * ["a bb ccc"] can be tokenized to ["a", "bb", "ccc"] * bigrams would generate ["a_bb", "bb_ccc"] and trigrams would generate ["a_bb_ccc"] * ``` * * The `delimiter` is used for tokenizing and may be zero or more characters. * If the `delimiter` is empty, whitespace (character code-point <= ' ') is used * for identifying tokens. * * Once tokens are identified, ngrams are produced by joining the tokens * with the specified separator. The generated ngrams use the tokens for each * string and not across strings in adjacent rows. * Any input string that contains fewer tokens than the specified ngrams value is * skipped and will not contribute to the output. Therefore, a bigram of a single * token is ignored as well as a trigram of 2 or less tokens. * * Tokens are found by locating delimiter(s) starting at the beginning of each string. * As each string is tokenized, the ngrams are generated using input column row order * to build the output column. That is, ngrams created in input row[i] will be placed in * the output column directly before ngrams created in input row[i+1]. * * The size of the output column will be the total number of ngrams generated from * the input strings column. * * @code{.pseudo} * Example: * s = ["a b c", "d e", "f g h i", "j"] * t = ngrams_tokenize(s, 2, " ", "_") * t is now ["a_b", "b_c", "d_e", "f_g", "g_h", "h_i"] * @endcode * * All null row entries are ignored and the output contains all valid rows. * * @param input Strings column to tokenize and produce ngrams from * @param ngrams The ngram number to generate * @param delimiter UTF-8 characters used to separate each string into tokens. * An empty string will separate tokens using whitespace. * @param separator The string to use for separating ngram tokens * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return New strings columns of tokens */ std::unique_ptr<cudf::column> ngrams_tokenize( cudf::strings_column_view const& input, cudf::size_type ngrams, cudf::string_scalar const& delimiter, cudf::string_scalar const& separator, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group } // namespace nvtext
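The pseudo example above, expressed with the C++ API; `cudf::test::strings_column_wrapper` is assumed from the cudf test utilities purely to build an input column concisely.
```
// Sketch only: bigrams joined with "_" (matches the pseudo example above).
#include <cudf_test/column_wrapper.hpp>
#include <nvtext/ngrams_tokenize.hpp>

cudf::test::strings_column_wrapper input({"a b c", "d e", "f g h i", "j"});

auto result = nvtext::ngrams_tokenize(cudf::strings_column_view{input},
                                      2,                          // bigrams
                                      cudf::string_scalar{" "},   // delimiter
                                      cudf::string_scalar{"_"});  // separator
// result: ["a_b", "b_c", "d_e", "f_g", "g_h", "h_i"]
```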
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/nvtext/minhash.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/column/column.hpp> #include <cudf/hashing.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/strings/strings_column_view.hpp> #include <cudf/utilities/span.hpp> namespace nvtext { /** * @addtogroup nvtext_minhash * @{ * @file */ /** * @brief Returns the minhash value for each string * * Hash values are computed from substrings of each string and the * minimum hash value is returned for each string. * * Any null row entries result in corresponding null output rows. * * This function uses MurmurHash3_x86_32 for the hash algorithm. * * @throw std::invalid_argument if the width < 2 * * @param input Strings column to compute minhash * @param seed Seed value used for the hash algorithm * @param width The character width used for apply substrings; * Default is 4 characters. * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return Minhash values for each string in input */ std::unique_ptr<cudf::column> minhash( cudf::strings_column_view const& input, cudf::numeric_scalar<uint32_t> seed = 0, cudf::size_type width = 4, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Returns the minhash values for each string per seed * * Hash values are computed from substrings of each string and the * minimum hash value is returned for each string for each seed. * Each row of the list column are seed results for the corresponding * string. The order of the elements in each row match the order of * the seeds provided in the `seeds` parameter. * * This function uses MurmurHash3_x86_32 for the hash algorithm. * * Any null row entries result in corresponding null output rows. * * @throw std::invalid_argument if the width < 2 * @throw std::invalid_argument if seeds is empty * @throw std::overflow_error if `seeds * input.size()` exceeds the column size limit * * @param input Strings column to compute minhash * @param seeds Seed values used for the hash algorithm * @param width The character width used for apply substrings; * Default is 4 characters. * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return List column of minhash values for each string per seed */ std::unique_ptr<cudf::column> minhash( cudf::strings_column_view const& input, cudf::device_span<uint32_t const> seeds, cudf::size_type width = 4, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Returns the minhash value for each string * * Hash values are computed from substrings of each string and the * minimum hash value is returned for each string. 
* * Any null row entries result in corresponding null output rows. * * This function uses MurmurHash3_x64_128 for the hash algorithm. * The hash function returns 2 uint64 values but only the first value * is used with the minhash calculation. * * @throw std::invalid_argument if the width < 2 * * @param input Strings column to compute minhash * @param seed Seed value used for the hash algorithm * @param width The character width used for apply substrings; * Default is 4 characters. * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return Minhash values as UINT64 for each string in input */ std::unique_ptr<cudf::column> minhash64( cudf::strings_column_view const& input, cudf::numeric_scalar<uint64_t> seed = 0, cudf::size_type width = 4, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Returns the minhash values for each string per seed * * Hash values are computed from substrings of each string and the * minimum hash value is returned for each string for each seed. * Each row of the list column are seed results for the corresponding * string. The order of the elements in each row match the order of * the seeds provided in the `seeds` parameter. * * This function uses MurmurHash3_x64_128 for the hash algorithm. * * Any null row entries result in corresponding null output rows. * * @throw std::invalid_argument if the width < 2 * @throw std::invalid_argument if seeds is empty * @throw std::overflow_error if `seeds * input.size()` exceeds the column size limit * * @param input Strings column to compute minhash * @param seeds Seed values used for the hash algorithm * @param width The character width used for apply substrings; * Default is 4 characters. * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return List column of minhash values for each string per seed */ std::unique_ptr<cudf::column> minhash64( cudf::strings_column_view const& input, cudf::device_span<uint64_t const> seeds, cudf::size_type width = 4, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group } // namespace nvtext
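A small sketch of the single-seed overload above; the input column is again built with the (assumed) test wrapper, and the seed and width values are illustrative.
```
// Sketch only: one UINT32 minhash value per row, using 5-character substrings.
cudf::test::strings_column_wrapper input({"the quick brown fox", "jumped over the lazy dog"});

auto hashes = nvtext::minhash(cudf::strings_column_view{input},
                              cudf::numeric_scalar<uint32_t>{1},  // seed
                              5);                                 // substring width

// the device_span overloads instead return a LIST column with one value per seed per row
```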
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/nvtext/subword_tokenize.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/column/column.hpp> #include <cudf/column/column_view.hpp> #include <cudf/strings/strings_column_view.hpp> namespace nvtext { /** * @addtogroup nvtext_tokenize * @{ * @file */ /** * @brief The vocabulary data for use with the subword_tokenize function. */ struct hashed_vocabulary { uint16_t first_token_id{}; ///< The first token id in the vocabulary uint16_t separator_token_id{}; ///< The separator token id in the vocabulary uint16_t unknown_token_id{}; ///< The unknown token id in the vocabulary uint32_t outer_hash_a{}; ///< The a parameter for the outer hash uint32_t outer_hash_b{}; ///< The b parameter for the outer hash uint16_t num_bins{}; ///< Number of bins std::unique_ptr<cudf::column> table; ///< uint64 column, the flattened hash table with key, value ///< pairs packed in 64-bits std::unique_ptr<cudf::column> bin_coefficients; ///< uint64 column, containing the hashing ///< parameters for each hash bin on the GPU std::unique_ptr<cudf::column> bin_offsets; ///< uint16 column, containing the start index of each ///< bin in the flattened hash table std::unique_ptr<cudf::column> cp_metadata; ///< uint32 column, The code point metadata table to use for normalization std::unique_ptr<cudf::column> aux_cp_table; ///< uint64 column, The auxiliary code point table to use for normalization }; /** * @brief Load the hashed vocabulary file into device memory. * * The object here can be used to call the subword_tokenize without * incurring the cost of loading the same file each time. * * @throw cudf::logic_error if the `filename_hashed_vocabulary` could not be opened. * * @param filename_hashed_vocabulary A path to the preprocessed vocab.txt file. * Note that this is the file AFTER python/perfect_hash.py has been used * for preprocessing. * @param mr Memory resource to allocate any returned objects. * @return vocabulary hash-table elements */ std::unique_ptr<hashed_vocabulary> load_vocabulary_file( std::string const& filename_hashed_vocabulary, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Result object for the subword_tokenize functions. */ struct tokenizer_result { /** * @brief The number of rows for the output token-ids. */ uint32_t nrows_tensor{}; /** * @brief The number of token-ids in each row. */ uint32_t sequence_length{}; /** * @brief A vector of token-ids for each row. * * The data is a flat matrix (nrows_tensor x sequence_length) of token-ids. * This column is of type UINT32 with no null entries. */ std::unique_ptr<cudf::column> tensor_token_ids; /** * @brief This mask identifies which tensor-token-ids are valid. * * This column is of type UINT32 with no null entries. */ std::unique_ptr<cudf::column> tensor_attention_mask; /** * @brief The metadata for each tensor row. * * There are three elements per tensor row [row-id, start_pos, stop_pos]) * This column is of type UINT32 with no null entries. 
*/ std::unique_ptr<cudf::column> tensor_metadata; }; /** * @brief Creates a tokenizer that cleans the text, splits it into tokens and * returns token-ids from an input vocabulary. * * The strings are first normalized by converting to lower-case, removing * punctuation, replacing a select set of multi-byte characters and * whitespace characters. * * The strings are then tokenized by using whitespace as a delimiter. * Consecutive delimiters are ignored. Each token is then assigned * a 4-byte token-id mapped from the provided vocabulary table. * * Essentially each string is converted into one or more vectors of token-ids * in the output column. The total number of these vectors times `max_sequence_length` * is the size of the `tensor_token_ids` output column. For `do_truncate==true`: * ``` * size of tensor_token_ids = max_sequence_length * strings.size() * size of tensor_attention_mask = max_sequence_length * strings.size() * size of tensor_metadata = 3 * strings.size() * ``` * * For `do_truncate==false` the number of rows per output string depends on the * number of tokens resolved and the `stride` value which may repeat tokens * in subsequent overflow rows. * * This function requires about 21x the number of character bytes in the input * strings column as working memory. * * @throw cudf::logic_error if `stride > max_sequence_length` * @throw std::overflow_error if `max_sequence_length * max_rows_tensor` * exceeds the column size limit * * @param strings The input strings to tokenize. * @param vocabulary_table The vocabulary table pre-loaded into this object. * @param max_sequence_length Limit of the number of token-ids per row in final tensor * for each string. * @param stride Each row in the output token-ids will replicate `max_sequence_length - stride` * the token-ids from the previous row, unless it is the first string. * @param do_lower_case If true, the tokenizer will convert uppercase characters in the * input stream to lower-case and strip accents from those characters. * If false, accented and uppercase characters are not transformed. * @param do_truncate If true, the tokenizer will discard all the token-ids after * `max_sequence_length` for each input string. If false, it will use a new row * in the output token-ids to continue generating the output. * @param mr Memory resource to allocate any returned objects. * @return token-ids, attention-mask, and metadata */ tokenizer_result subword_tokenize( cudf::strings_column_view const& strings, hashed_vocabulary const& vocabulary_table, uint32_t max_sequence_length, uint32_t stride, bool do_lower_case, bool do_truncate, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group } // namespace nvtext
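A sketch of the load-then-tokenize flow described above; the vocabulary path is a placeholder and the test wrapper is an assumption used only to build the input column.
```
// Sketch only: truncating tokenization to 64 token-ids per row.
auto vocabulary = nvtext::load_vocabulary_file("hashed_vocab.txt");  // placeholder path

cudf::test::strings_column_wrapper input({"This is a test.", "A second sentence"});

auto result = nvtext::subword_tokenize(cudf::strings_column_view{input},
                                       *vocabulary,
                                       64,     // max_sequence_length
                                       64,     // stride (== max_sequence_length with truncation)
                                       true,   // do_lower_case
                                       true);  // do_truncate
// result.tensor_token_ids holds result.nrows_tensor * 64 token-ids
```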
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/nvtext/tokenize.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/column/column.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/strings/strings_column_view.hpp> namespace nvtext { /** * @addtogroup nvtext_tokenize * @{ * @file */ /** * @brief Returns a single column of strings by tokenizing the input strings * column using the provided characters as delimiters. * * The `delimiter` may be zero or more characters. If the `delimiter` is empty, * whitespace (character code-point <= ' ') is used for identifying tokens. * Also, any consecutive delimiters found in a string are ignored. * This means only non-empty tokens are returned. * * Tokens are found by locating delimiter(s) starting at the beginning of each string. * As each string is tokenized, the tokens are appended using input column row order * to build the output column. That is, tokens found in input row[i] will be placed in * the output column directly before tokens found in input row[i+1]. * * @code{.pseudo} * Example: * s = ["a", "b c", "d e f "] * t = tokenize(s) * t is now ["a", "b", "c", "d", "e", "f"] * @endcode * * All null row entries are ignored and the output contains all valid rows. * * @param input Strings column to tokenize * @param delimiter UTF-8 characters used to separate each string into tokens. * The default of empty string will separate tokens using whitespace. * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return New strings columns of tokens */ std::unique_ptr<cudf::column> tokenize( cudf::strings_column_view const& input, cudf::string_scalar const& delimiter = cudf::string_scalar{""}, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Returns a single column of strings by tokenizing the input strings * column using multiple strings as delimiters. * * Tokens are found by locating delimiter(s) starting at the beginning of each string. * Any consecutive delimiters found in a string are ignored. * This means only non-empty tokens are returned. * * As each string is tokenized, the tokens are appended using input column row order * to build the output column. That is, tokens found in input row[i] will be placed in * the output column directly before tokens found in input row[i+1]. * * @code{.pseudo} * Example: * s = ["a", "b c", "d.e:f;"] * d = [".", ":", ";"] * t = tokenize(s,d) * t is now ["a", "b c", "d", "e", "f"] * @endcode * * All null row entries are ignored and the output contains all valid rows. * * @throw cudf::logic_error if the delimiters column is empty or contains nulls. 
* * @param input Strings column to tokenize * @param delimiters Strings used to separate individual strings into tokens * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return New strings columns of tokens */ std::unique_ptr<cudf::column> tokenize( cudf::strings_column_view const& input, cudf::strings_column_view const& delimiters, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Returns the number of tokens in each string of a strings column. * * The `delimiter` may be zero or more characters. If the `delimiter` is empty, * whitespace (character code-point <= ' ') is used for identifying tokens. * Also, any consecutive delimiters found in a string are ignored. * This means that only empty strings or null rows will result in a token count of 0. * * @code{.pseudo} * Example: * s = ["a", "b c", " ", "d e f"] * t = count_tokens(s) * t is now [1, 2, 0, 3] * @endcode * * All null row entries are ignored and the output contains all valid rows. * The number of tokens for a null element is set to 0 in the output column. * * @param input Strings column to count tokens * @param delimiter Strings used to separate each string into tokens. * The default of empty string will separate tokens using whitespace. * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return New column of token counts */ std::unique_ptr<cudf::column> count_tokens( cudf::strings_column_view const& input, cudf::string_scalar const& delimiter = cudf::string_scalar{""}, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Returns the number of tokens in each string of a strings column * by using multiple strings delimiters to identify tokens in each string. * * Also, any consecutive delimiters found in a string are ignored. * This means that only empty strings or null rows will result in a token count of 0. * * @code{.pseudo} * Example: * s = ["a", "b c", "d.e:f;"] * d = [".", ":", ";"] * t = count_tokens(s,d) * t is now [1, 1, 3] * @endcode * * All null row entries are ignored and the output contains all valid rows. * The number of tokens for a null element is set to 0 in the output column. * * @throw cudf::logic_error if the delimiters column is empty or contains nulls * * @param input Strings column to count tokens * @param delimiters Strings used to separate each string into tokens * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return New column of token counts */ std::unique_ptr<cudf::column> count_tokens( cudf::strings_column_view const& input, cudf::strings_column_view const& delimiters, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Returns a single column of strings by converting each character to a string. * * Each string is converted to multiple strings -- one for each character. * Note that a character maybe more than one byte. 
* * @code{.pseudo} * Example: * s = ["hello world", null, "goodbye"] * t = character_tokenize(s) * t is now ["h","e","l","l","o"," ","w","o","r","l","d","g","o","o","d","b","y","e"] * @endcode * * All null row entries are ignored and the output contains all valid rows. * * @param input Strings column to tokenize * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return New strings columns of tokens */ std::unique_ptr<cudf::column> character_tokenize( cudf::strings_column_view const& input, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Creates a strings column from a strings column of tokens and an * associated column of row ids. * * Multiple tokens from the input column may be combined into a single row (string) * in the output column. The tokens are concatenated along with the `separator` string * in the order in which they appear in the `row_indices` column. * * @code{.pseudo} * Example: * s = ["hello", "world", "one", "two", "three"] * r = [0, 0, 1, 1, 1] * s1 = detokenize(s,r) * s1 is now ["hello world", "one two three"] * r = [0, 2, 1, 1, 0] * s2 = detokenize(s,r) * s2 is now ["hello three", "one two", "world"] * @endcode * * All null row entries are ignored and the output contains all valid rows. * The values in `row_indices` are expected to have positive, sequential * values without any missing row indices otherwise the output is undefined. * * @throw cudf::logic_error is `separator` is invalid * @throw cudf::logic_error if `row_indices.size() != strings.size()` * @throw cudf::logic_error if `row_indices` contains nulls * * @param input Strings column to detokenize * @param row_indices The relative output row index assigned for each token in the input column * @param separator String to append after concatenating each token to the proper output row * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return New strings columns of tokens */ std::unique_ptr<cudf::column> detokenize( cudf::strings_column_view const& input, cudf::column_view const& row_indices, cudf::string_scalar const& separator = cudf::string_scalar(" "), rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Vocabulary object to be used with nvtext::tokenize_with_vocabulary * * Use nvtext::load_vocabulary to create this object. */ struct tokenize_vocabulary { /** * @brief Vocabulary object constructor * * Token ids are the row indices within the vocabulary column. * Each vocabulary entry is expected to be unique otherwise the behavior is undefined. 
* * @throw cudf::logic_error if `vocabulary` contains nulls or is empty * * @param input Strings for the vocabulary * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory */ tokenize_vocabulary(cudf::strings_column_view const& input, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); ~tokenize_vocabulary(); struct tokenize_vocabulary_impl; tokenize_vocabulary_impl* _impl{}; }; /** * @brief Create a tokenize_vocabulary object from a strings column * * Token ids are the row indices within the vocabulary column. * Each vocabulary entry is expected to be unique otherwise the behavior is undefined. * * @throw cudf::logic_error if `vocabulary` contains nulls or is empty * * @param input Strings for the vocabulary * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return Object to be used with nvtext::tokenize_with_vocabulary */ std::unique_ptr<tokenize_vocabulary> load_vocabulary( cudf::strings_column_view const& input, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Returns the token ids for the input string by looking up each delimited * token in the given vocabulary * * @code{.pseudo} * Example: * s = ["hello world", "hello there", "there there world", "watch out world"] * v = load_vocabulary(["hello", "there", "world"]) * r = tokenize_with_vocabulary(s,v) * r is now [[0,2], [0,1], [1,1,2], [-1,-1,2]] * @endcode * * Any null row entry results in a corresponding null entry in the output * * @throw cudf::logic_error if `delimiter` is invalid * * @param input Strings column to tokenize * @param vocabulary Used to lookup tokens within * @param delimiter Used to identify tokens within `input` * @param default_id The token id to be used for tokens not found in the `vocabulary`; * Default is -1 * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return Lists column of token ids */ std::unique_ptr<cudf::column> tokenize_with_vocabulary( cudf::strings_column_view const& input, tokenize_vocabulary const& vocabulary, cudf::string_scalar const& delimiter, cudf::size_type default_id = -1, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of tokenize group } // namespace nvtext
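A short sketch exercising the tokenize APIs declared in this header. It assumes cudf::test::strings_column_wrapper from the cudf test utilities for building device columns; default stream and memory-resource arguments are used throughout.

#include <nvtext/tokenize.hpp>

#include <cudf_test/column_wrapper.hpp>

#include <iostream>

int main()
{
  // Input strings column (test utility, assumed available)
  cudf::test::strings_column_wrapper input({"the quick brown fox", "jumps over", "the lazy dog"});
  auto const sv = cudf::strings_column_view{input};

  // Whitespace tokenization: the default empty delimiter means code-points <= ' '
  auto tokens = nvtext::tokenize(sv);
  auto counts = nvtext::count_tokens(sv);

  // Vocabulary-based tokenization: token ids are row indices into the vocabulary
  cudf::test::strings_column_wrapper vocab_strings({"dog", "fox", "the"});
  auto vocabulary = nvtext::load_vocabulary(cudf::strings_column_view{vocab_strings});
  auto ids = nvtext::tokenize_with_vocabulary(sv, *vocabulary, cudf::string_scalar{" "});

  std::cout << tokens->size() << " tokens across " << counts->size() << " rows; "
            << ids->size() << " rows of token ids" << std::endl;
  return 0;
}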
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/nvtext/generate_ngrams.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/column/column.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/strings/strings_column_view.hpp> namespace nvtext { /** * @addtogroup nvtext_ngrams * @{ * @file */ /** * @brief Returns a single column of strings by generating ngrams from * a strings column. * * An ngram is a grouping of 2 or more strings with a separator. For example, * generating bigrams groups all adjacent pairs of strings. * * ``` * ["a", "bb", "ccc"] would generate bigrams as ["a_bb", "bb_ccc"] * and trigrams as ["a_bb_ccc"] * ``` * * The size of the output column will be the total number of ngrams generated from * the input strings column. * * All null row entries are ignored and the output contains all valid rows. * * @throw cudf::logic_error if `ngrams < 2` * @throw cudf::logic_error if `separator` is invalid * @throw cudf::logic_error if there are not enough strings to generate any ngrams * * @param input Strings column to tokenize and produce ngrams from * @param ngrams The ngram number to generate * @param separator The string to use for separating ngram tokens * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return New strings columns of tokens */ std::unique_ptr<cudf::column> generate_ngrams( cudf::strings_column_view const& input, cudf::size_type ngrams, cudf::string_scalar const& separator, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Generates ngrams of characters within each string. * * Each character of a string used to build ngrams. * Ngrams are not created across strings. * * ``` * ["ab", "cde", "fgh"] would generate bigrams as ["ab", "cd", "de", "fg", "gh"] * ``` * * The size of the output column will be the total number of ngrams generated from * the input strings column. * * All null row entries are ignored and the output contains all valid rows. * * @throw cudf::logic_error if `ngrams < 2` * @throw cudf::logic_error if there are not enough characters to generate any ngrams * * @param input Strings column to produce ngrams from * @param ngrams The ngram number to generate. * Default is 2 = bigram. * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return New strings columns of tokens */ std::unique_ptr<cudf::column> generate_character_ngrams( cudf::strings_column_view const& input, cudf::size_type ngrams = 2, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Hashes ngrams of characters within each string * * Each character of a string used to build the ngrams and ngrams are not * produced across adjacent strings rows. 
* * ``` * "abcdefg" would generate ngrams=5 as ["abcde", "bcdef", "cdefg"] * ``` * * The ngrams for each string are hashed and returned in a list column where * the offsets specify rows of hash values for each string. * * The size of the child column will be the total number of ngrams generated from * the input strings column. * * All null row entries are ignored and the output contains all valid rows. * * The hash algorithm uses MurmurHash32 on each ngram. * * @throw cudf::logic_error if `ngrams < 2` * @throw cudf::logic_error if there are not enough characters to generate any ngrams * * @param input Strings column to produce ngrams from * @param ngrams The ngram number to generate. Default is 5. * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory. * @return A lists column of hash values */ std::unique_ptr<cudf::column> hash_character_ngrams( cudf::strings_column_view const& input, cudf::size_type ngrams = 5, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group } // namespace nvtext
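A brief sketch of the ngram APIs above, assuming cudf::test::strings_column_wrapper from the cudf test utilities; the inputs are chosen so every row is long enough to produce at least one ngram.

#include <nvtext/generate_ngrams.hpp>

#include <cudf_test/column_wrapper.hpp>

int main()
{
  // Each string has at least 5 characters so all three calls can produce ngrams
  cudf::test::strings_column_wrapper input({"toward", "greenery", "hello", "world"});
  auto const sv = cudf::strings_column_view{input};

  // Word bigrams joined with '_': ["toward_greenery", "greenery_hello", "hello_world"]
  auto bigrams = nvtext::generate_ngrams(sv, 2, cudf::string_scalar{"_"});

  // Character bigrams within each string: ["to", "ow", "wa", ...]
  auto char_bigrams = nvtext::generate_character_ngrams(sv, 2);

  // Hashed character 5-grams returned as a lists column of hash values
  auto hashes = nvtext::hash_character_ngrams(sv, 5);

  return 0;
}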
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/nvtext/normalize.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/column/column.hpp> #include <cudf/strings/strings_column_view.hpp> //! NVText APIs namespace nvtext { /** * @addtogroup nvtext_normalize * @{ * @file */ /** * @brief Returns a new strings column by normalizing the whitespace in each * string in the input column. * * Normalizing a string replaces any number of whitespace character * (character code-point <= ' ') runs with a single space ' ' and * trims whitespace from the beginning and end of the string. * * @code{.pseudo} * Example: * s = ["a b", " c d\n", "e \t f "] * t = normalize_spaces(s) * t is now ["a b","c d","e f"] * @endcode * * A null input element at row `i` produces a corresponding null entry * for row `i` in the output column. * * @param input Strings column to normalize * @param mr Device memory resource used to allocate the returned column's device memory * @param stream CUDA stream used for device memory operations and kernel launches * @return New strings columns of normalized strings. */ std::unique_ptr<cudf::column> normalize_spaces( cudf::strings_column_view const& input, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Normalizes strings characters for tokenizing. * * This uses the normalizer that is built into the nvtext::subword_tokenize function * which includes: * * - adding padding around punctuation (unicode category starts with "P") * as well as certain ASCII symbols like "^" and "$" * - adding padding around the [CJK Unicode block * characters](https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)) * - changing whitespace (e.g. `"\t", "\n", "\r"`) to just space `" "` * - removing control characters (unicode categories "Cc" and "Cf") * * The padding process here adds a single space before and after the character. * Details on _unicode category_ can be found here: * https://unicodebook.readthedocs.io/unicode.html#categories * * If `do_lower_case = true`, lower-casing also removes the accents. The * accents cannot be removed from upper-case characters without lower-casing * and lower-casing cannot be performed without also removing accents. * However, if the accented character is already lower-case, then only the * accent is removed. * * @code{.pseudo} * s = ["éâîô\teaio", "ĂĆĖÑÜ", "ACENU", "$24.08", "[a,bb]"] * s1 = normalize_characters(s,true) * s1 is now ["eaio eaio", "acenu", "acenu", " $ 24 . 08", " [ a , bb ] "] * s2 = normalize_characters(s,false) * s2 is now ["éâîô eaio", "ĂĆĖÑÜ", "ACENU", " $ 24 . 08", " [ a , bb ] "] * @endcode * * A null input element at row `i` produces a corresponding null entry * for row `i` in the output column. * * This function requires about 16x the number of character bytes in the input * strings column as working memory. 
* * @param input The input strings to normalize * @param do_lower_case If true, upper-case characters are converted to * lower-case and accents are stripped from those characters. * If false, accented and upper-case characters are not transformed. * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Memory resource to allocate any returned objects * @return Normalized strings column */ std::unique_ptr<cudf::column> normalize_characters( cudf::strings_column_view const& input, bool do_lower_case, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group } // namespace nvtext
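A small sketch of the two normalization APIs above, assuming cudf::test::strings_column_wrapper from the cudf test utilities.

#include <nvtext/normalize.hpp>

#include <cudf_test/column_wrapper.hpp>

int main()
{
  cudf::test::strings_column_wrapper input({"  the\t quick  brown fox ", "jumps  over\nthe lazy dog"});
  auto const sv = cudf::strings_column_view{input};

  // Collapse whitespace runs to a single space and trim both ends of each string
  auto spaced = nvtext::normalize_spaces(sv);

  // Tokenizer-style normalization; true also lower-cases and strips accents
  auto normalized = nvtext::normalize_characters(sv, /*do_lower_case=*/true);

  return 0;
}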
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/nvtext/replace.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/column/column.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/strings/strings_column_view.hpp> //! NVText APIs namespace nvtext { /** * @addtogroup nvtext_replace * @{ * @file */ /** * @brief Replaces specified tokens with corresponding replacement strings. * * Tokens are identified in each string and if any match the specified `targets` * strings, they are replaced with corresponding `replacements` string such that * if `targets[i]` is found, then it is replaced by `replacements[i]`. * * The `delimiter` may be zero or more characters. If the `delimiter` is empty, * whitespace (character code-point <= ' ') is used for identifying tokens. * Also, any consecutive delimiters found in a string are ignored. * * @code{.pseudo} * Example: * s = ["this is me", "theme music"] * tgt = ["is", "me"] * rpl = ["+", "_"] * result = replace_tokens(s,tgt,rpl) * result is now ["this + _", "theme music"] * @endcode * * A null input element at row `i` produces a corresponding null entry * for row `i` in the output column. * * An empty string is allowed for a replacement string but the delimiters * will not be removed. * * @code{.pseudo} * Example: * s = ["this is me", "theme music"] * tgt = ["me", "this"] * rpl = ["", ""] * result = replace_tokens(s,tgt,rpl) * result is now [" is ", "theme music"] * @endcode * * Note the first string in `result` still retains the space delimiters. * * The `replacements.size()` must equal `targets.size()` unless `replacements.size()==1`. * In this case, all matching `targets` strings will be replaced with the * single `replacements[0]` string. * * @throw cudf::logic_error if `targets.size() != replacements.size()` and * if `replacements.size() != 1` * @throw cudf::logic_error if targets or replacements contain nulls * @throw cudf::logic_error if delimiter is invalid * * @param input Strings column to replace * @param targets Strings to compare against tokens found in `input` * @param replacements Replacement strings for each string in `targets` * @param delimiter Characters used to separate each string into tokens. * The default of empty string will identify tokens using whitespace. * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return New strings columns of with replaced strings */ std::unique_ptr<cudf::column> replace_tokens( cudf::strings_column_view const& input, cudf::strings_column_view const& targets, cudf::strings_column_view const& replacements, cudf::string_scalar const& delimiter = cudf::string_scalar{""}, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Removes tokens whose lengths are less than a specified number of characters. * * Tokens identified in each string are removed from the corresponding output string. 
* The removed tokens can be replaced by specifying a `replacement` string as well. * * The `delimiter` may be zero or more characters. If the `delimiter` is empty, * whitespace (character code-point <= ' ') is used for identifying tokens. * Also, any consecutive delimiters found in a string are ignored. * * @code{.pseudo} * Example: * s = ["this is me", "theme music"] * result = filter_tokens(s,3) * result is now ["this ", "theme music"] * @endcode * * Note the first string in `result` still retains the space delimiters. * * Example with a `replacement` string. * * @code{.pseudo} * Example: * s = ["this is me", "theme music"] * result = filter_tokens(s,5,"---") * result is now ["--- --- ---", "theme music"] * @endcode * * The `replacement` string is allowed to be shorter than min_token_length. * * @throw cudf::logic_error if `delimiter` or `replacement` is invalid * * @param input Strings column to replace * @param min_token_length The minimum number of characters to retain a token in the output string * @param replacement Optional replacement string to be used in place of removed tokens * @param delimiter Characters used to separate each string into tokens. * The default of empty string will identify tokens using whitespace. * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return New strings columns of with replaced strings */ std::unique_ptr<cudf::column> filter_tokens( cudf::strings_column_view const& input, cudf::size_type min_token_length, cudf::string_scalar const& replacement = cudf::string_scalar{""}, cudf::string_scalar const& delimiter = cudf::string_scalar{""}, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group } // namespace nvtext
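A sketch mirroring the doxygen examples above, assuming cudf::test::strings_column_wrapper from the cudf test utilities.

#include <nvtext/replace.hpp>

#include <cudf_test/column_wrapper.hpp>

int main()
{
  cudf::test::strings_column_wrapper input({"this is me", "theme music"});
  auto const sv = cudf::strings_column_view{input};

  // Replace whole tokens "is" -> "+" and "me" -> "_" (substrings inside tokens are untouched)
  cudf::test::strings_column_wrapper targets({"is", "me"});
  cudf::test::strings_column_wrapper repls({"+", "_"});
  auto replaced = nvtext::replace_tokens(
    sv, cudf::strings_column_view{targets}, cudf::strings_column_view{repls});
  // replaced is now ["this + _", "theme music"]

  // Remove tokens shorter than 4 characters; the delimiters are kept
  auto filtered = nvtext::filter_tokens(sv, 4);
  // tokens "is" and "me" are removed; "this", "theme", and "music" are kept

  return 0;
}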
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/nvtext/stemmer.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/column/column.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/strings/strings_column_view.hpp> namespace nvtext { /** * @addtogroup nvtext_stemmer * @{ * @file */ /** * @brief Used for specifying letter type to check. */ enum class letter_type { CONSONANT, ///< Letter is a consonant VOWEL ///< Letter is not a consonant }; /** * @brief Returns boolean column indicating if `character_index` of the input strings * is a consonant or vowel. * * Determining consonants and vowels is described in the following * paper: https://tartarus.org/martin/PorterStemmer/def.txt * * Each string in the input column is expected to contain a single, lower-cased * word (or subword) with no punctuation and no whitespace otherwise the * measure value for that string is undefined. * * Also, the algorithm only works with English words. * * @code{.pseudo} * Example: * st = ["trouble", "toy", "sygyzy"] * b1 = is_letter(st, VOWEL, 1) * b1 is now [false, true, true] * @endcode * * A negative index value will check the character starting from the end * of each string. That is, for `character_index < 0` the letter checked for string * `input[i]` is at position `input[i].length + index`. * * @code{.pseudo} * Example: * st = ["trouble", "toy", "sygyzy"] * b2 = is_letter(st, CONSONANT, -1) // last letter checked in each string * b2 is now [false, true, false] * @endcode * * A null input element at row `i` produces a corresponding null entry * for row `i` in the output column. * * @param input Strings column of words to measure * @param ltype Specify letter type to check * @param character_index The character position to check in each string * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return New BOOL column */ std::unique_ptr<cudf::column> is_letter( cudf::strings_column_view const& input, letter_type ltype, cudf::size_type character_index, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Returns boolean column indicating if character at `indices[i]` of `input[i]` * is a consonant or vowel. * * Determining consonants and vowels is described in the following * paper: https://tartarus.org/martin/PorterStemmer/def.txt * * Each string in the input column is expected to contain a single, lower-cased * word (or subword) with no punctuation and no whitespace otherwise the * measure value for that string is undefined. * * Also, the algorithm only works with English words. * * @code{.pseudo} * Example: * st = ["trouble", "toy", "sygyzy"] * ix = [3, 1, 4] * b1 = is_letter(st, VOWEL, ix) * b1 is now [true, true, false] * @endcode * * A negative index value will check the character starting from the end * of each string. 
That is, for `character_index < 0` the letter checked for string * `strings[i]` is at position `strings[i].length + indices[i]`. * * @code{.pseudo} * Example: * st = ["trouble", "toy", "sygyzy"] * ix = [3, -2, 4] // 2nd to last character in st[1] is checked * b2 = is_letter(st, CONSONANT, ix) * b2 is now [false, false, true] * @endcode * * A null input element at row `i` produces a corresponding null entry * for row `i` in the output column. * * @throw cudf::logic_error if `indices.size() != input.size()` * @throw cudf::logic_error if `indices` contain nulls. * * @param input Strings column of words to measure * @param ltype Specify letter type to check * @param indices The character positions to check in each string * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return New BOOL column */ std::unique_ptr<cudf::column> is_letter( cudf::strings_column_view const& input, letter_type ltype, cudf::column_view const& indices, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Returns the Porter Stemmer measurements of a strings column. * * Porter stemming is used to normalize words by removing plural and tense endings * from words in English. The stemming measurement involves counting consonant/vowel * patterns within a string. * Reference paper: https://tartarus.org/martin/PorterStemmer/def.txt * * Each string in the input column is expected to contain a single, lower-cased * word (or subword) with no punctuation and no whitespace otherwise the * measure value for that string is undefined. * * Also, the algorithm only works with English words. * * @code{.pseudo} * Example: * st = ["tr", "troubles", "trouble"] * m = porter_stemmer_measure(st) * m is now [0,2,1] * @endcode * * A null input element at row `i` produces a corresponding null entry * for row `i` in the output column. * * @param input Strings column of words to measure * @param mr Device memory resource used to allocate the returned column's device memory * @param stream CUDA stream used for device memory operations and kernel launches * @return New INT32 column of measure values */ std::unique_ptr<cudf::column> porter_stemmer_measure( cudf::strings_column_view const& input, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group } // namespace nvtext
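A sketch of the stemmer utilities above using the same example words as the documentation; cudf::test::strings_column_wrapper is assumed from the cudf test utilities.

#include <nvtext/stemmer.hpp>

#include <cudf_test/column_wrapper.hpp>

int main()
{
  cudf::test::strings_column_wrapper words({"tr", "troubles", "trouble"});
  auto const sv = cudf::strings_column_view{words};

  // Porter stemmer measure of each lower-cased word: [0, 2, 1]
  auto measures = nvtext::porter_stemmer_measure(sv);

  // Check whether the character at index 1 of each word is a vowel
  auto vowels = nvtext::is_letter(sv, nvtext::letter_type::VOWEL, 1);

  return 0;
}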
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/nvtext/jaccard.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/column/column.hpp> #include <cudf/strings/strings_column_view.hpp> namespace nvtext { /** * @addtogroup nvtext_jaccard * @{ * @file */ /** * @brief Computes the Jaccard similarity between individual rows * in two strings columns * * The similarity is calculated between strings in corresponding rows * such that `output[row] = J(input1[row],input2[row])`. * * The Jaccard index formula is https://en.wikipedia.org/wiki/Jaccard_index * ``` * J = |A ∩ B| / |A ∪ B| * where |A ∩ B| is number of common values between A and B * and |x| is the number of unique values in x. * ``` * * The computation here compares strings columns by treating each string as text (i.e. sentences, * paragraphs, articles) instead of individual words or tokens to be compared directly. The * algorithm applies a sliding window (size specified by the `width` parameter) to each string to * form the set of tokens to compare within each row of the two input columns. * * These substrings are essentially character ngrams and used as part of the union and intersect * calculations for that row. For efficiency, the substrings are hashed using the default * MurmurHash32 to identify uniqueness within each row. Once the union and intersect sizes for the * row are resolved, the Jaccard index is computed using the above formula and returned as a float32 * value. * * @code{.pseudo} * input1 = ["the fuzzy dog", "little piggy", "funny bunny", "chatty parrot"] * input2 = ["the fuzzy cat", "bitty piggy", "funny bunny", "silent partner"] * r = jaccard_index(input1, input2) * r is now [0.5, 0.15384616, 1.0, 0] * @endcode * * If either input column's row is null, the output for that row will also be null. * * @throw std::invalid_argument if the `width < 2` or `input1.size() != input2.size()` * * @param input1 Strings column to compare with `input2` * @param input2 Strings column to compare with `input1` * @param width The character width used for apply substrings * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return Index calculation values */ std::unique_ptr<cudf::column> jaccard_index( cudf::strings_column_view const& input1, cudf::strings_column_view const& input2, cudf::size_type width, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group } // namespace nvtext
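A sketch of the Jaccard API above using rows from the documented example; cudf::test::strings_column_wrapper is assumed from the cudf test utilities.

#include <nvtext/jaccard.hpp>

#include <cudf_test/column_wrapper.hpp>

int main()
{
  cudf::test::strings_column_wrapper input1({"the fuzzy dog", "little piggy", "funny bunny"});
  cudf::test::strings_column_wrapper input2({"the fuzzy cat", "bitty piggy", "funny bunny"});

  // Row-wise Jaccard similarity over character substrings of width 5 (FLOAT32 output)
  auto scores = nvtext::jaccard_index(
    cudf::strings_column_view{input1}, cudf::strings_column_view{input2}, 5);

  return 0;
}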
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/nvtext/edit_distance.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/column/column.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/strings/strings_column_view.hpp> //! NVText APIs namespace nvtext { /** * @addtogroup nvtext_edit_distance * @{ * @file */ /** * @brief Compute the edit distance between individual strings in two strings columns. * * The `output[i]` is the edit distance between `input[i]` and `targets[i]`. * This edit distance calculation uses the Levenshtein algorithm as documented here: * https://www.cuelogic.com/blog/the-levenshtein-algorithm * * @code{.pseudo} * Example: * s = ["hello", "", "world"] * t = ["hallo", "goodbye", "world"] * d = edit_distance(s, t) * d is now [1, 7, 0] * @endcode * * Any null entries for either `input` or `targets` is ignored and the edit distance * is computed as though the null entry is an empty string. * * The `targets.size()` must equal `input.size()` unless `targets.size()==1`. * In this case, all `input` will be computed against the single `targets[0]` string. * * @throw cudf::logic_error if `targets.size() != input.size()` and * if `targets.size() != 1` * * @param input Strings column of input strings * @param targets Strings to compute edit distance against `input` * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return New strings columns of with replaced strings */ std::unique_ptr<cudf::column> edit_distance( cudf::strings_column_view const& input, cudf::strings_column_view const& targets, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Compute the edit distance between all the strings in the input column. * * This uses the Levenshtein algorithm to calculate the edit distance between * two strings as documented here: https://www.cuelogic.com/blog/the-levenshtein-algorithm * * The output is essentially a `input.size() x input.size()` square matrix of integers. * All values at diagonal `row == col` are 0 since the edit distance between two identical * strings is zero. All values above the diagonal are reflected below since the edit distance * calculation is also commutative. * * @code{.pseudo} * Example: * s = ["hello", "hallo", "hella"] * d = edit_distance_matrix(s) * d is now [[0, 1, 1], * [1, 0, 2] * [1, 2, 0]] * @endcode * * Null entries for `input` are ignored and the edit distance * is computed as though the null entry is an empty string. * * The output is a lists column of size `input.size()` and where each list item * is `input.size()` elements. 
* * @throw cudf::logic_error if `input.size() == 1` * * @param input Strings column of input strings * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Device memory resource used to allocate the returned column's device memory * @return New lists column of edit distance values */ std::unique_ptr<cudf::column> edit_distance_matrix( cudf::strings_column_view const& input, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group } // namespace nvtext
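A sketch of the edit distance APIs above, reusing the documented example strings; cudf::test::strings_column_wrapper is assumed from the cudf test utilities.

#include <nvtext/edit_distance.hpp>

#include <cudf_test/column_wrapper.hpp>

int main()
{
  cudf::test::strings_column_wrapper input({"hello", "", "world"});
  cudf::test::strings_column_wrapper targets({"hallo", "goodbye", "world"});

  // Pairwise Levenshtein distances: [1, 7, 0]
  auto distances = nvtext::edit_distance(
    cudf::strings_column_view{input}, cudf::strings_column_view{targets});

  // All-pairs distances returned as a lists column forming a square matrix
  auto matrix = nvtext::edit_distance_matrix(cudf::strings_column_view{input});

  return 0;
}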
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/nvtext/byte_pair_encoding.hpp
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/column/column.hpp> #include <cudf/column/column_view.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/strings/strings_column_view.hpp> #include <cudf/utilities/default_stream.hpp> namespace nvtext { /** * @addtogroup nvtext_tokenize * @{ * @file */ /** * @brief The table of merge pairs for the BPE encoder. * * To create an instance, call @ref nvtext::load_merge_pairs */ struct bpe_merge_pairs { struct bpe_merge_pairs_impl; bpe_merge_pairs_impl* impl{}; ///< Implementation of the BPE merge pairs table. /** * @brief Construct a new bpe merge pairs object * * @param input The input file containing the BPE merge pairs * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device memory resource used to allocate the device memory */ bpe_merge_pairs(std::unique_ptr<cudf::column>&& input, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Construct a new bpe merge pairs object * * @param input The input column of strings * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device memory resource used to allocate the device memory */ bpe_merge_pairs(cudf::strings_column_view const& input, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); ~bpe_merge_pairs(); bpe_merge_pairs(); }; /** * @brief Create a nvtext::bpe_merge_pairs from an input file. * * @deprecated Since 23.12 * * The file should contain a pair of strings per line separated by * a single space. * * Example: * @code{.txt} * e n * i t * i s * e s * en t * c e * es t * en ce * T h * Th is * t est * s ent * ... * @endcode * * The pairs are expected to be ordered in the file by their rank * relative to each other. A pair earlier in the file has priority over * any pairs below it. * * @param filename_merges Local file path of pairs encoded in UTF-8. * @param mr Memory resource to allocate any returned objects. * @return A nvtext::bpe_merge_pairs object */ [[deprecated]] std::unique_ptr<bpe_merge_pairs> load_merge_pairs_file( std::string const& filename_merges, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Create a nvtext::bpe_merge_pairs from a strings column * * The input column should contain a unique pair of strings per line separated by * a single space. An incorrect format or non-unique entries will result in * undefined behavior. * * Example: * @code{.pseudo} * merge_pairs = ["e n", "i t", "i s", "e s", "en t", "c e", "es t", "en ce", "t est", "s ent"] * mps = load_merge_pairs(merge_pairs) * // the mps object can be passed to the byte_pair_encoding API * @endcode * * The pairs are expected to be ordered in the file by their rank * relative to each other. 
A pair earlier in the file has priority over * any pairs below it. * * @throw cudf::logic_error if `merge_pairs` is empty or contains nulls * * @param merge_pairs Column containing the unique merge pairs * @param stream CUDA stream used for device memory operations and kernel launches * @param mr Memory resource to allocate any returned objects * @return A nvtext::bpe_merge_pairs object */ std::unique_ptr<bpe_merge_pairs> load_merge_pairs( cudf::strings_column_view const& merge_pairs, rmm::cuda_stream_view stream = cudf::get_default_stream(), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** * @brief Byte pair encode the input strings. * * This will split each string on whitespace, perform the encoding, * and then build the output column using the given `separator`. * * The encoding algorithm rebuilds each string by matching substrings * in the `merge_pairs` table and iteratively removing the minimum ranked pair * until no pairs are left. Then, a space is inserted between the remaining * pairs before the result is joined to make the output string. * * @code{.pseudo} * merge_pairs = ["e n", "i t", "i s", "e s", "en t", "c e", "es t", "en ce", "t est", "s ent"] * mps = load_merge_pairs(merge_pairs) * input = ["test sentence", "thisis test"] * result = byte_pair_encoding(input, mps) * result is now ["test sent ence", "this is test"] * @endcode * * @throw cudf::logic_error if `merge_pairs` is empty * @throw cudf::logic_error if `separator` is invalid * * @param input Strings to encode. * @param merges_pairs Created by a call to @ref nvtext::load_merge_pairs. * @param separator String used to build the output after encoding. * Default is a space. * @param mr Memory resource to allocate any returned objects. * @return An encoded column of strings. */ std::unique_ptr<cudf::column> byte_pair_encoding( cudf::strings_column_view const& input, bpe_merge_pairs const& merges_pairs, cudf::string_scalar const& separator = cudf::string_scalar(" "), rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); /** @} */ // end of group } // namespace nvtext
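A sketch of the BPE workflow above using the documented merge pairs; cudf::test::strings_column_wrapper is assumed from the cudf test utilities.

#include <nvtext/byte_pair_encoding.hpp>

#include <cudf_test/column_wrapper.hpp>

int main()
{
  // Merge pairs ordered by rank, exactly as in the doxygen example above
  cudf::test::strings_column_wrapper merge_pairs(
    {"e n", "i t", "i s", "e s", "en t", "c e", "es t", "en ce", "t est", "s ent"});
  auto mps = nvtext::load_merge_pairs(cudf::strings_column_view{merge_pairs});

  cudf::test::strings_column_wrapper input({"test sentence", "thisis test"});
  auto encoded = nvtext::byte_pair_encoding(cudf::strings_column_view{input}, *mps);
  // encoded is now ["test sent ence", "this is test"]

  return 0;
}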
0
rapidsai_public_repos/cudf/cpp/include/nvtext
rapidsai_public_repos/cudf/cpp/include/nvtext/detail/tokenize.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/column/column.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/strings/strings_column_view.hpp> #include <cudf/utilities/default_stream.hpp> #include <rmm/cuda_stream_view.hpp> namespace nvtext { namespace detail { /** * @copydoc nvtext::tokenize(strings_column_view const&,string_scalar * const&,rmm::mr::device_memory_resource*) * * @param stream CUDA stream used for device memory operations and kernel launches */ std::unique_ptr<cudf::column> tokenize(cudf::strings_column_view const& strings, cudf::string_scalar const& delimiter, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @copydoc nvtext::tokenize(strings_column_view const&,strings_column_view * const&,rmm::mr::device_memory_resource*) * * @param stream CUDA stream used for device memory operations and kernel launches */ std::unique_ptr<cudf::column> tokenize(cudf::strings_column_view const& strings, cudf::strings_column_view const& delimiters, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @copydoc nvtext::count_tokens(strings_column_view const&, string_scalar * const&,rmm::mr::device_memory_resource*) * * @param stream CUDA stream used for device memory operations and kernel launches */ std::unique_ptr<cudf::column> count_tokens(cudf::strings_column_view const& strings, cudf::string_scalar const& delimiter, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); /** * @copydoc nvtext::count_tokens(strings_column_view const&,strings_column_view * const&,rmm::mr::device_memory_resource*) * * @param stream CUDA stream used for device memory operations and kernel launches */ std::unique_ptr<cudf::column> count_tokens(cudf::strings_column_view const& strings, cudf::strings_column_view const& delimiters, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); } // namespace detail } // namespace nvtext
0
rapidsai_public_repos/cudf/cpp/include/nvtext
rapidsai_public_repos/cudf/cpp/include/nvtext/detail/generate_ngrams.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <nvtext/generate_ngrams.hpp> #include <rmm/cuda_stream_view.hpp> namespace nvtext { namespace detail { /** * @copydoc hash_character_ngrams(cudf::strings_column_view const&, * cudf::size_type, rmm::mr::device_memory_resource*) * * @param stream CUDA stream used for allocating/copying device memory and launching kernels */ std::unique_ptr<cudf::column> hash_character_ngrams(cudf::strings_column_view const& strings, cudf::size_type ngrams, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); } // namespace detail } // namespace nvtext
0
rapidsai_public_repos/cudf/cpp/include/nvtext
rapidsai_public_repos/cudf/cpp/include/nvtext/detail/load_hash_file.hpp
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <nvtext/subword_tokenize.hpp> #include <cudf/column/column.hpp> #include <rmm/cuda_stream_view.hpp> #include <cstdint> #include <cstring> namespace nvtext { namespace detail { /** * @brief Load the hashed vocabulary file into device memory. * * The object here can be used to call the subword_tokenize without * incurring the cost of loading the same file each time. * * @param filename_hashed_vocabulary A path to the preprocessed vocab.txt file. * Note that this is the file AFTER python/perfect_hash.py has been used * for preprocessing. * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Memory resource to allocate any returned objects. * @return vocabulary hash-table elements */ std::unique_ptr<hashed_vocabulary> load_vocabulary_file( std::string const& filename_hashed_vocabulary, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr); } // namespace detail } // namespace nvtext
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/cudf_test/column_utilities.hpp
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/column/column.hpp> #include <cudf/column/column_view.hpp> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/null_mask.hpp> #include <cudf/strings/strings_column_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/default_stream.hpp> #include <cudf/utilities/error.hpp> #include <thrust/host_vector.h> #include <thrust/iterator/transform_iterator.h> namespace cudf::test { /** * @brief Verbosity level of output from column and table comparison functions. */ enum class debug_output_level { FIRST_ERROR = 0, // print first error only ALL_ERRORS, // print all errors QUIET // no debug output }; constexpr size_type default_ulp = 4; namespace detail { /** * @brief Verifies the property equality of two columns. * * @note This function should not be used directly. Use `CUDF_TEST_EXPECT_COLUMN_PROPERTIES_EQUAL` * instead. * * @param lhs The first column * @param rhs The second column * @param verbosity Level of debug output verbosity * * @returns True if the column properties are equal, false otherwise */ bool expect_column_properties_equal(cudf::column_view const& lhs, cudf::column_view const& rhs, debug_output_level verbosity = debug_output_level::FIRST_ERROR); /** * @brief Verifies the property equivalence of two columns. * * If the columns don't have nulls, then the nullability equality is relaxed. * i.e. the two columns are considered equivalent even if one has a null mask * and the other doesn't. * * @note This function should not be used directly. Use * `CUDF_TEST_EXPECT_COLUMN_PROPERTIES_EQUIVALENT` instead. * * @param lhs The first column * @param rhs The second column * @param verbosity Level of debug output verbosity * * @returns True if the column properties are equivalent, false otherwise */ bool expect_column_properties_equivalent( cudf::column_view const& lhs, cudf::column_view const& rhs, debug_output_level verbosity = debug_output_level::FIRST_ERROR); /** * @brief Verifies the element-wise equality of two columns. * * Treats null elements as equivalent. * * @note This function should not be used directly. Use * `CUDF_TEST_EXPECT_COLUMNS_EQUAL` instead. * * @param lhs The first column * @param rhs The second column * @param verbosity Level of debug output verbosity * * @returns True if the columns (and their properties) are equal, false otherwise */ bool expect_columns_equal(cudf::column_view const& lhs, cudf::column_view const& rhs, debug_output_level verbosity = debug_output_level::FIRST_ERROR); /** * @brief Verifies the element-wise equivalence of two columns. * * Uses machine epsilon to compare floating point types. * Treats null elements as equivalent. * * @note This function should not be used directly. Use `CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT` * instead. 
* * @param lhs The first column * @param rhs The second column * @param verbosity Level of debug output verbosity * @param fp_ulps # of ulps of tolerance to allow when comparing * floating point values * * @returns True if the columns (and their properties) are equivalent, false otherwise */ bool expect_columns_equivalent(cudf::column_view const& lhs, cudf::column_view const& rhs, debug_output_level verbosity = debug_output_level::FIRST_ERROR, size_type fp_ulps = cudf::test::default_ulp); /** * @brief Verifies the bitwise equality of two device memory buffers. * * @note This function should not be used directly. Use `CUDF_TEST_EXPECT_EQUAL_BUFFERS` instead. * * @param lhs The first buffer * @param rhs The second buffer * @param size_bytes The number of bytes to check for equality */ void expect_equal_buffers(void const* lhs, void const* rhs, std::size_t size_bytes); } // namespace detail /** * @brief Verifies the given column is empty * * @param col The column to check */ void expect_column_empty(cudf::column_view const& col); /** * @brief Copy the null bitmask from a column view to a host vector * * @param c The column view * @returns Vector of bitmask_type elements */ std::vector<bitmask_type> bitmask_to_host(cudf::column_view const& c); /** * @brief Validates bitmask situated in host as per `number_of_elements` * * This takes care of padded bits * * @param expected_mask A vector representing expected mask * @param got_mask A vector representing mask obtained from column * @param number_of_elements number of elements the mask represent * * @returns true if both vector match till the `number_of_elements` */ bool validate_host_masks(std::vector<bitmask_type> const& expected_mask, std::vector<bitmask_type> const& got_mask_begin, size_type number_of_elements); /** * @brief Copies the data and bitmask of a `column_view` to the host. * * @tparam T The data type of the elements of the `column_view` * @param c the `column_view` to copy from * @return std::pair<thrust::host_vector<T>, std::vector<bitmask_type>> first is the * `column_view`'s data, and second is the column's bitmask. */ template <typename T, std::enable_if_t<not cudf::is_fixed_point<T>()>* = nullptr> std::pair<thrust::host_vector<T>, std::vector<bitmask_type>> to_host(column_view c) { thrust::host_vector<T> host_data(c.size()); CUDF_CUDA_TRY(cudaMemcpy(host_data.data(), c.data<T>(), c.size() * sizeof(T), cudaMemcpyDefault)); return {host_data, bitmask_to_host(c)}; } /** * @brief Copies the data and bitmask of a `column_view` to the host. * * This is the specialization for `fixed_point` that performs construction of a `fixed_point` from * the underlying `rep` type that is stored on the device. * * @tparam T The data type of the elements of the `column_view` * @param c the `column_view` to copy from * @return std::pair<thrust::host_vector<T>, std::vector<bitmask_type>> first is the * `column_view`'s data, and second is the column's bitmask. 
*/ template <typename T, std::enable_if_t<cudf::is_fixed_point<T>()>* = nullptr> std::pair<thrust::host_vector<T>, std::vector<bitmask_type>> to_host(column_view c) { using namespace numeric; using Rep = typename T::rep; auto host_rep_types = thrust::host_vector<Rep>(c.size()); CUDF_CUDA_TRY( cudaMemcpy(host_rep_types.data(), c.begin<Rep>(), c.size() * sizeof(Rep), cudaMemcpyDefault)); auto to_fp = [&](Rep val) { return T{scaled_integer<Rep>{val, scale_type{c.type().scale()}}}; }; auto begin = thrust::make_transform_iterator(std::cbegin(host_rep_types), to_fp); auto const host_fixed_points = thrust::host_vector<T>(begin, begin + c.size()); return {host_fixed_points, bitmask_to_host(c)}; } /** * @brief Copies the data and bitmask of a `column_view` of strings * column to the host. * * @throw cudf::logic_error if c is not strings column. * * @param c the `column_view` of strings to copy from * @return std::pair first is `std::vector` of `std::string` * and second is the column's bitmask. */ template <> inline std::pair<thrust::host_vector<std::string>, std::vector<bitmask_type>> to_host(column_view c) { thrust::host_vector<std::string> host_data(c.size()); if (c.size() > c.null_count()) { auto const scv = strings_column_view(c); auto const h_chars = cudf::detail::make_std_vector_sync<char>( cudf::device_span<char const>(scv.chars().data<char>(), scv.chars().size()), cudf::get_default_stream()); auto const h_offsets = cudf::detail::make_std_vector_sync( cudf::device_span<cudf::size_type const>(scv.offsets().data<cudf::size_type>() + scv.offset(), scv.size() + 1), cudf::get_default_stream()); // build std::string vector from chars and offsets std::transform( std::begin(h_offsets), std::end(h_offsets) - 1, std::begin(h_offsets) + 1, host_data.begin(), [&](auto start, auto end) { return std::string(h_chars.data() + start, end - start); }); } return {std::move(host_data), bitmask_to_host(c)}; } } // namespace cudf::test // Macros for showing line of failure. #define CUDF_TEST_EXPECT_COLUMN_PROPERTIES_EQUAL(lhs, rhs) \ do { \ SCOPED_TRACE(" <-- line of failure\n"); \ cudf::test::detail::expect_column_properties_equal(lhs, rhs); \ } while (0) #define CUDF_TEST_EXPECT_COLUMN_PROPERTIES_EQUIVALENT(lhs, rhs) \ do { \ SCOPED_TRACE(" <-- line of failure\n"); \ cudf::test::detail::expect_column_properties_equivalent(lhs, rhs); \ } while (0) #define CUDF_TEST_EXPECT_COLUMNS_EQUAL(lhs, rhs...) \ do { \ SCOPED_TRACE(" <-- line of failure\n"); \ cudf::test::detail::expect_columns_equal(lhs, rhs); \ } while (0) #define CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(lhs, rhs...) \ do { \ SCOPED_TRACE(" <-- line of failure\n"); \ cudf::test::detail::expect_columns_equivalent(lhs, rhs); \ } while (0) #define CUDF_TEST_EXPECT_EQUAL_BUFFERS(lhs, rhs, size_bytes) \ do { \ SCOPED_TRACE(" <-- line of failure\n"); \ cudf::test::detail::expect_equal_buffers(lhs, rhs, size_bytes); \ } while (0)
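// Illustrative sketch (not part of the header): how the comparison macros and to_host are
// typically used in a gtest test body. The fixture and fixed_width_column_wrapper come from the
// companion headers cudf_test/base_fixture.hpp and cudf_test/column_wrapper.hpp, and the header
// above is assumed to be reachable as cudf_test/column_utilities.hpp.
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>

struct ColumnUtilitiesExample : public cudf::test::BaseFixture {};

TEST_F(ColumnUtilitiesExample, CompareAndCopyToHost)
{
  // Identical values; rhs also carries an all-valid null mask.
  cudf::test::fixed_width_column_wrapper<int32_t> lhs{{1, 2, 3, 4}};
  cudf::test::fixed_width_column_wrapper<int32_t> rhs{{1, 2, 3, 4}, {1, 1, 1, 1}};

  // Equivalence relaxes the nullability check when neither column has nulls.
  CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(lhs, rhs);

  // Copy data and bitmask to the host for manual inspection.
  auto const [host_data, host_mask] = cudf::test::to_host<int32_t>(lhs);
  EXPECT_EQ(host_data.size(), 4u);
}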
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/cudf_test/cudf_gtest.hpp
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #ifdef GTEST_INCLUDE_GTEST_GTEST_H_ #error "Don't include gtest/gtest.h directly, include cudf_gtest.hpp instead" #endif /** * @file cudf_gtest.hpp * @brief Work around for GTests( <=v1.10 ) emulation of variadic templates in * @verbatim ::Testing::Types @endverbatim * * @note Instead of including `gtest/gtest.h`, all libcudf test files should * include `cudf_gtest.hpp` instead. * * Removes the 50 type limit in a type-parameterized test list. * * Uses macros to rename GTests's emulated variadic template types and then * redefines them properly. */ // @cond #if __has_include(<gtest/internal/gtest-type-util.h.pump>) // gtest doesn't provide a version header so we need to // use a file existence trick. // gtest-type-util.h.pump only exists in versions < 1.11 #define Types Types_NOT_USED #define Types0 Types0_NOT_USED #define TypeList TypeList_NOT_USED #define Templates Templates_NOT_USED #define Templates0 Templates0_NOT_USED #include <gtest/internal/gtest-type-util.h> #undef Types #undef Types0 #undef TypeList #undef Templates #undef Templates0 namespace testing { template <class... TYPES> struct Types { using type = Types; }; template <class T, class... TYPES> struct Types<T, TYPES...> { using Head = T; using Tail = Types<TYPES...>; using type = Types; }; namespace internal { using Types0 = Types<>; template <GTEST_TEMPLATE_... TYPES> struct Templates {}; template <GTEST_TEMPLATE_ HEAD, GTEST_TEMPLATE_... TAIL> struct Templates<HEAD, TAIL...> { using Head = internal::TemplateSel<HEAD>; using Tail = Templates<TAIL...>; using type = Templates; }; using Templates0 = Templates<>; template <typename T> struct TypeList { using type = Types<T>; }; template <class... TYPES> struct TypeList<Types<TYPES...>> { using type = Types<TYPES...>; }; } // namespace internal } // namespace testing #endif // gtest < 1.11 // @endcond #include <gmock/gmock.h> #include <gtest/gtest.h> /** * @brief test macro to be expects `expr` to return cudaSuccess * * This will stop the test process on failure. * * @param expr expression to be tested */ #define ASSERT_CUDA_SUCCEEDED(expr) ASSERT_EQ(cudaSuccess, expr) /** * @brief test macro to be expects `expr` to return cudaSuccess * * @param expr expression to be tested */ #define EXPECT_CUDA_SUCCEEDED(expr) EXPECT_EQ(cudaSuccess, expr) /** * @brief test macro to be expected as no exception. * * The testing is same with EXPECT_NO_THROW() in gtest. * It also outputs captured error message, useful for debugging. * * @param statement The statement to be tested */ #define CUDF_EXPECT_NO_THROW(statement) \ try { \ statement; \ } catch (std::exception & e) { \ FAIL() << "statement:" << #statement << std::endl << "reason: " << e.what() << std::endl; \ } /** * @brief test macro comparing for equality of \p lhs and \p rhs for the first \p size elements. 
*/ #define CUDF_TEST_EXPECT_VECTOR_EQUAL(lhs, rhs, size) \ do { \ for (decltype(size) i = 0; i < size; i++) \ EXPECT_EQ(lhs[i], rhs[i]) << "Mismatch at index #" << i; \ } while (0)
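// Illustrative sketch (not part of the header): how these macros are typically used in a test
// body. The cudaDeviceSynchronize() call and the vectors are placeholders for real test work.
#include <cudf_test/cudf_gtest.hpp>

#include <cuda_runtime_api.h>

#include <vector>

TEST(CudfGtestExample, MacroUsage)
{
  // Expect a CUDA runtime call to return cudaSuccess.
  EXPECT_CUDA_SUCCEEDED(cudaDeviceSynchronize());

  // Expect a statement not to throw; the caught message is printed on failure.
  CUDF_EXPECT_NO_THROW(std::vector<int>(3).clear());

  // Element-wise comparison of the first `size` elements of two containers.
  std::vector<int> lhs{1, 2, 3};
  std::vector<int> rhs{1, 2, 3};
  CUDF_TEST_EXPECT_VECTOR_EQUAL(lhs, rhs, lhs.size());
}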
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/cudf_test/stream_checking_resource_adaptor.hpp
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf_test/default_stream.hpp> #include <cudf/detail/utilities/stacktrace.hpp> #include <rmm/mr/device/device_memory_resource.hpp> #include <iostream> /** * @brief Resource that verifies that the default stream is not used in any allocation. * * @tparam Upstream Type of the upstream resource used for * allocation/deallocation. */ template <typename Upstream> class stream_checking_resource_adaptor final : public rmm::mr::device_memory_resource { public: /** * @brief Construct a new adaptor. * * @throws `cudf::logic_error` if `upstream == nullptr` * * @param upstream The resource used for allocating/deallocating device memory */ stream_checking_resource_adaptor(Upstream* upstream, bool error_on_invalid_stream, bool check_default_stream) : upstream_{upstream}, error_on_invalid_stream_{error_on_invalid_stream}, check_default_stream_{check_default_stream} { CUDF_EXPECTS(nullptr != upstream, "Unexpected null upstream resource pointer."); } stream_checking_resource_adaptor() = delete; ~stream_checking_resource_adaptor() override = default; stream_checking_resource_adaptor(stream_checking_resource_adaptor const&) = delete; stream_checking_resource_adaptor& operator=(stream_checking_resource_adaptor const&) = delete; stream_checking_resource_adaptor(stream_checking_resource_adaptor&&) noexcept = default; stream_checking_resource_adaptor& operator=(stream_checking_resource_adaptor&&) noexcept = default; /** * @brief Return pointer to the upstream resource. * * @return Pointer to the upstream resource. */ Upstream* get_upstream() const noexcept { return upstream_; } /** * @brief Checks whether the upstream resource supports streams. * * @return Whether or not the upstream resource supports streams */ bool supports_streams() const noexcept override { return upstream_->supports_streams(); } /** * @brief Query whether the resource supports the get_mem_info API. * * @return Whether or not the upstream resource supports get_mem_info */ bool supports_get_mem_info() const noexcept override { return upstream_->supports_get_mem_info(); } private: /** * @brief Allocates memory of size at least `bytes` using the upstream * resource as long as it fits inside the allocation limit. * * The returned pointer has at least 256B alignment. * * @throws `rmm::bad_alloc` if the requested allocation could not be fulfilled * by the upstream resource. 
* @throws `cudf::logic_error` if attempted on a default stream * * @param bytes The size, in bytes, of the allocation * @param stream Stream on which to perform the allocation * @return Pointer to the newly allocated memory */ void* do_allocate(std::size_t bytes, rmm::cuda_stream_view stream) override { verify_stream(stream); return upstream_->allocate(bytes, stream); } /** * @brief Free allocation of size `bytes` pointed to by `ptr` * * @throws `cudf::logic_error` if attempted on a default stream * * @param ptr Pointer to be deallocated * @param bytes Size of the allocation * @param stream Stream on which to perform the deallocation */ void do_deallocate(void* ptr, std::size_t bytes, rmm::cuda_stream_view stream) override { verify_stream(stream); upstream_->deallocate(ptr, bytes, stream); } /** * @brief Compare the upstream resource to another. * * @param other The other resource to compare to * @return Whether or not the two resources are equivalent */ bool do_is_equal(device_memory_resource const& other) const noexcept override { if (this == &other) { return true; } auto cast = dynamic_cast<stream_checking_resource_adaptor<Upstream> const*>(&other); return cast != nullptr ? upstream_->is_equal(*cast->get_upstream()) : upstream_->is_equal(other); } /** * @brief Get free and available memory from upstream resource. * * @throws `rmm::cuda_error` if unable to retrieve memory info. * @throws `cudf::logic_error` if attempted on a default stream * * @param stream Stream on which to get the mem info. * @return std::pair with available and free memory for resource */ std::pair<std::size_t, std::size_t> do_get_mem_info(rmm::cuda_stream_view stream) const override { verify_stream(stream); return upstream_->get_mem_info(stream); } /** * @brief Throw an error if the provided stream is invalid. * * A stream is invalid if: * - check_default_stream_ is true and this function is passed one of CUDA's * default stream specifiers, or * - check_default_stream_ is false and this function is passed any stream * other than the result of cudf::test::get_default_stream(). * * @throws `std::runtime_error` if provided an invalid stream */ void verify_stream(rmm::cuda_stream_view const stream) const { auto cstream{stream.value()}; auto const invalid_stream = check_default_stream_ ? ((cstream == cudaStreamDefault) || (cstream == cudaStreamLegacy) || (cstream == cudaStreamPerThread)) : (cstream != cudf::test::get_default_stream().value()); if (invalid_stream) { // Exclude the current function from stacktrace. std::cout << cudf::detail::get_stacktrace(cudf::detail::capture_last_stackframe::NO) << std::endl; if (error_on_invalid_stream_) { throw std::runtime_error("Attempted to perform an operation on an unexpected stream!"); } else { std::cout << "Attempted to perform an operation on an unexpected stream!" << std::endl; } } } Upstream* upstream_; // the upstream resource used for satisfying allocation requests bool error_on_invalid_stream_; // If true, throw an exception when the wrong stream is detected. // If false, simply print to stdout. bool check_default_stream_; // If true, throw an exception when the default stream is observed. // If false, throw an exception when anything other than // cudf::test::get_default_stream() is observed. }; /** * @brief Convenience factory to return a `stream_checking_resource_adaptor` around the * upstream resource `upstream`. * * @tparam Upstream Type of the upstream `device_memory_resource`. 
* @param upstream Pointer to the upstream resource * @param error_on_invalid_stream If true, throw when an invalid stream is detected; if false, print a message to stdout instead * @param check_default_stream If true, flag CUDA's default stream specifiers as invalid; if false, flag any stream other than cudf::test::get_default_stream() * @return A stream_checking_resource_adaptor wrapping `upstream` */ template <typename Upstream> stream_checking_resource_adaptor<Upstream> make_stream_checking_resource_adaptor( Upstream* upstream, bool error_on_invalid_stream, bool check_default_stream) { return stream_checking_resource_adaptor<Upstream>{ upstream, error_on_invalid_stream, check_default_stream}; }
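// Illustrative sketch (not part of the header): wiring the adaptor in front of the current device
// resource by hand, mirroring what CUDF_TEST_PROGRAM_MAIN() in base_fixture.hpp does; the flag
// values chosen here are arbitrary.
#include <cudf_test/stream_checking_resource_adaptor.hpp>

#include <rmm/mr/device/per_device_resource.hpp>

int main()
{
  auto* upstream = rmm::mr::get_current_device_resource();

  // Throw whenever an allocation or deallocation is attempted on a CUDA default stream.
  auto adaptor = make_stream_checking_resource_adaptor(
    upstream, /*error_on_invalid_stream=*/true, /*check_default_stream=*/true);
  rmm::mr::set_current_device_resource(&adaptor);

  // ... run the code under test here ...

  // Restore the upstream resource before the adaptor goes out of scope.
  rmm::mr::set_current_device_resource(upstream);
  return 0;
}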
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/cudf_test/io_metadata_utilities.hpp
/* * Copyright (c) 2021-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/io/types.hpp> namespace cudf::test { /** * @brief Ensures that writer input metadata and reader output metadata match for the root columns * as well as for all descendants (recursively) */ void expect_metadata_equal(cudf::io::table_input_metadata in_meta, cudf::io::table_metadata out_meta); /** * @brief Ensures that the metadata of two tables matches for the root columns as well as for all * descendants (recursively) */ void expect_metadata_equal(cudf::io::table_metadata lhs_meta, cudf::io::table_metadata rhs_meta); } // namespace cudf::test
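// Illustrative sketch (not part of the header): a helper a test might use to compare the metadata
// passed to a writer against the metadata returned by the corresponding reader. How in_meta and
// out_meta are produced (e.g. a Parquet or ORC round trip) is left to the caller.
#include <cudf_test/io_metadata_utilities.hpp>

#include <cudf/io/types.hpp>

void check_metadata_round_trip(cudf::io::table_input_metadata const& in_meta,
                               cudf::io::table_metadata const& out_meta)
{
  cudf::test::expect_metadata_equal(in_meta, out_meta);
}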
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/cudf_test/file_utilities.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstdio> #include <cstdlib> #include <filesystem> #include <string> #include <ftw.h> #include <cudf/utilities/error.hpp> /** * @brief RAII class for creating a temporary directory. * */ class temp_directory { std::string _path; public: /** * @brief Construct a new temp directory object * * @param base_name The base name of the temporary directory */ temp_directory(std::string const& base_name) { std::string dir_template{std::filesystem::temp_directory_path().string()}; if (auto env_p = std::getenv("WORKSPACE")) dir_template = env_p; dir_template += "/" + base_name + ".XXXXXX"; auto const tmpdirptr = mkdtemp(const_cast<char*>(dir_template.data())); CUDF_EXPECTS(tmpdirptr != nullptr, "Temporary directory creation failure: " + dir_template); _path = dir_template + "/"; } temp_directory& operator=(temp_directory const&) = delete; temp_directory(temp_directory const&) = delete; /** * @brief Move assignment operator * * @return Reference to this object */ temp_directory& operator=(temp_directory&&) = default; temp_directory(temp_directory&&) = default; ///< Move constructor ~temp_directory() { std::filesystem::remove_all(std::filesystem::path{_path}); } /** * @brief Returns the path of the temporary directory * * @return string path of the temporary directory */ [[nodiscard]] std::string const& path() const { return _path; } };
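// Illustrative sketch (not part of the header): writing a scratch file into a temp_directory. The
// directory is created under $WORKSPACE (if set) or the system temp path and is removed when the
// object is destroyed; the file name used here is arbitrary.
#include <cudf_test/file_utilities.hpp>

#include <fstream>

int main()
{
  temp_directory const tmpdir{"example_test"};

  // path() ends with a trailing '/', so file names can be appended directly.
  std::ofstream out(tmpdir.path() + "data.csv");
  out << "a,b\n1,2\n";
  return 0;
}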
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/cudf_test/tdigest_utilities.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf_test/column_wrapper.hpp> #include <cudf/detail/tdigest/tdigest.hpp> #include <cudf/detail/unary.hpp> #include <cudf/groupby.hpp> #include <cudf/tdigest/tdigest_column_view.hpp> #include <cudf/utilities/default_stream.hpp> #include <rmm/exec_policy.hpp> #include <rmm/mr/device/per_device_resource.hpp> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/extrema.h> #include <thrust/host_vector.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/transform.h> #include <thrust/tuple.h> // for use with groupby and reduction aggregation tests. namespace cudf { namespace test { using expected_value = thrust::tuple<size_type, double, double>; /** * @brief Device functor to compute min of a sequence of values serially. */ template <typename T> struct column_min { /** * @brief Computes the min of a sequence of values serially. * * @param vals The sequence of values to compute the min of * @return The min value */ __device__ double operator()(device_span<T const> vals) { return static_cast<double>(*thrust::min_element(thrust::seq, vals.begin(), vals.end())); } }; /** * @brief Device functor to compute max of a sequence of values serially. */ template <typename T> struct column_max { /** * @brief Computes the max of a sequence of values serially. * * @param vals The sequence of values to compute the max of * @return The max value */ __device__ double operator()(device_span<T const> vals) { return static_cast<double>(*thrust::max_element(thrust::seq, vals.begin(), vals.end())); } }; /** * @brief Functor to generate a tdigest. */ struct tdigest_gen { // @cond template < typename T, typename Func, typename std::enable_if_t<cudf::is_numeric<T>() || cudf::is_fixed_point<T>()>* = nullptr> std::unique_ptr<column> operator()(Func op, column_view const& values, int delta) { return op(values, delta); } template < typename T, typename Func, typename std::enable_if_t<!cudf::is_numeric<T>() && !cudf::is_fixed_point<T>()>* = nullptr> std::unique_ptr<column> operator()(Func op, column_view const& values, int delta) { CUDF_FAIL("Invalid tdigest test type"); } // @endcond }; template <typename T> inline T frand() { return static_cast<T>(rand()) / static_cast<T>(RAND_MAX); } template <typename T> inline T rand_range(T min, T max) { return min + static_cast<T>(frand<T>() * (max - min)); } inline std::unique_ptr<column> generate_typed_percentile_distribution( std::vector<double> const& buckets, std::vector<int> const& sizes, data_type t, bool sorted = false) { srand(0); std::vector<double> values; size_t total_size = std::reduce(sizes.begin(), sizes.end(), 0); values.reserve(total_size); for (size_t idx = 0; idx < sizes.size(); idx++) { double min = idx == 0 ? 
0.0f : buckets[idx - 1]; double max = buckets[idx]; for (int v_idx = 0; v_idx < sizes[idx]; v_idx++) { values.push_back(rand_range(min, max)); } } if (sorted) { std::sort(values.begin(), values.end()); } cudf::test::fixed_width_column_wrapper<double> src(values.begin(), values.end()); return cudf::cast(src, t); } // "standardized" means the parameters sent into generate_typed_percentile_distribution. the intent // is to provide a standardized set of inputs for use with tdigest generation tests and // percentile_approx tests. std::vector<double> // buckets{10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0}; std::vector<int> // sizes{50000, 50000, 50000, 50000, 50000, 100000, 100000, 100000, 100000, 100000}; inline std::unique_ptr<column> generate_standardized_percentile_distribution( data_type t = data_type{type_id::FLOAT64}, bool sorted = false) { std::vector<double> buckets{10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0, 90.0f, 100.0f}; std::vector<int> b_sizes{ 50000, 50000, 50000, 50000, 50000, 100000, 100000, 100000, 100000, 100000}; return generate_typed_percentile_distribution(buckets, b_sizes, t, sorted); } /** * @brief Compare a tdigest column against a sampling of expected values. */ void tdigest_sample_compare(cudf::tdigest::tdigest_column_view const& tdv, std::vector<expected_value> const& h_expected); /** * @brief Compare the min/max values of a tdigest against inputs. */ template <typename T> void tdigest_minmax_compare(cudf::tdigest::tdigest_column_view const& tdv, column_view const& input_values) { // verify min/max thrust::host_vector<device_span<T const>> h_spans; h_spans.push_back({input_values.begin<T>(), static_cast<size_t>(input_values.size())}); auto spans = cudf::detail::make_device_uvector_async( h_spans, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); auto expected_min = cudf::make_fixed_width_column( data_type{type_id::FLOAT64}, spans.size(), mask_state::UNALLOCATED); thrust::transform(rmm::exec_policy(cudf::get_default_stream()), spans.begin(), spans.end(), expected_min->mutable_view().template begin<double>(), column_min<T>{}); column_view result_min(data_type{type_id::FLOAT64}, tdv.size(), tdv.min_begin(), nullptr, 0); CUDF_TEST_EXPECT_COLUMNS_EQUAL(result_min, *expected_min); auto expected_max = cudf::make_fixed_width_column( data_type{type_id::FLOAT64}, spans.size(), mask_state::UNALLOCATED); thrust::transform(rmm::exec_policy(cudf::get_default_stream()), spans.begin(), spans.end(), expected_max->mutable_view().template begin<double>(), column_max<T>{}); column_view result_max(data_type{type_id::FLOAT64}, tdv.size(), tdv.max_begin(), nullptr, 0); CUDF_TEST_EXPECT_COLUMNS_EQUAL(result_max, *expected_max); } /// Expected values for tdigest tests struct expected_tdigest { // @cond column_view mean; column_view weight; double min, max; // @endcond }; /** * @brief Create an expected tdigest column given component inputs. */ std::unique_ptr<column> make_expected_tdigest_column(std::vector<expected_tdigest> const& groups); // shared test for groupby/reduction. template <typename T, typename Func> void tdigest_simple_aggregation(Func op) { // create a tdigest that has far fewer values in it than the delta value. 
this should result // in every value remaining uncompressed cudf::test::fixed_width_column_wrapper<T> values{126, 15, 1, 99, 67}; int const delta = 1000; auto result = cudf::type_dispatcher( static_cast<column_view>(values).type(), tdigest_gen{}, op, values, delta); cudf::test::fixed_width_column_wrapper<T> raw_mean({1, 15, 67, 99, 126}); cudf::test::fixed_width_column_wrapper<double> weight{1, 1, 1, 1, 1}; auto mean = cudf::cast(raw_mean, data_type{type_id::FLOAT64}); double const min = 1; double const max = 126; auto expected = make_expected_tdigest_column({{*mean, weight, static_cast<double>(static_cast<T>(min)), static_cast<double>(static_cast<T>(max))}}); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*result, *expected); } // shared test for groupby/reduction. template <typename T, typename Func> void tdigest_simple_with_nulls_aggregation(Func op) { // create a tdigest that has far fewer values in it than the delta value. this should result // in every value remaining uncompressed cudf::test::fixed_width_column_wrapper<T> values{{122, 15, 1, 99, 67, 101, 100, 84, 44, 2}, {1, 0, 1, 0, 1, 0, 1, 0, 1, 0}}; int const delta = 1000; auto result = cudf::type_dispatcher( static_cast<column_view>(values).type(), tdigest_gen{}, op, values, delta); cudf::test::fixed_width_column_wrapper<T> raw_mean({1, 44, 67, 100, 122}); cudf::test::fixed_width_column_wrapper<double> weight{1, 1, 1, 1, 1}; auto mean = cudf::cast(raw_mean, data_type{type_id::FLOAT64}); double const min = 1; double const max = 122; auto expected = make_expected_tdigest_column({{*mean, weight, static_cast<double>(static_cast<T>(min)), static_cast<double>(static_cast<T>(max))}}); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*result, *expected); } // shared test for groupby/reduction. template <typename T, typename Func> void tdigest_simple_all_nulls_aggregation(Func op) { // create a tdigest that has far fewer values in it than the delta value. this should result // in every value remaining uncompressed cudf::test::fixed_width_column_wrapper<T> values{{122, 15, 1, 99, 67, 101, 100, 84, 44, 2}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}; int const delta = 1000; auto result = cudf::type_dispatcher( static_cast<column_view>(values).type(), tdigest_gen{}, op, values, delta); // NOTE: an empty tdigest column still has 1 row. auto expected = cudf::tdigest::detail::make_empty_tdigest_column( cudf::get_default_stream(), rmm::mr::get_current_device_resource()); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*result, *expected); } // shared test for groupby/reduction. template <typename Func> void tdigest_simple_large_input_double_aggregation(Func op) { // these tests are being done explicitly because of the way we have to precompute the correct // answers. since the input values generated by the generate_distribution() function below are // cast to specific types -before- being sent into the aggregation, I can't (safely) just use the // expected values that you get when using doubles all the way through. so I have to pregenerate // the correct answers for each type by hand. 
so, we'll choose a reasonable subset (double, // decimal, int, bool) auto values = generate_standardized_percentile_distribution(data_type{type_id::FLOAT64}); // compare against a sample of known/expected values (which themselves were verified against the // Arrow implementation) // delta 1000 { int const delta = 1000; auto result = cudf::type_dispatcher(values->view().type(), tdigest_gen{}, op, *values, delta); std::vector<expected_value> expected{{0, 0.00040692343794663995, 7}, {10, 0.16234555627091204477, 153}, {59, 5.12764811246045937310, 858}, {250, 62.54581814492237157310, 2356}, {368, 87.85834376680742252574, 1735}, {409, 94.07685720279611985006, 1272}, {491, 99.94197663121231300920, 130}, {500, 99.99969880795092080916, 2}}; cudf::tdigest::tdigest_column_view tdv(*result); tdigest_sample_compare(tdv, expected); // verify min/max tdigest_minmax_compare<double>(tdv, *values); } // delta 100 { int const delta = 100; auto result = cudf::type_dispatcher(values->view().type(), tdigest_gen{}, op, *values, delta); std::vector<expected_value> expected{{0, 0.07265722021410986331, 739}, {7, 8.19766194442652640362, 10693}, {16, 36.82277869518204482802, 20276}, {29, 72.95424834129075009059, 22623}, {38, 90.61229683516096145013, 15581}, {46, 99.07283498858802772702, 5142}, {50, 99.99970905482754801596, 1}}; cudf::tdigest::tdigest_column_view tdv(*result); tdigest_sample_compare(tdv, expected); // verify min/max tdigest_minmax_compare<double>(tdv, *values); } // delta 10 { int const delta = 10; auto result = cudf::type_dispatcher(values->view().type(), tdigest_gen{}, op, *values, delta); std::vector<expected_value> expected{{0, 7.15508346777729631327, 71618}, {1, 33.04971680740474226923, 187499}, {2, 62.50566666553867634093, 231762}, {3, 83.46216572053654658703, 187500}, {4, 96.42204425201593664951, 71620}, {5, 99.99970905482754801596, 1}}; cudf::tdigest::tdigest_column_view tdv(*result); tdigest_sample_compare(tdv, expected); // verify min/max tdigest_minmax_compare<double>(tdv, *values); } } // shared test for groupby/reduction. template <typename Func> void tdigest_simple_large_input_int_aggregation(Func op) { // these tests are being done explicitly because of the way we have to precompute the correct // answers. since the input values generated by the generate_distribution() function below are // cast to specific types -before- being sent into the aggregation, I can't (safely) just use the // expected values that you get when using doubles all the way through. so I have to pregenerate // the correct answers for each type by hand. 
so, we'll choose a reasonable subset (double, // decimal, int, bool) auto values = generate_standardized_percentile_distribution(data_type{type_id::INT32}); // compare against a sample of known/expected values (which themselves were verified against the // Arrow implementation) // delta 1000 { int const delta = 1000; auto result = cudf::type_dispatcher(values->view().type(), tdigest_gen{}, op, *values, delta); std::vector<expected_value> expected{{0, 0, 7}, {14, 0, 212}, {26, 0.83247422680412408447, 388}, {44, 2, 648}, {45, 2.42598187311178170589, 662}, {342, 82.75190258751908345403, 1971}, {383, 90, 1577}, {417, 94.88376068376066996279, 1170}, {418, 95, 1157}, {479, 99, 307}, {500, 99, 2}}; cudf::tdigest::tdigest_column_view tdv(*result); tdigest_sample_compare(tdv, expected); // verify min/max tdigest_minmax_compare<int>(tdv, *values); } // delta 100 { int const delta = 100; auto result = cudf::type_dispatcher(values->view().type(), tdigest_gen{}, op, *values, delta); std::vector<expected_value> expected{{0, 0, 739}, {7, 7.71486018890863167741, 10693}, {16, 36.32491615703294485229, 20276}, {29, 72.44392874508245938614, 22623}, {38, 90.14209614273795523332, 15581}, {46, 98.64041229093737683797, 5142}, {50, 99, 1}}; cudf::tdigest::tdigest_column_view tdv(*result); tdigest_sample_compare(tdv, expected); // verify min/max tdigest_minmax_compare<int>(tdv, *values); } // delta 10 { int const delta = 10; auto result = cudf::type_dispatcher(values->view().type(), tdigest_gen{}, op, *values, delta); std::vector<expected_value> expected{{0, 6.66025300902007799664, 71618}, {1, 32.54912826201739051157, 187499}, {2, 62.00734805533262772315, 231762}, {3, 82.96355733333332693746, 187500}, {4, 95.91280368612116546956, 71620}, {5, 99, 1}}; cudf::tdigest::tdigest_column_view tdv(*result); tdigest_sample_compare(tdv, expected); // verify min/max tdigest_minmax_compare<int>(tdv, *values); } } // shared test for groupby/reduction. template <typename Func> void tdigest_simple_large_input_decimal_aggregation(Func op) { // these tests are being done explicitly because of the way we have to precompute the correct // answers. since the input values generated by the generate_distribution() function below are // cast to specific types -before- being sent into the aggregation, I can't (safely) just use the // expected values that you get when using doubles all the way through. so I have to pregenerate // the correct answers for each type by hand. 
so, we'll choose a reasonable subset (double, // decimal, int, bool) auto values = generate_standardized_percentile_distribution(data_type{type_id::DECIMAL32, -4}); auto cast_values = cudf::cast(*values, data_type{type_id::FLOAT64}); // compare against a sample of known/expected values (which themselves were verified against the // Arrow implementation) // delta 1000 { int const delta = 1000; auto result = cudf::type_dispatcher(values->view().type(), tdigest_gen{}, op, *values, delta); std::vector<expected_value> expected{{0, 0.00035714285714285709, 7}, {10, 0.16229738562091505782, 153}, {59, 5.12759696969697031932, 858}, {250, 62.54576854838715860296, 2356}, {368, 87.85829446685879418055, 1735}, {409, 94.07680636792450457051, 1272}, {491, 99.94192461538463589932, 130}, {500, 99.99965000000000259206, 2}}; cudf::tdigest::tdigest_column_view tdv(*result); tdigest_sample_compare(tdv, expected); // verify min/max tdigest_minmax_compare<double>(tdv, *cast_values); } // delta 100 { int const delta = 100; auto result = cudf::type_dispatcher(values->view().type(), tdigest_gen{}, op, *values, delta); std::vector<expected_value> expected{{0, 0.07260811907983763525, 739}, {7, 8.19761183016926864298, 10693}, {16, 36.82272891595975750079, 20276}, {29, 72.95419827167043536065, 22623}, {38, 90.61224673640975879607, 15581}, {46, 99.07278498638662256326, 5142}, {50, 99.99970000000000425189, 1}}; cudf::tdigest::tdigest_column_view tdv(*result); tdigest_sample_compare(tdv, expected); // verify min/max tdigest_minmax_compare<double>(tdv, *cast_values); } // delta 10 { int const delta = 10; auto result = cudf::type_dispatcher(values->view().type(), tdigest_gen{}, op, *values, delta); std::vector<expected_value> expected{{0, 7.15503361864335740705, 71618}, {1, 33.04966679715625588187, 187499}, {2, 62.50561666407782013266, 231762}, {3, 83.46211575573336460820, 187500}, {4, 96.42199425300195514410, 71620}, {5, 99.99970000000000425189, 1}}; cudf::tdigest::tdigest_column_view tdv(*result); tdigest_sample_compare(tdv, expected); // verify min/max tdigest_minmax_compare<double>(tdv, *cast_values); } } // Note: there is no need to test different types here as the internals of a tdigest are always // the same regardless of input. 
template <typename Func, typename MergeFunc> void tdigest_merge_simple(Func op, MergeFunc merge_op) { auto values = generate_standardized_percentile_distribution(data_type{type_id::FLOAT64}); CUDF_EXPECTS(values->size() == 750000, "Unexpected distribution size"); auto split_values = cudf::split(*values, {250000, 500000}); int const delta = 1000; // generate separate digests std::vector<std::unique_ptr<column>> parts; auto iter = thrust::make_counting_iterator(0); std::transform( iter, iter + split_values.size(), std::back_inserter(parts), [&split_values, delta, op](int i) { return op(split_values[i], delta); }); std::vector<column_view> part_views; std::transform(parts.begin(), parts.end(), std::back_inserter(part_views), [](std::unique_ptr<column> const& col) { return col->view(); }); // merge delta = 1000 { int const merge_delta = 1000; // merge them auto merge_input = cudf::concatenate(part_views); auto result = merge_op(*merge_input, merge_delta); cudf::tdigest::tdigest_column_view tdv(*result); // verify centroids std::vector<expected_value> expected{{0, 0.00013945158577498588, 2}, {10, 0.04804393446447510763, 50}, {59, 1.68846964439246893797, 284}, {250, 33.36323141295877547918, 1479}, {368, 65.36307727957283475462, 2292}, {409, 73.95399208218296394080, 1784}, {490, 87.67566167909056673579, 1570}, {491, 87.83119717763385381204, 1570}, {500, 89.24891838334393412424, 1555}, {578, 95.87182997389099625707, 583}, {625, 98.20470345147104751504, 405}, {700, 99.96818381983835877236, 56}, {711, 99.99970905482754801596, 1}}; tdigest_sample_compare(tdv, expected); // verify min/max tdigest_minmax_compare<double>(tdv, *values); } } // shared test for groupby/reduction. template <typename MergeFunc> void tdigest_merge_empty(MergeFunc merge_op) { // 3 empty tdigests all in the same group auto a = cudf::tdigest::detail::make_empty_tdigest_column(cudf::get_default_stream(), rmm::mr::get_current_device_resource()); auto b = cudf::tdigest::detail::make_empty_tdigest_column(cudf::get_default_stream(), rmm::mr::get_current_device_resource()); auto c = cudf::tdigest::detail::make_empty_tdigest_column(cudf::get_default_stream(), rmm::mr::get_current_device_resource()); std::vector<column_view> cols; cols.push_back(*a); cols.push_back(*b); cols.push_back(*c); auto values = cudf::concatenate(cols); auto const delta = 1000; auto result = merge_op(*values, delta); auto expected = cudf::tdigest::detail::make_empty_tdigest_column( cudf::get_default_stream(), rmm::mr::get_current_device_resource()); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*expected, *result); } } // namespace test } // namespace cudf
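// Illustrative sketch (not part of the header): one possible `op` for the shared tests above,
// built on a group-by tdigest aggregation with a single group. The group-by wiring shown here is
// an assumption about how a caller might produce a digest column; reduction-based callers would
// supply a different functor with the same (values, delta) signature.
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/tdigest_utilities.cuh>

#include <cudf/aggregation.hpp>
#include <cudf/groupby.hpp>
#include <cudf/table/table_view.hpp>

#include <thrust/iterator/constant_iterator.h>

#include <memory>
#include <utility>
#include <vector>

namespace {
std::unique_ptr<cudf::column> groupby_tdigest(cudf::column_view const& values, int delta)
{
  // All rows share one key, so the aggregation produces a single digest.
  auto zero = thrust::make_constant_iterator(int32_t{0});
  cudf::test::fixed_width_column_wrapper<int32_t> keys(zero, zero + values.size());

  cudf::groupby::groupby gb(cudf::table_view({keys}));
  std::vector<cudf::groupby::aggregation_request> requests(1);
  requests[0].values = values;
  requests[0].aggregations.push_back(
    cudf::make_tdigest_aggregation<cudf::groupby_aggregation>(delta));
  auto result = gb.aggregate(requests);
  return std::move(result.second[0].results[0]);
}
}  // namespace

TEST(TDigestExample, SimpleAggregation)
{
  cudf::test::tdigest_simple_aggregation<int32_t>(groupby_tdigest);
}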
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/cudf_test/base_fixture.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <random> #include <cudf/utilities/default_stream.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/traits.hpp> #include <cudf_test/cudf_gtest.hpp> #include <cudf_test/cxxopts.hpp> #include <cudf_test/default_stream.hpp> #include <cudf_test/file_utilities.hpp> #include <cudf_test/stream_checking_resource_adaptor.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/mr/device/arena_memory_resource.hpp> #include <rmm/mr/device/binning_memory_resource.hpp> #include <rmm/mr/device/cuda_async_memory_resource.hpp> #include <rmm/mr/device/cuda_memory_resource.hpp> #include <rmm/mr/device/managed_memory_resource.hpp> #include <rmm/mr/device/owning_wrapper.hpp> #include <rmm/mr/device/per_device_resource.hpp> #include <rmm/mr/device/pool_memory_resource.hpp> namespace cudf { namespace test { /** * @brief Base test fixture class from which all libcudf tests should inherit. * * Example: * ``` * class MyTestFixture : public cudf::test::BaseFixture {}; * ``` */ class BaseFixture : public ::testing::Test { rmm::mr::device_memory_resource* _mr{rmm::mr::get_current_device_resource()}; public: /** * @brief Returns pointer to `device_memory_resource` that should be used for * all tests inheriting from this fixture * @return pointer to memory resource */ rmm::mr::device_memory_resource* mr() { return _mr; } }; /** * @brief Base test fixture that takes a parameter. * * Example: * ``` * class MyIntTestFixture : public cudf::test::BaseFixtureWithParam<int> {}; * ``` */ template <typename T> class BaseFixtureWithParam : public ::testing::TestWithParam<T> { rmm::mr::device_memory_resource* _mr{rmm::mr::get_current_device_resource()}; public: /** * @brief Returns pointer to `device_memory_resource` that should be used for * all tests inheriting from this fixture * @return pointer to memory resource */ rmm::mr::device_memory_resource* mr() const { return _mr; } }; template <typename T, typename Enable = void> struct uniform_distribution_impl {}; template <typename T> struct uniform_distribution_impl<T, std::enable_if_t<std::is_integral_v<T>>> { using type = std::uniform_int_distribution<T>; }; template <> struct uniform_distribution_impl<bool> { using type = std::bernoulli_distribution; }; template <typename T> struct uniform_distribution_impl<T, std::enable_if_t<std::is_floating_point_v<T>>> { using type = std::uniform_real_distribution<T>; }; template <typename T> struct uniform_distribution_impl< T, std::enable_if_t<cudf::is_chrono<T>() or cudf::is_fixed_point<T>()>> { using type = std::uniform_int_distribution<typename T::rep>; }; template <typename T> using uniform_distribution_t = typename uniform_distribution_impl<T>::type; namespace detail { /** * @brief Returns an incrementing seed value for use with UniformRandomGenerator. 
* * The intent behind this is to handle the following case: * * auto lhs = make_random_wrapped_column<TypeLhs>(10000); * auto rhs = make_random_wrapped_column<TypeRhs>(10000); * * Previously, the binops test framework had a persistent UniformRandomGenerator * that would produce unique values across two calls to make_random_wrapped_column() * like this. However that code has been changed and each call to make_random_wrapped_column() * now uses a local UniformRandomGenerator object. If we didn't generate an incrementing seed * for each one, every call to make_random_wrapped_column() would return the same values. This * fixes that case and also leaves results across multiple test runs deterministic. */ uint64_t random_generator_incrementing_seed(); } // namespace detail /** * @brief Provides uniform random number generation. * * It is often useful in testing to have a convenient source of random numbers. * This class is intended to serve as a base class for test fixtures to provide * random number generation. `UniformRandomGenerator::generate()` will generate * the next random number in the sequence. * * Example: * ```c++ * UniformRandomGenerator g(0,100); * g.generate(); // Returns a random number in the range [0,100] * ``` * * @tparam T The type of values that will be generated. */ template <typename T = cudf::size_type, typename Engine = std::default_random_engine> class UniformRandomGenerator { public: using uniform_distribution = uniform_distribution_t<T>; ///< The uniform distribution type for T. UniformRandomGenerator() : rng{std::mt19937_64{detail::random_generator_incrementing_seed()}()} {} /** * @brief Construct a new Uniform Random Generator to generate uniformly * random numbers in the range `[upper,lower]` * * @param lower Lower bound of the range * @param upper Upper bound of the desired range * @param seed seed to initialize generator with */ template <typename TL = T, std::enable_if_t<cudf::is_numeric<TL>() && !cudf::is_boolean<TL>()>* = nullptr> UniformRandomGenerator(T lower, T upper, uint64_t seed = detail::random_generator_incrementing_seed()) : dist{lower, upper}, rng{std::mt19937_64{seed}()} { } /** * @brief Construct a new Uniform Random Generator to generate uniformly random booleans * * @param lower ignored * @param upper ignored * @param seed seed to initialize generator with */ template <typename TL = T, std::enable_if_t<cudf::is_boolean<TL>()>* = nullptr> UniformRandomGenerator(T lower, T upper, uint64_t seed = detail::random_generator_incrementing_seed()) : dist{0.5}, rng{std::mt19937_64{seed}()} { } /** * @brief Construct a new Uniform Random Generator to generate uniformly * random numbers in the range `[upper,lower]` * * @param lower Lower bound of the range * @param upper Upper bound of the desired range * @param seed seed to initialize generator with */ template <typename TL = T, std::enable_if_t<cudf::is_chrono<TL>() or cudf::is_fixed_point<TL>()>* = nullptr> UniformRandomGenerator(typename TL::rep lower, typename TL::rep upper, uint64_t seed = detail::random_generator_incrementing_seed()) : dist{lower, upper}, rng{std::mt19937_64{seed}()} { } /** * @brief Returns the next random number. * * @return generated random number */ template <typename TL = T, std::enable_if_t<!cudf::is_timestamp<TL>()>* = nullptr> T generate() { return T{dist(rng)}; } /** * @brief Returns the next random number. 
* @return generated random number */ template <typename TL = T, std::enable_if_t<cudf::is_timestamp<TL>()>* = nullptr> T generate() { return T{typename T::duration{dist(rng)}}; } private: uniform_distribution dist{}; ///< Distribution Engine rng; ///< Random generator }; /** * @brief Provides temporary directory for temporary test files. * * Example: * ```c++ * ::testing::Environment* const temp_env = * ::testing::AddGlobalTestEnvironment(new TempDirTestEnvironment); * ``` */ class TempDirTestEnvironment : public ::testing::Environment { temp_directory const tmpdir{"gtest"}; public: /** * @brief Get directory path to use for temporary files * * @return std::string The temporary directory path */ std::string get_temp_dir() { return tmpdir.path(); } /** * @brief Get a temporary filepath to use for the specified filename * * @param filename name of the file to be placed in temporary directory. * @return std::string The temporary filepath */ std::string get_temp_filepath(std::string filename) { return tmpdir.path() + filename; } }; /// MR factory functions inline auto make_cuda() { return std::make_shared<rmm::mr::cuda_memory_resource>(); } inline auto make_async() { return std::make_shared<rmm::mr::cuda_async_memory_resource>(); } inline auto make_managed() { return std::make_shared<rmm::mr::managed_memory_resource>(); } inline auto make_pool() { auto const [free, total] = rmm::detail::available_device_memory(); auto min_alloc = rmm::detail::align_down(std::min(free, total / 10), rmm::detail::CUDA_ALLOCATION_ALIGNMENT); return rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(make_cuda(), min_alloc); } inline auto make_arena() { return rmm::mr::make_owning_wrapper<rmm::mr::arena_memory_resource>(make_cuda()); } inline auto make_binning() { auto pool = make_pool(); // Add a binning_memory_resource with fixed-size bins of sizes 256, 512, 1024, 2048 and 4096KiB // Larger allocations will use the pool resource auto mr = rmm::mr::make_owning_wrapper<rmm::mr::binning_memory_resource>(pool, 18, 22); return mr; } /** * @brief Creates a memory resource for the unit test environment * given the name of the allocation mode. * * The returned resource instance must be kept alive for the duration of * the tests. Attaching the resource to a TestEnvironment causes * issues since the environment objects are not destroyed until * after the runtime is shutdown. * * @throw cudf::logic_error if the `allocation_mode` is unsupported. * * @param allocation_mode String identifies which resource type. * Accepted types are "pool", "cuda", and "managed" only. * @return Memory resource instance */ inline std::shared_ptr<rmm::mr::device_memory_resource> create_memory_resource( std::string const& allocation_mode) { if (allocation_mode == "binning") return make_binning(); if (allocation_mode == "cuda") return make_cuda(); if (allocation_mode == "async") return make_async(); if (allocation_mode == "pool") return make_pool(); if (allocation_mode == "arena") return make_arena(); if (allocation_mode == "managed") return make_managed(); CUDF_FAIL("Invalid RMM allocation mode: " + allocation_mode); } } // namespace test } // namespace cudf /** * @brief Parses the cuDF test command line options. * * Currently only supports 'rmm_mode' string parameter, which set the rmm * allocation mode. The default value of the parameter is 'pool'. * Environment variable 'CUDF_TEST_RMM_MODE' can also be used to set the rmm * allocation mode. If both are set, the value of 'rmm_mode' string parameter * takes precedence. 
* * @return Parsing results in the form of unordered map */ inline auto parse_cudf_test_opts(int argc, char** argv) { try { cxxopts::Options options(argv[0], " - cuDF tests command line options"); char const* env_rmm_mode = std::getenv("GTEST_CUDF_RMM_MODE"); // Overridden by CLI options char const* env_stream_mode = std::getenv("GTEST_CUDF_STREAM_MODE"); // Overridden by CLI options char const* env_stream_error_mode = std::getenv("GTEST_CUDF_STREAM_ERROR_MODE"); // Overridden by CLI options auto default_rmm_mode = env_rmm_mode ? env_rmm_mode : "pool"; auto default_stream_mode = env_stream_mode ? env_stream_mode : "default"; auto default_stream_error_mode = env_stream_error_mode ? env_stream_error_mode : "error"; options.allow_unrecognised_options().add_options()( "rmm_mode", "RMM allocation mode", cxxopts::value<std::string>()->default_value(default_rmm_mode)); // `new_cudf_default` means that cudf::get_default_stream has been patched, // so we raise errors anywhere that a CUDA default stream is observed // instead of cudf::get_default_stream(). This corresponds to compiling // identify_stream_usage with STREAM_MODE_TESTING=OFF (must do both at the // same time). // `new_testing_default` means that cudf::test::get_default_stream has been // patched, so we raise errors anywhere that _any_ other stream is // observed. This corresponds to compiling identify_stream_usage with // STREAM_MODE_TESTING=ON (must do both at the same time). options.allow_unrecognised_options().add_options()( "stream_mode", "Whether to use a non-default stream", cxxopts::value<std::string>()->default_value(default_stream_mode)); options.allow_unrecognised_options().add_options()( "stream_error_mode", "Whether to error or print to stdout when a non-default stream is observed and stream_mode " "is not \"default\"", cxxopts::value<std::string>()->default_value(default_stream_error_mode)); return options.parse(argc, argv); } catch (cxxopts::OptionException const& e) { CUDF_FAIL("Error parsing command line options"); } } /** * @brief Macro that defines main function for gtest programs that use rmm * * Should be included in every test program that uses rmm allocators since * it maintains the lifespan of the rmm default memory resource. * This `main` function is a wrapper around the google test generated `main`, * maintaining the original functionality. In addition, this custom `main` * function parses the command line to customize test behavior, like the * allocation mode used for creating the default memory resource. */ #define CUDF_TEST_PROGRAM_MAIN() \ int main(int argc, char** argv) \ { \ ::testing::InitGoogleTest(&argc, argv); \ auto const cmd_opts = parse_cudf_test_opts(argc, argv); \ auto const rmm_mode = cmd_opts["rmm_mode"].as<std::string>(); \ auto resource = cudf::test::create_memory_resource(rmm_mode); \ rmm::mr::set_current_device_resource(resource.get()); \ \ auto const stream_mode = cmd_opts["stream_mode"].as<std::string>(); \ if ((stream_mode == "new_cudf_default") || (stream_mode == "new_testing_default")) { \ auto const stream_error_mode = cmd_opts["stream_error_mode"].as<std::string>(); \ auto const error_on_invalid_stream = (stream_error_mode == "error"); \ auto const check_default_stream = (stream_mode == "new_cudf_default"); \ auto adaptor = make_stream_checking_resource_adaptor( \ resource.get(), error_on_invalid_stream, check_default_stream); \ rmm::mr::set_current_device_resource(&adaptor); \ return RUN_ALL_TESTS(); \ } \ \ return RUN_ALL_TESTS(); \ }
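// Illustrative sketch (not part of the header): a minimal test program built on BaseFixture,
// UniformRandomGenerator and CUDF_TEST_PROGRAM_MAIN(); the bounds used for the generator are
// arbitrary.
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/cudf_gtest.hpp>

struct ExampleFixture : public cudf::test::BaseFixture {};

TEST_F(ExampleFixture, GeneratesValuesInRange)
{
  // Uniformly random int32_t values in [1, 100]; the seed increments per generator instance.
  cudf::test::UniformRandomGenerator<int32_t> gen(1, 100);
  auto const v = gen.generate();
  EXPECT_GE(v, 1);
  EXPECT_LE(v, 100);

  // The fixture exposes the memory resource installed by CUDF_TEST_PROGRAM_MAIN().
  EXPECT_NE(mr(), nullptr);
}

CUDF_TEST_PROGRAM_MAIN()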
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/cudf_test/default_stream.hpp
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <rmm/cuda_stream_view.hpp> namespace cudf { namespace test { /** * @brief Get the default stream to use for tests. * * The standard behavior of this function is to return cudf's default stream * (cudf::get_default_stream). This function is primarily provided as an * overload target for preload libraries (via LD_PRELOAD) so that the default * stream used for tests may be modified for tracking purposes. All tests of * public APIs that accept streams should pass `cudf::test::get_default_stream` * as the stream argument so that a preload library changing the behavior of * this function will trigger those tests to run on a different stream than * `cudf::get_default_stream`. * * @return The default stream to use for tests. */ rmm::cuda_stream_view const get_default_stream(); } // namespace test } // namespace cudf
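// Illustrative sketch (not part of the header): passing the test default stream to a
// stream-accepting detail API, here the host-to-device vector factory also used elsewhere in
// these test utilities.
#include <cudf_test/default_stream.hpp>

#include <cudf/detail/utilities/vector_factories.hpp>

#include <rmm/mr/device/per_device_resource.hpp>

#include <cstdint>
#include <vector>

void copy_to_device_on_test_stream()
{
  std::vector<int32_t> host_values{1, 2, 3, 4};

  // Using cudf::test::get_default_stream() lets a preloaded stream-checking library verify that
  // no unexpected stream is used by the code under test.
  auto device_values = cudf::detail::make_device_uvector_sync(
    host_values, cudf::test::get_default_stream(), rmm::mr::get_current_device_resource());
}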
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/cudf_test/debug_utilities.hpp
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/column/column_view.hpp> #include <cudf/null_mask.hpp> #include <iostream> #include <string> #include <vector> namespace cudf::test { /** * @brief Formats a column view as a string * * @param col The input column view * @param delimiter The delimiter to put between strings * @return The formatted string */ std::string to_string(cudf::column_view const& col, std::string const& delimiter); /** * @brief Convert column values to a host vector of strings * * @param col The input column view * @return Vector of strings, one per row */ std::vector<std::string> to_strings(cudf::column_view const& col); /** * @brief Print a column view to an ostream * * @param col The input column view * @param os The output stream */ void print(cudf::column_view const& col, std::ostream& os = std::cout); } // namespace cudf::test
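// Illustrative sketch (not part of the header): printing a small column while debugging a test;
// the wrapper type comes from cudf_test/column_wrapper.hpp.
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/debug_utilities.hpp>

#include <iostream>

void dump_column()
{
  cudf::test::fixed_width_column_wrapper<int32_t> col{{10, 20, 30}};

  // Print directly to stdout, or capture a delimited string for a log message.
  cudf::test::print(col);
  std::cout << cudf::test::to_string(col, ",") << std::endl;
}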
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/cudf_test/print_utilities.cuh
/* * Copyright (c) 2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/types.hpp> #include <cudf/utilities/traits.hpp> #include <rmm/cuda_stream_view.hpp> #include <thrust/iterator/transform_iterator.h> #include <type_traits> namespace cudf::test::print { constexpr int32_t hex_tag = 0; template <int32_t TagT, typename T> struct TaggedType { T v; }; template <typename T> using hex_t = TaggedType<hex_tag, T>; /** * @brief Function object to transform a built-in type to a tagged type (e.g., in order to print * values from an iterator returning uint32_t as hex values) * * @tparam TaggedTypeT A TaggedType template specialisation */ template <typename TaggedTypeT> struct ToTaggedType { template <typename T> CUDF_HOST_DEVICE TaggedTypeT operator()(T const& v) const { return TaggedTypeT{v}; } }; /** * @brief Returns an iterator that causes the values from \p it to be printed as hex values. * * @tparam InItT A random-access input iterator type * @param it A random-access input iterator t * @return */ template <typename InItT> auto hex(InItT it) { using value_t = typename std::iterator_traits<InItT>::value_type; using tagged_t = hex_t<value_t>; return thrust::make_transform_iterator(it, ToTaggedType<tagged_t>{}); } template <typename T, CUDF_ENABLE_IF(std::is_integral_v<T>&& std::is_signed_v<T>)> CUDF_HOST_DEVICE void print_value(int32_t width, T arg) { printf("%*d", width, arg); } template <typename T, CUDF_ENABLE_IF(std::is_integral_v<T>&& std::is_unsigned_v<T>)> CUDF_HOST_DEVICE void print_value(int32_t width, T arg) { printf("%*d", width, arg); } CUDF_HOST_DEVICE void print_value(int32_t width, char arg) { printf("%*c", width, arg); } template <typename T> CUDF_HOST_DEVICE void print_value(int32_t width, hex_t<T> arg) { printf("%*X", width, arg.v); } namespace detail { template <typename T> CUDF_HOST_DEVICE void print_values(int32_t width, char delimiter, T arg) { print_value(width, arg); } template <typename T, typename... Ts> CUDF_HOST_DEVICE void print_values(int32_t width, char delimiter, T arg, Ts... args) { print_value(width, arg); if (delimiter) printf("%c", delimiter); print_values(width, delimiter, args...); } template <typename... Ts> __global__ void print_array_kernel(std::size_t count, int32_t width, char delimiter, Ts... args) { if (threadIdx.x == 0 && blockIdx.x == 0) { for (std::size_t i = 0; i < count; i++) { printf("%6lu: ", i); print_values(width, delimiter, args[i]...); printf("\n"); } } } } // namespace detail /** * @brief Prints \p count elements from each of the given device-accessible iterators. * * @param count The number of items to print from each device-accessible iterator * @param stream The cuda stream to which the printing kernel shall be dispatched * @param args List of iterators to be printed */ template <typename... Ts> void print_array(std::size_t count, rmm::cuda_stream_view stream, Ts... 
args) { // The width to pad printed numbers to constexpr int32_t width = 6; // Delimiter used for separating values from subsequent iterators constexpr char delimiter = ','; // TODO: we want this to compile to nothing depending on a compiler flag, rather than checking at runtime if (std::getenv("CUDA_DBG_DUMP") != nullptr) { detail::print_array_kernel<<<1, 1, 0, stream.value()>>>(count, width, delimiter, args...); } } } // namespace cudf::test::print
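// Illustrative sketch (not part of the header): dumping a device buffer both as decimal and hex.
// This must live in a .cu translation unit (the print kernel is a __global__ function), and the
// output only appears when the CUDA_DBG_DUMP environment variable is set.
#include <cudf_test/print_utilities.cuh>

#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/utilities/default_stream.hpp>

#include <rmm/mr/device/per_device_resource.hpp>

#include <cstdint>
#include <vector>

void dump_offsets()
{
  auto stream = cudf::get_default_stream();
  std::vector<uint32_t> h_offsets{0, 4, 9, 15};
  auto d_offsets = cudf::detail::make_device_uvector_sync(
    h_offsets, stream, rmm::mr::get_current_device_resource());

  // Prints one row per element: the index, the raw value, and the value formatted as hex.
  cudf::test::print::print_array(
    d_offsets.size(), stream, d_offsets.data(), cudf::test::print::hex(d_offsets.data()));
}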
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/cudf_test/table_utilities.hpp
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/table/table_view.hpp> #include <cudf/types.hpp> namespace cudf::test::detail { /** * @brief Verifies the property equality of two tables. * * @note This function should not be used directly. Use `CUDF_TEST_EXPECT_TABLE_PROPERTIES_EQUAL` * instead. * * @param lhs The first table * @param rhs The second table */ void expect_table_properties_equal(cudf::table_view lhs, cudf::table_view rhs); /** * @brief Verifies the equality of two tables. * * Treats null elements as equivalent. * * @note This function should not be used directly. Use `CUDF_TEST_EXPECT_TABLES_EQUAL` instead. * * @param lhs The first table * @param rhs The second table */ void expect_tables_equal(cudf::table_view lhs, cudf::table_view rhs); /** * @brief Verifies the equivalency of two tables. * * Treats null elements as equivalent. Columns that have nullability but no nulls, * and columns that are not nullable are considered equivalent. * * @note This function should not be used directly. Use `CUDF_TEST_EXPECT_TABLES_EQUIVALENT` * instead. * * @param lhs The first table * @param rhs The second table */ void expect_tables_equivalent(cudf::table_view lhs, cudf::table_view rhs); } // namespace cudf::test::detail // Macros for showing line of failure. #define CUDF_TEST_EXPECT_TABLE_PROPERTIES_EQUAL(lhs, rhs) \ do { \ SCOPED_TRACE(" <-- line of failure\n"); \ cudf::test::detail::expect_table_properties_equal(lhs, rhs); \ } while (0) #define CUDF_TEST_EXPECT_TABLES_EQUAL(lhs, rhs) \ do { \ SCOPED_TRACE(" <-- line of failure\n"); \ cudf::test::detail::expect_tables_equal(lhs, rhs); \ } while (0) #define CUDF_TEST_EXPECT_TABLES_EQUIVALENT(lhs, rhs) \ do { \ SCOPED_TRACE(" <-- line of failure\n"); \ cudf::test::detail::expect_tables_equivalent(lhs, rhs); \ } while (0)
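For context, a hedged sketch of how these macros are typically used from a GTest test body; the fixture name TableTest and the column contents are invented for the example, and the column wrappers come from cudf_test/column_wrapper.hpp:

// Illustrative GTest usage of the table comparison macros declared above.
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/table_utilities.hpp>

#include <cudf/table/table_view.hpp>

struct TableTest : public cudf::test::BaseFixture {};

TEST_F(TableTest, CompareTables)
{
  cudf::test::fixed_width_column_wrapper<int32_t> lhs_col{{1, 2, 3, 4}, {1, 1, 0, 1}};
  cudf::test::fixed_width_column_wrapper<int32_t> rhs_col{{1, 2, 3, 4}, {1, 1, 0, 1}};
  cudf::table_view lhs{{lhs_col}};
  cudf::table_view rhs{{rhs_col}};

  // Checks structural properties (e.g. number of columns, column types).
  CUDF_TEST_EXPECT_TABLE_PROPERTIES_EQUAL(lhs, rhs);
  // Checks element-wise equality, treating null elements as equivalent.
  CUDF_TEST_EXPECT_TABLES_EQUAL(lhs, rhs);
}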
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/cudf_test/type_lists.hpp
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/fixed_point/fixed_point.hpp> #include <cudf/strings/string_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/traits.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <cudf/wrappers/durations.hpp> #include <cudf/wrappers/timestamps.hpp> #include <cudf_test/type_list_utilities.hpp> #include <thrust/host_vector.h> #include <array> #include <tuple> #include <type_traits> /** * @file type_lists.hpp * @brief Provides centralized type lists for use in Google Test * type-parameterized tests. * * These lists should be used for consistency across tests as well as * future-proofing against the addition of any new types. */ namespace cudf { namespace test { namespace detail { template <typename TYPES, std::size_t... Indices> constexpr std::array<cudf::type_id, sizeof...(Indices)> types_to_ids_impl( std::index_sequence<Indices...>) { return {{cudf::type_to_id<GetType<TYPES, Indices>>()...}}; } /** * @brief Converts a `Types` list of types into a `std::array` of the * corresponding `cudf::type_id`s for each type in the list * * Example: * ``` * auto array = types_to_ids<Types<int32_t, float>>(); * array == {type_id::INT32, type_id::FLOAT32}; * ``` * * @tparam TYPES List of types to convert to `type_id`s * @return `std::array` of `type_id`s corresponding to each type in `TYPES` */ template <typename TYPES> constexpr auto types_to_ids() { constexpr auto N = GetSize<TYPES>; return types_to_ids_impl<TYPES>(std::make_index_sequence<N>()); } } // namespace detail /** * @brief Convert numeric values of type T to numeric vector of type TypeParam. * * This will also convert negative values to positive values if the output type is unsigned.
* * @param init_list Values used to create the output vector * @return Vector of TypeParam with the values specified */ template <typename TypeParam, typename T> std::enable_if_t<cudf::is_fixed_width<TypeParam>() && !cudf::is_timestamp_t<TypeParam>::value, thrust::host_vector<TypeParam>> make_type_param_vector(std::initializer_list<T> const& init_list) { std::vector<T> input{init_list}; std::vector<TypeParam> vec(init_list.size()); std::transform( std::cbegin(input), std::cend(input), std::begin(vec), [](auto const& e) -> TypeParam { if constexpr (std::is_unsigned_v<TypeParam>) { return static_cast<TypeParam>(std::abs(e)); } return static_cast<TypeParam>(e); }); return vec; } /** * @brief Convert numeric values of type T to timestamp vector * * @param init_list Values used to create the output vector * @return Vector of TypeParam with the values specified */ template <typename TypeParam, typename T> std::enable_if_t<cudf::is_timestamp_t<TypeParam>::value, thrust::host_vector<TypeParam>> make_type_param_vector(std::initializer_list<T> const& init_list) { thrust::host_vector<TypeParam> vec(init_list.size()); std::transform(std::cbegin(init_list), std::cend(init_list), std::begin(vec), [](auto const& e) { return TypeParam{typename TypeParam::duration{e}}; }); return vec; } /** * @brief Convert numeric values of type T to vector of std::string * * @param init_list Values used to create the output vector * @return Vector of TypeParam with the values specified */ template <typename TypeParam, typename T> std::enable_if_t<std::is_same_v<TypeParam, std::string>, thrust::host_vector<std::string>> make_type_param_vector(std::initializer_list<T> const& init_list) { thrust::host_vector<std::string> vec(init_list.size()); std::transform(std::cbegin(init_list), std::cend(init_list), std::begin(vec), [](auto const& e) { return std::to_string(e); }); return vec; } /** * @brief Convert the numeric value of type T to a fixed width type of type TypeParam. * * This function is necessary because some types (such as timestamp types) are not directly * constructible from numeric types. This function is offered as a convenience to allow * implicitly constructing such objects from numeric values. * * @param init_value Value used to initialize the fixed width type * @return A fixed width type - [u]int32/float/duration etc. of type TypeParam with the * value specified */ template <typename TypeParam, typename T> std::enable_if_t<cudf::is_fixed_width<TypeParam>() && !cudf::is_timestamp_t<TypeParam>::value, TypeParam> make_type_param_scalar(T const init_value) { return static_cast<TypeParam>(init_value); } /** * @brief Convert the timestamp value of type T to a fixed width type of type TypeParam. * * This function is necessary because some types (such as timestamp types) are not directly * constructible from timestamp types. This function is offered as a convenience to allow * implicitly constructing such objects from timestamp values. * * @param init_value Value used to initialize the fixed width type * @return A fixed width type - TimeStamp of type TypeParam with the * value specified */ template <typename TypeParam, typename T> std::enable_if_t<cudf::is_timestamp_t<TypeParam>::value, TypeParam> make_type_param_scalar( T const init_value) { return TypeParam{typename TypeParam::duration(init_value)}; } /** * @brief Convert the numeric value of type T to a string type. * * This function converts the numeric value of type T to its string representation. 
* * @param init_value Value to convert to a string * @return string representation of the value */ template <typename TypeParam, typename T> std::enable_if_t<std::is_same_v<TypeParam, std::string>, TypeParam> make_type_param_scalar( T const init_value) { return std::to_string(init_value); } /** * @brief Type list for all integral types except type bool. */ using IntegralTypesNotBool = cudf::test::Types<int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t>; /** * @brief Type list for all integral types. */ using IntegralTypes = Concat<IntegralTypesNotBool, cudf::test::Types<bool>>; /** * @brief Provides a list of all floating point types supported in libcudf for * use in a GTest typed test. * * Example: * ``` * // Invokes all typed fixture tests for all floating point types in libcudf * TYPED_TEST_SUITE(MyTypedFixture, cudf::test::FloatingPointTypes); * ``` */ using FloatingPointTypes = cudf::test::Types<float, double>; /** * @brief Provides a list of all numeric types supported in libcudf for use in a * GTest typed test. * * Example: * ``` * // Invokes all typed fixture tests for all numeric types in libcudf * TYPED_TEST_SUITE(MyTypedFixture, cudf::test::NumericTypes); * ``` */ using NumericTypes = Concat<IntegralTypes, FloatingPointTypes>; /** * @brief Provides a list of all timestamp types supported in libcudf for use * in a GTest typed test. * * Example: * ``` * // Invokes all typed fixture tests for all timestamp types in libcudf * TYPED_TEST_SUITE(MyTypedFixture, cudf::test::TimestampTypes); * ``` */ using TimestampTypes = cudf::test::Types<timestamp_D, timestamp_s, timestamp_ms, timestamp_us, timestamp_ns>; /** * @brief Provides a list of all duration types supported in libcudf for use * in a GTest typed test. * * Example: * ``` * // Invokes all typed fixture tests for all duration types in libcudf * TYPED_TEST_SUITE(MyTypedFixture, cudf::test::DurationTypes); * ``` */ using DurationTypes = cudf::test::Types<duration_D, duration_s, duration_ms, duration_us, duration_ns>; /** * @brief Provides a list of all chrono types supported in libcudf for use in a GTest typed test. * * Example: * ``` * // Invokes all typed fixture tests for all chrono types in libcudf * TYPED_TEST_SUITE(MyTypedFixture, cudf::test::ChronoTypes); * ``` */ using ChronoTypes = Concat<TimestampTypes, DurationTypes>; /** * @brief Provides a list of all string types supported in libcudf for use in a * GTest typed test. * * Example: * ``` * // Invokes all typed fixture tests for all string types in libcudf * TYPED_TEST_SUITE(MyTypedFixture, cudf::test::StringTypes); * ``` */ using StringTypes = cudf::test::Types<string_view>; /** * @brief Provides a list of all list types supported in libcudf for use in a * GTest typed test. * * Example: * ``` * // Invokes all typed fixture tests for all list types in libcudf * TYPED_TEST_SUITE(MyTypedFixture, cudf::test::ListTypes); * ``` */ using ListTypes = cudf::test::Types<list_view>; /** * @brief Provides a list of all fixed-point element types for use in GTest * typed tests. * * Example: * ``` * // Invokes all typed fixture tests for all fixed-width types in libcudf * TYPED_TEST_SUITE(MyTypedFixture, cudf::test::FixedPointTypes); * ``` */ using FixedPointTypes = cudf::test::Types<numeric::decimal32, numeric::decimal64, numeric::decimal128>; /** * @brief Provides a list of all fixed-width element types for use in GTest * typed tests. 
* * Example: * ``` * // Invokes all typed fixture tests for all fixed-width types in libcudf * TYPED_TEST_SUITE(MyTypedFixture, cudf::test::FixedWidthTypes); * ``` */ using FixedWidthTypes = Concat<NumericTypes, ChronoTypes, FixedPointTypes>; /** * @brief Provides a list of all fixed-width element types except for the * fixed-point types for use in GTest typed tests. * * Certain tests written for fixed-width types don't work for fixed-point as * fixed-point types aren't constructible from other fixed-width types * because a scale needs to be specified. * * Example: * ``` * // Invokes all typed fixture tests for all fixed-width types in libcudf * TYPED_TEST_SUITE(MyTypedFixture, cudf::test::FixedWidthTypesWithoutFixedPoint); * ``` */ using FixedWidthTypesWithoutFixedPoint = Concat<NumericTypes, ChronoTypes>; /** * @brief Provides a list of all fixed-width element types except for the * chrono types for use in GTest typed tests. * * Example: * ``` * // Invokes all typed fixture tests for all fixed-width types in libcudf * TYPED_TEST_SUITE(MyTypedFixture, cudf::test::FixedWidthTypesWithoutChrono); * ``` */ using FixedWidthTypesWithoutChrono = Concat<NumericTypes, FixedPointTypes>; /** * @brief Provides a list of sortable types for use in GTest typed tests. * * Example: * ``` * // Invokes all typed fixture tests for all sortable types in libcudf * TYPED_TEST_SUITE(MyTypedFixture, cudf::test::ComparableTypes); * ``` */ using ComparableTypes = Concat<NumericTypes, ChronoTypes, StringTypes>; /** * @brief Provides a list of all compound types for use in GTest typed tests. * * Example: * ``` * // Invokes all typed fixture tests for all compound types in libcudf * TYPED_TEST_SUITE(MyTypedFixture, cudf::test::CompoundTypes); * ``` */ using CompoundTypes = cudf::test::Types<cudf::string_view, cudf::dictionary32, cudf::list_view, cudf::struct_view>; /** * @brief Provides a list of all types supported in libcudf for use in a GTest * typed test. * * @note Currently does not provide any of the "wrapped" types, e.g., * category, etc. * * Example: * ``` * // Invokes all typed fixture tests for all types supported by libcudf * TYPED_TEST_SUITE(MyTypedFixture, cudf::test::AllTypes); * ``` */ using AllTypes = Concat<NumericTypes, ChronoTypes, FixedPointTypes>; /** * @brief `std::array` of all `cudf::type_id`s * * This can be used for iterating over `type_id`s for custom testing, or used in * GTest value-parameterized tests. */ static constexpr auto all_type_ids{detail::types_to_ids<AllTypes>()}; /** * @brief `std::array` of all numeric `cudf::type_id`s * * This can be used for iterating over `type_id`s for custom testing, or used in * GTest value-parameterized tests. */ static constexpr auto numeric_type_ids{detail::types_to_ids<NumericTypes>()}; /** * @brief `std::array` of all timestamp `cudf::type_id`s * * This can be used for iterating over `type_id`s for custom testing, or used in * GTest value-parameterized tests. */ static constexpr std::array<cudf::type_id, 5> timestamp_type_ids{ detail::types_to_ids<TimestampTypes>()}; /** * @brief `std::array` of all duration `cudf::type_id`s * * This can be used for iterating over `type_id`s for custom testing, or used in * GTest value-parameterized tests. */ static constexpr std::array<cudf::type_id, 5> duration_type_ids{ detail::types_to_ids<DurationTypes>()}; /** * @brief `std::array` of all non-numeric `cudf::type_id`s * * This can be used for iterating over `type_id`s for custom testing, or used in * GTest value-parameterized tests. 
*/ static constexpr std::array<cudf::type_id, 12> non_numeric_type_ids{ cudf::type_id::EMPTY, cudf::type_id::TIMESTAMP_DAYS, cudf::type_id::TIMESTAMP_SECONDS, cudf::type_id::TIMESTAMP_MILLISECONDS, cudf::type_id::TIMESTAMP_MICROSECONDS, cudf::type_id::TIMESTAMP_NANOSECONDS, cudf::type_id::DURATION_DAYS, cudf::type_id::DURATION_SECONDS, cudf::type_id::DURATION_MILLISECONDS, cudf::type_id::DURATION_MICROSECONDS, cudf::type_id::DURATION_NANOSECONDS, cudf::type_id::STRING}; /** * @brief `std::array` of all non-fixed-width `cudf::type_id`s * * This can be used for iterating over `type_id`s for custom testing, or used in * GTest value-parameterized tests. */ static constexpr std::array<cudf::type_id, 2> non_fixed_width_type_ids{cudf::type_id::EMPTY, cudf::type_id::STRING}; } // namespace test } // namespace cudf
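As a hedged illustration of how these type lists and the make_type_param_vector helper above are meant to be consumed in a typed test (the fixture and test names are invented for the example; fixed_width_column_wrapper is defined in cudf_test/column_wrapper.hpp, included further below):

// Illustrative typed-test skeleton driven by cudf::test::NumericTypes.
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/type_lists.hpp>

template <typename T>
struct NumericColumnTest : public cudf::test::BaseFixture {};

// Instantiates every test in the fixture once per type in NumericTypes (bool, integers, floats).
TYPED_TEST_SUITE(NumericColumnTest, cudf::test::NumericTypes);

TYPED_TEST(NumericColumnTest, BuildColumnFromLiterals)
{
  using T = TypeParam;
  // Converts the int literals to T; negative literals would be made positive for unsigned T.
  auto const values = cudf::test::make_type_param_vector<T>({1, 2, 3});
  cudf::test::fixed_width_column_wrapper<T> col(values.begin(), values.end());
  cudf::column_view view = col;
  EXPECT_EQ(view.size(), 3);
}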
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/cudf_test/column_wrapper.hpp
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/column/column.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/copying.hpp> #include <cudf/detail/concatenate.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/dictionary/encode.hpp> #include <cudf/fixed_point/fixed_point.hpp> #include <cudf/lists/lists_column_view.hpp> #include <cudf/null_mask.hpp> #include <cudf/types.hpp> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/default_stream.hpp> #include <cudf/utilities/traits.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <cudf_test/column_utilities.hpp> #include <cudf_test/cudf_gtest.hpp> #include <cudf_test/default_stream.hpp> #include <rmm/device_buffer.hpp> #include <rmm/mr/device/per_device_resource.hpp> #include <thrust/copy.h> #include <thrust/functional.h> #include <thrust/host_vector.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <algorithm> #include <iterator> #include <memory> #include <numeric> namespace cudf { namespace test { namespace detail { /** * @brief Base class for a wrapper around a `cudf::column`. * * Classes that derive from `column_wrapper` may be passed directly into any * API expecting a `column_view` or `mutable_column_view`. * * `column_wrapper` should not be instantiated directly. */ class column_wrapper { public: /** * @brief Implicit conversion operator to `column_view`. * * Allows passing in a `column_wrapper` (or any class deriving from * `column_wrapper`) to be passed into any API expecting a `column_view` * parameter. */ operator column_view() const { return wrapped->view(); } /** * @brief Implicit conversion operator to `mutable_column_view`. * * Allows passing in a `column_wrapper` (or any class deriving from * `column_wrapper`) to be passed into any API expecting a * `mutable_column_view` parameter. */ operator mutable_column_view() { return wrapped->mutable_view(); } /** * @brief Releases internal unique_ptr to wrapped column * * @return unique_ptr to wrapped column */ std::unique_ptr<cudf::column> release() { return std::move(wrapped); } protected: std::unique_ptr<cudf::column> wrapped{}; ///< The wrapped column }; /** * @brief Convert between source and target types when they differ and where possible. */ template <typename From, typename To> struct fixed_width_type_converter { /** * @brief No conversion necessary: Same type, simply copy element to output. 
* * @tparam FromT Source type * @tparam ToT Target type * @param element Source value * @return The converted target value, same as source value */ template <typename FromT = From, typename ToT = To, std::enable_if_t<std::is_same_v<FromT, ToT>, void>* = nullptr> constexpr ToT operator()(FromT element) const { return element; } /** * @brief Convert types if possible, otherwise construct target from source. * * @tparam FromT Source type * @tparam ToT Target type * @param element Source value * @return The converted target value */ template < typename FromT = From, typename ToT = To, std::enable_if_t<!std::is_same_v<FromT, ToT> && (cudf::is_convertible<FromT, ToT>::value || std::is_constructible_v<ToT, FromT>), void>* = nullptr> constexpr ToT operator()(FromT element) const { return static_cast<ToT>(element); } /** * @brief Convert integral values to timestamps * * @tparam FromT Source type * @tparam ToT Target type * @param element Source value * @return The converted target `timestamp` value */ template < typename FromT = From, typename ToT = To, std::enable_if_t<std::is_integral_v<FromT> && cudf::is_timestamp<ToT>(), void>* = nullptr> constexpr ToT operator()(FromT element) const { return ToT{typename ToT::duration{element}}; } }; /** * @brief Creates a `device_buffer` containing the elements in the range `[begin,end)`. * * @tparam ElementTo The element type that is being created (non-`fixed_point`) * @tparam ElementFrom The element type used to create elements of type `ElementTo` * @tparam InputIterator Iterator type for `begin` and `end` * @param begin Beginning of the sequence of elements * @param end End of the sequence of elements * @return rmm::device_buffer Buffer containing all elements in the range `[begin,end)` */ template <typename ElementTo, typename ElementFrom, typename InputIterator, std::enable_if_t<not cudf::is_fixed_point<ElementTo>()>* = nullptr> rmm::device_buffer make_elements(InputIterator begin, InputIterator end) { static_assert(cudf::is_fixed_width<ElementTo>(), "Unexpected non-fixed width type."); auto transformer = fixed_width_type_converter<ElementFrom, ElementTo>{}; auto transform_begin = thrust::make_transform_iterator(begin, transformer); auto const size = cudf::distance(begin, end); auto const elements = thrust::host_vector<ElementTo>(transform_begin, transform_begin + size); return rmm::device_buffer{ elements.data(), size * sizeof(ElementTo), cudf::test::get_default_stream()}; } /** * @brief Creates a `device_buffer` containing the elements in the range `[begin,end)`. 
* * @tparam ElementTo The element type that is being created (`fixed_point` specialization) * @tparam ElementFrom The element type used to create elements of type `ElementTo` * (non-`fixed-point`) * @tparam InputIterator Iterator type for `begin` and `end` * @param begin Beginning of the sequence of elements * @param end End of the sequence of elements * @return rmm::device_buffer Buffer containing all elements in the range `[begin,end)` */ template <typename ElementTo, typename ElementFrom, typename InputIterator, std::enable_if_t<not cudf::is_fixed_point<ElementFrom>() and cudf::is_fixed_point<ElementTo>()>* = nullptr> rmm::device_buffer make_elements(InputIterator begin, InputIterator end) { using RepType = typename ElementTo::rep; auto transformer = fixed_width_type_converter<ElementFrom, RepType>{}; auto transform_begin = thrust::make_transform_iterator(begin, transformer); auto const size = cudf::distance(begin, end); auto const elements = thrust::host_vector<RepType>(transform_begin, transform_begin + size); return rmm::device_buffer{ elements.data(), size * sizeof(RepType), cudf::test::get_default_stream()}; } /** * @brief Creates a `device_buffer` containing the elements in the range `[begin,end)`. * * @tparam ElementTo The element type that is being created (`fixed_point` specialization) * @tparam ElementFrom The element type used to create elements of type `ElementTo` (`fixed_point`) * @tparam InputIterator Iterator type for `begin` and `end` * @param begin Beginning of the sequence of elements * @param end End of the sequence of elements * @return rmm::device_buffer Buffer containing all elements in the range `[begin,end)` */ template <typename ElementTo, typename ElementFrom, typename InputIterator, std::enable_if_t<cudf::is_fixed_point<ElementFrom>() and cudf::is_fixed_point<ElementTo>()>* = nullptr> rmm::device_buffer make_elements(InputIterator begin, InputIterator end) { using namespace numeric; using RepType = typename ElementTo::rep; auto to_rep = [](ElementTo fp) { return fp.value(); }; auto transformer_begin = thrust::make_transform_iterator(begin, to_rep); auto const size = cudf::distance(begin, end); auto const elements = thrust::host_vector<RepType>(transformer_begin, transformer_begin + size); return rmm::device_buffer{ elements.data(), size * sizeof(RepType), cudf::test::get_default_stream()}; } /** * @brief Create a `std::vector` containing a validity indicator bitmask using * the range `[begin,end)` interpreted as booleans to indicate the state of * each bit. * * If `*(begin + i) == true`, then bit `i` is set to 1, else it is zero. * * @tparam ValidityIterator * @param begin The beginning of the validity indicator sequence * @param end The end of the validity indicator sequence * @return std::vector Contains a bitmask where bits are set for every * element in `[begin,end)` that evaluated to `true`. 
*/ template <typename ValidityIterator> std::pair<std::vector<bitmask_type>, cudf::size_type> make_null_mask_vector(ValidityIterator begin, ValidityIterator end) { auto const size = cudf::distance(begin, end); auto const num_words = cudf::bitmask_allocation_size_bytes(size) / sizeof(bitmask_type); auto null_mask = std::vector<bitmask_type>(num_words, 0); auto null_count = cudf::size_type{0}; for (auto i = 0; i < size; ++i) { if (*(begin + i)) { set_bit_unsafe(null_mask.data(), i); } else { ++null_count; } } return {std::move(null_mask), null_count}; } /** * @brief Create a `device_buffer` containing a validity indicator bitmask using * the range `[begin,end)` interpreted as booleans to indicate the state of *each bit. * * If `*(begin + i) == true`, then bit `i` is set to 1, else it is zero. * * @tparam ValidityIterator * @param begin The beginning of the validity indicator sequence * @param end The end of the validity indicator sequence * @return rmm::device_buffer Contains a bitmask where bits are set for every * element in `[begin,end)` that evaluated to `true`. */ template <typename ValidityIterator> std::pair<rmm::device_buffer, cudf::size_type> make_null_mask(ValidityIterator begin, ValidityIterator end) { auto [null_mask, null_count] = make_null_mask_vector(begin, end); auto d_mask = rmm::device_buffer{null_mask.data(), cudf::bitmask_allocation_size_bytes(cudf::distance(begin, end)), cudf::test::get_default_stream()}; return {std::move(d_mask), null_count}; } /** * @brief Given the range `[begin,end)`, converts each value to a string and * then creates a packed vector of characters for each string and a vector of * offsets indicating the starting position of each string. * * @tparam StringsIterator A `std::string` must be constructible from * dereferencing a `StringsIterator`. * @tparam ValidityIterator Dereferencing a ValidityIterator must be * convertible to `bool` * @param begin The beginning of the sequence of values to convert to strings * @param end The end of the sequence of values to convert to strings * @param v The beginning of the validity indicator sequence * @return std::pair containing the vector of chars and offsets */ template <typename StringsIterator, typename ValidityIterator> auto make_chars_and_offsets(StringsIterator begin, StringsIterator end, ValidityIterator v) { std::vector<char> chars{}; std::vector<cudf::size_type> offsets(1, 0); for (auto str = begin; str < end; ++str) { std::string tmp = (*v++) ? std::string(*str) : std::string{}; chars.insert(chars.end(), std::cbegin(tmp), std::cend(tmp)); offsets.push_back(offsets.back() + tmp.length()); } return std::pair(std::move(chars), std::move(offsets)); }; } // namespace detail /** * @brief `column_wrapper` derived class for wrapping columns of fixed-width * elements. 
* * @tparam ElementTo The fixed-width element type that is created * @tparam SourceElementT The fixed-width element type that is used to create elements of type * `ElementTo` */ template <typename ElementTo, typename SourceElementT = ElementTo> class fixed_width_column_wrapper : public detail::column_wrapper { public: /** * @brief Default constructor initializes an empty column with proper dtype */ fixed_width_column_wrapper() : column_wrapper{} { std::vector<ElementTo> empty; wrapped.reset( new cudf::column{cudf::data_type{cudf::type_to_id<ElementTo>()}, 0, detail::make_elements<ElementTo, SourceElementT>(empty.begin(), empty.end()), rmm::device_buffer{}, 0}); } /** * @brief Construct a non-nullable column of the fixed-width elements in the * range `[begin,end)`. * * Example: * @code{.cpp} * // Creates a non-nullable column of INT32 elements with 5 elements: {0, 2, 4, 6, 8} * auto elements = make_counting_transform_iterator(0, [](auto i){return i*2;}); * fixed_width_column_wrapper<int32_t> w(elements, elements + 5); * @endcode * * Note: similar to `std::vector`, this "range" constructor should be used * with parentheses `()` and not braces `{}`. The latter should only * be used for the `initializer_list` constructors * * @param begin The beginning of the sequence of elements * @param end The end of the sequence of elements */ template <typename InputIterator> fixed_width_column_wrapper(InputIterator begin, InputIterator end) : column_wrapper{} { auto const size = cudf::distance(begin, end); wrapped.reset(new cudf::column{cudf::data_type{cudf::type_to_id<ElementTo>()}, size, detail::make_elements<ElementTo, SourceElementT>(begin, end), rmm::device_buffer{}, 0}); } /** * @brief Construct a nullable column of the fixed-width elements in the range * `[begin,end)` using the range `[v, v + distance(begin,end))` interpreted * as booleans to indicate the validity of each element. * * If `v[i] == true`, element `i` is valid, else it is null. * * Example: * @code{.cpp} * // Creates a nullable column of INT32 elements with 5 elements: {null, 1, null, 3, null} * auto elements = make_counting_transform_iterator(0, [](auto i){return i;}); * auto validity = make_counting_transform_iterator(0, [](auto i){return i%2;}) * fixed_width_column_wrapper<int32_t> w(elements, elements + 5, validity); * @endcode * * Note: similar to `std::vector`, this "range" constructor should be used * with parentheses `()` and not braces `{}`. The latter should only * be used for the `initializer_list` constructors * * @param begin The beginning of the sequence of elements * @param end The end of the sequence of elements * @param v The beginning of the sequence of validity indicators */ template <typename InputIterator, typename ValidityIterator> fixed_width_column_wrapper(InputIterator begin, InputIterator end, ValidityIterator v) : column_wrapper{} { auto const size = cudf::distance(begin, end); auto [null_mask, null_count] = detail::make_null_mask(v, v + size); wrapped.reset(new cudf::column{cudf::data_type{cudf::type_to_id<ElementTo>()}, size, detail::make_elements<ElementTo, SourceElementT>(begin, end), std::move(null_mask), null_count}); } /** * @brief Construct a non-nullable column of fixed-width elements from an * initializer list. 
* * Example: * @code{.cpp} * // Creates a non-nullable INT32 column with 4 elements: {1, 2, 3, 4} * fixed_width_column_wrapper<int32_t> w{{1, 2, 3, 4}}; * @endcode * * @param elements The list of elements */ template <typename ElementFrom> fixed_width_column_wrapper(std::initializer_list<ElementFrom> elements) : fixed_width_column_wrapper(std::cbegin(elements), std::cend(elements)) { } /** * @brief Construct a nullable column from a list of fixed-width elements * using another list to indicate the validity of each element. * * The validity of each element is determined by an `initializer_list` of * booleans where `true` indicates the element is valid, and `false` indicates * the element is null. * * Example: * @code{.cpp} * // Creates a nullable INT32 column with 4 elements: {1, NULL, 3, NULL} * fixed_width_column_wrapper<int32_t> w{ {1,2,3,4}, {1, 0, 1, 0}}; * @endcode * * @param elements The list of elements * @param validity The list of validity indicator booleans */ template <typename ElementFrom> fixed_width_column_wrapper(std::initializer_list<ElementFrom> elements, std::initializer_list<bool> validity) : fixed_width_column_wrapper(std::cbegin(elements), std::cend(elements), std::cbegin(validity)) { } /** * @brief Construct a nullable column from a list of fixed-width elements and * the range `[v, v + element_list.size())` interpreted as booleans to * indicate the validity of each element. * * Example: * @code{.cpp} * // Creates a nullable INT32 column with 4 elements: {NULL, 1, NULL, 3} * auto validity = make_counting_transform_iterator(0, [](auto i){return i%2;}) * fixed_width_column_wrapper<int32_t> w{ {1,2,3,4}, validity} * @endcode * * @tparam ValidityIterator Dereferencing a ValidityIterator must be * convertible to `bool` * @param element_list The list of elements * @param v The beginning of the sequence of validity indicators */ template <typename ValidityIterator, typename ElementFrom> fixed_width_column_wrapper(std::initializer_list<ElementFrom> element_list, ValidityIterator v) : fixed_width_column_wrapper(std::cbegin(element_list), std::cend(element_list), v) { } /** * @brief Construct a nullable column of the fixed-width elements in the range * `[begin,end)` using a validity initializer list to indicate the validity of each element. * * The validity of each element is determined by an `initializer_list` of * booleans where `true` indicates the element is valid, and `false` indicates * the element is null. * * Example: * @code{.cpp} * // Creates a nullable column of INT32 elements with 5 elements: {null, 1, null, 3, null} * fixed_width_column_wrapper<int32_t> w(elements, elements + 5, {0, 1, 0, 1, 0}); * @endcode * * @param begin The beginning of the sequence of elements * @param end The end of the sequence of elements * @param validity The list of validity indicator booleans */ template <typename InputIterator> fixed_width_column_wrapper(InputIterator begin, InputIterator end, std::initializer_list<bool> const& validity) : fixed_width_column_wrapper(begin, end, std::cbegin(validity)) { } /** * @brief Construct a nullable column from a list of pairs of fixed-width * elements and validity booleans of each element. * * The validity of each element is determined by the boolean element in the pair * where `true` indicates the element is valid, and `false` indicates the * element is null. 
* * Example: * @code{.cpp} * // Creates a nullable INT32 column with 4 elements: {1, NULL, 3, NULL} * using p = std::pair<int32_t, bool>; * fixed_width_column_wrapper<int32_t> w( p{1, true}, p{2, false}, p{3, true}, p{4, false} ); * @endcode * * @param elements The list of pairs of element and validity booleans */ template <typename ElementFrom> fixed_width_column_wrapper(std::initializer_list<std::pair<ElementFrom, bool>> elements) { auto begin = thrust::make_transform_iterator(elements.begin(), [](auto const& e) { return e.first; }); auto end = begin + elements.size(); auto v = thrust::make_transform_iterator(elements.begin(), [](auto const& e) { return e.second; }); wrapped = fixed_width_column_wrapper<ElementTo, ElementFrom>(begin, end, v).release(); } }; /** * @brief A wrapper for a column of fixed-width elements. * * @tparam Rep The type of the column */ template <typename Rep> class fixed_point_column_wrapper : public detail::column_wrapper { public: /** * @brief Construct a non-nullable column of the decimal elements in the range `[begin,end)`. * * Example: * @code{.cpp} * // Creates a non-nullable column of DECIMAL32 elements with 5 elements: {0, 2, 4, 6, 8} * auto elements = make_counting_transform_iterator(0, [](auto i) { return i * 2;}); * auto w = fixed_point_column_wrapper<int32_t>(elements, elements + 5, scale_type{0}); * @endcode * * @tparam FixedPointRepIterator Iterator for fixed_point::rep * * @param begin The beginning of the sequence of elements * @param end The end of the sequence of elements * @param scale The scale of the elements in the column */ template <typename FixedPointRepIterator> fixed_point_column_wrapper(FixedPointRepIterator begin, FixedPointRepIterator end, numeric::scale_type scale) : column_wrapper{} { CUDF_EXPECTS(numeric::is_supported_representation_type<Rep>(), "not valid representation type"); auto const size = cudf::distance(begin, end); auto const elements = thrust::host_vector<Rep>(begin, end); auto const id = type_to_id<numeric::fixed_point<Rep, numeric::Radix::BASE_10>>(); auto const data_type = cudf::data_type{id, static_cast<int32_t>(scale)}; wrapped.reset(new cudf::column{ data_type, size, rmm::device_buffer{elements.data(), size * sizeof(Rep), cudf::test::get_default_stream()}, rmm::device_buffer{}, 0}); } /** * @brief Construct a non-nullable column of decimal elements from an initializer list. * * Example: * @code{.cpp} * // Creates a non-nullable `decimal32` column with 4 elements: {42.0, 4.2, 0.4} * auto const col = fixed_point_column_wrapper<int32_t>{{420, 42, 4}, scale_type{-1}}; * @endcode * * @param values The initializer list of already shifted values * @param scale The scale of the elements in the column */ fixed_point_column_wrapper(std::initializer_list<Rep> values, numeric::scale_type scale) : fixed_point_column_wrapper(std::cbegin(values), std::cend(values), scale) { } /** * @brief Construct a nullable column of the fixed-point elements from a range. * * Constructs a nullable column of the fixed-point elements in the range `[begin,end)` using the * range `[v, v + distance(begin,end))` interpreted as Booleans to indicate the validity of each * element. * * If `v[i] == true`, element `i` is valid, else it is null. 
* * Example: * @code{.cpp} * // Creates a nullable column of DECIMAL32 elements with 5 elements: {null, 100, null, 300, * null} * auto elements = make_counting_transform_iterator(0, [](auto i){ return i; }); * auto validity = make_counting_transform_iterator(0, [](auto i){ return i % 2; }); * fixed_point_column_wrapper<int32_t> w(elements, elements + 5, validity, scale_type{2}); * @endcode * * Note: similar to `std::vector`, this "range" constructor should be used * with parentheses `()` and not braces `{}`. The latter should only * be used for the `initializer_list` constructors * * @param begin The beginning of the sequence of elements * @param end The end of the sequence of elements * @param v The beginning of the sequence of validity indicators * @param scale The scale of the elements in the column */ template <typename FixedPointRepIterator, typename ValidityIterator> fixed_point_column_wrapper(FixedPointRepIterator begin, FixedPointRepIterator end, ValidityIterator v, numeric::scale_type scale) : column_wrapper{} { CUDF_EXPECTS(numeric::is_supported_representation_type<Rep>(), "not valid representation type"); auto const size = cudf::distance(begin, end); auto const elements = thrust::host_vector<Rep>(begin, end); auto const id = type_to_id<numeric::fixed_point<Rep, numeric::Radix::BASE_10>>(); auto const data_type = cudf::data_type{id, static_cast<int32_t>(scale)}; auto [null_mask, null_count] = detail::make_null_mask(v, v + size); wrapped.reset(new cudf::column{ data_type, size, rmm::device_buffer{elements.data(), size * sizeof(Rep), cudf::test::get_default_stream()}, std::move(null_mask), null_count}); } /** * @brief Construct a nullable column from an initializer list of decimal elements using another * list to indicate the validity of each element. * * The validity of each element is determined by an `initializer_list` of booleans where `true` * indicates the element is valid, and `false` indicates the element is null. * * Example: * @code{.cpp} * // Creates a nullable INT32 column with 4 elements: {1, null, 3, null} * fixed_width_column_wrapper<int32_t> w{ {1,2,3,4}, {1, 0, 1, 0}, scale_type{0}}; * @endcode * * @param elements The initializer list of elements * @param validity The initializer list of validity indicator booleans * @param scale The scale of the elements in the column */ fixed_point_column_wrapper(std::initializer_list<Rep> elements, std::initializer_list<bool> validity, numeric::scale_type scale) : fixed_point_column_wrapper( std::cbegin(elements), std::cend(elements), std::cbegin(validity), scale) { } /** * @brief Construct a nullable column from an initializer list of decimal elements and the * range `[v, v + element_list.size())` interpreted as booleans to indicate the validity of each * element. 
* * Example: * @code{.cpp} * // Creates a nullable INT32 column with 4 elements: {null, 1, null, 3} * auto validity = make_counting_transform_iterator(0, [](auto i) { return i % 2; }); * auto w = fixed_width_column_wrapper<int32_t>{ {1,2,3,4}, validity, scale_type{0}}; * @endcode * * @tparam ValidityIterator Dereferencing a ValidityIterator must be convertible to `bool` * * @param element_list The initializer list of elements * @param v The beginning of the sequence of validity indicators * @param scale The scale of the elements in the column */ template <typename ValidityIterator> fixed_point_column_wrapper(std::initializer_list<Rep> element_list, ValidityIterator v, numeric::scale_type scale) : fixed_point_column_wrapper(std::cbegin(element_list), std::cend(element_list), v, scale) { } /** * @brief Construct a nullable column of the decimal elements in the range `[begin,end)` using a * validity initializer list to indicate the validity of each element. * * The validity of each element is determined by an `initializer_list` of booleans where `true` * indicates the element is valid, and `false` indicates the element is null. * * Example: * @code{.cpp} * // Creates a nullable column of DECIMAL32 elements with 5 elements: {null, 1, null, 3, null} * fixed_point_column_wrapper<int32_t> w(elements, elements + 5, {0, 1, 0, 1, 0}, scale_type{0}); * @endcode * * @tparam FixedPointRepIterator Iterator for fixed_point::rep * * @param begin The beginning of the sequence of elements * @param end The end of the sequence of elements * @param validity The initializer list of validity indicator booleans * @param scale The scale of the elements in the column */ template <typename FixedPointRepIterator> fixed_point_column_wrapper(FixedPointRepIterator begin, FixedPointRepIterator end, std::initializer_list<bool> const& validity, numeric::scale_type scale) : fixed_point_column_wrapper(begin, end, std::cbegin(validity), scale) { } }; /** * @brief `column_wrapper` derived class for wrapping columns of strings. */ class strings_column_wrapper : public detail::column_wrapper { public: /** * @brief Default constructor initializes an empty column of strings */ strings_column_wrapper() : strings_column_wrapper(std::initializer_list<std::string>{}) {} /** * @brief Construct a non-nullable column of strings from the range * `[begin,end)`. * * Values in the sequence `[begin,end)` will each be converted to *`std::string` and a column will be created containing all of the strings. * * Example: * @code{.cpp} * // Creates a non-nullable STRING column with 7 string elements: * // {"", "this", "is", "a", "column", "of", "strings"} * std::vector<std::string> strings{"", "this", "is", "a", "column", "of", "strings"}; * strings_column_wrapper s(strings.begin(), strings.end()); * @endcode * * @tparam StringsIterator A `std::string` must be constructible from * dereferencing a `StringsIterator`. 
* @param begin The beginning of the sequence * @param end The end of the sequence */ template <typename StringsIterator> strings_column_wrapper(StringsIterator begin, StringsIterator end) : column_wrapper{} { auto all_valid = thrust::make_constant_iterator(true); auto [chars, offsets] = detail::make_chars_and_offsets(begin, end, all_valid); auto d_chars = cudf::detail::make_device_uvector_sync( chars, cudf::test::get_default_stream(), rmm::mr::get_current_device_resource()); auto d_offsets = cudf::detail::make_device_uvector_sync( offsets, cudf::test::get_default_stream(), rmm::mr::get_current_device_resource()); wrapped = cudf::make_strings_column(d_chars, d_offsets, {}, 0, cudf::test::get_default_stream()); } /** * @brief Construct a nullable column of strings from the range * `[begin,end)` using the range `[v, v + distance(begin,end))` interpreted * as booleans to indicate the validity of each string. * * Values in the sequence `[begin,end)` will each be converted to *`std::string` and a column will be created containing all of the strings. * * If `v[i] == true`, string `i` is valid, else it is null. If a string * `*(begin+i)` is null, it's value is ignored and treated as an empty string. * * Example: * @code{.cpp} * // Creates a nullable STRING column with 7 string elements: * // {NULL, "this", NULL, "a", NULL, "of", NULL} * std::vector<std::string> strings{"", "this", "is", "a", "column", "of", "strings"}; * auto validity = make_counting_transform_iterator(0, [](auto i){return i%2;}); * strings_column_wrapper s(strings.begin(), strings.end(), validity); * @endcode * * @tparam StringsIterator A `std::string` must be constructible from * dereferencing a `StringsIterator`. * @tparam ValidityIterator Dereferencing a ValidityIterator must be convertible to `bool` * * @param begin The beginning of the sequence * @param end The end of the sequence * @param v The beginning of the sequence of validity indicators */ template <typename StringsIterator, typename ValidityIterator> strings_column_wrapper(StringsIterator begin, StringsIterator end, ValidityIterator v) : column_wrapper{} { size_type num_strings = std::distance(begin, end); auto [chars, offsets] = detail::make_chars_and_offsets(begin, end, v); auto [null_mask, null_count] = detail::make_null_mask_vector(v, v + num_strings); auto d_chars = cudf::detail::make_device_uvector_sync( chars, cudf::test::get_default_stream(), rmm::mr::get_current_device_resource()); auto d_offsets = cudf::detail::make_device_uvector_sync( offsets, cudf::test::get_default_stream(), rmm::mr::get_current_device_resource()); auto d_bitmask = cudf::detail::make_device_uvector_sync( null_mask, cudf::test::get_default_stream(), rmm::mr::get_current_device_resource()); wrapped = cudf::make_strings_column( d_chars, d_offsets, d_bitmask, null_count, cudf::test::get_default_stream()); } /** * @brief Construct a non-nullable column of strings from a list of strings. * * Example: * @code{.cpp} * // Creates a non-nullable STRING column with 7 string elements: * // {"", "this", "is", "a", "column", "of", "strings"} * strings_column_wrapper s({"", "this", "is", "a", "column", "of", "strings"}); * @endcode * * @param strings The list of strings */ strings_column_wrapper(std::initializer_list<std::string> strings) : strings_column_wrapper(std::cbegin(strings), std::cend(strings)) { } /** * @brief Construct a nullable column of strings from a list of strings and * the range `[v, v + strings.size())` interpreted as booleans to indicate the * validity of each string. 
* * Example: * @code{.cpp} * // Creates a nullable STRING column with 7 string elements: * // {NULL, "this", NULL, "a", NULL, "of", NULL} * auto validity = make_counting_transform_iterator(0, [](auto i){return i%2;}); * strings_column_wrapper s({"", "this", "is", "a", "column", "of", "strings"}, validity); * @endcode * * @tparam ValidityIterator Dereferencing a ValidityIterator must be * convertible to `bool` * @param strings The list of strings * @param v The beginning of the sequence of validity indicators */ template <typename ValidityIterator> strings_column_wrapper(std::initializer_list<std::string> strings, ValidityIterator v) : strings_column_wrapper(std::cbegin(strings), std::cend(strings), v) { } /** * @brief Construct a nullable column of strings from a list of strings and * a list of booleans to indicate the validity of each string. * * Example: * @code{.cpp} * // Creates a nullable STRING column with 7 string elements: * // {NULL, "this", NULL, "a", NULL, "of", NULL} * strings_column_wrapper s({"", "this", "is", "a", "column", "of", "strings"}, * {0,1,0,1,0,1,0}); * @endcode * * @param strings The list of strings * @param validity The list of validity indicator booleans */ strings_column_wrapper(std::initializer_list<std::string> strings, std::initializer_list<bool> validity) : strings_column_wrapper(std::cbegin(strings), std::cend(strings), std::cbegin(validity)) { } /** * @brief Construct a nullable column from a list of pairs of strings * and validity booleans of each string. * * The validity of each string is determined by the boolean element in the pair * where `true` indicates the string is valid, and `false` indicates the * string is null. * * Example: * @code{.cpp} * // Creates a nullable STRING column with 7 string elements: * // {NULL, "this", NULL, "a", NULL, "of", NULL} * using p = std::pair<std::string, bool>; * strings_column_wrapper s( p{"", false}, p{"this", true}, p{"is", false}, * p{"a", true}, p{"column", false}, p{"of", true}, * p{"strings", false} ); * @endcode * * @param strings The list of pairs of strings and validity booleans */ strings_column_wrapper(std::initializer_list<std::pair<std::string, bool>> strings) { auto begin = thrust::make_transform_iterator(strings.begin(), [](auto const& s) { return s.first; }); auto end = begin + strings.size(); auto v = thrust::make_transform_iterator(strings.begin(), [](auto const& s) { return s.second; }); wrapped = strings_column_wrapper(begin, end, v).release(); } }; /** * @brief `column_wrapper` derived class for wrapping dictionary columns. * * This class handles fixed-width type keys. * * @tparam KeyElementTo Specify a fixed-width type for the key values of the dictionary * @tparam SourceElementTo For converting fixed-width values to the KeyElementTo */ template <typename KeyElementTo, typename SourceElementT = KeyElementTo> class dictionary_column_wrapper : public detail::column_wrapper { public: /** * @brief Cast to dictionary_column_view */ operator dictionary_column_view() const { return cudf::dictionary_column_view{wrapped->view()}; } /** * @brief Default constructor initializes an empty column with dictionary type. */ dictionary_column_wrapper() : column_wrapper{} { wrapped = cudf::make_empty_column(cudf::type_id::DICTIONARY32); } /** * @brief Construct a non-nullable dictionary column of the fixed-width elements in the * range `[begin,end)`. 
* * Example: * @code{.cpp} * // Creates a non-nullable dictionary column of INT32 elements with 5 elements * std::vector<int32_t> elements{0, 2, 2, 6, 6}; * dictionary_column_wrapper<int32_t> w(element.begin(), elements.end()); * // keys = {0, 2, 6}, indices = {0, 1, 1, 2, 2} * @endcode * * @note Similar to `std::vector`, this "range" constructor should be used * with parentheses `()` and not braces `{}`. The latter should only * be used for the `initializer_list` constructors. * * @param begin The beginning of the sequence of elements * @param end The end of the sequence of elements */ template <typename InputIterator> dictionary_column_wrapper(InputIterator begin, InputIterator end) : column_wrapper{} { wrapped = cudf::dictionary::encode(fixed_width_column_wrapper<KeyElementTo, SourceElementT>(begin, end), cudf::data_type{type_id::UINT32}, cudf::test::get_default_stream()); } /** * @brief Construct a nullable dictionary column of the fixed-width elements in the range * `[begin,end)` using the range `[v, v + distance(begin,end))` interpreted * as booleans to indicate the validity of each element. * * If `v[i] == true`, element `i` is valid, else it is null. * * Example: * @code{.cpp} * // Creates a nullable dictionary column with 5 elements and a validity iterator. * std::vector<int32_t> elements{0, 2, 0, 6, 0}; * // Validity iterator here sets even rows to null. * auto validity = make_counting_transform_iterator(0, [](auto i){return i%2;}) * dictionary_column_wrapper<int32_t> w(elements, elements + 5, validity); * // keys = {2, 6}, indices = {NULL, 0, NULL, 1, NULL} * @endcode * * @note Similar to `std::vector`, this "range" constructor should be used * with parentheses `()` and not braces `{}`. The latter should only * be used for the `initializer_list` constructors. * * @param begin The beginning of the sequence of elements * @param end The end of the sequence of elements * @param v The beginning of the sequence of validity indicators */ template <typename InputIterator, typename ValidityIterator> dictionary_column_wrapper(InputIterator begin, InputIterator end, ValidityIterator v) : column_wrapper{} { wrapped = cudf::dictionary::encode( fixed_width_column_wrapper<KeyElementTo, SourceElementT>(begin, end, v), cudf::data_type{type_id::UINT32}, cudf::test::get_default_stream()); } /** * @brief Construct a non-nullable dictionary column of fixed-width elements from an * initializer list. * * Example: * @code{.cpp} * // Creates a non-nullable dictionary column with 4 elements. * dictionary_column_wrapper<int32_t> w{{1, 2, 3, 1}}; * // keys = {1, 2, 3}, indices = {0, 1, 2, 0} * @endcode * * @param elements The list of elements */ template <typename ElementFrom> dictionary_column_wrapper(std::initializer_list<ElementFrom> elements) : dictionary_column_wrapper(std::cbegin(elements), std::cend(elements)) { } /** * @brief Construct a nullable dictionary column from a list of fixed-width elements * using another list to indicate the validity of each element. * * The validity of each element is determined by an `initializer_list` of * booleans where `true` indicates the element is valid, and `false` indicates * the element is null. * * Example: * @code{.cpp} * // Creates a nullable dictionary column with 4 elements and validity initializer. 
* dictionary_column_wrapper<int32_t> w{ {1, 0, 3, 0}, {1, 0, 1, 0}}; * // keys = {1, 3}, indices = {0, NULL, 1, NULL} * @endcode * * @param elements The list of elements * @param validity The list of validity indicator booleans */ template <typename ElementFrom> dictionary_column_wrapper(std::initializer_list<ElementFrom> elements, std::initializer_list<bool> validity) : dictionary_column_wrapper(std::cbegin(elements), std::cend(elements), std::cbegin(validity)) { } /** * @brief Construct a nullable dictionary column from a list of fixed-width elements and * the range `[v, v + element_list.size())` interpreted as booleans to * indicate the validity of each element. * * Example: * @code{.cpp} * // Creates a nullable dictionary column with 6 elements and a validity iterator. * // This validity iterator sets even rows to null. * auto validity = make_counting_transform_iterator(0, [](auto i){return i%2;}) * dictionary_column_wrapper<int32_t> w{ {0, 4, 0, 4, 0, 5}, validity} * // keys = {4, 5}, indices = {NULL, 0, NULL, 0, NULL, 1} * @endcode * * @tparam ValidityIterator Dereferencing a ValidityIterator must be convertible to `bool` * @param element_list The list of elements * @param v The beginning of the sequence of validity indicators */ template <typename ValidityIterator, typename ElementFrom> dictionary_column_wrapper(std::initializer_list<ElementFrom> element_list, ValidityIterator v) : dictionary_column_wrapper(std::cbegin(element_list), std::cend(element_list), v) { } /** * @brief Construct a nullable dictionary column of the fixed-width elements in the range * `[begin,end)` using a validity initializer list to indicate the validity of each element. * * The validity of each element is determined by an `initializer_list` of * booleans where `true` indicates the element is valid, and `false` indicates * the element is null. * * Example: * @code{.cpp} * // Creates a nullable column of dictionary elements with 5 elements and validity initializer. * std::vector<int32_t> elements{0, 2, 2, 6, 6}; * dictionary_width_column_wrapper<int32_t> w(elements, elements + 5, {0, 1, 0, 1, 0}); * // keys = {2, 6}, indices = {NULL, 0, NULL, 1, NULL} * @endcode * * @param begin The beginning of the sequence of elements * @param end The end of the sequence of elements * @param validity The list of validity indicator booleans */ template <typename InputIterator> dictionary_column_wrapper(InputIterator begin, InputIterator end, std::initializer_list<bool> const& validity) : dictionary_column_wrapper(begin, end, std::cbegin(validity)) { } }; /** * @brief `column_wrapper` derived class for wrapping a dictionary column with string keys. * * This is a specialization of the `dictionary_column_wrapper` class for strings. 
*/ template <> class dictionary_column_wrapper<std::string> : public detail::column_wrapper { public: /** * @brief Cast to dictionary_column_view * */ operator dictionary_column_view() const { return cudf::dictionary_column_view{wrapped->view()}; } /** * @brief Access keys column view * * @return column_view to keys column */ column_view keys() const { return cudf::dictionary_column_view{wrapped->view()}.keys(); } /** * @brief Access indices column view * * @return column_view to indices column */ column_view indices() const { return cudf::dictionary_column_view{wrapped->view()}.indices(); } /** * @brief Default constructor initializes an empty dictionary column of strings */ dictionary_column_wrapper() : dictionary_column_wrapper(std::initializer_list<std::string>{}) {} /** * @brief Construct a non-nullable dictionary column of strings from the range * `[begin,end)`. * * Values in the sequence `[begin,end)` will each be converted to *`std::string` and a dictionary column will be created by encoding the strings. * * Example: * @code{.cpp} * // Creates a non-nullable dictionary column with 7 string elements * std::vector<std::string> strings{"", "aaa", "bbb", "aaa", "bbb, "ccc", "bbb"}; * dictionary_column_wrapper<std::string> d(strings.begin(), strings.end()); * // keys = {"","aaa","bbb","ccc"}, indices = {0, 1, 2, 1, 2, 3, 2} * @endcode * * @tparam StringsIterator A `std::string` must be constructible from * dereferencing a `StringsIterator`. * @param begin The beginning of the sequence * @param end The end of the sequence */ template <typename StringsIterator> dictionary_column_wrapper(StringsIterator begin, StringsIterator end) : column_wrapper{} { wrapped = cudf::dictionary::encode(strings_column_wrapper(begin, end), cudf::data_type{type_id::UINT32}, cudf::test::get_default_stream()); } /** * @brief Construct a nullable dictionary column of strings from the range * `[begin,end)` using the range `[v, v + distance(begin,end))` interpreted * as booleans to indicate the validity of each string. * * Values in the sequence `[begin,end)` will each be converted to * `std::string` and a dictionary column will be created by encoding the strings. * * If `v[i] == true`, string `i` is valid, else it is treated as null row. * * Example: * @code{.cpp} * // Creates a nullable dictionary column with 7 strings elements and validity iterator. * std::vector<std::string> strings{"", "aaa", "", "aaa", "", "bbb", ""}; * // Validity iterator sets even rows to null. * auto validity = make_counting_transform_iterator(0, [](auto i){return i%2;}); * dictionary_column_wrapper<std::string> d(strings.begin(), strings.end(), validity); * // keys = {"aaa", "bbb"}, indices = {NULL, 0, NULL, 0, NULL, 1, NULL} * @endcode * * @tparam StringsIterator A `std::string` must be constructible from * dereferencing a `StringsIterator`. * @tparam ValidityIterator Dereferencing a ValidityIterator must be * convertible to `bool` * @param begin The beginning of the sequence * @param end The end of the sequence * @param v The beginning of the sequence of validity indicators */ template <typename StringsIterator, typename ValidityIterator> dictionary_column_wrapper(StringsIterator begin, StringsIterator end, ValidityIterator v) : column_wrapper{} { wrapped = cudf::dictionary::encode(strings_column_wrapper(begin, end, v), cudf::data_type{type_id::UINT32}, cudf::test::get_default_stream()); } /** * @brief Construct a non-nullable dictionary column of strings from a list of strings. 
* * Example: * @code{.cpp} * // Creates a non-nullable dictionary column with 7 string elements. * dictionary_column_wrapper<std::string> d({"", "bb", "a", "bb", "a", "ccc", "a"}); * // keys = {"","a","bb","ccc"}, indices = {0, 2, 1, 2, 1, 3, 1} * @endcode * * @param strings The list of strings */ dictionary_column_wrapper(std::initializer_list<std::string> strings) : dictionary_column_wrapper(std::cbegin(strings), std::cend(strings)) { } /** * @brief Construct a nullable dictionary column of strings from a list of strings and * the range `[v, v + strings.size())` interpreted as booleans to indicate the * validity of each string. * * Example: * @code{.cpp} * // Creates a nullable dictionary column with 7 string elements and a validity iterator. * // Validity iterator here sets even rows to null. * auto validity = make_counting_transform_iterator(0, [](auto i){return i%2;}); * dictionary_column_wrapper<std::string> d({"", "bb", "", "bb", "", "a", ""}, validity); * // keys = {"a", "bb"}, indices = {NULL, 1, NULL, 1, NULL, 0, NULL} * @endcode * * @tparam ValidityIterator Dereferencing a ValidityIterator must be convertible to `bool` * @param strings The list of strings * @param v The beginning of the sequence of validity indicators */ template <typename ValidityIterator> dictionary_column_wrapper(std::initializer_list<std::string> strings, ValidityIterator v) : dictionary_column_wrapper(std::cbegin(strings), std::cend(strings), v) { } /** * @brief Construct a nullable dictionary column of strings from a list of strings and * a list of booleans to indicate the validity of each string. * * Example: * @code{.cpp} * // Creates a nullable STRING column with 7 string elements and validity initializer. * dictionary_column_wrapper<std::string> ({"", "a", "", "bb", "", "ccc", ""}, * {0, 1, 0, 1, 0, 1, 0}); * // keys = {"a", "bb", "ccc"}, indices = {NULL, 0, NULL, 1, NULL, 2, NULL} * @endcode * * @param strings The list of strings * @param validity The list of validity indicator booleans */ dictionary_column_wrapper(std::initializer_list<std::string> strings, std::initializer_list<bool> validity) : dictionary_column_wrapper(std::cbegin(strings), std::cend(strings), std::cbegin(validity)) { } }; /** * @brief `column_wrapper` derived class for wrapping columns of lists. * * Important note : due to the way initializer lists work, there is a * non-obvious behavioral difference when declaring nested empty lists * in different situations. Specifically, * * - When compiled inside of a templated class function (such as a TYPED_TEST * cudf test wrapper), nested empty lists behave as they read, semantically. * * @code{.pseudo} * lists_column_wrapper<int> col{ {LCW{}} } * This yields a List<List<int>> column containing 1 row : a list * containing an empty list. * @endcode * * - When compiled under other situations (a global function, or a non * templated class function), the behavior is different. * * @code{.pseudo} * lists_column_wrapper<int> col{ {LCW{}} } * This yields a List<int> column containing 1 row that is an empty * list. * @endcode * * This only effects the initial nesting of the empty list. 
In summary, the * correct way to declare an "Empty List" in the two cases are: * * @code{.pseudo} * // situation 1 (cudf TYPED_TEST case) * LCW{} * // situation 2 (cudf TEST_F case) * {LCW{}} * @endcode */ template <typename T, typename SourceElementT = T> class lists_column_wrapper : public detail::column_wrapper { public: /** * @brief Cast to lists_column_view */ operator lists_column_view() const { return cudf::lists_column_view{wrapped->view()}; } /** * @brief Construct a lists column containing a single list of fixed-width * type from an initializer list of values. * * Example: * @code{.cpp} * Creates a LIST column with 1 list composed of 2 total integers * [{0, 1}] * lists_column_wrapper l{0, 1}; * @endcode * * @param elements The list of elements */ template <typename Element = T, std::enable_if_t<cudf::is_fixed_width<Element>()>* = nullptr> lists_column_wrapper(std::initializer_list<SourceElementT> elements) : column_wrapper{} { build_from_non_nested( std::move(cudf::test::fixed_width_column_wrapper<T, SourceElementT>(elements).release())); } /** * @brief Construct a lists column containing a single list of fixed-width * type from an iterator range. * * Example: * @code{.cpp} * // Creates a LIST column with 1 list composed of 5 total integers * auto elements = make_counting_transform_iterator(0, [](auto i){return i*2;}); * // [{0, 1, 2, 3, 4}] * lists_column_wrapper l(elements, elements+5); * @endcode * * @param begin Beginning of the sequence * @param end End of the sequence */ template <typename Element = T, typename InputIterator, std::enable_if_t<cudf::is_fixed_width<Element>()>* = nullptr> lists_column_wrapper(InputIterator begin, InputIterator end) : column_wrapper{} { build_from_non_nested( std::move(cudf::test::fixed_width_column_wrapper<T, SourceElementT>(begin, end).release())); } /** * @brief Construct a lists column containing a single list of fixed-width * type from an initializer list of values and a validity iterator. * * Example: * @code{.cpp} * // Creates a LIST column with 1 lists composed of 2 total integers * auto validity = make_counting_transform_iterator(0, [](auto i){return i%2;}); * // [{0, NULL}] * lists_column_wrapper l{{0, 1}, validity}; * @endcode * * @param elements The list of elements * @param v The validity iterator */ template <typename Element = T, typename ValidityIterator, std::enable_if_t<cudf::is_fixed_width<Element>()>* = nullptr> lists_column_wrapper(std::initializer_list<SourceElementT> elements, ValidityIterator v) : column_wrapper{} { build_from_non_nested( std::move(cudf::test::fixed_width_column_wrapper<T, SourceElementT>(elements, v).release())); } /** * @brief Construct a lists column containing a single list of fixed-width * type from an iterator range and a validity iterator. 
* * Example: * @code{.cpp} * // Creates a LIST column with 1 lists composed of 5 total integers * auto elements = make_counting_transform_iterator(0, [](auto i){return i*2;}); * auto validity = make_counting_transform_iterator(0, [](auto i){return i%2;}); * // [{0, NULL, 2, NULL, 4}] * lists_column_wrapper l(elements, elements+5, validity); * @endcode * * @param begin Beginning of the sequence * @param end End of the sequence * @param v The validity iterator */ template <typename Element = T, typename InputIterator, typename ValidityIterator, std::enable_if_t<cudf::is_fixed_width<Element>()>* = nullptr> lists_column_wrapper(InputIterator begin, InputIterator end, ValidityIterator v) : column_wrapper{} { build_from_non_nested(std::move( cudf::test::fixed_width_column_wrapper<T, SourceElementT>(begin, end, v).release())); } /** * @brief Construct a lists column containing a single list of strings * from an initializer list of values. * * Example: * @code{.cpp} * // Creates a LIST column with 1 list composed of 2 total strings * // [{"abc", "def"}] * lists_column_wrapper l{"abc", "def"}; * @endcode * * @param elements The list of elements */ template <typename Element = T, std::enable_if_t<std::is_same_v<Element, cudf::string_view>>* = nullptr> lists_column_wrapper(std::initializer_list<std::string> elements) : column_wrapper{} { build_from_non_nested( std::move(cudf::test::strings_column_wrapper(elements.begin(), elements.end()).release())); } /** * @brief Construct a lists column containing a single list of strings * from an initializer list of values and a validity iterator. * * Example: * @code{.cpp} * // Creates a LIST column with 1 list composed of 2 total strings * auto validity = make_counting_transform_iterator(0, [](auto i){return i%2;}); * // [{"abc", NULL}] * lists_column_wrapper l{{"abc", "def"}, validity}; * @endcode * * @param elements The list of elements * @param v The validity iterator */ template <typename Element = T, typename ValidityIterator, std::enable_if_t<std::is_same_v<Element, cudf::string_view>>* = nullptr> lists_column_wrapper(std::initializer_list<std::string> elements, ValidityIterator v) : column_wrapper{} { build_from_non_nested( std::move(cudf::test::strings_column_wrapper(elements.begin(), elements.end(), v).release())); } /** * @brief Construct a lists column of nested lists from an initializer list of values. * * Example: * @code{.cpp} * // Creates a LIST column with 3 lists * // [{0, 1}, {2, 3}, {4, 5}] * lists_column_wrapper l{ {0, 1}, {2, 3}, {4, 5} }; * @endcode * * Automatically handles nesting * Example: * @code{.cpp} * // Creates a LIST of LIST columns with 2 lists on the top level and * // 4 below * // [ {{0, 1}, {2, 3}}, {{4, 5}, {6, 7}} ] * lists_column_wrapper l{ {{0, 1}, {2, 3}}, {{4, 5}, {6, 7}} }; * @endcode * * @param elements The list of elements */ lists_column_wrapper(std::initializer_list<lists_column_wrapper<T, SourceElementT>> elements) : column_wrapper{} { std::vector<bool> valids; build_from_nested(elements, valids); } /** * @brief Construct am empty lists column * * Example: * @code{.cpp} * // Creates an empty LIST column * // [] * lists_column_wrapper l{}; * @endcode * */ lists_column_wrapper() : column_wrapper{} { build_from_non_nested(make_empty_column(cudf::type_to_id<T>())); } /** * @brief Construct a lists column of nested lists from an initializer list of values * and a validity iterator. 
* * Example: * @code{.cpp} * // Creates a LIST column with 3 lists * auto validity = make_counting_transform_iterator(0, [](auto i){return i%2;}); * // [{0, 1}, NULL, {4, 5}] * lists_column_wrapper l{ {{0, 1}, {2, 3}, {4, 5}, validity} }; * @endcode * * Automatically handles nesting * Example: * @code{.cpp} * // Creates a LIST of LIST columns with 2 lists on the top level and * // 4 below * auto validity = make_counting_transform_iterator(0, [](auto i){return i%2;}); * // [ {{0, 1}, NULL}, {{4, 5}, NULL} ] * lists_column_wrapper l{ {{{0, 1}, {2, 3}}, validity}, {{{4, 5}, {6, 7}}, validity} }; * @endcode * * @param elements The list of elements * @param v The validity iterator */ template <typename ValidityIterator> lists_column_wrapper(std::initializer_list<lists_column_wrapper<T, SourceElementT>> elements, ValidityIterator v) : column_wrapper{} { std::vector<bool> validity; std::transform(elements.begin(), elements.end(), v, std::back_inserter(validity), [](lists_column_wrapper const& l, bool valid) { return valid; }); build_from_nested(elements, validity); } /** * @brief Construct a list column containing a single empty, optionally null row. * * @param valid Whether or not the empty row is also null * @return A list column containing a single empty row */ static lists_column_wrapper<T> make_one_empty_row_column(bool valid = true) { cudf::test::fixed_width_column_wrapper<cudf::size_type> offsets{0, 0}; cudf::test::fixed_width_column_wrapper<int> values{}; return lists_column_wrapper<T>( 1, offsets.release(), values.release(), valid ? 0 : 1, valid ? rmm::device_buffer{} : cudf::create_null_mask(1, cudf::mask_state::ALL_NULL)); } private: /** * @brief Construct a list column from constituent parts. * * @param num_rows The number of lists the column represents * @param offsets The column of offset values for this column * @param values The column of values bounded by the offsets * @param null_count The number of null list entries * @param null_mask The bits specifying the null lists in device memory */ lists_column_wrapper(size_type num_rows, std::unique_ptr<cudf::column>&& offsets, std::unique_ptr<cudf::column>&& values, size_type null_count, rmm::device_buffer&& null_mask) { // construct the list column wrapped = make_lists_column(num_rows, std::move(offsets), std::move(values), null_count, std::move(null_mask), cudf::test::get_default_stream()); } /** * @brief Initialize as a nested list column composed of other list columns. * * This function handles a special case. For convenience of declaration, we want to treat these * two cases as equivalent * * List<int> = { 0, 1 } * List<int> = { {0, 1} } * * while at the same time, allowing further nesting * List<List<int> = { {{0, 1}} } * * @param c Input column to be wrapped * */ void build_from_nested(std::initializer_list<lists_column_wrapper<T, SourceElementT>> elements, std::vector<bool> const& v) { auto valids = cudf::detail::make_counting_transform_iterator( 0, [&v](auto i) { return v.empty() ? true : v[i]; }); // compute the expected hierarchy and depth auto const hierarchy_and_depth = std::accumulate(elements.begin(), elements.end(), std::pair<column_view, int32_t>{{}, -1}, [](auto acc, lists_column_wrapper const& lcw) { return lcw.depth > acc.second ? 
std::pair(lcw.get_view(), lcw.depth) : acc; }); column_view expected_hierarchy = hierarchy_and_depth.first; int32_t const expected_depth = hierarchy_and_depth.second; // preprocess columns so that every column_view in 'cols' is an equivalent hierarchy auto [cols, stubs] = preprocess_columns(elements, expected_hierarchy, expected_depth); // generate offsets size_type count = 0; std::vector<size_type> offsetv; std::transform(cols.cbegin(), cols.cend(), valids, std::back_inserter(offsetv), [&](cudf::column_view const& col, bool valid) { // nulls are represented as a repeated offset size_type ret = count; if (valid) { count += col.size(); } return ret; }); // add the final offset offsetv.push_back(count); auto offsets = cudf::test::fixed_width_column_wrapper<size_type>(offsetv.begin(), offsetv.end()).release(); // concatenate them together, skipping children that are null. std::vector<column_view> children; thrust::copy_if( std::cbegin(cols), std::cend(cols), valids, std::back_inserter(children), thrust::identity{}); auto data = children.empty() ? cudf::empty_like(expected_hierarchy) : cudf::concatenate(children, cudf::test::get_default_stream(), rmm::mr::get_current_device_resource()); // increment depth depth = expected_depth + 1; auto [null_mask, null_count] = [&] { if (v.size() <= 0) return std::make_pair(rmm::device_buffer{}, cudf::size_type{0}); return cudf::test::detail::make_null_mask(v.begin(), v.end()); }(); // construct the list column wrapped = make_lists_column(cols.size(), std::move(offsets), std::move(data), null_count, std::move(null_mask), cudf::test::get_default_stream()); } /** * @brief Initialize as a "root" list column from a non-list input column. Root columns * will be "unwrapped" when used in the nesting (list of lists) case. * * @param c Input column to be wrapped * */ void build_from_non_nested(std::unique_ptr<column> c) { CUDF_EXPECTS(c->type().id() == type_id::EMPTY || !cudf::is_nested(c->type()), "Unexpected type"); std::vector<size_type> offsetv; if (c->size() > 0) { offsetv.push_back(0); offsetv.push_back(c->size()); } auto offsets = cudf::test::fixed_width_column_wrapper<size_type>(offsetv.begin(), offsetv.end()).release(); // construct the list column. mark this as a root root = true; depth = 0; size_type num_elements = offsets->size() == 0 ? 0 : offsets->size() - 1; wrapped = make_lists_column(num_elements, std::move(offsets), std::move(c), 0, rmm::device_buffer{}, cudf::test::get_default_stream()); } /** * @brief Given an input column that may be an "incomplete hierarchy" due to being empty * at a level before the leaf, normalize it so that it matches the expected hierarchy of * sibling columns. * * cudf functions that handle lists expect that all columns are fully formed hierarchies, * even if they are empty somewhere in the middle of the hierarchy. * If we had the following lists_column_wrapper<int> declaration: * * @code{.pseudo} * [ {{{1, 2, 3}}}, {} ] * Row 0 in this case is a List<List<List<int>>>, where row 1 appears to be just a List<>. * @endcode * * These two columns will end up getting passed to cudf::concatenate() to merge. But * concatenate() will throw an exception because row 1 will appear to have a child type * of nothing, while row 0 will appear to have a child type of List<List<int>>. * To handle this cleanly, we want to "normalize" row 1 so that it appears as a * List<List<List<int>>> column even though it has 0 elements at the top level. 
* * This function also detects the case where the user has constructed a truly invalid * pair of columns, such as * * @code{.pseudo} * [ {{{1, 2, 3}}}, {4, 5} ] * Row 0 in this case is a List<List<List<int>>>, and row 1 is a concrete List<int> with * elements. This is purely an invalid way of constructing a lists column. * @endcode * * @param col Input column to be normalized * @param expected_hierarchy Input column which represents the expected hierarchy * * @return A new column representing a normalized copy of col */ std::unique_ptr<column> normalize_column(column_view const& col, column_view const& expected_hierarchy) { // if are at the bottom of the short column, it must be empty if (col.type().id() != type_id::LIST) { CUDF_EXPECTS(col.is_empty(), "Encountered mismatched column!"); auto remainder = empty_like(expected_hierarchy); return remainder; } lists_column_view lcv(col); return make_lists_column( col.size(), std::make_unique<column>(lcv.offsets()), normalize_column(lists_column_view(col).child(), lists_column_view(expected_hierarchy).child()), col.null_count(), cudf::detail::copy_bitmask( col, cudf::test::get_default_stream(), rmm::mr::get_current_device_resource()), cudf::test::get_default_stream()); } std::pair<std::vector<column_view>, std::vector<std::unique_ptr<column>>> preprocess_columns( std::initializer_list<lists_column_wrapper<T, SourceElementT>> const& elements, column_view& expected_hierarchy, int expected_depth) { std::vector<std::unique_ptr<column>> stubs; std::vector<column_view> cols; // preprocess the incoming lists. // - unwrap any "root" lists // - handle incomplete hierarchies std::transform(elements.begin(), elements.end(), std::back_inserter(cols), [&](lists_column_wrapper const& l) -> column_view { // depth mismatch. attempt to normalize the short column. // this function will also catch if this is a legitimately broken // set of input if (l.depth < expected_depth) { if (l.root) { // this exception distinguishes between the following two cases: // // { {{{1, 2, 3}}}, {} } // In this case, row 0 is a List<List<List<int>>>, whereas row 1 is // just a List<> which is an apparent mismatch. However, because row 1 // is empty we will allow that to semantically mean // "a List<List<List<int>>> that's empty at the top level" // // { {{{1, 2, 3}}}, {4, 5, 6} } // In this case, row 1 is a concrete List<int> with actual values. // There is no way to rectify the differences so we will treat it as a // true column mismatch. CUDF_EXPECTS(l.wrapped->size() == 0, "Mismatch in column types!"); stubs.push_back(empty_like(expected_hierarchy)); } else { stubs.push_back(normalize_column(l.get_view(), expected_hierarchy)); } return *(stubs.back()); } // the empty hierarchy case return l.get_view(); }); return {std::move(cols), std::move(stubs)}; } column_view get_view() const { return root ? lists_column_view(*wrapped).child() : *wrapped; } int depth = 0; bool root = false; }; /** * @brief `column_wrapper` derived class for wrapping columns of structs. */ class structs_column_wrapper : public detail::column_wrapper { public: /** * @brief Constructs a struct column from the specified list of pre-constructed child columns. * * The child columns are "adopted" by the struct column constructed here. * * Example usage: * @code{.cpp} * // The following constructs a column for struct< int, string >. 
   * auto child_int_col = fixed_width_column_wrapper<int32_t>{ 1, 2, 3, 4, 5 }.release();
   * auto child_string_col = strings_column_wrapper{"All", "the", "leaves", "are",
   * "brown"}.release();
   *
   * std::vector<std::unique_ptr<column>> child_columns;
   * child_columns.push_back(std::move(child_int_col));
   * child_columns.push_back(std::move(child_string_col));
   *
   * structs_column_wrapper struct_column_wrapper{
   *   std::move(child_columns),
   *   {1,0,1,0,1} // Validity.
   * };
   *
   * auto struct_col {struct_column_wrapper.release()};
   * @endcode
   *
   * @param child_columns The vector of pre-constructed child columns
   * @param validity The vector of bools representing the column validity values
   */
  structs_column_wrapper(std::vector<std::unique_ptr<cudf::column>>&& child_columns,
                         std::vector<bool> const& validity = {})
  {
    init(std::move(child_columns), validity);
  }

  /**
   * @brief Constructs a struct column from the list of column wrappers for child columns.
   *
   * Example usage:
   * @code{.cpp}
   * // The following constructs a column for struct< int, string >.
   * fixed_width_column_wrapper<int32_t> child_int_col_wrapper{ 1, 2, 3, 4, 5 };
   * strings_column_wrapper child_string_col_wrapper{"All", "the", "leaves", "are", "brown"};
   *
   * structs_column_wrapper struct_column_wrapper{
   *   {child_int_col_wrapper, child_string_col_wrapper},
   *   {1,0,1,0,1} // Validity.
   * };
   *
   * auto struct_col {struct_column_wrapper.release()};
   * @endcode
   *
   * @param child_column_wrappers The list of child column wrappers
   * @param validity The vector of bools representing the column validity values
   */
  structs_column_wrapper(
    std::initializer_list<std::reference_wrapper<detail::column_wrapper>> child_column_wrappers,
    std::vector<bool> const& validity = {})
  {
    std::vector<std::unique_ptr<cudf::column>> child_columns;
    child_columns.reserve(child_column_wrappers.size());
    std::transform(child_column_wrappers.begin(),
                   child_column_wrappers.end(),
                   std::back_inserter(child_columns),
                   [&](auto const& column_wrapper) {
                     return std::make_unique<cudf::column>(column_wrapper.get(),
                                                           cudf::test::get_default_stream());
                   });
    init(std::move(child_columns), validity);
  }

  /**
   * @brief Constructs a struct column from the list of column wrappers for child columns.
   *
   * Example usage:
   * @code{.cpp}
   * // The following constructs a column for struct< int, string >.
   * fixed_width_column_wrapper<int32_t> child_int_col_wrapper{ 1, 2, 3, 4, 5 };
   * strings_column_wrapper child_string_col_wrapper{"All", "the", "leaves", "are", "brown"};
   *
   * structs_column_wrapper struct_column_wrapper{
   *   {child_int_col_wrapper, child_string_col_wrapper},
   *   cudf::detail::make_counting_transform_iterator(0, [](auto i){ return i%2; }) // Validity.
* }; * * auto struct_col {struct_column_wrapper.release()}; * @endcode * * @param child_column_wrappers The list of child column wrappers * @param validity_iter Iterator returning the per-row validity bool */ template <typename V> structs_column_wrapper( std::initializer_list<std::reference_wrapper<detail::column_wrapper>> child_column_wrappers, V validity_iter) { std::vector<std::unique_ptr<cudf::column>> child_columns; child_columns.reserve(child_column_wrappers.size()); std::transform(child_column_wrappers.begin(), child_column_wrappers.end(), std::back_inserter(child_columns), [&](auto const& column_wrapper) { return std::make_unique<cudf::column>(column_wrapper.get(), cudf::test::get_default_stream()); }); init(std::move(child_columns), validity_iter); } private: void init(std::vector<std::unique_ptr<cudf::column>>&& child_columns, std::vector<bool> const& validity) { size_type num_rows = child_columns.empty() ? 0 : child_columns[0]->size(); CUDF_EXPECTS(std::all_of(child_columns.begin(), child_columns.end(), [&](auto const& p_column) { return p_column->size() == num_rows; }), "All struct member columns must have the same row count."); CUDF_EXPECTS(validity.size() <= 0 || static_cast<size_type>(validity.size()) == num_rows, "Validity buffer must have as many elements as rows in the struct column."); auto [null_mask, null_count] = [&] { if (validity.size() <= 0) return std::make_pair(rmm::device_buffer{}, cudf::size_type{0}); return cudf::test::detail::make_null_mask(validity.begin(), validity.end()); }(); wrapped = cudf::make_structs_column(num_rows, std::move(child_columns), null_count, std::move(null_mask), cudf::test::get_default_stream()); } template <typename V> void init(std::vector<std::unique_ptr<cudf::column>>&& child_columns, V validity_iterator) { size_type const num_rows = child_columns.empty() ? 0 : child_columns[0]->size(); CUDF_EXPECTS(std::all_of(child_columns.begin(), child_columns.end(), [&](auto const& p_column) { return p_column->size() == num_rows; }), "All struct member columns must have the same row count."); std::vector<bool> validity(num_rows); std::copy(validity_iterator, validity_iterator + num_rows, validity.begin()); init(std::move(child_columns), validity); } }; } // namespace test } // namespace cudf
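
// ---------------------------------------------------------------------------
// Usage sketch (illustrative addition, not part of the original header): shows
// how the wrappers above compose to build a nullable struct<int32, string>
// column for a test. The `examples` namespace and function name below are
// hypothetical; everything else is already declared in this header.
// ---------------------------------------------------------------------------
namespace cudf {
namespace test {
namespace examples {

inline std::unique_ptr<cudf::column> make_sample_struct_column()
{
  // Three rows of children; row 1 of the int child is null.
  fixed_width_column_wrapper<int32_t> ints{{1, 2, 3}, {1, 0, 1}};
  strings_column_wrapper strs{"a", "bb", "ccc"};

  // Struct-level validity: rows 0 and 1 are valid, row 2 is null.
  structs_column_wrapper structs{{ints, strs}, {1, 1, 0}};
  return structs.release();
}

}  // namespace examples
}  // namespace test
}  // namespace cudf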
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/cudf_test/type_list_utilities.hpp
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "cudf_gtest.hpp" /** * @file type_list_utilities.hpp * @brief Utilities for creating type lists for typed tests in Google Test * * A "type list" is a list of types passed to a Google Test type-parameterized * test suite. The set of tests in the suite will be invoked once for each type * in the list. Normally, this is done by using the `testing::Types` `struct` * provided by GTest. For example, * * ``` * using TestTypes = ::testing::Types<int, char, float>; * * template <class T> * class TestFixture : ::testing::Test { }; * * TYPED_TEST_SUITE(TestFixture, TestTypes); * * TYPED_TEST(TestFixture, mytest){ * using Type0 = GetType<TypeParam,0>; // the first type element * } * ``` * * The test `mytest` will be invoked 3 times, once for each of the types `int, * char, float`. * * Instead of using \::testing::Types directly, we provide * `cudf::test::Types`. This is a drop in replacement for GTest's * \::testing::Types. In lieu of including `gtest/gtest.h`, include * `cudf_gtest.hpp` to ensure `cudf::test::Types` is used. * * Using the utilities in this file, you can compose complex type lists. * * For example, `CrossProduct` may be used to compute the cross-product of two or * more type lists: * * ``` * using TestTypes = CrossProduct<Types<int,float>,Types<char, void*>>; * // TestTypes == Types< <int,char> <int,void*> <float,char> <float,void*> > * ``` * RemoveIf can be used to remove some parameters that match a given predicate: * * ``` * using TestTypes = RemoveIf<AllSame, CrossProduct<Types<int,char>, *Types<int,char>>>; * // TestTypes == Types< <int,char>,<char,int> > * ``` * * @note WARNING: Abusing and overusing these utilities can lead to dramatically * increased compile-times. Use responsibly. */ namespace cudf { namespace test { // Utilities for creating parameters for typed tests on GoogleTest // // Types is used to define type list, it's just an alias to ::testing::Types: // using Types = util::Types<int,char,float>; // // Types ----------------------------------------- using ::testing::Types; // @cond template <class T, int D> struct GetTypeImpl { static_assert(D == 0, "Out of bounds"); using type = T; }; template <class... T, int D> struct GetTypeImpl<Types<T...>, D> { static_assert(D < sizeof...(T), "Out of bounds"); using raw_type = decltype(std::get<D>(std::declval<std::tuple<T...>>())); using type = std::decay_t<raw_type>; }; template <class T, class... ARGS> struct GetTypeImpl<Types<T, ARGS...>, 0> { using type = T; }; // @endcond /** * @brief Gives the specified type from a type list * * Example: * ``` * using T = GetType< Types<int, float, char, void*>, 2> * // T == char * ``` * * @tparam TUPLE The type list * @tparam D Index of the desired type */ template <class TUPLE, int D> using GetType = typename GetTypeImpl<TUPLE, D>::type; // GetSize ------------------------------- // @cond template <class TUPLE> struct GetSizeImpl; template <class... 
TYPES> struct GetSizeImpl<Types<TYPES...>> { static constexpr auto value = sizeof...(TYPES); }; // @endcond /** * @brief Returns the size (number of elements) in a type list * * Example: * ``` * GetSize< Types<int, float, double, void*> == 4 * ``` */ template <class TUPLE> constexpr auto GetSize = GetSizeImpl<TUPLE>::value; // Concat ----------------------------------------- // @cond namespace detail { template <class A, class B> struct Concat2; template <class... T, class... U> struct Concat2<Types<T...>, Types<U...>> { using type = Types<T..., U...>; }; } // namespace detail template <class... T> struct ConcatImpl; template <class HEAD1, class HEAD2, class... TAIL> struct ConcatImpl<HEAD1, HEAD2, TAIL...> { using type = typename ConcatImpl<typename detail::Concat2<HEAD1, HEAD2>::type, TAIL...>::type; }; template <class A> struct ConcatImpl<A> { using type = A; }; template <class... A> struct ConcatImpl<Types<A...>> { using type = Types<A...>; }; template <> struct ConcatImpl<> { using type = Types<>; }; // @endcond /** * @brief Concatenates compile-time lists of types into a single type list. * * Example: * ``` * using MyTypes = Concat< Types<int, float>, Types<char, double>> * // MyTypes == Types<int, float, char, double>; * ``` */ template <class... T> using Concat = typename ConcatImpl<T...>::type; // Flatten ----------------------------------------- // @cond template <class T> struct FlattenImpl; template <> struct FlattenImpl<Types<>> { using type = Types<>; }; template <class HEAD, class... TAIL> struct FlattenImpl<Types<HEAD, TAIL...>> { using type = Concat<Types<HEAD>, typename FlattenImpl<Types<TAIL...>>::type>; }; template <class... HEAD, class... TAIL> struct FlattenImpl<Types<Types<HEAD...>, TAIL...>> { using type = typename FlattenImpl<Types<HEAD..., TAIL...>>::type; }; // @endcond /** * @brief Flattens nested compile-time lists of types into a single list of *types. * * Example: * ``` * // Flatten< Types< int, Types< double, Types<char> > > == Types<int, double, *char> static_assert(std::is_same<Flatten<Types<Types<int, Types<double>>, *float>>, Types<int, double, float>>::value, ""); * ``` */ template <class T> using Flatten = typename FlattenImpl<T>::type; // CrossProduct ----------------------------------------- // @cond namespace detail { // prepend T in TUPLE template <class T, class TUPLE> struct Prepend1; template <class T, class... ARGS> struct Prepend1<T, Types<ARGS...>> { using type = Flatten<Types<T, ARGS...>>; }; template <class T, class TUPLES> struct Prepend; // Prepend T in all TUPLES template <class T, class... TUPLES> struct Prepend<T, Types<TUPLES...>> { using type = Types<typename Prepend1<T, TUPLES>::type...>; }; // skip empty tuples template <class T, class... TUPLES> struct Prepend<T, Types<Types<>, TUPLES...>> : Prepend<T, Types<TUPLES...>> {}; } // namespace detail template <class... ARGS> struct CrossProductImpl; template <> struct CrossProductImpl<> { using type = Types<>; }; template <class... ARGS> struct CrossProductImpl<Types<ARGS...>> { using type = Types<Types<ARGS>...>; }; template <class... AARGS, class... TAIL> struct CrossProductImpl<Types<AARGS...>, TAIL...> { using type = Concat<typename detail::Prepend<AARGS, typename CrossProductImpl<TAIL...>::type>::type...>; }; // to make it easy for the user when there's only one element to be joined template <class T, class... 
TAIL> struct CrossProductImpl<T, TAIL...> : CrossProductImpl<Types<T>, TAIL...> {}; // @endcond /** * @brief Creates a new type list from the cross product (cartesian product) of * two type lists. * * @note This should be used with caution, as it can easily lead to a large * number of typed test cases. For example, computing the `CrossProduct` of two type * lists of size `n` and `m`, the resulting list will have `n*m` types. * * Example: * ``` * using Types = CrossProduct<Types<int,float>, Types<char, double>>; * // Types == Types< Types<int, char>, Types<int, double>, Types<float, char>, * Types<float, double> > * ``` */ template <class... ARGS> using CrossProduct = typename CrossProductImpl<ARGS...>::type; // AllSame ----------------------------------------- // @cond namespace detail { template <class... ITEMS> struct AllSame : std::false_type {}; // degenerate case template <class A> struct AllSame<A> : std::true_type {}; template <class A> struct AllSame<A, A> : std::true_type {}; template <class HEAD, class... TAIL> struct AllSame<HEAD, HEAD, TAIL...> : AllSame<HEAD, TAIL...> {}; template <class... ITEMS> struct AllSame<Types<ITEMS...>> : AllSame<ITEMS...> {}; } // namespace detail // @endcond /** * @brief Indicates if all types in a list are identical. * * This is useful as a predicate for `RemoveIf`. * * Example: * ``` * // AllSame::Call<Types<int, int, int>> == true_type * // AllSame::Call<Types<float, bool>> == false_type * * // Used as a predicate * RemoveIf<AllSame, Types<Types<int, int, int>>> == Types<> * RemoveIf<AllSame, Types<Types<int, float, int>>> == Types<Types<int, float, *int>> * ``` */ struct AllSame { /** * @brief Invoked as predicate for RemoveIf * * @tparam ITEMS The type to check if they are all same */ template <class... ITEMS> using Call = detail::AllSame<ITEMS...>; }; // Exists --------------------------------- // @cond // Do a linear search to find NEEDLE in HAYSACK template <class NEEDLE, class HAYSACK> struct ExistsImpl; // end case, no more types to check template <class NEEDLE> struct ExistsImpl<NEEDLE, Types<>> : std::false_type {}; // next one matches template <class NEEDLE, class... TAIL> struct ExistsImpl<NEEDLE, Types<NEEDLE, TAIL...>> : std::true_type {}; // next one doesn't match template <class NEEDLE, class HEAD, class... TAIL> struct ExistsImpl<NEEDLE, Types<HEAD, TAIL...>> : ExistsImpl<NEEDLE, Types<TAIL...>> {}; // @endcond /** * @brief Indicates if a type exists within a type list. * * Example: * ``` * // Exists<int, Types<float, double, int>> == true_type * // Exists<char, Types<int, float, void*>> == false_type * ``` * @tparam NEEDLE The type to search for * @tparam HAYSACK The list to search in */ template <class NEEDLE, class HAYSACK> constexpr bool Exists = ExistsImpl<NEEDLE, HAYSACK>::value; // ContainedIn ----------------------------------------- /** * @brief Indicates if a type exists within a type list. 
* * Used as a predicate for RemoveIf * * Example: * ``` * ContainedIn<Types<Types<int, char>>>::Call<Types<int, char>>::value == *true_type ContainedIn<Types<Types<int, char>>>::Call<Types<int, float>>::value *== false_type * * // Used as a predicate * using MyTypes = RemoveIf<ContainedIn<Types<Types<char, char>>>, * Types<Types<char, char>, Types<float,int>>>; * // MyTypes == Types<float, int> * * ``` * * @tparam HAYSACK The type list to search */ template <class HAYSACK> struct ContainedIn { /** * @brief Invoked as predicate for RemoveIf * * @tparam NEEDLE The type to search for */ template <class NEEDLE> using Call = ExistsImpl<NEEDLE, HAYSACK>; }; // RemoveIf ----------------------------------------- // @cond template <class PRED, class TUPLE> struct RemoveIfImpl; template <class PRED> struct RemoveIfImpl<PRED, Types<>> { using type = Types<>; }; template <class PRED, class HEAD, class... TAIL> struct RemoveIfImpl<PRED, Types<HEAD, TAIL...>> { using type = Concat<typename std::conditional<PRED::template Call<HEAD>::value, Types<>, Types<HEAD>>::type, typename RemoveIfImpl<PRED, Types<TAIL...>>::type>; }; // @endcond /** * @brief Removes types from a type list that satisfy a predicate * * Possible predicates: `AllSame`, `ContainedIn` * * Example: * ``` * RemoveIf<AllSame, Types<Types<int, int, int>>> == Types<> * RemoveIf<AllSame, Types<Types<int, float, int>>> == Types<Types<int, float, *int>> * * using MyTypes = RemoveIf<ContainedIn<Types<Types<char, char>>>, * Types<Types<char, char>, Types<float,int>>>; * // MyTypes == Types<float, int> * ``` * * @tparam PRED The predicate * @tparam TUPLE The list of types on which to apply the predicate */ template <class PRED, class TUPLE> using RemoveIf = typename RemoveIfImpl<PRED, TUPLE>::type; // Transform -------------------------------- // @cond template <class XFORM, class TYPES> struct TransformImpl; template <class XFORM, class... ITEMS> struct TransformImpl<XFORM, Types<ITEMS...>> { using type = Types<typename XFORM::template Call<ITEMS>...>; }; // @endcond /** * @brief Applies a transformation to every type in a type list * * Possible transformations: Repeat * * Example: * ``` * // Repeat transformation repeats each type for a specified count * using MyTypes = Transform<Repeat<2>, Types<int, float>>; * // MyTypes == Types< Types<int, int>, Types<float, float>>); * ``` * * @tparam XFORM The transformation to apply * @tparam TYPES The list of types to transform */ template <class XFORM, class TYPES> using Transform = typename TransformImpl<XFORM, TYPES>::type; // Repeat -------------------------------- // @cond namespace detail { template <class T, int N, class RES> struct Repeat; template <class T, int N, class... ITEMS> struct Repeat<T, N, Types<ITEMS...>> { using type = typename Repeat<T, N - 1, Types<T, ITEMS...>>::type; }; template <class T, class... ITEMS> struct Repeat<T, 0, Types<ITEMS...>> { using type = Types<ITEMS...>; }; } // namespace detail // @endcond /** * @brief Transformation that repeats a type for a specified count. * * Used in Transform. 
* * Example: * ``` * // Repeat transformation repeats each type for a specified count * using MyTypes = Transform<Repeat<2>, Types<int, float>>; * // MyTypes == Types< Types<int, int>, Types<float, float>>); * ``` * * @tparam N The number of times to repeat the type */ template <int N> struct Repeat { /** * @brief Invoked as predicate for Transform * * @tparam T The type to repeat */ template <class T> using Call = typename detail::Repeat<T, N, Types<>>::type; }; // Append -------------------------------- // @cond template <class TYPES, class... ITEMS> struct AppendImpl; template <class... HEAD, class... TAIL> struct AppendImpl<Types<HEAD...>, TAIL...> { using type = Types<HEAD..., TAIL...>; }; // @endcond /** * @brief Appends types to a type list * * Example: * ``` * using MyTypes = Append<Types<int>, float, char>; * MyTypes == Types<int, float, char>; * ``` * * @tparam TYPES The type list to append to * @tparam ITEMS The types to append */ template <class TYPES, class... ITEMS> using Append = typename AppendImpl<TYPES, ITEMS...>::type; // Remove ------------------------------------------- // remove items from tuple given by their indices // @cond namespace detail { template <class TUPLE, int CUR, int... IDXs> struct Remove; // nothing else to do? template <class... ITEMS, int CUR> struct Remove<Types<ITEMS...>, CUR> { using type = Types<ITEMS...>; }; // index match current item? template <class HEAD, class... TAIL, int CUR, int... IDXTAIL> struct Remove<Types<HEAD, TAIL...>, CUR, CUR, IDXTAIL...> { // remove it, and recurse into the remaining items using type = typename Remove<Types<TAIL...>, CUR + 1, IDXTAIL...>::type; }; // index doesn't match current item? template <class HEAD, class... TAIL, int CUR, int IDXHEAD, int... IDXTAIL> struct Remove<Types<HEAD, TAIL...>, CUR, IDXHEAD, IDXTAIL...> { static_assert(sizeof...(TAIL) + 1 > IDXHEAD - CUR, "Index out of bounds"); // add current item to output and recurse into the remaining items using type = Concat<Types<HEAD>, typename Remove<Types<TAIL...>, CUR + 1, IDXHEAD, IDXTAIL...>::type>; }; } // namespace detail template <class TUPLE, int... IDXs> struct RemoveImpl { using type = typename detail::Remove<TUPLE, 0, IDXs...>::type; }; // @endcond /** * @brief Removes types at specified indices from a type list. * * @tparam TUPLE Type list to remove types from * @tparam IDXs Indices of types to remove */ template <class TUPLE, int... IDXs> using Remove = typename RemoveImpl<TUPLE, IDXs...>::type; // Unique -------------------------------- // @cond namespace detail { template <class... ITEMS> struct Unique; template <> struct Unique<> { using type = Types<>; }; template <class HEAD, class... TAIL> struct Unique<HEAD, TAIL...> { using type = Concat<std::conditional_t<Exists<HEAD, Types<TAIL...>>, Types<>, Types<HEAD>>, typename Unique<TAIL...>::type>; }; } // namespace detail template <class TYPES> struct UniqueImpl; template <class... ITEMS> struct UniqueImpl<Types<ITEMS...>> { using type = typename detail::Unique<ITEMS...>::type; }; // @endcond /** * @brief Removes duplicate types from a type list * * Example: * ``` * using MyTypes = Unique<Types<int, float, int, float>>; * MyTypes == Types<int, float>) * ``` * * @tparam TYPES The type list from which to remove duplicates */ template <class TYPES> using Unique = typename UniqueImpl<TYPES>::type; } // namespace test } // namespace cudf
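
// ---------------------------------------------------------------------------
// Usage sketch (illustrative addition, not part of the original header): the
// compositions below are checked entirely at compile time. The
// `type_list_examples` namespace is hypothetical; the utilities used are the
// ones defined above.
// ---------------------------------------------------------------------------
namespace cudf {
namespace test {
namespace type_list_examples {

// Cross product of two 2-element lists yields 4 type pairs.
using Pairs = CrossProduct<Types<int, float>, Types<int, char>>;
static_assert(GetSize<Pairs> == 4, "2 x 2 cross product must have 4 entries");

// Dropping the pairs whose members are identical removes only Types<int, int>.
using MixedPairs = RemoveIf<AllSame, Pairs>;
static_assert(GetSize<MixedPairs> == 3, "exactly one pair should be removed");
static_assert(!Exists<Types<int, int>, MixedPairs>, "Types<int, int> should be removed");

// Such a list can be fed directly to a typed test suite, e.g.
//   TYPED_TEST_SUITE(MyFixture, cudf::test::type_list_examples::MixedPairs);

}  // namespace type_list_examples
}  // namespace test
}  // namespace cudf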
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/cudf_test/iterator_utilities.hpp
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/detail/iterator.cuh> #include <cudf/types.hpp> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <iterator> namespace cudf { namespace test { namespace iterators { /** * @brief Bool iterator for marking (possibly multiple) null elements in a column_wrapper. * * The returned iterator yields `false` (to mark `null`) at all the specified indices, * and yields `true` (to mark valid rows) for all other indices. E.g. * * @code * auto indices = std::vector<size_type>{8,9}; * auto iter = nulls_at(indices.cbegin(), indices.end()); * iter[6] == true; // i.e. Valid row at index 6. * iter[7] == true; // i.e. Valid row at index 7. * iter[8] == false; // i.e. Invalid row at index 8. * iter[9] == false; // i.e. Invalid row at index 9. * @endcode * * @tparam Iter Iterator type * @param index_start Iterator to start of indices for which the validity iterator * must return `false` (i.e. null) * @param index_end Iterator to end of indices for the validity iterator * @return auto Validity iterator */ template <typename Iter> [[maybe_unused]] static auto nulls_at(Iter index_start, Iter index_end) { using index_type = typename std::iterator_traits<Iter>::value_type; return cudf::detail::make_counting_transform_iterator( 0, [indices = std::vector<index_type>{index_start, index_end}](auto i) { return std::find(indices.cbegin(), indices.cend(), i) == indices.cend(); }); } /** * @brief Bool iterator for marking (possibly multiple) null elements in a column_wrapper. * * The returned iterator yields `false` (to mark `null`) at all the specified indices, * and yields `true` (to mark valid rows) for all other indices. E.g. * * @code * auto iter = nulls_at({8,9}); * iter[6] == true; // i.e. Valid row at index 6. * iter[7] == true; // i.e. Valid row at index 7. * iter[8] == false; // i.e. Invalid row at index 8. * iter[9] == false; // i.e. Invalid row at index 9. * @endcode * * @param indices The indices for which the validity iterator must return `false` (i.e. null) * @return auto Validity iterator */ [[maybe_unused]] static auto nulls_at(std::vector<cudf::size_type> const& indices) { return nulls_at(indices.cbegin(), indices.cend()); } /** * @brief Bool iterator for marking a single null element in a column_wrapper * * The returned iterator yields `false` (to mark `null`) at the specified index, * and yields `true` (to mark valid rows) for all other indices. E.g. * * @code * auto iter = null_at(8); * iter[7] == true; // i.e. Valid row at index 7. * iter[8] == false; // i.e. Invalid row at index 8. * @endcode * * @param index The index for which the validity iterator must return `false` (i.e. 
null) * @return auto Validity iterator */ [[maybe_unused]] static auto null_at(cudf::size_type index) { return nulls_at(std::vector<cudf::size_type>{index}); } /** * @brief Bool iterator for marking all elements are null * * @return auto Validity iterator which always yields `false` */ [[maybe_unused]] static auto all_nulls() { return thrust::make_constant_iterator(false); } /** * @brief Bool iterator for marking all elements are valid (non-null) * * @return auto Validity iterator which always yields `true` */ [[maybe_unused]] static auto no_nulls() { return thrust::make_constant_iterator(true); } /** * @brief Bool iterator for marking null elements from pointers of data * * The returned iterator yields `false` (to mark `null`) at the indices corresponding to the * pointers having `nullptr` values and `true` for the remaining indices. * * @note The input vector is referenced by the transform iterator, so the * lifespan must be just as long as the iterator. * * @tparam T the data type * @param ptrs The data pointers for which the validity iterator is computed * @return auto Validity iterator */ template <class T> [[maybe_unused]] static auto nulls_from_nullptrs(std::vector<T const*> const& ptrs) { return thrust::make_transform_iterator(ptrs.begin(), [](auto ptr) { return ptr != nullptr; }); } } // namespace iterators } // namespace test } // namespace cudf
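
// ---------------------------------------------------------------------------
// Usage sketch (illustrative addition, not part of the original header): shows
// what the validity factories above yield when materialized on the host. The
// `examples` namespace and `validity_prefix` name are hypothetical; <vector>
// is assumed to be available transitively, as std::vector is already used in
// this header's signatures.
// ---------------------------------------------------------------------------
namespace cudf {
namespace test {
namespace iterators {
namespace examples {

// Materialize the first `n` validity flags produced by nulls_at() into a
// std::vector<bool>. For instance, validity_prefix({1, 3}, 5) yields
// {true, false, true, false, true}.
[[maybe_unused]] static std::vector<bool> validity_prefix(
  std::vector<cudf::size_type> const& null_indices, cudf::size_type n)
{
  auto it = nulls_at(null_indices);
  return std::vector<bool>(it, it + n);
}

}  // namespace examples
}  // namespace iterators
}  // namespace test
}  // namespace cudf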
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/cudf_test/cxxopts.hpp
/* Copyright (c) 2014, 2015, 2016, 2017 Jarryd Beck Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef CXXOPTS_HPP_INCLUDED #define CXXOPTS_HPP_INCLUDED #ifndef DOXYGEN_SHOULD_SKIP_THIS #include <cctype> #include <cstring> #include <exception> #include <iostream> #include <limits> #include <map> #include <memory> #include <regex> #include <sstream> #include <string> #include <unordered_map> #include <unordered_set> #include <vector> #ifndef CXXOPTS_VECTOR_DELIMITER #define CXXOPTS_VECTOR_DELIMITER ',' #endif #define CXXOPTS__VERSION_MAJOR 2 #define CXXOPTS__VERSION_MINOR 2 #define CXXOPTS__VERSION_PATCH 0 namespace cxxopts { static constexpr struct { uint8_t major, minor, patch; } version = {CXXOPTS__VERSION_MAJOR, CXXOPTS__VERSION_MINOR, CXXOPTS__VERSION_PATCH}; } // namespace cxxopts // when we ask cxxopts to use Unicode, help strings are processed using ICU, // which results in the correct lengths being computed for strings when they // are formatted for the help output // it is necessary to make sure that <unicode/unistr.h> can be found by the // compiler, and that icu-uc is linked in to the binary. 
#ifdef CXXOPTS_USE_UNICODE #include <unicode/unistr.h> namespace cxxopts { typedef icu::UnicodeString String; inline String toLocalString(std::string s) { return icu::UnicodeString::fromUTF8(std::move(s)); } class UnicodeStringIterator : public std::iterator<std::forward_iterator_tag, int32_t> { public: UnicodeStringIterator(icu::UnicodeString const* string, int32_t pos) : s(string), i(pos) {} value_type operator*() const { return s->char32At(i); } bool operator==(UnicodeStringIterator const& rhs) const { return s == rhs.s && i == rhs.i; } bool operator!=(UnicodeStringIterator const& rhs) const { return !(*this == rhs); } UnicodeStringIterator& operator++() { ++i; return *this; } UnicodeStringIterator operator+(int32_t v) { return UnicodeStringIterator(s, i + v); } private: icu::UnicodeString const* s; int32_t i; }; inline String& stringAppend(String& s, String a) { return s.append(std::move(a)); } inline String& stringAppend(String& s, int n, UChar32 c) { for (int i = 0; i != n; ++i) { s.append(c); } return s; } template <typename Iterator> String& stringAppend(String& s, Iterator begin, Iterator end) { while (begin != end) { s.append(*begin); ++begin; } return s; } inline size_t stringLength(String const& s) { return s.length(); } inline std::string toUTF8String(String const& s) { std::string result; s.toUTF8String(result); return result; } inline bool empty(String const& s) { return s.isEmpty(); } } // namespace cxxopts namespace std { inline cxxopts::UnicodeStringIterator begin(icu::UnicodeString const& s) { return cxxopts::UnicodeStringIterator(&s, 0); } inline cxxopts::UnicodeStringIterator end(icu::UnicodeString const& s) { return cxxopts::UnicodeStringIterator(&s, s.length()); } } // namespace std // ifdef CXXOPTS_USE_UNICODE #else namespace cxxopts { typedef std::string String; template <typename T> T toLocalString(T&& t) { return std::forward<T>(t); } inline size_t stringLength(String const& s) { return s.length(); } inline String& stringAppend(String& s, String a) { return s.append(std::move(a)); } inline String& stringAppend(String& s, size_t n, char c) { return s.append(n, c); } template <typename Iterator> String& stringAppend(String& s, Iterator begin, Iterator end) { return s.append(begin, end); } template <typename T> std::string toUTF8String(T&& t) { return std::forward<T>(t); } inline bool empty(std::string const& s) { return s.empty(); } } // namespace cxxopts // ifdef CXXOPTS_USE_UNICODE #endif namespace cxxopts { namespace { #ifdef _WIN32 const std::string LQUOTE("\'"); const std::string RQUOTE("\'"); #else const std::string LQUOTE("‘"); const std::string RQUOTE("’"); #endif } // namespace class Value : public std::enable_shared_from_this<Value> { public: virtual ~Value() = default; virtual std::shared_ptr<Value> clone() const = 0; virtual void parse(std::string const& text) const = 0; virtual void parse() const = 0; virtual bool has_default() const = 0; virtual bool is_container() const = 0; virtual bool has_implicit() const = 0; virtual std::string get_default_value() const = 0; virtual std::string get_implicit_value() const = 0; virtual std::shared_ptr<Value> default_value(std::string const& value) = 0; virtual std::shared_ptr<Value> implicit_value(std::string const& value) = 0; virtual std::shared_ptr<Value> no_implicit_value() = 0; virtual bool is_boolean() const = 0; }; class OptionException : public std::exception { public: OptionException(std::string const& message) : m_message(message) {} virtual char const* what() const noexcept { return m_message.c_str(); } 
private: std::string m_message; }; class OptionSpecException : public OptionException { public: OptionSpecException(std::string const& message) : OptionException(message) {} }; class OptionParseException : public OptionException { public: OptionParseException(std::string const& message) : OptionException(message) {} }; class option_exists_error : public OptionSpecException { public: option_exists_error(std::string const& option) : OptionSpecException("Option " + LQUOTE + option + RQUOTE + " already exists") { } }; class invalid_option_format_error : public OptionSpecException { public: invalid_option_format_error(std::string const& format) : OptionSpecException("Invalid option format " + LQUOTE + format + RQUOTE) { } }; class option_syntax_exception : public OptionParseException { public: option_syntax_exception(std::string const& text) : OptionParseException("Argument " + LQUOTE + text + RQUOTE + " starts with a - but has incorrect syntax") { } }; class option_not_exists_exception : public OptionParseException { public: option_not_exists_exception(std::string const& option) : OptionParseException("Option " + LQUOTE + option + RQUOTE + " does not exist") { } }; class missing_argument_exception : public OptionParseException { public: missing_argument_exception(std::string const& option) : OptionParseException("Option " + LQUOTE + option + RQUOTE + " is missing an argument") { } }; class option_requires_argument_exception : public OptionParseException { public: option_requires_argument_exception(std::string const& option) : OptionParseException("Option " + LQUOTE + option + RQUOTE + " requires an argument") { } }; class option_not_has_argument_exception : public OptionParseException { public: option_not_has_argument_exception(std::string const& option, std::string const& arg) : OptionParseException("Option " + LQUOTE + option + RQUOTE + " does not take an argument, but argument " + LQUOTE + arg + RQUOTE + " given") { } }; class option_not_present_exception : public OptionParseException { public: option_not_present_exception(std::string const& option) : OptionParseException("Option " + LQUOTE + option + RQUOTE + " not present") { } }; class argument_incorrect_type : public OptionParseException { public: argument_incorrect_type(std::string const& arg) : OptionParseException("Argument " + LQUOTE + arg + RQUOTE + " failed to parse") { } }; class option_required_exception : public OptionParseException { public: option_required_exception(std::string const& option) : OptionParseException("Option " + LQUOTE + option + RQUOTE + " is required but not present") { } }; template <typename T> void throw_or_mimic(std::string const& text) { static_assert(std::is_base_of<std::exception, T>::value, "throw_or_mimic only works on std::exception and " "deriving classes"); #ifndef CXXOPTS_NO_EXCEPTIONS // If CXXOPTS_NO_EXCEPTIONS is not defined, just throw throw T{text}; #else // Otherwise manually instantiate the exception, print what() to stderr, // and abort T exception{text}; std::cerr << exception.what() << std::endl; std::cerr << "Aborting (exceptions disabled)..." 
<< std::endl; std::abort(); #endif } namespace values { namespace { std::basic_regex<char> integer_pattern("(-)?(0x)?([0-9a-zA-Z]+)|((0x)?0)"); std::basic_regex<char> truthy_pattern("(t|T)(rue)?|1"); std::basic_regex<char> falsy_pattern("(f|F)(alse)?|0"); } // namespace namespace detail { template <typename T, bool B> struct SignedCheck; template <typename T> struct SignedCheck<T, true> { template <typename U> void operator()(bool negative, U u, std::string const& text) { if (negative) { if (u > static_cast<U>((std::numeric_limits<T>::min)())) { throw_or_mimic<argument_incorrect_type>(text); } } else { if (u > static_cast<U>((std::numeric_limits<T>::max)())) { throw_or_mimic<argument_incorrect_type>(text); } } } }; template <typename T> struct SignedCheck<T, false> { template <typename U> void operator()(bool, U, std::string const&) { } }; template <typename T, typename U> void check_signed_range(bool negative, U value, std::string const& text) { SignedCheck<T, std::numeric_limits<T>::is_signed>()(negative, value, text); } } // namespace detail template <typename R, typename T> R checked_negate(T&& t, std::string const&, std::true_type) { // if we got to here, then `t` is a positive number that fits into // `R`. So to avoid MSVC C4146, we first cast it to `R`. // See https://github.com/jarro2783/cxxopts/issues/62 for more details. return static_cast<R>(-static_cast<R>(t - 1) - 1); } template <typename R, typename T> T checked_negate(T&& t, std::string const& text, std::false_type) { throw_or_mimic<argument_incorrect_type>(text); return t; } template <typename T> void integer_parser(std::string const& text, T& value) { std::smatch match; std::regex_match(text, match, integer_pattern); if (match.length() == 0) { throw_or_mimic<argument_incorrect_type>(text); } if (match.length(4) > 0) { value = 0; return; } using US = typename std::make_unsigned<T>::type; constexpr bool is_signed = std::numeric_limits<T>::is_signed; bool const negative = match.length(1) > 0; const uint8_t base = match.length(2) > 0 ? 
16 : 10; auto value_match = match[3]; US result = 0; for (auto iter = value_match.first; iter != value_match.second; ++iter) { US digit = 0; if (*iter >= '0' && *iter <= '9') { digit = static_cast<US>(*iter - '0'); } else if (base == 16 && *iter >= 'a' && *iter <= 'f') { digit = static_cast<US>(*iter - 'a' + 10); } else if (base == 16 && *iter >= 'A' && *iter <= 'F') { digit = static_cast<US>(*iter - 'A' + 10); } else { throw_or_mimic<argument_incorrect_type>(text); } const US next = static_cast<US>(result * base + digit); if (result > next) { throw_or_mimic<argument_incorrect_type>(text); } result = next; } detail::check_signed_range<T>(negative, result, text); if (negative) { value = checked_negate<T>(result, text, std::integral_constant<bool, is_signed>()); } else { value = static_cast<T>(result); } } template <typename T> void stringstream_parser(std::string const& text, T& value) { std::stringstream in(text); in >> value; if (!in) { throw_or_mimic<argument_incorrect_type>(text); } } inline void parse_value(std::string const& text, uint8_t& value) { integer_parser(text, value); } inline void parse_value(std::string const& text, int8_t& value) { integer_parser(text, value); } inline void parse_value(std::string const& text, uint16_t& value) { integer_parser(text, value); } inline void parse_value(std::string const& text, int16_t& value) { integer_parser(text, value); } inline void parse_value(std::string const& text, uint32_t& value) { integer_parser(text, value); } inline void parse_value(std::string const& text, int32_t& value) { integer_parser(text, value); } inline void parse_value(std::string const& text, uint64_t& value) { integer_parser(text, value); } inline void parse_value(std::string const& text, int64_t& value) { integer_parser(text, value); } inline void parse_value(std::string const& text, bool& value) { std::smatch result; std::regex_match(text, result, truthy_pattern); if (!result.empty()) { value = true; return; } std::regex_match(text, result, falsy_pattern); if (!result.empty()) { value = false; return; } throw_or_mimic<argument_incorrect_type>(text); } inline void parse_value(std::string const& text, std::string& value) { value = text; } // The fallback parser. It uses the stringstream parser to parse all types // that have not been overloaded explicitly. It has to be placed in the // source code before all other more specialized templates. 
template <typename T> void parse_value(std::string const& text, T& value) { stringstream_parser(text, value); } template <typename T> void parse_value(std::string const& text, std::vector<T>& value) { std::stringstream in(text); std::string token; while (in.eof() == false && std::getline(in, token, CXXOPTS_VECTOR_DELIMITER)) { T v; parse_value(token, v); value.emplace_back(std::move(v)); } } inline void parse_value(std::string const& text, char& c) { if (text.length() != 1) { throw_or_mimic<argument_incorrect_type>(text); } c = text[0]; } template <typename T> struct type_is_container { static constexpr bool value = false; }; template <typename T> struct type_is_container<std::vector<T>> { static constexpr bool value = true; }; template <typename T> class abstract_value : public Value { using Self = abstract_value<T>; public: abstract_value() : m_result(std::make_shared<T>()), m_store(m_result.get()) {} abstract_value(T* t) : m_store(t) {} virtual ~abstract_value() = default; abstract_value(abstract_value const& rhs) { if (rhs.m_result) { m_result = std::make_shared<T>(); m_store = m_result.get(); } else { m_store = rhs.m_store; } m_default = rhs.m_default; m_implicit = rhs.m_implicit; m_default_value = rhs.m_default_value; m_implicit_value = rhs.m_implicit_value; } void parse(std::string const& text) const { parse_value(text, *m_store); } bool is_container() const { return type_is_container<T>::value; } void parse() const { parse_value(m_default_value, *m_store); } bool has_default() const { return m_default; } bool has_implicit() const { return m_implicit; } std::shared_ptr<Value> default_value(std::string const& value) { m_default = true; m_default_value = value; return shared_from_this(); } std::shared_ptr<Value> implicit_value(std::string const& value) { m_implicit = true; m_implicit_value = value; return shared_from_this(); } std::shared_ptr<Value> no_implicit_value() { m_implicit = false; return shared_from_this(); } std::string get_default_value() const { return m_default_value; } std::string get_implicit_value() const { return m_implicit_value; } bool is_boolean() const { return std::is_same_v<T, bool>; } T const& get() const { if (m_store == nullptr) { return *m_result; } else { return *m_store; } } protected: std::shared_ptr<T> m_result; T* m_store; bool m_default = false; bool m_implicit = false; std::string m_default_value; std::string m_implicit_value; }; template <typename T> class standard_value : public abstract_value<T> { public: using abstract_value<T>::abstract_value; std::shared_ptr<Value> clone() const { return std::make_shared<standard_value<T>>(*this); } }; template <> class standard_value<bool> : public abstract_value<bool> { public: ~standard_value() = default; standard_value() { set_default_and_implicit(); } standard_value(bool* b) : abstract_value(b) { set_default_and_implicit(); } std::shared_ptr<Value> clone() const { return std::make_shared<standard_value<bool>>(*this); } private: void set_default_and_implicit() { m_default = true; m_default_value = "false"; m_implicit = true; m_implicit_value = "true"; } }; } // namespace values template <typename T> std::shared_ptr<Value> value() { return std::make_shared<values::standard_value<T>>(); } template <typename T> std::shared_ptr<Value> value(T& t) { return std::make_shared<values::standard_value<T>>(&t); } class OptionAdder; class OptionDetails { public: OptionDetails(std::string const& short_, std::string const& long_, String const& desc, std::shared_ptr<Value const> val) : m_short(short_), m_long(long_), 
m_desc(desc), m_value(val), m_count(0) { } OptionDetails(OptionDetails const& rhs) : m_desc(rhs.m_desc), m_count(rhs.m_count) { m_value = rhs.m_value->clone(); } OptionDetails(OptionDetails&& rhs) = default; String const& description() const { return m_desc; } Value const& value() const { return *m_value; } std::shared_ptr<Value> make_storage() const { return m_value->clone(); } std::string const& short_name() const { return m_short; } std::string const& long_name() const { return m_long; } private: std::string m_short; std::string m_long; String m_desc; std::shared_ptr<Value const> m_value; int m_count; }; struct HelpOptionDetails { std::string s; std::string l; String desc; bool has_default; std::string default_value; bool has_implicit; std::string implicit_value; std::string arg_help; bool is_container; bool is_boolean; }; struct HelpGroupDetails { std::string name; std::string description; std::vector<HelpOptionDetails> options; }; class OptionValue { public: void parse(std::shared_ptr<OptionDetails const> details, std::string const& text) { ensure_value(details); ++m_count; m_value->parse(text); } void parse_default(std::shared_ptr<OptionDetails const> details) { ensure_value(details); m_default = true; m_value->parse(); } size_t count() const noexcept { return m_count; } // TODO: maybe default options should count towards the number of arguments bool has_default() const noexcept { return m_default; } template <typename T> T const& as() const { if (m_value == nullptr) { throw_or_mimic<std::domain_error>("No value"); } #ifdef CXXOPTS_NO_RTTI return static_cast<values::standard_value<T> const&>(*m_value).get(); #else return dynamic_cast<values::standard_value<T> const&>(*m_value).get(); #endif } private: void ensure_value(std::shared_ptr<OptionDetails const> details) { if (m_value == nullptr) { m_value = details->make_storage(); } } std::shared_ptr<Value> m_value; size_t m_count = 0; bool m_default = false; }; class KeyValue { public: KeyValue(std::string key_, std::string value_) : m_key(std::move(key_)), m_value(std::move(value_)) { } std::string const& key() const { return m_key; } std::string const& value() const { return m_value; } template <typename T> T as() const { T result; values::parse_value(m_value, result); return result; } private: std::string m_key; std::string m_value; }; class ParseResult { public: ParseResult( const std::shared_ptr<std::unordered_map<std::string, std::shared_ptr<OptionDetails>>>, std::vector<std::string>, bool allow_unrecognised, int&, char**&); size_t count(std::string const& o) const { auto iter = m_options->find(o); if (iter == m_options->end()) { return 0; } auto riter = m_results.find(iter->second); return riter->second.count(); } OptionValue const& operator[](std::string const& option) const { auto iter = m_options->find(option); if (iter == m_options->end()) { throw_or_mimic<option_not_present_exception>(option); } auto riter = m_results.find(iter->second); return riter->second; } std::vector<KeyValue> const& arguments() const { return m_sequential; } private: void parse(int& argc, char**& argv); void add_to_option(std::string const& option, std::string const& arg); bool consume_positional(std::string a); void parse_option(std::shared_ptr<OptionDetails> value, std::string const& name, std::string const& arg = ""); void parse_default(std::shared_ptr<OptionDetails> details); void checked_parse_arg(int argc, char* argv[], int& current, std::shared_ptr<OptionDetails> value, std::string const& name); const 
std::shared_ptr<std::unordered_map<std::string, std::shared_ptr<OptionDetails>>> m_options; std::vector<std::string> m_positional; std::vector<std::string>::iterator m_next_positional; std::unordered_set<std::string> m_positional_set; std::unordered_map<std::shared_ptr<OptionDetails>, OptionValue> m_results; bool m_allow_unrecognised; std::vector<KeyValue> m_sequential; }; struct Option { Option(std::string const& opts, std::string const& desc, std::shared_ptr<Value const> const& value = ::cxxopts::value<bool>(), std::string const& arg_help = "") : opts_(opts), desc_(desc), value_(value), arg_help_(arg_help) { } std::string opts_; std::string desc_; std::shared_ptr<Value const> value_; std::string arg_help_; }; class Options { typedef std::unordered_map<std::string, std::shared_ptr<OptionDetails>> OptionMap; public: Options(std::string program, std::string help_string = "") : m_program(std::move(program)), m_help_string(toLocalString(std::move(help_string))), m_custom_help("[OPTION...]"), m_positional_help("positional parameters"), m_show_positional(false), m_allow_unrecognised(false), m_options(std::make_shared<OptionMap>()), m_next_positional(m_positional.end()) { } Options& positional_help(std::string help_text) { m_positional_help = std::move(help_text); return *this; } Options& custom_help(std::string help_text) { m_custom_help = std::move(help_text); return *this; } Options& show_positional_help() { m_show_positional = true; return *this; } Options& allow_unrecognised_options() { m_allow_unrecognised = true; return *this; } ParseResult parse(int& argc, char**& argv); OptionAdder add_options(std::string group = ""); void add_options(std::string const& group, std::initializer_list<Option> options); void add_option(std::string const& group, Option const& option); void add_option(std::string const& group, std::string const& s, std::string const& l, std::string desc, std::shared_ptr<Value const> value, std::string arg_help); // parse positional arguments into the given option void parse_positional(std::string option); void parse_positional(std::vector<std::string> options); void parse_positional(std::initializer_list<std::string> options); template <typename Iterator> void parse_positional(Iterator begin, Iterator end) { parse_positional(std::vector<std::string>{begin, end}); } std::string help(std::vector<std::string> const& groups = {}) const; const std::vector<std::string> groups() const; HelpGroupDetails const& group_help(std::string const& group) const; private: void add_one_option(std::string const& option, std::shared_ptr<OptionDetails> details); String help_one_group(std::string const& group) const; void generate_group_help(String& result, std::vector<std::string> const& groups) const; void generate_all_groups_help(String& result) const; std::string m_program; String m_help_string; std::string m_custom_help; std::string m_positional_help; bool m_show_positional; bool m_allow_unrecognised; std::shared_ptr<OptionMap> m_options; std::vector<std::string> m_positional; std::vector<std::string>::iterator m_next_positional; std::unordered_set<std::string> m_positional_set; // mapping from groups to help options std::map<std::string, HelpGroupDetails> m_help; }; class OptionAdder { public: OptionAdder(Options& options, std::string group) : m_options(options), m_group(std::move(group)) { } OptionAdder& operator()(std::string const& opts, std::string const& desc, std::shared_ptr<Value const> value = ::cxxopts::value<bool>(), std::string arg_help = ""); private: Options& m_options; 
std::string m_group; }; namespace { constexpr int OPTION_LONGEST = 30; constexpr int OPTION_DESC_GAP = 2; std::basic_regex<char> option_matcher("--([[:alnum:]][-_[:alnum:]]+)(=(.*))?|-([[:alnum:]]+)"); std::basic_regex<char> option_specifier("(([[:alnum:]]),)?[ ]*([[:alnum:]][-_[:alnum:]]*)?"); String format_option(HelpOptionDetails const& o) { auto& s = o.s; auto& l = o.l; String result = " "; if (s.size() > 0) { result += "-" + toLocalString(s) + ","; } else { result += " "; } if (l.size() > 0) { result += " --" + toLocalString(l); } auto arg = o.arg_help.size() > 0 ? toLocalString(o.arg_help) : "arg"; if (!o.is_boolean) { if (o.has_implicit) { result += " [=" + arg + "(=" + toLocalString(o.implicit_value) + ")]"; } else { result += " " + arg; } } return result; } String format_description(HelpOptionDetails const& o, size_t start, size_t width) { auto desc = o.desc; if (o.has_default && (!o.is_boolean || o.default_value != "false")) { if (o.default_value != "") { desc += toLocalString(" (default: " + o.default_value + ")"); } else { desc += toLocalString(" (default: \"\")"); } } String result; auto current = std::begin(desc); auto startLine = current; auto lastSpace = current; auto size = size_t{}; while (current != std::end(desc)) { if (*current == ' ') { lastSpace = current; } if (*current == '\n') { startLine = current + 1; lastSpace = startLine; } else if (size > width) { if (lastSpace == startLine) { stringAppend(result, startLine, current + 1); stringAppend(result, "\n"); stringAppend(result, start, ' '); startLine = current + 1; lastSpace = startLine; } else { stringAppend(result, startLine, lastSpace); stringAppend(result, "\n"); stringAppend(result, start, ' '); startLine = lastSpace + 1; lastSpace = startLine; } size = 0; } else { ++size; } ++current; } // append whatever is left stringAppend(result, startLine, current); return result; } } // namespace inline ParseResult::ParseResult( const std::shared_ptr<std::unordered_map<std::string, std::shared_ptr<OptionDetails>>> options, std::vector<std::string> positional, bool allow_unrecognised, int& argc, char**& argv) : m_options(options), m_positional(std::move(positional)), m_next_positional(m_positional.begin()), m_allow_unrecognised(allow_unrecognised) { parse(argc, argv); } inline void Options::add_options(std::string const& group, std::initializer_list<Option> options) { OptionAdder option_adder(*this, group); for (auto const& option : options) { option_adder(option.opts_, option.desc_, option.value_, option.arg_help_); } } inline OptionAdder Options::add_options(std::string group) { return OptionAdder(*this, std::move(group)); } inline OptionAdder& OptionAdder::operator()(std::string const& opts, std::string const& desc, std::shared_ptr<Value const> value, std::string arg_help) { std::match_results<char const*> result; std::regex_match(opts.c_str(), result, option_specifier); if (result.empty()) { throw_or_mimic<invalid_option_format_error>(opts); } auto const& short_match = result[2]; auto const& long_match = result[3]; if (!short_match.length() && !long_match.length()) { throw_or_mimic<invalid_option_format_error>(opts); } else if (long_match.length() == 1 && short_match.length()) { throw_or_mimic<invalid_option_format_error>(opts); } auto option_names = [](std::sub_match<char const*> const& short_, std::sub_match<char const*> const& long_) { if (long_.length() == 1) { return std::make_tuple(long_.str(), short_.str()); } else { return std::make_tuple(short_.str(), long_.str()); } }(short_match, long_match); 
m_options.add_option(m_group, std::get<0>(option_names), std::get<1>(option_names), desc, value, std::move(arg_help)); return *this; } inline void ParseResult::parse_default(std::shared_ptr<OptionDetails> details) { m_results[details].parse_default(details); } inline void ParseResult::parse_option(std::shared_ptr<OptionDetails> value, std::string const& /*name*/, std::string const& arg) { auto& result = m_results[value]; result.parse(value, arg); m_sequential.emplace_back(value->long_name(), arg); } inline void ParseResult::checked_parse_arg(int argc, char* argv[], int& current, std::shared_ptr<OptionDetails> value, std::string const& name) { if (current + 1 >= argc) { if (value->value().has_implicit()) { parse_option(value, name, value->value().get_implicit_value()); } else { throw_or_mimic<missing_argument_exception>(name); } } else { if (value->value().has_implicit()) { parse_option(value, name, value->value().get_implicit_value()); } else { parse_option(value, name, argv[current + 1]); ++current; } } } inline void ParseResult::add_to_option(std::string const& option, std::string const& arg) { auto iter = m_options->find(option); if (iter == m_options->end()) { throw_or_mimic<option_not_exists_exception>(option); } parse_option(iter->second, option, arg); } inline bool ParseResult::consume_positional(std::string a) { while (m_next_positional != m_positional.end()) { auto iter = m_options->find(*m_next_positional); if (iter != m_options->end()) { auto& result = m_results[iter->second]; if (!iter->second->value().is_container()) { if (result.count() == 0) { add_to_option(*m_next_positional, a); ++m_next_positional; return true; } else { ++m_next_positional; continue; } } else { add_to_option(*m_next_positional, a); return true; } } else { throw_or_mimic<option_not_exists_exception>(*m_next_positional); } } return false; } inline void Options::parse_positional(std::string option) { parse_positional(std::vector<std::string>{std::move(option)}); } inline void Options::parse_positional(std::vector<std::string> options) { m_positional = std::move(options); m_next_positional = m_positional.begin(); m_positional_set.insert(m_positional.begin(), m_positional.end()); } inline void Options::parse_positional(std::initializer_list<std::string> options) { parse_positional(std::vector<std::string>(std::move(options))); } inline ParseResult Options::parse(int& argc, char**& argv) { ParseResult result(m_options, m_positional, m_allow_unrecognised, argc, argv); return result; } inline void ParseResult::parse(int& argc, char**& argv) { int current = 1; int nextKeep = 1; bool consume_remaining = false; while (current != argc) { if (strcmp(argv[current], "--") == 0) { consume_remaining = true; ++current; break; } std::match_results<char const*> result; std::regex_match(argv[current], result, option_matcher); if (result.empty()) { // not a flag // but if it starts with a `-`, then it's an error if (argv[current][0] == '-' && argv[current][1] != '\0') { if (!m_allow_unrecognised) { throw_or_mimic<option_syntax_exception>(argv[current]); } } // if true is returned here then it was consumed, otherwise it is // ignored if (consume_positional(argv[current])) { } else { argv[nextKeep] = argv[current]; ++nextKeep; } // if we return from here then it was parsed successfully, so continue } else { // short or long option? 
if (result[4].length() != 0) { std::string const& s = result[4]; for (std::size_t i = 0; i != s.size(); ++i) { std::string name(1, s[i]); auto iter = m_options->find(name); if (iter == m_options->end()) { if (m_allow_unrecognised) { continue; } else { // error throw_or_mimic<option_not_exists_exception>(name); } } auto value = iter->second; if (i + 1 == s.size()) { // it must be the last argument checked_parse_arg(argc, argv, current, value, name); } else if (value->value().has_implicit()) { parse_option(value, name, value->value().get_implicit_value()); } else { // error throw_or_mimic<option_requires_argument_exception>(name); } } } else if (result[1].length() != 0) { std::string const& name = result[1]; auto iter = m_options->find(name); if (iter == m_options->end()) { if (m_allow_unrecognised) { // keep unrecognised options in argument list, skip to next argument argv[nextKeep] = argv[current]; ++nextKeep; ++current; continue; } else { // error throw_or_mimic<option_not_exists_exception>(name); } } auto opt = iter->second; // equals provided for long option? if (result[2].length() != 0) { // parse the option given parse_option(opt, name, result[3]); } else { // parse the next argument checked_parse_arg(argc, argv, current, opt, name); } } } ++current; } for (auto& opt : *m_options) { auto& detail = opt.second; auto& value = detail->value(); auto& store = m_results[detail]; if (value.has_default() && !store.count() && !store.has_default()) { parse_default(detail); } } if (consume_remaining) { while (current < argc) { if (!consume_positional(argv[current])) { break; } ++current; } // adjust argv for any that couldn't be swallowed while (current != argc) { argv[nextKeep] = argv[current]; ++nextKeep; ++current; } } argc = nextKeep; } inline void Options::add_option(std::string const& group, Option const& option) { add_options(group, {option}); } inline void Options::add_option(std::string const& group, std::string const& s, std::string const& l, std::string desc, std::shared_ptr<Value const> value, std::string arg_help) { auto stringDesc = toLocalString(std::move(desc)); auto option = std::make_shared<OptionDetails>(s, l, stringDesc, value); if (s.size() > 0) { add_one_option(s, option); } if (l.size() > 0) { add_one_option(l, option); } // add the help details auto& options = m_help[group]; options.options.emplace_back(HelpOptionDetails{s, l, stringDesc, value->has_default(), value->get_default_value(), value->has_implicit(), value->get_implicit_value(), std::move(arg_help), value->is_container(), value->is_boolean()}); } inline void Options::add_one_option(std::string const& option, std::shared_ptr<OptionDetails> details) { auto in = m_options->emplace(option, details); if (!in.second) { throw_or_mimic<option_exists_error>(option); } } inline String Options::help_one_group(std::string const& g) const { typedef std::vector<std::pair<String, String>> OptionHelp; auto group = m_help.find(g); if (group == m_help.end()) { return ""; } OptionHelp format; size_t longest = 0; String result; if (!g.empty()) { result += toLocalString(" " + g + " options:\n"); } for (auto const& o : group->second.options) { if (m_positional_set.find(o.l) != m_positional_set.end() && !m_show_positional) { continue; } auto s = format_option(o); longest = (std::max)(longest, stringLength(s)); format.push_back(std::make_pair(s, String())); } longest = (std::min)(longest, static_cast<size_t>(OPTION_LONGEST)); // widest allowed description auto allowed = size_t{76} - longest - OPTION_DESC_GAP; auto fiter = 
format.begin(); for (auto const& o : group->second.options) { if (m_positional_set.find(o.l) != m_positional_set.end() && !m_show_positional) { continue; } auto d = format_description(o, longest + OPTION_DESC_GAP, allowed); result += fiter->first; if (stringLength(fiter->first) > longest) { result += '\n'; result += toLocalString(std::string(longest + OPTION_DESC_GAP, ' ')); } else { result += toLocalString(std::string(longest + OPTION_DESC_GAP - stringLength(fiter->first), ' ')); } result += d; result += '\n'; ++fiter; } return result; } inline void Options::generate_group_help(String& result, std::vector<std::string> const& print_groups) const { for (size_t i = 0; i != print_groups.size(); ++i) { String const& group_help_text = help_one_group(print_groups[i]); if (empty(group_help_text)) { continue; } result += group_help_text; if (i < print_groups.size() - 1) { result += '\n'; } } } inline void Options::generate_all_groups_help(String& result) const { std::vector<std::string> all_groups; all_groups.reserve(m_help.size()); for (auto& group : m_help) { all_groups.push_back(group.first); } generate_group_help(result, all_groups); } inline std::string Options::help(std::vector<std::string> const& help_groups) const { String result = m_help_string + "\nUsage:\n " + toLocalString(m_program) + " " + toLocalString(m_custom_help); if (m_positional.size() > 0 && m_positional_help.size() > 0) { result += " " + toLocalString(m_positional_help); } result += "\n\n"; if (help_groups.empty()) { generate_all_groups_help(result); } else { generate_group_help(result, help_groups); } return toUTF8String(result); } inline const std::vector<std::string> Options::groups() const { std::vector<std::string> g; std::transform( m_help.begin(), m_help.end(), std::back_inserter(g), [](const std::map<std::string, HelpGroupDetails>::value_type& pair) { return pair.first; }); return g; } inline HelpGroupDetails const& Options::group_help(std::string const& group) const { return m_help.at(group); } } // namespace cxxopts #endif // DOXYGEN_SHOULD_SKIP_THIS #endif // CXXOPTS_HPP_INCLUDED
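// Illustrative usage sketch (not part of the vendored cxxopts header above; the program
// name and option names here are hypothetical). It shows the typical pattern for the API
// defined in this file: build an Options object, register options, parse, then query the
// ParseResult.
//
// cxxopts::Options options("prog", "Example program");
// options.add_options()
//   ("v,verbose", "Enable verbose output", cxxopts::value<bool>()->default_value("false"))
//   ("n,count", "Number of items", cxxopts::value<int>());
// auto result = options.parse(argc, argv);
// if (result.count("count")) {
//   int n = result["count"].as<int>();  // throws option_not_present_exception if missing
// }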
0
rapidsai_public_repos/cudf/cpp/include
rapidsai_public_repos/cudf/cpp/include/cudf_test/timestamp_utilities.cuh
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cudf/detail/iterator.cuh> #include <cudf/wrappers/timestamps.hpp> #include <cudf_test/column_wrapper.hpp> #include <thrust/logical.h> #include <thrust/sequence.h> namespace cudf { namespace test { using time_point_ms = cuda::std::chrono::time_point<cuda::std::chrono::system_clock, cuda::std::chrono::milliseconds>; /** * @brief Creates a `fixed_width_column_wrapper` with ascending timestamps in the * range `[start, stop)`. * * The period is inferred from `count` and difference between `start` * and `stop`. * * @tparam Rep The arithmetic type representing the number of ticks * @tparam Period A cuda::std::ratio representing the tick period (i.e. the *number of seconds per tick) * @param count The number of timestamps to create * @param start The first timestamp as a cuda::std::chrono::time_point * @param stop The last timestamp as a cuda::std::chrono::time_point */ template <typename T, bool nullable = false> inline cudf::test::fixed_width_column_wrapper<T, int64_t> generate_timestamps(int32_t count, time_point_ms start, time_point_ms stop) { using Rep = typename T::rep; using Period = typename T::period; using ToDuration = cuda::std::chrono::duration<Rep, Period>; auto lhs = start.time_since_epoch().count(); auto rhs = stop.time_since_epoch().count(); auto const min = std::min(lhs, rhs); auto const max = std::max(lhs, rhs); auto const range = max - min; auto iter = cudf::detail::make_counting_transform_iterator(0, [=](auto i) { return cuda::std::chrono::floor<ToDuration>( cuda::std::chrono::milliseconds(min + (range / count) * i)) .count(); }); if (nullable) { auto mask = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2 == 0; }); return cudf::test::fixed_width_column_wrapper<T, int64_t>(iter, iter + count, mask); } else { // This needs to be in an else to quash `statement_not_reachable` warnings return cudf::test::fixed_width_column_wrapper<T, int64_t>(iter, iter + count); } } } // namespace test } // namespace cudf
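// Illustrative usage sketch (not part of the original header; the literal count and range
// below are hypothetical). It shows how a test might call the generate_timestamps helper
// defined above to build a nullable column of 100 millisecond timestamps covering the
// first ten seconds of the epoch.
//
// using cuda::std::chrono::milliseconds;
// auto start = cudf::test::time_point_ms{milliseconds{0}};
// auto stop  = cudf::test::time_point_ms{milliseconds{10'000}};
// auto col   = cudf::test::generate_timestamps<cudf::timestamp_ms, true>(100, start, stop);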
0
rapidsai_public_repos/cudf/cpp
rapidsai_public_repos/cudf/cpp/tests/CMakeLists.txt
# ============================================================================= # Copyright (c) 2018-2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except # in compliance with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License # is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing permissions and limitations under # the License. # ============================================================================= # ################################################################################################## # enable testing ################################################################################ # ################################################################################################## enable_testing() include(rapids-test) rapids_test_init() # This function takes in a test name and test source and handles setting all of the associated # properties and linking to build the test function(ConfigureTest CMAKE_TEST_NAME) set(options) set(one_value GPUS PERCENT STREAM_MODE) set(multi_value) cmake_parse_arguments(_CUDF_TEST "${options}" "${one_value}" "${multi_value}" ${ARGN}) if(NOT DEFINED _CUDF_TEST_GPUS AND NOT DEFINED _CUDF_TEST_PERCENT) set(_CUDF_TEST_GPUS 1) set(_CUDF_TEST_PERCENT 15) endif() if(NOT DEFINED _CUDF_TEST_GPUS) set(_CUDF_TEST_GPUS 1) endif() if(NOT DEFINED _CUDF_TEST_PERCENT) set(_CUDF_TEST_PERCENT 100) endif() if(NOT DEFINED _CUDF_TEST_STREAM_MODE) set(_CUDF_TEST_STREAM_MODE cudf) endif() add_executable(${CMAKE_TEST_NAME} ${_CUDF_TEST_UNPARSED_ARGUMENTS}) set_target_properties( ${CMAKE_TEST_NAME} PROPERTIES RUNTIME_OUTPUT_DIRECTORY "$<BUILD_INTERFACE:${CUDF_BINARY_DIR}/gtests>" INSTALL_RPATH "\$ORIGIN/../../../lib" CXX_STANDARD 17 CXX_STANDARD_REQUIRED ON # For std:: support of __int128_t. 
Can be removed once using cuda::std CXX_EXTENSIONS ON CUDA_STANDARD 17 CUDA_STANDARD_REQUIRED ON ) target_link_libraries( ${CMAKE_TEST_NAME} PRIVATE cudftestutil GTest::gmock_main GTest::gtest_main $<TARGET_NAME_IF_EXISTS:conda_env> ) rapids_test_add( NAME ${CMAKE_TEST_NAME} COMMAND ${CMAKE_TEST_NAME} GPUS ${_CUDF_TEST_GPUS} PERCENT ${_CUDF_TEST_PERCENT} INSTALL_COMPONENT_SET testing ) set_tests_properties( ${CMAKE_TEST_NAME} PROPERTIES ENVIRONMENT "GTEST_CUDF_STREAM_MODE=new_${_CUDF_TEST_STREAM_MODE}_default;LD_PRELOAD=$<TARGET_FILE:cudf_identify_stream_usage_mode_${_CUDF_TEST_STREAM_MODE}>" ) endfunction() # ################################################################################################## # test sources ################################################################################## # ################################################################################################## # ################################################################################################## # * column tests ---------------------------------------------------------------------------------- ConfigureTest( COLUMN_TEST column/bit_cast_test.cpp column/column_device_view_test.cu column/column_test.cpp column/column_view_device_span_test.cpp column/column_view_shallow_test.cpp column/compound_test.cu ) # ################################################################################################## # * scalar tests ---------------------------------------------------------------------------------- ConfigureTest(SCALAR_TEST scalar/scalar_test.cpp scalar/scalar_device_view_test.cu) # ################################################################################################## # * timestamps tests ------------------------------------------------------------------------------ ConfigureTest(TIMESTAMPS_TEST wrappers/timestamps_test.cu) # ################################################################################################## # * cudf tests ------------------------------------------------------------------------------------ ConfigureTest(ERROR_TEST error/error_handling_test.cu) # ################################################################################################## # * groupby tests --------------------------------------------------------------------------------- ConfigureTest( GROUPBY_TEST groupby/argmin_tests.cpp groupby/argmax_tests.cpp groupby/collect_list_tests.cpp groupby/collect_set_tests.cpp groupby/correlation_tests.cpp groupby/count_scan_tests.cpp groupby/count_tests.cpp groupby/covariance_tests.cpp groupby/groupby_test_util.cpp groupby/groups_tests.cpp groupby/histogram_tests.cpp groupby/keys_tests.cpp groupby/lists_tests.cpp groupby/m2_tests.cpp groupby/min_tests.cpp groupby/max_scan_tests.cpp groupby/max_tests.cpp groupby/mean_tests.cpp groupby/median_tests.cpp groupby/merge_m2_tests.cpp groupby/merge_lists_tests.cpp groupby/merge_sets_tests.cpp groupby/min_scan_tests.cpp groupby/nth_element_tests.cpp groupby/nunique_tests.cpp groupby/product_tests.cpp groupby/quantile_tests.cpp groupby/rank_scan_tests.cpp groupby/replace_nulls_tests.cpp groupby/shift_tests.cpp groupby/std_tests.cpp groupby/structs_tests.cpp groupby/sum_of_squares_tests.cpp groupby/sum_scan_tests.cpp groupby/sum_tests.cpp groupby/tdigest_tests.cu groupby/var_tests.cpp GPUS 1 PERCENT 100 ) # ################################################################################################## # * join tests 
------------------------------------------------------------------------------------ ConfigureTest( JOIN_TEST join/join_tests.cpp join/conditional_join_tests.cu join/cross_join_tests.cpp join/semi_anti_join_tests.cpp join/mixed_join_tests.cu ) # ################################################################################################## # * is_sorted tests ------------------------------------------------------------------------------- ConfigureTest(IS_SORTED_TEST sort/is_sorted_tests.cpp) # ################################################################################################## # * datetime tests -------------------------------------------------------------------------------- ConfigureTest(DATETIME_OPS_TEST datetime/datetime_ops_test.cpp) # ################################################################################################## # * hashing tests --------------------------------------------------------------------------------- ConfigureTest( HASHING_TEST hashing/md5_test.cpp hashing/murmurhash3_x86_32_test.cpp hashing/murmurhash3_x64_128_test.cpp hashing/spark_murmurhash3_x86_32_test.cpp hashing/xxhash_64_test.cpp ) # ################################################################################################## # * partitioning tests ---------------------------------------------------------------------------- ConfigureTest( PARTITIONING_TEST partitioning/hash_partition_test.cpp partitioning/round_robin_test.cpp partitioning/partition_test.cpp GPUS 1 PERCENT 70 ) # ################################################################################################## # * hash_map tests -------------------------------------------------------------------------------- ConfigureTest(HASH_MAP_TEST hash_map/map_test.cu) # ################################################################################################## # * quantiles tests ------------------------------------------------------------------------------- ConfigureTest( QUANTILES_TEST quantiles/percentile_approx_test.cpp quantiles/quantile_test.cpp quantiles/quantiles_test.cpp GPUS 1 PERCENT 70 ) # ################################################################################################## # * reduction tests ------------------------------------------------------------------------------- ConfigureTest( REDUCTIONS_TEST reductions/collect_ops_tests.cpp reductions/rank_tests.cpp reductions/reduction_tests.cpp reductions/scan_tests.cpp reductions/segmented_reduction_tests.cpp reductions/list_rank_test.cpp reductions/tdigest_tests.cu GPUS 1 PERCENT 70 ) # ################################################################################################## # * replace tests --------------------------------------------------------------------------------- ConfigureTest(REPLACE_TEST replace/replace_tests.cpp) ConfigureTest(REPLACE_NULLS_TEST replace/replace_nulls_tests.cpp) ConfigureTest(REPLACE_NANS_TEST replace/replace_nans_tests.cpp) ConfigureTest(NORMALIZE_REPLACE_TEST replace/normalize_replace_tests.cpp) ConfigureTest(CLAMP_TEST replace/clamp_test.cpp) # ################################################################################################## # * fixed_point tests ----------------------------------------------------------------------------- ConfigureTest(FIXED_POINT_TEST fixed_point/fixed_point_tests.cpp fixed_point/fixed_point_tests.cu) # ################################################################################################## # * unary tests 
----------------------------------------------------------------------------------- ConfigureTest(UNARY_TEST unary/math_ops_test.cpp unary/unary_ops_test.cpp unary/cast_tests.cpp) # ################################################################################################## # * round tests ----------------------------------------------------------------------------------- ConfigureTest(ROUND_TEST round/round_tests.cpp) # ################################################################################################## # * binary tests ---------------------------------------------------------------------------------- ConfigureTest( BINARYOP_TEST binaryop/binop-verify-input-test.cpp binaryop/binop-null-test.cpp binaryop/binop-compiled-test.cpp binaryop/binop-compiled-fixed_point-test.cpp binaryop/binop-generic-ptx-test.cpp ) # ################################################################################################## # * unary transform tests ------------------------------------------------------------------------- ConfigureTest( TRANSFORM_TEST transform/integration/unary_transform_test.cpp transform/nans_to_null_test.cpp transform/mask_to_bools_test.cpp transform/bools_to_mask_test.cpp transform/row_bit_count_test.cu transform/one_hot_encode_tests.cpp ) # ################################################################################################## # * interop tests ------------------------------------------------------------------------- ConfigureTest( INTEROP_TEST interop/to_arrow_test.cpp interop/from_arrow_test.cpp interop/dlpack_test.cpp ) # ################################################################################################## # * io tests -------------------------------------------------------------------------------------- ConfigureTest(DECOMPRESSION_TEST io/comp/decomp_test.cpp) ConfigureTest(ROW_SELECTION_TEST io/row_selection_test.cpp) ConfigureTest( CSV_TEST io/csv_test.cpp GPUS 1 PERCENT 30 ) ConfigureTest( FILE_IO_TEST io/file_io_test.cpp GPUS 1 PERCENT 30 ) ConfigureTest( ORC_TEST io/orc_test.cpp GPUS 1 PERCENT 30 ) ConfigureTest( PARQUET_TEST io/parquet_test.cpp io/parquet_chunked_reader_test.cpp GPUS 1 PERCENT 30 ) ConfigureTest( JSON_TEST io/json_test.cpp io/json_chunked_reader.cpp GPUS 1 PERCENT 30 ) ConfigureTest(JSON_WRITER_TEST io/json_writer.cpp) ConfigureTest(JSON_TYPE_CAST_TEST io/json_type_cast_test.cu) ConfigureTest(NESTED_JSON_TEST io/nested_json_test.cpp io/json_tree.cpp) ConfigureTest(ARROW_IO_SOURCE_TEST io/arrow_io_source_test.cpp) ConfigureTest(MULTIBYTE_SPLIT_TEST io/text/multibyte_split_test.cpp) ConfigureTest( DATA_CHUNK_SOURCE_TEST io/text/data_chunk_source_test.cpp GPUS 1 PERCENT 30 ) target_link_libraries(DATA_CHUNK_SOURCE_TEST PRIVATE ZLIB::ZLIB) ConfigureTest(LOGICAL_STACK_TEST io/fst/logical_stack_test.cu) ConfigureTest(FST_TEST io/fst/fst_test.cu) ConfigureTest(TYPE_INFERENCE_TEST io/type_inference_test.cu) if(CUDF_ENABLE_ARROW_S3) target_compile_definitions(ARROW_IO_SOURCE_TEST PRIVATE "S3_ENABLED") endif() # ################################################################################################## # * sort tests ------------------------------------------------------------------------------------ ConfigureTest( SORT_TEST sort/segmented_sort_tests.cpp sort/sort_nested_types_tests.cpp sort/sort_test.cpp sort/stable_sort_tests.cpp sort/rank_test.cpp GPUS 1 PERCENT 70 ) # ################################################################################################## # * copying tests 
--------------------------------------------------------------------------------- ConfigureTest( COPYING_TEST copying/concatenate_tests.cpp copying/copy_if_else_nested_tests.cpp copying/copy_range_tests.cpp copying/copy_tests.cpp copying/detail_gather_tests.cu copying/gather_list_tests.cpp copying/gather_str_tests.cpp copying/gather_struct_tests.cpp copying/gather_tests.cpp copying/get_value_tests.cpp copying/pack_tests.cpp copying/purge_nonempty_nulls_tests.cpp copying/sample_tests.cpp copying/scatter_tests.cpp copying/scatter_list_tests.cpp copying/scatter_list_scalar_tests.cpp copying/scatter_struct_tests.cpp copying/scatter_struct_scalar_tests.cpp copying/segmented_gather_list_tests.cpp copying/shift_tests.cpp copying/slice_tests.cpp copying/split_tests.cpp copying/utility_tests.cpp copying/reverse_tests.cpp GPUS 1 PERCENT 70 ) # ################################################################################################## # * utilities tests ------------------------------------------------------------------------------- ConfigureTest( UTILITIES_TEST utilities_tests/type_list_tests.cpp utilities_tests/column_debug_tests.cpp utilities_tests/column_utilities_tests.cpp utilities_tests/column_wrapper_tests.cpp utilities_tests/lists_column_wrapper_tests.cpp utilities_tests/logger_tests.cpp utilities_tests/default_stream_tests.cpp utilities_tests/type_check_tests.cpp ) # ################################################################################################## # * span tests ------------------------------------------------------------------------------- # This test must be split into two executables so that one can use the preload library and one does # not. The one that doesn't includes a thrust::device_vector copy, which is always synchronous on # the default stream and is out of libcudf's control (but must be tested). 
set(_allowlist_filter SpanTest.CanConstructFromDeviceContainers) ConfigureTest(SPAN_TEST utilities_tests/span_tests.cu) ConfigureTest(SPAN_TEST_DEVICE_VECTOR utilities_tests/span_tests.cu) # Overwrite the environments set by ConfigureTest set_tests_properties( SPAN_TEST PROPERTIES ENVIRONMENT "GTEST_FILTER=-${_allowlist_filter};GTEST_CUDF_STREAM_MODE=new_cudf_default;LD_PRELOAD=$<TARGET_FILE:cudf_identify_stream_usage_mode_cudf>" ) set_tests_properties( SPAN_TEST_DEVICE_VECTOR PROPERTIES ENVIRONMENT "GTEST_FILTER=${_allowlist_filter}" ) # ################################################################################################## # * iterator tests -------------------------------------------------------------------------------- ConfigureTest( ITERATOR_TEST iterator/indexalator_test.cu iterator/offsetalator_test.cu iterator/optional_iterator_test_chrono.cu iterator/optional_iterator_test_numeric.cu iterator/pair_iterator_test_chrono.cu iterator/pair_iterator_test_numeric.cu iterator/scalar_iterator_test.cu iterator/sizes_to_offsets_iterator_test.cu iterator/value_iterator.cpp iterator/value_iterator_test_chrono.cu iterator/value_iterator_test_numeric.cu iterator/value_iterator_test_strings.cu iterator/value_iterator_test_transform.cu ) # ################################################################################################## # * device atomics tests -------------------------------------------------------------------------- ConfigureTest(DEVICE_ATOMICS_TEST device_atomics/device_atomics_test.cu) # ################################################################################################## # * transpose tests ------------------------------------------------------------------------------- ConfigureTest( TRANSPOSE_TEST transpose/transpose_test.cpp GPUS 1 PERCENT 70 ) # ################################################################################################## # * table tests ----------------------------------------------------------------------------------- ConfigureTest( TABLE_TEST table/table_tests.cpp table/table_view_tests.cu table/row_operators_tests.cpp table/experimental_row_operator_tests.cu table/row_operator_tests_utilities.cu ) # ################################################################################################## # * sorted-merge tests ---------------------------------------------------------------------------- ConfigureTest( MERGE_TEST merge/merge_test.cpp merge/merge_dictionary_test.cpp merge/merge_string_test.cpp ) # ################################################################################################## # * stream compaction tests ----------------------------------------------------------------------- ConfigureTest( STREAM_COMPACTION_TEST stream_compaction/apply_boolean_mask_tests.cpp stream_compaction/distinct_count_tests.cpp stream_compaction/distinct_tests.cpp stream_compaction/drop_nans_tests.cpp stream_compaction/drop_nulls_tests.cpp stream_compaction/stable_distinct_tests.cpp stream_compaction/unique_count_tests.cpp stream_compaction/unique_tests.cpp ) # ################################################################################################## # * rolling tests --------------------------------------------------------------------------------- ConfigureTest( ROLLING_TEST rolling/collect_ops_test.cpp rolling/empty_input_test.cpp rolling/grouped_rolling_range_test.cpp rolling/grouped_rolling_test.cpp rolling/lead_lag_test.cpp rolling/nth_element_test.cpp rolling/offset_row_window_test.cpp 
rolling/range_comparator_test.cu rolling/range_rolling_window_test.cpp rolling/range_window_bounds_test.cpp rolling/rolling_test.cpp GPUS 1 PERCENT 70 ) # ################################################################################################## # * filling test ---------------------------------------------------------------------------------- ConfigureTest( FILLING_TEST filling/fill_tests.cpp filling/repeat_tests.cpp filling/sequence_tests.cpp ) # ################################################################################################## # * search test ----------------------------------------------------------------------------------- ConfigureTest( SEARCH_TEST search/search_dictionary_test.cpp search/search_list_test.cpp search/search_struct_test.cpp search/search_test.cpp ) # ################################################################################################## # * reshape test ---------------------------------------------------------------------------------- ConfigureTest( RESHAPE_TEST reshape/byte_cast_tests.cpp reshape/interleave_columns_tests.cpp reshape/tile_tests.cpp ) # ################################################################################################## # * traits test ----------------------------------------------------------------------------------- ConfigureTest(TRAITS_TEST types/traits_test.cpp) # ################################################################################################## # * factories test -------------------------------------------------------------------------------- ConfigureTest(FACTORIES_TEST scalar/factories_test.cpp column/factories_test.cpp) # ################################################################################################## # * dispatcher test ------------------------------------------------------------------------------- ConfigureTest(DISPATCHER_TEST types/type_dispatcher_test.cu) # ################################################################################################## # * strings test ---------------------------------------------------------------------------------- ConfigureTest( STRINGS_TEST strings/array_tests.cpp strings/attrs_tests.cpp strings/booleans_tests.cpp strings/case_tests.cpp strings/chars_types_tests.cpp strings/combine/concatenate_tests.cpp strings/combine/join_list_elements_tests.cpp strings/combine/join_strings_tests.cpp strings/concatenate_tests.cpp strings/contains_tests.cpp strings/datetime_tests.cpp strings/durations_tests.cpp strings/extract_tests.cpp strings/factories_test.cu strings/fill_tests.cpp strings/findall_tests.cpp strings/find_tests.cpp strings/find_multiple_tests.cpp strings/fixed_point_tests.cpp strings/floats_tests.cpp strings/format_lists_tests.cpp strings/integers_tests.cpp strings/ipv4_tests.cpp strings/like_tests.cpp strings/pad_tests.cpp strings/repeat_strings_tests.cpp strings/replace_regex_tests.cpp strings/replace_tests.cpp strings/reverse_tests.cpp strings/slice_tests.cpp strings/split_tests.cpp strings/strip_tests.cpp strings/translate_tests.cpp strings/urls_tests.cpp ) # ################################################################################################## # * json path test -------------------------------------------------------------------------------- ConfigureTest(JSON_PATH_TEST json/json_tests.cpp) # ################################################################################################## # * structs test ---------------------------------------------------------------------------------- 
ConfigureTest(STRUCTS_TEST structs/structs_column_tests.cpp structs/utilities_tests.cpp) # ################################################################################################## # * nvtext test ----------------------------------------------------------------------------------- ConfigureTest( TEXT_TEST text/bpe_tests.cpp text/edit_distance_tests.cpp text/jaccard_tests.cpp text/minhash_tests.cpp text/ngrams_tests.cpp text/ngrams_tokenize_tests.cpp text/normalize_tests.cpp text/replace_tests.cpp text/stemmer_tests.cpp text/subword_tests.cpp text/tokenize_tests.cpp ) # ################################################################################################## # * bitmask tests --------------------------------------------------------------------------------- ConfigureTest( BITMASK_TEST bitmask/valid_if_tests.cu bitmask/set_nullmask_tests.cu bitmask/bitmask_tests.cpp bitmask/is_element_valid_tests.cpp ) # ################################################################################################## # * dictionary tests ------------------------------------------------------------------------------ ConfigureTest( DICTIONARY_TEST dictionary/add_keys_test.cpp dictionary/decode_test.cpp dictionary/encode_test.cpp dictionary/factories_test.cpp dictionary/fill_test.cpp dictionary/gather_test.cpp dictionary/remove_keys_test.cpp dictionary/scatter_test.cpp dictionary/search_test.cpp dictionary/set_keys_test.cpp dictionary/slice_test.cpp ) # ################################################################################################## # * encode tests ----------------------------------------------------------------------------------- ConfigureTest(ENCODE_TEST encode/encode_tests.cpp) # ################################################################################################## # * ast tests ------------------------------------------------------------------------------------- ConfigureTest(AST_TEST ast/transform_tests.cpp) # ################################################################################################## # * lists tests ---------------------------------------------------------------------------------- ConfigureTest( LISTS_TEST lists/combine/concatenate_list_elements_tests.cpp lists/combine/concatenate_rows_tests.cpp lists/contains_tests.cpp lists/count_elements_tests.cpp lists/explode_tests.cpp lists/extract_tests.cpp lists/reverse_tests.cpp lists/sequences_tests.cpp lists/set_operations/difference_distinct_tests.cpp lists/set_operations/have_overlap_tests.cpp lists/set_operations/intersect_distinct_tests.cpp lists/set_operations/union_distinct_tests.cpp lists/sort_lists_tests.cpp lists/stream_compaction/apply_boolean_mask_tests.cpp lists/stream_compaction/distinct_tests.cpp GPUS 1 PERCENT 70 ) # ################################################################################################## # * bin tests ---------------------------------------------------------------------------------- ConfigureTest(LABEL_BINS_TEST labeling/label_bins_tests.cpp) # ################################################################################################## # * jit tests ---------------------------------------------------------------------------------- ConfigureTest(JIT_PARSER_TEST jit/parse_ptx_function.cpp) target_include_directories(JIT_PARSER_TEST PRIVATE "$<BUILD_INTERFACE:${CUDF_SOURCE_DIR}/src>") # ################################################################################################## # * stream testing 
--------------------------------------------------------------------------------- ConfigureTest( STREAM_IDENTIFICATION_TEST identify_stream_usage/test_default_stream_identification.cu ) ConfigureTest(STREAM_BINARYOP_TEST streams/binaryop_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_CONCATENATE_TEST streams/concatenate_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_COPYING_TEST streams/copying_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_CSVIO_TEST streams/io/csv_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_DICTIONARY_TEST streams/dictionary_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_FILLING_TEST streams/filling_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_GROUPBY_TEST streams/groupby_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_HASHING_TEST streams/hash_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_INTEROP_TEST streams/interop_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_JSONIO_TEST streams/io/json_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_LISTS_TEST streams/lists_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_NULL_MASK_TEST streams/null_mask_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_REPLACE_TEST streams/replace_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_SEARCH_TEST streams/search_test.cpp STREAM_MODE testing) ConfigureTest(STREAM_SORTING_TEST streams/sorting_test.cpp STREAM_MODE testing) ConfigureTest( STREAM_STRINGS_TEST streams/strings/case_test.cpp streams/strings/combine_test.cpp streams/strings/contains_test.cpp streams/strings/convert_test.cpp streams/strings/extract_test.cpp streams/strings/filter_test.cpp streams/strings/find_test.cpp streams/strings/replace_test.cpp streams/strings/reverse_test.cpp streams/strings/split_test.cpp streams/strings/strings_tests.cpp STREAM_MODE testing ) ConfigureTest( STREAM_TEXT_TEST streams/text/edit_distance_test.cpp streams/text/ngrams_test.cpp streams/text/replace_test.cpp streams/text/stemmer_test.cpp streams/text/tokenize_test.cpp STREAM_MODE testing ) ConfigureTest(STREAM_UNARY_TEST streams/unary_test.cpp STREAM_MODE testing) # ################################################################################################## # Install tests #################################################################################### # ################################################################################################## rapids_test_install_relocatable(INSTALL_COMPONENT_SET testing DESTINATION bin/gtests/libcudf)
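# Illustrative sketch (not part of the original file; the test names and source paths are
# hypothetical). It shows how an additional test would be registered with the ConfigureTest
# helper defined at the top of this file: a plain test, a test with explicit GPU/percent
# resource hints, and a stream-checking test using the testing-default stream mode.
#
# ConfigureTest(MY_FEATURE_TEST my_feature/my_feature_test.cpp)
# ConfigureTest(MY_LARGE_TEST my_feature/my_large_test.cpp GPUS 1 PERCENT 70)
# ConfigureTest(STREAM_MY_FEATURE_TEST streams/my_feature_test.cpp STREAM_MODE testing)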
0
rapidsai_public_repos/cudf/cpp/tests
rapidsai_public_repos/cudf/cpp/tests/error/error_handling_test.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf_test/base_fixture.hpp> #include <cudf_test/default_stream.hpp> #include <cudf_test/stream_checking_resource_adaptor.hpp> #include <cudf/filling.hpp> #include <cudf/utilities/default_stream.hpp> #include <cudf/utilities/error.hpp> #include <rmm/cuda_stream.hpp> TEST(ExpectsTest, FalseCondition) { EXPECT_THROW(CUDF_EXPECTS(false, "condition is false"), cudf::logic_error); } TEST(ExpectsTest, TrueCondition) { EXPECT_NO_THROW(CUDF_EXPECTS(true, "condition is true")); } TEST(CudaTryTest, Error) { EXPECT_THROW(CUDF_CUDA_TRY(cudaErrorLaunchFailure), cudf::cuda_error); } TEST(CudaTryTest, Success) { EXPECT_NO_THROW(CUDF_CUDA_TRY(cudaSuccess)); } TEST(StreamCheck, success) { EXPECT_NO_THROW(CUDF_CHECK_CUDA(0)); } namespace { // Some silly kernel that will cause an error void __global__ test_kernel(int* data) { data[threadIdx.x] = threadIdx.x; } } // namespace // In a release build and without explicit synchronization, CUDF_CHECK_CUDA may // or may not fail on erroneous asynchronous CUDA calls. Invoke // cudaStreamSynchronize to guarantee failure on error. In a non-release build, // CUDF_CHECK_CUDA deterministically fails on erroneous asynchronous CUDA // calls. TEST(StreamCheck, FailedKernel) { rmm::cuda_stream stream; int a; test_kernel<<<0, 0, 0, stream.value()>>>(&a); #ifdef NDEBUG stream.synchronize(); #endif EXPECT_THROW(CUDF_CHECK_CUDA(stream.value()), cudf::cuda_error); } TEST(StreamCheck, CatchFailedKernel) { rmm::cuda_stream stream; int a; test_kernel<<<0, 0, 0, stream.value()>>>(&a); #ifndef NDEBUG stream.synchronize(); #endif EXPECT_THROW(CUDF_CHECK_CUDA(stream.value()), cudf::cuda_error); } __global__ void kernel() { asm("trap;"); } TEST(DeathTest, CudaFatalError) { testing::FLAGS_gtest_death_test_style = "threadsafe"; auto call_kernel = []() { kernel<<<1, 1, 0, cudf::get_default_stream().value()>>>(); try { CUDF_CUDA_TRY(cudaDeviceSynchronize()); } catch (const cudf::fatal_cuda_error& fe) { std::abort(); } }; ASSERT_DEATH(call_kernel(), ""); } #ifndef NDEBUG __global__ void assert_false_kernel() { cudf_assert(false && "this kernel should die"); } __global__ void assert_true_kernel() { cudf_assert(true && "this kernel should live"); } TEST(DebugAssertDeathTest, cudf_assert_false) { testing::FLAGS_gtest_death_test_style = "threadsafe"; auto call_kernel = []() { assert_false_kernel<<<1, 1>>>(); // Kernel should fail with `cudaErrorAssert` // This error invalidates the current device context, so we need to kill // the current process. Running with EXPECT_DEATH spawns a new process for // each attempted kernel launch if (cudaErrorAssert == cudaDeviceSynchronize()) { std::abort(); } // If we reach this point, the cudf_assert didn't work so we exit normally, which will cause // EXPECT_DEATH to fail. 
}; EXPECT_DEATH(call_kernel(), "this kernel should die"); } TEST(DebugAssert, cudf_assert_true) { assert_true_kernel<<<1, 1>>>(); ASSERT_EQ(cudaSuccess, cudaDeviceSynchronize()); } #endif // These tests don't use CUDF_TEST_PROGRAM_MAIN because : // 1.) They don't need the RMM Pool // 2.) The RMM Pool interferes with the death test int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); auto const cmd_opts = parse_cudf_test_opts(argc, argv); auto const stream_mode = cmd_opts["stream_mode"].as<std::string>(); if ((stream_mode == "new_cudf_default") || (stream_mode == "new_testing_default")) { auto resource = rmm::mr::get_current_device_resource(); auto const stream_error_mode = cmd_opts["stream_error_mode"].as<std::string>(); auto const error_on_invalid_stream = (stream_error_mode == "error"); auto const check_default_stream = (stream_mode == "new_cudf_default"); auto adaptor = make_stream_checking_resource_adaptor( resource, error_on_invalid_stream, check_default_stream); rmm::mr::set_current_device_resource(&adaptor); } return RUN_ALL_TESTS(); }
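A minimal usage sketch, not taken from the repository, of how the macros exercised above are typically combined in host code; the helper name and its arguments are hypothetical:

#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <cstddef>

// Hypothetical helper: CUDF_EXPECTS validates arguments (throws cudf::logic_error),
// CUDF_CUDA_TRY converts a failing CUDA call into cudf::cuda_error, and
// CUDF_CHECK_CUDA surfaces asynchronous errors (deterministically in non-release
// builds, per the comment in the tests above).
void copy_ints_async(int const* src, int* dst, std::size_t n, rmm::cuda_stream_view stream)
{
  CUDF_EXPECTS(src != nullptr && dst != nullptr, "source and destination must not be null");
  CUDF_CUDA_TRY(cudaMemcpyAsync(dst, src, n * sizeof(int), cudaMemcpyDefault, stream.value()));
  CUDF_CHECK_CUDA(stream.value());
}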
0
rapidsai_public_repos/cudf/cpp/tests
rapidsai_public_repos/cudf/cpp/tests/bitmask/valid_if_tests.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/detail/iterator.cuh> #include <cudf/detail/valid_if.cuh> #include <cudf/types.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_utilities.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/cudf_gtest.hpp> #include <thrust/iterator/counting_iterator.h> struct ValidIfTest : public cudf::test::BaseFixture {}; struct odds_valid { __host__ __device__ bool operator()(cudf::size_type i) { return i % 2; } }; struct all_valid { __host__ __device__ bool operator()(cudf::size_type i) { return true; } }; struct all_null { __host__ __device__ bool operator()(cudf::size_type i) { return false; } }; TEST_F(ValidIfTest, EmptyRange) { auto actual = cudf::detail::valid_if(thrust::make_counting_iterator(0), thrust::make_counting_iterator(0), odds_valid{}, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); auto const& buffer = actual.first; EXPECT_EQ(0u, buffer.size()); EXPECT_EQ(nullptr, buffer.data()); EXPECT_EQ(0, actual.second); } TEST_F(ValidIfTest, InvalidRange) { EXPECT_THROW(cudf::detail::valid_if(thrust::make_counting_iterator(1), thrust::make_counting_iterator(0), odds_valid{}, cudf::get_default_stream(), rmm::mr::get_current_device_resource()), cudf::logic_error); } TEST_F(ValidIfTest, OddsValid) { auto iter = cudf::detail::make_counting_transform_iterator(0, odds_valid{}); auto expected = cudf::test::detail::make_null_mask(iter, iter + 10000); auto actual = cudf::detail::valid_if(thrust::make_counting_iterator(0), thrust::make_counting_iterator(10000), odds_valid{}, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); CUDF_TEST_EXPECT_EQUAL_BUFFERS(expected.first.data(), actual.first.data(), expected.first.size()); EXPECT_EQ(5000, actual.second); EXPECT_EQ(expected.second, actual.second); } TEST_F(ValidIfTest, AllValid) { auto iter = cudf::detail::make_counting_transform_iterator(0, all_valid{}); auto expected = cudf::test::detail::make_null_mask(iter, iter + 10000); auto actual = cudf::detail::valid_if(thrust::make_counting_iterator(0), thrust::make_counting_iterator(10000), all_valid{}, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); CUDF_TEST_EXPECT_EQUAL_BUFFERS(expected.first.data(), actual.first.data(), expected.first.size()); EXPECT_EQ(0, actual.second); EXPECT_EQ(expected.second, actual.second); } TEST_F(ValidIfTest, AllNull) { auto iter = cudf::detail::make_counting_transform_iterator(0, all_null{}); auto expected = cudf::test::detail::make_null_mask(iter, iter + 10000); auto actual = cudf::detail::valid_if(thrust::make_counting_iterator(0), thrust::make_counting_iterator(10000), all_null{}, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); CUDF_TEST_EXPECT_EQUAL_BUFFERS(expected.first.data(), actual.first.data(), expected.first.size()); EXPECT_EQ(10000, actual.second); EXPECT_EQ(expected.second, actual.second); }
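For orientation, a short sketch (mirroring the call shape used in the tests above, not part of the test file) of how valid_if builds a null mask from a row-index predicate and returns it together with the null count:

struct evens_valid {
  __host__ __device__ bool operator()(cudf::size_type i) const { return i % 2 == 0; }
};

// Returns a pair of (packed validity bitmask, null count); the stream and memory
// resource arguments match those passed by the tests above.
auto make_even_row_mask(cudf::size_type num_rows)
{
  return cudf::detail::valid_if(thrust::make_counting_iterator(0),
                                thrust::make_counting_iterator(num_rows),
                                evens_valid{},
                                cudf::get_default_stream(),
                                rmm::mr::get_current_device_resource());
}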
0
rapidsai_public_repos/cudf/cpp/tests
rapidsai_public_repos/cudf/cpp/tests/bitmask/is_element_valid_tests.cpp
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf/copying.hpp> #include <cudf/detail/is_element_valid.hpp> #include <cudf/detail/iterator.cuh> #include <thrust/iterator/counting_iterator.h> struct IsElementValidTest : public cudf::test::BaseFixture {}; TEST_F(IsElementValidTest, IsElementValidBasic) { cudf::test::fixed_width_column_wrapper<int32_t> col({1, 1, 1, 1, 1}, {1, 0, 0, 0, 1}); EXPECT_TRUE(cudf::detail::is_element_valid_sync(col, 0, cudf::get_default_stream())); EXPECT_FALSE(cudf::detail::is_element_valid_sync(col, 1, cudf::get_default_stream())); EXPECT_FALSE(cudf::detail::is_element_valid_sync(col, 2, cudf::get_default_stream())); EXPECT_FALSE(cudf::detail::is_element_valid_sync(col, 3, cudf::get_default_stream())); EXPECT_TRUE(cudf::detail::is_element_valid_sync(col, 4, cudf::get_default_stream())); } TEST_F(IsElementValidTest, IsElementValidLarge) { auto filter = [](auto i) { return static_cast<bool>(i % 3); }; auto val = thrust::make_counting_iterator(0); auto valid = cudf::detail::make_counting_transform_iterator(0, filter); cudf::size_type num_rows = 1000; cudf::test::fixed_width_column_wrapper<int32_t> col(val, val + num_rows, valid); for (int i = 0; i < num_rows; i++) { EXPECT_EQ(cudf::detail::is_element_valid_sync(col, i, cudf::get_default_stream()), filter(i)); } } TEST_F(IsElementValidTest, IsElementValidOffset) { cudf::test::fixed_width_column_wrapper<int32_t> col({1, 1, 1, 1, 1}, {1, 0, 0, 0, 1}); { auto offset_col = cudf::slice(col, {1, 5}).front(); EXPECT_FALSE(cudf::detail::is_element_valid_sync(offset_col, 0, cudf::get_default_stream())); EXPECT_FALSE(cudf::detail::is_element_valid_sync(offset_col, 1, cudf::get_default_stream())); EXPECT_FALSE(cudf::detail::is_element_valid_sync(offset_col, 2, cudf::get_default_stream())); EXPECT_TRUE(cudf::detail::is_element_valid_sync(offset_col, 3, cudf::get_default_stream())); } { auto offset_col = cudf::slice(col, {2, 5}).front(); EXPECT_FALSE(cudf::detail::is_element_valid_sync(offset_col, 0, cudf::get_default_stream())); EXPECT_FALSE(cudf::detail::is_element_valid_sync(offset_col, 1, cudf::get_default_stream())); EXPECT_TRUE(cudf::detail::is_element_valid_sync(offset_col, 2, cudf::get_default_stream())); } } TEST_F(IsElementValidTest, IsElementValidOffsetLarge) { auto filter = [](auto i) { return static_cast<bool>(i % 3); }; cudf::size_type offset = 37; auto val = thrust::make_counting_iterator(0); auto valid = cudf::detail::make_counting_transform_iterator(0, filter); cudf::size_type num_rows = 1000; cudf::test::fixed_width_column_wrapper<int32_t> col(val, val + num_rows, valid); auto offset_col = cudf::slice(col, {offset, num_rows}).front(); for (int i = 0; i < offset_col.size(); i++) { EXPECT_EQ(cudf::detail::is_element_valid_sync(offset_col, i, cudf::get_default_stream()), filter(i + offset)); } }
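As a reminder of the offset arithmetic these tests rely on, a hedged sketch: row i of a sliced view reads its validity from the parent column at position i + offset, which is exactly what IsElementValidOffsetLarge checks. The helper name below is illustrative, not a libcudf API.

bool sliced_row_is_valid(cudf::column_view const& parent,
                         cudf::size_type offset,
                         cudf::size_type i)
{
  auto const sliced = cudf::slice(parent, {offset, parent.size()}).front();
  // Equivalent to querying the parent's bitmask at i + offset.
  return cudf::detail::is_element_valid_sync(sliced, i, cudf::get_default_stream());
}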
0
rapidsai_public_repos/cudf/cpp/tests
rapidsai_public_repos/cudf/cpp/tests/bitmask/bitmask_tests.cpp
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/concatenate.hpp> #include <cudf/copying.hpp> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/null_mask.hpp> #include <cudf/types.hpp> #include <cudf/utilities/default_stream.hpp> #include <cudf/utilities/error.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_utilities.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/cudf_gtest.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_uvector.hpp> struct BitmaskUtilitiesTest : public cudf::test::BaseFixture {}; TEST_F(BitmaskUtilitiesTest, StateNullCount) { EXPECT_EQ(0, cudf::state_null_count(cudf::mask_state::UNALLOCATED, 42)); EXPECT_EQ(42, cudf::state_null_count(cudf::mask_state::ALL_NULL, 42)); EXPECT_EQ(0, cudf::state_null_count(cudf::mask_state::ALL_VALID, 42)); EXPECT_THROW(cudf::state_null_count(cudf::mask_state::UNINITIALIZED, 42), std::invalid_argument); } TEST_F(BitmaskUtilitiesTest, BitmaskAllocationSize) { EXPECT_EQ(0u, cudf::bitmask_allocation_size_bytes(0)); EXPECT_EQ(64u, cudf::bitmask_allocation_size_bytes(1)); EXPECT_EQ(64u, cudf::bitmask_allocation_size_bytes(512)); EXPECT_EQ(128u, cudf::bitmask_allocation_size_bytes(513)); EXPECT_EQ(128u, cudf::bitmask_allocation_size_bytes(1023)); EXPECT_EQ(128u, cudf::bitmask_allocation_size_bytes(1024)); EXPECT_EQ(192u, cudf::bitmask_allocation_size_bytes(1025)); } TEST_F(BitmaskUtilitiesTest, NumBitmaskWords) { EXPECT_EQ(0, cudf::num_bitmask_words(0)); EXPECT_EQ(1, cudf::num_bitmask_words(1)); EXPECT_EQ(1, cudf::num_bitmask_words(31)); EXPECT_EQ(1, cudf::num_bitmask_words(32)); EXPECT_EQ(2, cudf::num_bitmask_words(33)); EXPECT_EQ(2, cudf::num_bitmask_words(63)); EXPECT_EQ(2, cudf::num_bitmask_words(64)); EXPECT_EQ(3, cudf::num_bitmask_words(65)); } struct CountBitmaskTest : public cudf::test::BaseFixture {}; TEST_F(CountBitmaskTest, NullMask) { EXPECT_THROW(cudf::detail::count_set_bits(nullptr, 0, 32, cudf::get_default_stream()), cudf::logic_error); EXPECT_EQ(32, cudf::detail::valid_count(nullptr, 0, 32, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {0, 32, 7, 25}; EXPECT_THROW(cudf::detail::segmented_count_set_bits(nullptr, indices, cudf::get_default_stream()), cudf::logic_error); auto valid_counts = cudf::detail::segmented_valid_count(nullptr, indices, cudf::get_default_stream()); EXPECT_THAT(valid_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{32, 18})); } // Utility to construct a mask vector. If fill_valid is false (default), it is initialized to all // null. Otherwise it is initialized to all valid. 
rmm::device_uvector<cudf::bitmask_type> make_mask(cudf::size_type size, bool fill_valid = false) { if (!fill_valid) { return cudf::detail::make_zeroed_device_uvector_sync<cudf::bitmask_type>( size, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); } else { auto ret = rmm::device_uvector<cudf::bitmask_type>(size, cudf::get_default_stream()); CUDF_CUDA_TRY(cudaMemsetAsync(ret.data(), ~cudf::bitmask_type{0}, size * sizeof(cudf::bitmask_type), cudf::get_default_stream().value())); return ret; } } TEST_F(CountBitmaskTest, NegativeStart) { auto mask = make_mask(1); EXPECT_THROW(cudf::detail::count_set_bits(mask.data(), -1, 32, cudf::get_default_stream()), cudf::logic_error); EXPECT_THROW(cudf::detail::valid_count(mask.data(), -1, 32, cudf::get_default_stream()), cudf::logic_error); std::vector<cudf::size_type> indices = {0, 16, -1, 32}; EXPECT_THROW( cudf::detail::segmented_count_set_bits(mask.data(), indices, cudf::get_default_stream()), cudf::logic_error); EXPECT_THROW( cudf::detail::segmented_valid_count(mask.data(), indices, cudf::get_default_stream()), cudf::logic_error); } TEST_F(CountBitmaskTest, StartLargerThanStop) { auto mask = make_mask(1); EXPECT_THROW(cudf::detail::count_set_bits(mask.data(), 32, 31, cudf::get_default_stream()), cudf::logic_error); EXPECT_THROW(cudf::detail::valid_count(mask.data(), 32, 31, cudf::get_default_stream()), cudf::logic_error); std::vector<cudf::size_type> indices = {0, 16, 31, 30}; EXPECT_THROW( cudf::detail::segmented_count_set_bits(mask.data(), indices, cudf::get_default_stream()), cudf::logic_error); EXPECT_THROW( cudf::detail::segmented_valid_count(mask.data(), indices, cudf::get_default_stream()), cudf::logic_error); } TEST_F(CountBitmaskTest, EmptyRange) { auto mask = make_mask(1); EXPECT_EQ(0, cudf::detail::count_set_bits(mask.data(), 17, 17, cudf::get_default_stream())); EXPECT_EQ(0, cudf::detail::valid_count(mask.data(), 17, 17, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {0, 0, 17, 17}; auto set_counts = cudf::detail::segmented_count_set_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(set_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{0, 0})); auto valid_counts = cudf::detail::segmented_valid_count(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(valid_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{0, 0})); } TEST_F(CountBitmaskTest, SingleWordAllZero) { auto mask = make_mask(1); EXPECT_EQ(0, cudf::detail::count_set_bits(mask.data(), 0, 32, cudf::get_default_stream())); EXPECT_EQ(0, cudf::detail::valid_count(mask.data(), 0, 32, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {0, 32, 0, 32}; auto set_counts = cudf::detail::segmented_count_set_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(set_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{0, 0})); auto valid_counts = cudf::detail::segmented_count_set_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(valid_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{0, 0})); } TEST_F(CountBitmaskTest, SingleBitAllZero) { auto mask = make_mask(1); EXPECT_EQ(0, cudf::detail::count_set_bits(mask.data(), 17, 18, cudf::get_default_stream())); EXPECT_EQ(0, cudf::detail::valid_count(mask.data(), 17, 18, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {17, 18, 7, 8}; auto set_counts = cudf::detail::segmented_count_set_bits(mask.data(), indices, cudf::get_default_stream()); 
EXPECT_THAT(set_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{0, 0})); auto valid_counts = cudf::detail::segmented_valid_count(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(valid_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{0, 0})); } TEST_F(CountBitmaskTest, SingleBitAllSet) { auto mask = make_mask(1, true); EXPECT_EQ(1, cudf::detail::count_set_bits(mask.data(), 13, 14, cudf::get_default_stream())); EXPECT_EQ(1, cudf::detail::valid_count(mask.data(), 13, 14, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {13, 14, 0, 1}; auto set_counts = cudf::detail::segmented_count_set_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(set_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{1, 1})); auto valid_counts = cudf::detail::segmented_valid_count(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(valid_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{1, 1})); } TEST_F(CountBitmaskTest, SingleWordAllBitsSet) { auto mask = make_mask(1, true); EXPECT_EQ(32, cudf::detail::count_set_bits(mask.data(), 0, 32, cudf::get_default_stream())); EXPECT_EQ(32, cudf::detail::valid_count(mask.data(), 0, 32, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {0, 32, 0, 32}; auto set_counts = cudf::detail::segmented_count_set_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(set_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{32, 32})); auto valid_counts = cudf::detail::segmented_valid_count(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(valid_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{32, 32})); } TEST_F(CountBitmaskTest, SingleWordPreSlack) { auto mask = make_mask(1, true); EXPECT_EQ(25, cudf::detail::count_set_bits(mask.data(), 7, 32, cudf::get_default_stream())); EXPECT_EQ(25, cudf::detail::valid_count(mask.data(), 7, 32, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {7, 32, 8, 32}; auto set_counts = cudf::detail::segmented_count_set_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(set_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{25, 24})); auto valid_counts = cudf::detail::segmented_valid_count(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(valid_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{25, 24})); } TEST_F(CountBitmaskTest, SingleWordPostSlack) { auto mask = make_mask(1, true); EXPECT_EQ(17, cudf::detail::count_set_bits(mask.data(), 0, 17, cudf::get_default_stream())); EXPECT_EQ(17, cudf::detail::valid_count(mask.data(), 0, 17, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {0, 17, 0, 18}; auto set_counts = cudf::detail::segmented_count_set_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(set_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{17, 18})); auto valid_counts = cudf::detail::segmented_valid_count(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(valid_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{17, 18})); } TEST_F(CountBitmaskTest, SingleWordSubset) { auto mask = make_mask(1, true); EXPECT_EQ(30, cudf::detail::count_set_bits(mask.data(), 1, 31, cudf::get_default_stream())); EXPECT_EQ(30, cudf::detail::valid_count(mask.data(), 1, 31, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {1, 31, 7, 17}; auto set_counts = cudf::detail::segmented_count_set_bits(mask.data(), 
indices, cudf::get_default_stream()); EXPECT_THAT(set_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{30, 10})); auto valid_counts = cudf::detail::segmented_valid_count(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(valid_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{30, 10})); } TEST_F(CountBitmaskTest, SingleWordSubset2) { auto mask = make_mask(1, true); EXPECT_EQ(28, cudf::detail::count_set_bits(mask.data(), 2, 30, cudf::get_default_stream())); EXPECT_EQ(28, cudf::detail::valid_count(mask.data(), 2, 30, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {4, 16, 2, 30}; auto set_counts = cudf::detail::segmented_count_set_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(set_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{12, 28})); auto valid_counts = cudf::detail::segmented_valid_count(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(valid_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{12, 28})); } TEST_F(CountBitmaskTest, MultipleWordsAllBits) { auto mask = make_mask(10, true); EXPECT_EQ(320, cudf::detail::count_set_bits(mask.data(), 0, 320, cudf::get_default_stream())); EXPECT_EQ(320, cudf::detail::valid_count(mask.data(), 0, 320, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {0, 320, 0, 320}; auto set_counts = cudf::detail::segmented_count_set_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(set_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{320, 320})); auto valid_counts = cudf::detail::segmented_valid_count(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(valid_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{320, 320})); } TEST_F(CountBitmaskTest, MultipleWordsSubsetWordBoundary) { auto mask = make_mask(10, true); EXPECT_EQ(256, cudf::detail::count_set_bits(mask.data(), 32, 288, cudf::get_default_stream())); EXPECT_EQ(256, cudf::detail::valid_count(mask.data(), 32, 288, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {32, 192, 32, 288}; auto set_counts = cudf::detail::segmented_count_set_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(set_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{160, 256})); auto valid_counts = cudf::detail::segmented_valid_count(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(valid_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{160, 256})); } TEST_F(CountBitmaskTest, MultipleWordsSplitWordBoundary) { auto mask = make_mask(10, true); EXPECT_EQ(2, cudf::detail::count_set_bits(mask.data(), 31, 33, cudf::get_default_stream())); EXPECT_EQ(2, cudf::detail::valid_count(mask.data(), 31, 33, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {31, 33, 60, 67}; auto set_counts = cudf::detail::segmented_count_set_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(set_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{2, 7})); auto valid_counts = cudf::detail::segmented_valid_count(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(valid_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{2, 7})); } TEST_F(CountBitmaskTest, MultipleWordsSubset) { auto mask = make_mask(10, true); EXPECT_EQ(226, cudf::detail::count_set_bits(mask.data(), 67, 293, cudf::get_default_stream())); EXPECT_EQ(226, cudf::detail::valid_count(mask.data(), 67, 293, cudf::get_default_stream())); 
std::vector<cudf::size_type> indices = {67, 293, 37, 319}; auto set_counts = cudf::detail::segmented_count_set_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(set_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{226, 282})); auto valid_counts = cudf::detail::segmented_valid_count(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(valid_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{226, 282})); } TEST_F(CountBitmaskTest, MultipleWordsSingleBit) { auto mask = make_mask(10, true); EXPECT_EQ(1, cudf::detail::count_set_bits(mask.data(), 67, 68, cudf::get_default_stream())); EXPECT_EQ(1, cudf::detail::valid_count(mask.data(), 67, 68, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {67, 68, 31, 32, 192, 193}; auto set_counts = cudf::detail::segmented_count_set_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(set_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{1, 1, 1})); auto valid_counts = cudf::detail::segmented_valid_count(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(valid_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{1, 1, 1})); } using CountUnsetBitsTest = CountBitmaskTest; TEST_F(CountUnsetBitsTest, SingleBitAllSet) { auto mask = make_mask(1, true); EXPECT_EQ(0, cudf::detail::count_unset_bits(mask.data(), 13, 14, cudf::get_default_stream())); EXPECT_EQ(0, cudf::detail::null_count(mask.data(), 13, 14, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {13, 14, 31, 32}; auto unset_counts = cudf::detail::segmented_count_unset_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(unset_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{0, 0})); auto null_counts = cudf::detail::segmented_count_unset_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(null_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{0, 0})); } TEST_F(CountUnsetBitsTest, NullMask) { EXPECT_THROW(cudf::detail::count_unset_bits(nullptr, 0, 32, cudf::get_default_stream()), cudf::logic_error); EXPECT_EQ(0, cudf::detail::null_count(nullptr, 0, 32, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {0, 32, 7, 25}; EXPECT_THROW( cudf::detail::segmented_count_unset_bits(nullptr, indices, cudf::get_default_stream()), cudf::logic_error); auto null_counts = cudf::detail::segmented_null_count(nullptr, indices, cudf::get_default_stream()); EXPECT_THAT(null_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{0, 0})); } TEST_F(CountUnsetBitsTest, SingleWordAllBits) { auto mask = make_mask(1); EXPECT_EQ(32, cudf::detail::count_unset_bits(mask.data(), 0, 32, cudf::get_default_stream())); EXPECT_EQ(32, cudf::detail::null_count(mask.data(), 0, 32, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {0, 32, 0, 32}; auto unset_counts = cudf::detail::segmented_count_unset_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(unset_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{32, 32})); auto null_counts = cudf::detail::segmented_null_count(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(null_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{32, 32})); } TEST_F(CountUnsetBitsTest, SingleWordPreSlack) { auto mask = make_mask(1); EXPECT_EQ(25, cudf::detail::count_unset_bits(mask.data(), 7, 32, cudf::get_default_stream())); EXPECT_EQ(25, cudf::detail::null_count(mask.data(), 7, 32, 
cudf::get_default_stream())); std::vector<cudf::size_type> indices = {7, 32, 8, 32}; auto unset_counts = cudf::detail::segmented_count_unset_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(unset_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{25, 24})); auto null_counts = cudf::detail::segmented_null_count(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(null_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{25, 24})); } TEST_F(CountUnsetBitsTest, SingleWordPostSlack) { auto mask = make_mask(1); EXPECT_EQ(17, cudf::detail::count_unset_bits(mask.data(), 0, 17, cudf::get_default_stream())); EXPECT_EQ(17, cudf::detail::null_count(mask.data(), 0, 17, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {0, 17, 0, 18}; auto unset_counts = cudf::detail::segmented_count_unset_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(unset_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{17, 18})); auto null_counts = cudf::detail::segmented_null_count(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(null_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{17, 18})); } TEST_F(CountUnsetBitsTest, SingleWordSubset) { auto mask = make_mask(1); EXPECT_EQ(30, cudf::detail::count_unset_bits(mask.data(), 1, 31, cudf::get_default_stream())); EXPECT_EQ(30, cudf::detail::null_count(mask.data(), 1, 31, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {1, 31, 7, 17}; auto unset_counts = cudf::detail::segmented_count_unset_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(unset_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{30, 10})); auto null_counts = cudf::detail::segmented_null_count(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(null_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{30, 10})); } TEST_F(CountUnsetBitsTest, SingleWordSubset2) { auto mask = make_mask(1); EXPECT_EQ(28, cudf::detail::count_unset_bits(mask.data(), 2, 30, cudf::get_default_stream())); EXPECT_EQ(28, cudf::detail::null_count(mask.data(), 2, 30, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {4, 16, 2, 30}; auto unset_counts = cudf::detail::segmented_count_unset_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(unset_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{12, 28})); auto null_counts = cudf::detail::segmented_null_count(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(null_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{12, 28})); } TEST_F(CountUnsetBitsTest, MultipleWordsAllBits) { auto mask = make_mask(10); EXPECT_EQ(320, cudf::detail::count_unset_bits(mask.data(), 0, 320, cudf::get_default_stream())); EXPECT_EQ(320, cudf::detail::null_count(mask.data(), 0, 320, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {0, 320, 0, 320}; auto unset_counts = cudf::detail::segmented_count_unset_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(unset_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{320, 320})); auto null_counts = cudf::detail::segmented_null_count(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(null_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{320, 320})); } TEST_F(CountUnsetBitsTest, MultipleWordsSubsetWordBoundary) { auto mask = make_mask(10); EXPECT_EQ(256, cudf::detail::count_unset_bits(mask.data(), 32, 288, 
cudf::get_default_stream())); EXPECT_EQ(256, cudf::detail::null_count(mask.data(), 32, 288, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {32, 192, 32, 288}; auto unset_counts = cudf::detail::segmented_count_unset_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(unset_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{160, 256})); auto null_counts = cudf::detail::segmented_null_count(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(null_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{160, 256})); } TEST_F(CountUnsetBitsTest, MultipleWordsSplitWordBoundary) { auto mask = make_mask(10); EXPECT_EQ(2, cudf::detail::count_unset_bits(mask.data(), 31, 33, cudf::get_default_stream())); EXPECT_EQ(2, cudf::detail::null_count(mask.data(), 31, 33, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {31, 33, 60, 67}; auto unset_counts = cudf::detail::segmented_count_unset_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(unset_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{2, 7})); auto null_counts = cudf::detail::segmented_null_count(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(null_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{2, 7})); } TEST_F(CountUnsetBitsTest, MultipleWordsSubset) { auto mask = make_mask(10); EXPECT_EQ(226, cudf::detail::count_unset_bits(mask.data(), 67, 293, cudf::get_default_stream())); EXPECT_EQ(226, cudf::detail::null_count(mask.data(), 67, 293, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {67, 293, 37, 319}; auto unset_counts = cudf::detail::segmented_count_unset_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(unset_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{226, 282})); auto null_counts = cudf::detail::segmented_null_count(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(null_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{226, 282})); } TEST_F(CountUnsetBitsTest, MultipleWordsSingleBit) { auto mask = make_mask(10); EXPECT_EQ(1, cudf::detail::count_unset_bits(mask.data(), 67, 68, cudf::get_default_stream())); EXPECT_EQ(1, cudf::detail::null_count(mask.data(), 67, 68, cudf::get_default_stream())); std::vector<cudf::size_type> indices = {67, 68, 31, 32, 192, 193}; auto unset_counts = cudf::detail::segmented_count_unset_bits(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(unset_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{1, 1, 1})); auto null_counts = cudf::detail::segmented_null_count(mask.data(), indices, cudf::get_default_stream()); EXPECT_THAT(null_counts, ::testing::ElementsAreArray(std::vector<cudf::size_type>{1, 1, 1})); } struct CopyBitmaskTest : public cudf::test::BaseFixture, cudf::test::UniformRandomGenerator<int> { CopyBitmaskTest() : cudf::test::UniformRandomGenerator<int>{0, 1} {} }; void cleanEndWord(rmm::device_buffer& mask, int begin_bit, int end_bit) { auto ptr = static_cast<cudf::bitmask_type*>(mask.data()); auto number_of_mask_words = cudf::num_bitmask_words(static_cast<size_t>(end_bit - begin_bit)); auto number_of_bits = end_bit - begin_bit; if (number_of_bits % 32 != 0) { cudf::bitmask_type end_mask = 0; CUDF_CUDA_TRY( cudaMemcpy(&end_mask, ptr + number_of_mask_words - 1, sizeof(end_mask), cudaMemcpyDefault)); end_mask = end_mask & ((1 << (number_of_bits % 32)) - 1); CUDF_CUDA_TRY( cudaMemcpy(ptr + number_of_mask_words - 1, &end_mask, 
sizeof(end_mask), cudaMemcpyDefault)); } } TEST_F(CopyBitmaskTest, NegativeStart) { auto mask = make_mask(1); EXPECT_THROW(cudf::copy_bitmask(mask.data(), -1, 32), cudf::logic_error); } TEST_F(CopyBitmaskTest, StartLargerThanStop) { auto mask = make_mask(1); EXPECT_THROW(cudf::copy_bitmask(mask.data(), 32, 31), cudf::logic_error); } TEST_F(CopyBitmaskTest, EmptyRange) { auto mask = make_mask(1); auto buff = cudf::copy_bitmask(mask.data(), 17, 17); EXPECT_EQ(0, static_cast<int>(buff.size())); } TEST_F(CopyBitmaskTest, NullPtr) { auto buff = cudf::copy_bitmask(nullptr, 17, 17); EXPECT_EQ(0, static_cast<int>(buff.size())); } TEST_F(CopyBitmaskTest, TestZeroOffset) { std::vector<int> validity_bit(1000); for (auto& m : validity_bit) { m = this->generate(); } auto input_mask = std::get<0>(cudf::test::detail::make_null_mask(validity_bit.begin(), validity_bit.end())); int begin_bit = 0; int end_bit = 800; auto gold_splice_mask = std::get<0>(cudf::test::detail::make_null_mask( validity_bit.begin() + begin_bit, validity_bit.begin() + end_bit)); auto splice_mask = cudf::copy_bitmask( static_cast<cudf::bitmask_type const*>(input_mask.data()), begin_bit, end_bit); cleanEndWord(splice_mask, begin_bit, end_bit); auto number_of_bits = end_bit - begin_bit; CUDF_TEST_EXPECT_EQUAL_BUFFERS( gold_splice_mask.data(), splice_mask.data(), cudf::num_bitmask_words(number_of_bits)); } TEST_F(CopyBitmaskTest, TestNonZeroOffset) { std::vector<int> validity_bit(1000); for (auto& m : validity_bit) { m = this->generate(); } auto input_mask = std::get<0>(cudf::test::detail::make_null_mask(validity_bit.begin(), validity_bit.end())); int begin_bit = 321; int end_bit = 998; auto gold_splice_mask = std::get<0>(cudf::test::detail::make_null_mask( validity_bit.begin() + begin_bit, validity_bit.begin() + end_bit)); auto splice_mask = cudf::copy_bitmask( static_cast<cudf::bitmask_type const*>(input_mask.data()), begin_bit, end_bit); cleanEndWord(splice_mask, begin_bit, end_bit); auto number_of_bits = end_bit - begin_bit; CUDF_TEST_EXPECT_EQUAL_BUFFERS( gold_splice_mask.data(), splice_mask.data(), cudf::num_bitmask_words(number_of_bits)); } TEST_F(CopyBitmaskTest, TestCopyColumnViewVectorContiguous) { cudf::data_type t{cudf::type_id::INT32}; cudf::size_type num_elements = 1001; std::vector<int> validity_bit(num_elements); for (auto& m : validity_bit) { m = this->generate(); } auto [gold_mask, null_count] = cudf::test::detail::make_null_mask(validity_bit.begin(), validity_bit.end()); rmm::device_buffer copy_mask{gold_mask, cudf::get_default_stream()}; cudf::column original{t, num_elements, rmm::device_buffer{num_elements * sizeof(int), cudf::get_default_stream()}, std::move(copy_mask), null_count}; std::vector<cudf::size_type> indices{0, 104, 104, 128, 128, 152, 152, 311, 311, 491, 491, 583, 583, 734, 734, 760, 760, num_elements}; std::vector<cudf::column_view> views = cudf::slice(original, indices); rmm::device_buffer concatenated_bitmask = cudf::concatenate_masks(views); cleanEndWord(concatenated_bitmask, 0, num_elements); CUDF_TEST_EXPECT_EQUAL_BUFFERS( concatenated_bitmask.data(), gold_mask.data(), cudf::num_bitmask_words(num_elements)); } TEST_F(CopyBitmaskTest, TestCopyColumnViewVectorDiscontiguous) { cudf::data_type t{cudf::type_id::INT32}; cudf::size_type num_elements = 1001; std::vector<int> validity_bit(num_elements); for (auto& m : validity_bit) { m = this->generate(); } auto gold_mask = std::get<0>(cudf::test::detail::make_null_mask(validity_bit.begin(), validity_bit.end())); std::vector<cudf::size_type> split{0, 104, 
128, 152, 311, 491, 583, 734, 760, num_elements}; std::vector<cudf::column> cols; std::vector<cudf::column_view> views; for (unsigned i = 0; i < split.size() - 1; i++) { auto [null_mask, null_count] = cudf::test::detail::make_null_mask( validity_bit.begin() + split[i], validity_bit.begin() + split[i + 1]); cols.emplace_back( t, split[i + 1] - split[i], rmm::device_buffer{sizeof(int) * (split[i + 1] - split[i]), cudf::get_default_stream()}, std::move(null_mask), null_count); views.push_back(cols.back()); } rmm::device_buffer concatenated_bitmask = cudf::concatenate_masks(views); cleanEndWord(concatenated_bitmask, 0, num_elements); CUDF_TEST_EXPECT_EQUAL_BUFFERS( concatenated_bitmask.data(), gold_mask.data(), cudf::num_bitmask_words(num_elements)); } struct MergeBitmaskTest : public cudf::test::BaseFixture {}; TEST_F(MergeBitmaskTest, TestBitmaskAnd) { cudf::test::fixed_width_column_wrapper<bool> const bools_col1({0, 1, 0, 1, 1}, {0, 1, 1, 1, 0}); cudf::test::fixed_width_column_wrapper<bool> const bools_col2({0, 2, 1, 0, 255}, {1, 1, 0, 1, 0}); cudf::test::fixed_width_column_wrapper<bool> const bools_col3({0, 2, 1, 0, 255}); auto const input1 = cudf::table_view({bools_col3}); auto const input2 = cudf::table_view({bools_col1, bools_col2}); auto const input3 = cudf::table_view({bools_col1, bools_col2, bools_col3}); auto [result1_mask, result1_null_count] = cudf::bitmask_and(input1); auto [result2_mask, result2_null_count] = cudf::bitmask_and(input2); auto [result3_mask, result3_null_count] = cudf::bitmask_and(input3); constexpr cudf::size_type gold_null_count = 3; EXPECT_EQ(result1_null_count, 0); EXPECT_EQ(result2_null_count, gold_null_count); EXPECT_EQ(result3_null_count, gold_null_count); auto odd_indices = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2; }); auto odd = std::get<0>(cudf::test::detail::make_null_mask(odd_indices, odd_indices + input2.num_rows())); EXPECT_EQ(nullptr, result1_mask.data()); CUDF_TEST_EXPECT_EQUAL_BUFFERS( result2_mask.data(), odd.data(), cudf::num_bitmask_words(input2.num_rows())); CUDF_TEST_EXPECT_EQUAL_BUFFERS( result3_mask.data(), odd.data(), cudf::num_bitmask_words(input2.num_rows())); } TEST_F(MergeBitmaskTest, TestBitmaskOr) { cudf::test::fixed_width_column_wrapper<bool> const bools_col1({0, 1, 0, 1, 1}, {1, 1, 0, 0, 1}); cudf::test::fixed_width_column_wrapper<bool> const bools_col2({0, 2, 1, 0, 255}, {0, 0, 1, 0, 1}); cudf::test::fixed_width_column_wrapper<bool> const bools_col3({0, 2, 1, 0, 255}); auto const input1 = cudf::table_view({bools_col3}); auto const input2 = cudf::table_view({bools_col1, bools_col2}); auto const input3 = cudf::table_view({bools_col1, bools_col2, bools_col3}); auto [result1_mask, result1_null_count] = cudf::bitmask_or(input1); auto [result2_mask, result2_null_count] = cudf::bitmask_or(input2); auto [result3_mask, result3_null_count] = cudf::bitmask_or(input3); EXPECT_EQ(result1_null_count, 0); EXPECT_EQ(result2_null_count, 1); EXPECT_EQ(result3_null_count, 0); auto all_but_index3 = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i != 3; }); auto null3 = std::get<0>( cudf::test::detail::make_null_mask(all_but_index3, all_but_index3 + input2.num_rows())); EXPECT_EQ(nullptr, result1_mask.data()); CUDF_TEST_EXPECT_EQUAL_BUFFERS( result2_mask.data(), null3.data(), cudf::num_bitmask_words(input2.num_rows())); EXPECT_EQ(nullptr, result3_mask.data()); } CUDF_TEST_PROGRAM_MAIN()
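The expectations in BitmaskAllocationSize and NumBitmaskWords above follow from two facts: one 4-byte bitmask word covers 32 rows, and mask allocations are padded up to a 64-byte boundary. A standalone arithmetic sketch (not library code) reproducing those numbers:

#include <cstddef>
#include <cstdint>

constexpr std::size_t words_needed(std::size_t num_rows) { return (num_rows + 31) / 32; }

constexpr std::size_t padded_mask_bytes(std::size_t num_rows)
{
  auto const bytes = words_needed(num_rows) * sizeof(std::uint32_t);
  return ((bytes + 63) / 64) * 64;  // round up to a multiple of 64 bytes
}

static_assert(words_needed(33) == 2);
static_assert(padded_mask_bytes(1) == 64);
static_assert(padded_mask_bytes(513) == 128);
static_assert(padded_mask_bytes(1025) == 192);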
0
rapidsai_public_repos/cudf/cpp/tests
rapidsai_public_repos/cudf/cpp/tests/bitmask/set_nullmask_tests.cu
/* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <iostream> #include <cudf_test/base_fixture.hpp> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/null_mask.hpp> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/default_stream.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/host_vector.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/transform.h> struct valid_bit_functor { cudf::bitmask_type const* _null_mask; __device__ bool operator()(cudf::size_type element_index) const noexcept { return cudf::bit_is_set(_null_mask, element_index); } }; std::ostream& operator<<(std::ostream& stream, thrust::host_vector<bool> const& bits) { for (auto _bit : bits) stream << int(_bit); return stream; } struct SetBitmaskTest : public cudf::test::BaseFixture { void expect_bitmask_equal(cudf::bitmask_type const* bitmask, // Device Ptr cudf::size_type start_bit, thrust::host_vector<bool> const& expect, rmm::cuda_stream_view stream = cudf::get_default_stream()) { rmm::device_uvector<bool> result(expect.size(), stream); auto counting_iter = thrust::counting_iterator<cudf::size_type>{0}; thrust::transform(rmm::exec_policy(stream), counting_iter + start_bit, counting_iter + start_bit + expect.size(), result.begin(), valid_bit_functor{bitmask}); auto host_result = cudf::detail::make_host_vector_sync(result, stream); EXPECT_THAT(host_result, testing::ElementsAreArray(expect)); } void test_set_null_range(cudf::size_type size, cudf::size_type begin, cudf::size_type end, bool valid) { thrust::host_vector<bool> expected(end - begin, valid); // TEST rmm::device_buffer mask = create_null_mask(size, cudf::mask_state::UNINITIALIZED); // valid ? 
cudf::mask_state::ALL_NULL : cudf::mask_state::ALL_VALID); cudf::set_null_mask(static_cast<cudf::bitmask_type*>(mask.data()), begin, end, valid); expect_bitmask_equal(static_cast<cudf::bitmask_type*>(mask.data()), begin, expected); } void test_null_partition(cudf::size_type size, cudf::size_type middle, bool valid) { thrust::host_vector<bool> expected(size); std::generate(expected.begin(), expected.end(), [n = 0, middle, valid]() mutable { auto i = n++; return (!valid) ^ (i < middle); }); // TEST rmm::device_buffer mask = create_null_mask(size, cudf::mask_state::UNINITIALIZED); cudf::set_null_mask(static_cast<cudf::bitmask_type*>(mask.data()), 0, middle, valid); cudf::set_null_mask(static_cast<cudf::bitmask_type*>(mask.data()), middle, size, !valid); expect_bitmask_equal(static_cast<cudf::bitmask_type*>(mask.data()), 0, expected); } }; // tests for set_null_mask TEST_F(SetBitmaskTest, fill_range) { cudf::size_type size = 121; for (auto begin = 0; begin < size; begin += 5) for (auto end = begin + 1; end <= size; end += 7) { this->test_set_null_range(size, begin, end, true); this->test_set_null_range(size, begin, end, false); } } TEST_F(SetBitmaskTest, null_mask_partition) { cudf::size_type size = 64; for (auto middle = 1; middle < size; middle++) { this->test_null_partition(size, middle, true); this->test_null_partition(size, middle, false); } } TEST_F(SetBitmaskTest, error_range) { cudf::size_type size = 121; using size_pair = std::pair<cudf::size_type, cudf::size_type>; std::vector<size_pair> begin_end_fail{ {-1, size}, // begin>=0 {-2, -1}, // begin>=0 {9, 8}, // begin<=end }; for (auto begin_end : begin_end_fail) { auto begin = begin_end.first, end = begin_end.second; EXPECT_ANY_THROW(this->test_set_null_range(size, begin, end, true)); EXPECT_ANY_THROW(this->test_set_null_range(size, begin, end, false)); } std::vector<size_pair> begin_end_pass{ {0, size}, // begin>=0 {0, 1}, // begin>=0 {8, 8}, // begin==end {8, 9}, // begin<=end {size - 1, size}, // begin<=end }; for (auto begin_end : begin_end_pass) { auto begin = begin_end.first, end = begin_end.second; EXPECT_NO_THROW(this->test_set_null_range(size, begin, end, true)); EXPECT_NO_THROW(this->test_set_null_range(size, begin, end, false)); } }
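A brief usage sketch (the helper name is hypothetical) of the pattern test_set_null_range drives: allocate a mask for `size` rows, then flip the bits of a [begin, end) range to valid or null.

rmm::device_buffer make_partial_mask(cudf::size_type size,
                                     cudf::size_type begin,
                                     cudf::size_type end,
                                     bool valid)
{
  auto mask = cudf::create_null_mask(size, cudf::mask_state::ALL_NULL);
  // true marks the range valid (bits set), false marks it null (bits cleared).
  cudf::set_null_mask(static_cast<cudf::bitmask_type*>(mask.data()), begin, end, valid);
  return mask;
}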
0
rapidsai_public_repos/cudf/cpp/tests
rapidsai_public_repos/cudf/cpp/tests/device_atomics/device_atomics_test.cu
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/detail/utilities/device_atomics.cuh> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/utilities/default_stream.hpp> #include <cudf/utilities/traits.hpp> #include <cudf/wrappers/timestamps.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/timestamp_utilities.cuh> #include <cudf_test/type_lists.hpp> #include <thrust/host_vector.h> #include <algorithm> template <typename T> __global__ void gpu_atomic_test(T* result, T* data, size_t size) { size_t id = blockIdx.x * blockDim.x + threadIdx.x; size_t step = blockDim.x * gridDim.x; for (; id < size; id += step) { atomicAdd(&result[0], data[id]); atomicMin(&result[1], data[id]); atomicMax(&result[2], data[id]); cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceSum{}); cudf::genericAtomicOperation(&result[4], data[id], cudf::DeviceMin{}); cudf::genericAtomicOperation(&result[5], data[id], cudf::DeviceMax{}); } } template <typename T, typename BinaryOp> constexpr inline bool is_timestamp_sum() { return cudf::is_timestamp<T>() && std::is_same_v<BinaryOp, cudf::DeviceSum>; } // Disable SUM of TIMESTAMP types template <typename T, typename BinaryOp, std::enable_if_t<is_timestamp_sum<T, BinaryOp>()>* = nullptr> __device__ T atomic_op(T* addr, T const& value, BinaryOp op) { return {}; } template <typename T, typename BinaryOp, std::enable_if_t<!is_timestamp_sum<T, BinaryOp>()>* = nullptr> __device__ T atomic_op(T* addr, T const& value, BinaryOp op) { T old_value = *addr; T assumed; do { assumed = old_value; T new_value = op(old_value, value); old_value = atomicCAS(addr, assumed, new_value); } while (assumed != old_value); return old_value; } template <typename T> __global__ void gpu_atomicCAS_test(T* result, T* data, size_t size) { size_t id = blockIdx.x * blockDim.x + threadIdx.x; size_t step = blockDim.x * gridDim.x; for (; id < size; id += step) { atomic_op(&result[0], data[id], cudf::DeviceSum{}); atomic_op(&result[1], data[id], cudf::DeviceMin{}); atomic_op(&result[2], data[id], cudf::DeviceMax{}); atomic_op(&result[3], data[id], cudf::DeviceSum{}); atomic_op(&result[4], data[id], cudf::DeviceMin{}); atomic_op(&result[5], data[id], cudf::DeviceMax{}); } } template <typename T> std::enable_if_t<!cudf::is_timestamp<T>(), T> accumulate(cudf::host_span<T const> xs) { return std::accumulate(xs.begin(), xs.end(), T{0}); } template <typename T> std::enable_if_t<cudf::is_timestamp<T>(), T> accumulate(cudf::host_span<T const> xs) { auto ys = std::vector<typename T::rep>(xs.size()); std::transform( xs.begin(), xs.end(), ys.begin(), [](T const& ts) { return ts.time_since_epoch().count(); }); return T{typename T::duration{std::accumulate(ys.begin(), ys.end(), 0)}}; } template <typename T> struct AtomicsTest : public cudf::test::BaseFixture { void atomic_test(std::vector<int> const& v_input, bool is_cas_test, int block_size = 0, int grid_size = 1) { size_t vec_size = v_input.size(); // use transform from 
thrust::host_vector<int> instead. thrust::host_vector<T> v(vec_size); std::transform(v_input.begin(), v_input.end(), v.begin(), [](int x) { T t = cudf::test::make_type_param_scalar<T>(x); return t; }); T exact[3]; exact[0] = accumulate<T>(v); exact[1] = *(std::min_element(v.begin(), v.end())); exact[2] = *(std::max_element(v.begin(), v.end())); thrust::host_vector<T> result_init(9); // +3 padding for int8 tests result_init[0] = cudf::test::make_type_param_scalar<T>(0); if constexpr (cudf::is_chrono<T>()) { result_init[1] = T::max(); result_init[2] = T::min(); } else { result_init[1] = std::numeric_limits<T>::max(); result_init[2] = std::numeric_limits<T>::min(); } result_init[3] = result_init[0]; result_init[4] = result_init[1]; result_init[5] = result_init[2]; auto dev_data = cudf::detail::make_device_uvector_sync( v, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); auto dev_result = cudf::detail::make_device_uvector_sync( result_init, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); if (block_size == 0) { block_size = vec_size; } if (is_cas_test) { gpu_atomicCAS_test<<<grid_size, block_size, 0, cudf::get_default_stream().value()>>>( dev_result.data(), dev_data.data(), vec_size); } else { gpu_atomic_test<<<grid_size, block_size, 0, cudf::get_default_stream().value()>>>( dev_result.data(), dev_data.data(), vec_size); } auto host_result = cudf::detail::make_host_vector_sync(dev_result, cudf::get_default_stream()); CUDF_CHECK_CUDA(cudf::get_default_stream().value()); if (!is_timestamp_sum<T, cudf::DeviceSum>()) { EXPECT_EQ(host_result[0], exact[0]) << "atomicAdd test failed"; } EXPECT_EQ(host_result[1], exact[1]) << "atomicMin test failed"; EXPECT_EQ(host_result[2], exact[2]) << "atomicMax test failed"; if (!is_timestamp_sum<T, cudf::DeviceSum>()) { EXPECT_EQ(host_result[3], exact[0]) << "atomicAdd test(2) failed"; } EXPECT_EQ(host_result[4], exact[1]) << "atomicMin test(2) failed"; EXPECT_EQ(host_result[5], exact[2]) << "atomicMax test(2) failed"; } }; TYPED_TEST_SUITE(AtomicsTest, cudf::test::FixedWidthTypesWithoutFixedPoint); // tests for atomicAdd/Min/Max TYPED_TEST(AtomicsTest, atomicOps) { bool is_cas_test = false; std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); this->atomic_test(input_array, is_cas_test); std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33}); this->atomic_test(input_array2, is_cas_test); } // tests for atomicCAS TYPED_TEST(AtomicsTest, atomicCAS) { bool is_cas_test = true; std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); this->atomic_test(input_array, is_cas_test); std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33}); this->atomic_test(input_array2, is_cas_test); } // tests for atomicAdd/Min/Max TYPED_TEST(AtomicsTest, atomicOpsGrid) { bool is_cas_test = false; int block_size = 3; int grid_size = 4; std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); this->atomic_test(input_array, is_cas_test, block_size, grid_size); std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33}); this->atomic_test(input_array2, is_cas_test, block_size, grid_size); } // tests for atomicCAS TYPED_TEST(AtomicsTest, atomicCASGrid) { bool is_cas_test = true; int block_size = 3; int grid_size = 4; std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); this->atomic_test(input_array, is_cas_test, block_size, grid_size); std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33}); this->atomic_test(input_array2, is_cas_test, block_size, grid_size); } // tests for large 
array TYPED_TEST(AtomicsTest, atomicOpsRandom) { bool is_cas_test = false; int block_size = 256; int grid_size = 64; std::vector<int> input_array(grid_size * block_size); std::default_random_engine engine; std::uniform_int_distribution<> dist(-10, 10); std::generate(input_array.begin(), input_array.end(), [&]() { return dist(engine); }); this->atomic_test(input_array, is_cas_test, block_size, grid_size); } TYPED_TEST(AtomicsTest, atomicCASRandom) { bool is_cas_test = true; int block_size = 256; int grid_size = 64; std::vector<int> input_array(grid_size * block_size); std::default_random_engine engine; std::uniform_int_distribution<> dist(-10, 10); std::generate(input_array.begin(), input_array.end(), [&]() { return dist(engine); }); this->atomic_test(input_array, is_cas_test, block_size, grid_size); } CUDF_TEST_PROGRAM_MAIN()
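The do/while loop in atomic_op above is the classic compare-and-swap retry pattern; a host-side analogue with std::atomic (illustrative only, not part of the tests) shows the same logic without CUDA:

#include <atomic>

// Atomically apply `op` to `target`, retrying whenever another thread updates the
// value between the read and the exchange; mirrors atomicCAS(addr, assumed, new_value).
template <typename T, typename BinaryOp>
T atomic_apply(std::atomic<T>& target, T value, BinaryOp op)
{
  T old_value = target.load();
  T new_value = op(old_value, value);
  // On failure, compare_exchange_weak reloads old_value with the current contents.
  while (!target.compare_exchange_weak(old_value, new_value)) {
    new_value = op(old_value, value);
  }
  return old_value;  // previous value, as in the device-side CAS loop
}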
0
rapidsai_public_repos/cudf/cpp/tests
rapidsai_public_repos/cudf/cpp/tests/streams/unary_test.cpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cudf/unary.hpp>

#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/default_stream.hpp>

class UnaryTest : public cudf::test::BaseFixture {};

TEST_F(UnaryTest, UnaryOperation)
{
  cudf::test::fixed_width_column_wrapper<int32_t> const column{10, 20, 30, 40, 50};
  cudf::unary_operation(column, cudf::unary_operator::ABS, cudf::test::get_default_stream());
}

TEST_F(UnaryTest, IsNull)
{
  cudf::test::fixed_width_column_wrapper<int32_t> const column{10, 20, 30, 40, 50};
  cudf::is_null(column, cudf::test::get_default_stream());
}

TEST_F(UnaryTest, IsValid)
{
  cudf::test::fixed_width_column_wrapper<int32_t> const column{10, 20, 30, 40, 50};
  cudf::is_valid(column, cudf::test::get_default_stream());
}

TEST_F(UnaryTest, Cast)
{
  cudf::test::fixed_width_column_wrapper<int32_t> const column{10, 20, 30, 40, 50};
  cudf::cast(column, cudf::data_type{cudf::type_id::INT64}, cudf::test::get_default_stream());
}

TEST_F(UnaryTest, IsNan)
{
  cudf::test::fixed_width_column_wrapper<float> const column{10, 20, 30, 40, 50};
  cudf::is_nan(column, cudf::test::get_default_stream());
}

TEST_F(UnaryTest, IsNotNan)
{
  cudf::test::fixed_width_column_wrapper<float> const column{10, 20, 30, 40, 50};
  cudf::is_not_nan(column, cudf::test::get_default_stream());
}
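These stream tests drop the returned columns; as a hedged reminder, each call above returns an owning std::unique_ptr<cudf::column>, for example:

// Sketch: cast an INT32 column to INT64 and hand back the owning column.
std::unique_ptr<cudf::column> cast_to_int64(cudf::column_view const& input)
{
  auto result = cudf::cast(input, cudf::data_type{cudf::type_id::INT64});
  // result->view() can be passed on to further libcudf calls.
  return result;
}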
0
rapidsai_public_repos/cudf/cpp/tests
rapidsai_public_repos/cudf/cpp/tests/streams/sorting_test.cpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_view.hpp> #include <cudf/sorting.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/default_stream.hpp> class SortingTest : public cudf::test::BaseFixture {}; TEST_F(SortingTest, SortedOrder) { cudf::test::fixed_width_column_wrapper<int32_t> const column{10, 20, 30, 40, 50}; cudf::table_view const tbl{{column}}; cudf::sorted_order(tbl, {}, {}, cudf::test::get_default_stream()); } TEST_F(SortingTest, StableSortedOrder) { cudf::test::fixed_width_column_wrapper<int32_t> const column{10, 20, 30, 40, 50}; cudf::table_view const tbl{{column}}; cudf::stable_sorted_order(tbl, {}, {}, cudf::test::get_default_stream()); } TEST_F(SortingTest, IsSorted) { cudf::test::fixed_width_column_wrapper<int32_t> const column{10, 20, 30, 40, 50}; cudf::table_view const tbl{{column}}; cudf::is_sorted(tbl, {}, {}, cudf::test::get_default_stream()); } TEST_F(SortingTest, Sort) { cudf::test::fixed_width_column_wrapper<int32_t> const column{10, 20, 30, 40, 50}; cudf::table_view const tbl{{column}}; cudf::sort(tbl, {}, {}, cudf::test::get_default_stream()); } TEST_F(SortingTest, SortByKey) { cudf::test::fixed_width_column_wrapper<int32_t> const values_col{10, 20, 30, 40, 50}; cudf::table_view const values{{values_col}}; cudf::test::fixed_width_column_wrapper<int32_t> const keys_col{10, 20, 30, 40, 50}; cudf::table_view const keys{{keys_col}}; cudf::sort_by_key(values, keys, {}, {}, cudf::test::get_default_stream()); } TEST_F(SortingTest, StableSortByKey) { cudf::test::fixed_width_column_wrapper<int32_t> const values_col{10, 20, 30, 40, 50}; cudf::table_view const values{{values_col}}; cudf::test::fixed_width_column_wrapper<int32_t> const keys_col{10, 20, 30, 40, 50}; cudf::table_view const keys{{keys_col}}; cudf::stable_sort_by_key(values, keys, {}, {}, cudf::test::get_default_stream()); } TEST_F(SortingTest, Rank) { cudf::test::fixed_width_column_wrapper<int32_t> const column{10, 20, 30, 40, 50}; cudf::rank(column, cudf::rank_method::AVERAGE, cudf::order::ASCENDING, cudf::null_policy::EXCLUDE, cudf::null_order::AFTER, false, cudf::test::get_default_stream()); } TEST_F(SortingTest, SegmentedSortedOrder) { cudf::test::fixed_width_column_wrapper<int32_t> const keys_col{9, 8, 7, 6, 5, 4, 3, 2, 1, 0}; cudf::table_view const keys{{keys_col}}; cudf::test::fixed_width_column_wrapper<int32_t> const segment_offsets{3, 7}; cudf::segmented_sorted_order(keys, segment_offsets, {}, {}, cudf::test::get_default_stream()); } TEST_F(SortingTest, StableSegmentedSortedOrder) { cudf::test::fixed_width_column_wrapper<int32_t> const keys_col{9, 8, 7, 6, 5, 4, 3, 2, 1, 0}; cudf::table_view const keys{{keys_col}}; cudf::test::fixed_width_column_wrapper<int32_t> const segment_offsets{3, 7}; cudf::stable_segmented_sorted_order( keys, segment_offsets, {}, {}, cudf::test::get_default_stream()); } TEST_F(SortingTest, SegmentedSortByKey) { cudf::test::fixed_width_column_wrapper<int32_t> 
const keys_col{9, 8, 7, 6, 5, 4, 3, 2, 1, 0}; cudf::table_view const keys{{keys_col}}; cudf::test::fixed_width_column_wrapper<int32_t> const values_col{7, 6, 9, 3, 4, 5, 1, 2, 0, 4}; cudf::table_view const values{{values_col}}; cudf::test::fixed_width_column_wrapper<int32_t> const segment_offsets{0, 3, 7, 10}; cudf::segmented_sort_by_key( values, keys, segment_offsets, {}, {}, cudf::test::get_default_stream()); } TEST_F(SortingTest, StableSegmentedSortByKey) { cudf::test::fixed_width_column_wrapper<int32_t> const keys_col{9, 8, 7, 6, 5, 4, 3, 2, 1, 0}; cudf::table_view const keys{{keys_col}}; cudf::test::fixed_width_column_wrapper<int32_t> const values_col{7, 6, 9, 3, 4, 5, 1, 2, 0, 4}; cudf::table_view const values{{values_col}}; cudf::test::fixed_width_column_wrapper<int32_t> const segment_offsets{0, 3, 7, 10}; cudf::stable_segmented_sort_by_key( values, keys, segment_offsets, {}, {}, cudf::test::get_default_stream()); }
0
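A hedged illustrative addition, not part of the dumped sorting_test.cpp above: assuming its includes and the SortingTest fixture, the same cudf::sorted_order API can also be driven with explicit per-column ordering while staying on the test stream.

// Hypothetical extra case; reuses the SortingTest fixture and headers from the file above.
TEST_F(SortingTest, SortedOrderExplicitOrder)
{
  cudf::test::fixed_width_column_wrapper<int32_t> const column{10, 20, 30, 40, 50};
  cudf::table_view const tbl{{column}};
  // Explicit column order and null precedence instead of the defaulted empty vectors.
  cudf::sorted_order(tbl,
                     {cudf::order::DESCENDING},
                     {cudf::null_order::AFTER},
                     cudf::test::get_default_stream());
}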
rapidsai_public_repos/cudf/cpp/tests
rapidsai_public_repos/cudf/cpp/tests/streams/groupby_test.cpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <tests/groupby/groupby_test_util.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/type_lists.hpp> #include <cudf/groupby.hpp> using K = int32_t; // Key type. template <typename V> struct groupby_stream_test : public cudf::test::BaseFixture { cudf::test::fixed_width_column_wrapper<K> keys{1, 2, 3, 1, 2, 2, 1, 3, 3, 2}; cudf::test::fixed_width_column_wrapper<V> vals{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; void test_groupby(std::unique_ptr<cudf::groupby_aggregation>&& agg, force_use_sort_impl use_sort = force_use_sort_impl::NO, cudf::null_policy include_null_keys = cudf::null_policy::INCLUDE, cudf::sorted keys_are_sorted = cudf::sorted::NO) { auto requests = [&] { auto requests = std::vector<cudf::groupby::aggregation_request>{}; requests.push_back(cudf::groupby::aggregation_request{}); requests.front().values = vals; if (use_sort == force_use_sort_impl::YES) { requests.front().aggregations.push_back( cudf::make_nth_element_aggregation<cudf::groupby_aggregation>(0)); } requests.front().aggregations.push_back(std::move(agg)); return requests; }(); auto gby = cudf::groupby::groupby{cudf::table_view{{keys}}, include_null_keys, keys_are_sorted, {}, {}}; gby.aggregate(requests, cudf::test::get_default_stream()); // No need to verify results, for stream test. } }; TYPED_TEST_SUITE(groupby_stream_test, cudf::test::AllTypes); TYPED_TEST(groupby_stream_test, test_count) { auto const make_count_agg = [&](cudf::null_policy include_nulls = cudf::null_policy::EXCLUDE) { return cudf::make_count_aggregation<cudf::groupby_aggregation>(include_nulls); }; this->test_groupby(make_count_agg()); this->test_groupby(make_count_agg(), force_use_sort_impl::YES); this->test_groupby(make_count_agg(cudf::null_policy::INCLUDE)); }
0
rapidsai_public_repos/cudf/cpp/tests
rapidsai_public_repos/cudf/cpp/tests/streams/dictionary_test.cpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/dictionary/dictionary_column_view.hpp> #include <cudf/dictionary/dictionary_factories.hpp> #include <cudf/dictionary/encode.hpp> #include <cudf/dictionary/search.hpp> #include <cudf/dictionary/update_keys.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/default_stream.hpp> class DictionaryTest : public cudf::test::BaseFixture {}; TEST_F(DictionaryTest, Encode) { cudf::test::fixed_width_column_wrapper<int> col({1, 2, 3, 4, 5}); cudf::data_type int32_type(cudf::type_id::UINT32); cudf::column_view col_view = col; cudf::dictionary::encode(col_view, int32_type, cudf::test::get_default_stream()); } TEST_F(DictionaryTest, Decode) { // keys = {0, 2, 6}, indices = {0, 1, 1, 2, 2} std::vector<int32_t> elements{0, 2, 2, 6, 6}; cudf::test::dictionary_column_wrapper<int32_t> dict_col(elements.begin(), elements.end()); cudf::dictionary_column_view dict_col_view = dict_col; cudf::dictionary::decode(dict_col_view, cudf::test::get_default_stream()); } TEST_F(DictionaryTest, GetIndex) { std::vector<int32_t> elements{0, 2, 2, 6, 6}; cudf::test::dictionary_column_wrapper<int32_t> dict_col(elements.begin(), elements.end()); cudf::dictionary_column_view dict_col_view = dict_col; cudf::numeric_scalar<int32_t> key_scalar(2, true, cudf::test::get_default_stream()); cudf::dictionary::get_index(dict_col_view, key_scalar, cudf::test::get_default_stream()); } TEST_F(DictionaryTest, AddKeys) { std::vector<int32_t> elements{0, 2, 2, 6, 6}; cudf::test::dictionary_column_wrapper<int32_t> dict_col(elements.begin(), elements.end()); cudf::dictionary_column_view dict_col_view = dict_col; cudf::test::fixed_width_column_wrapper<int> new_keys_col({8, 9}); cudf::dictionary::add_keys(dict_col_view, new_keys_col, cudf::test::get_default_stream()); } TEST_F(DictionaryTest, RemoveKeys) { std::vector<int32_t> elements{0, 2, 2, 6, 6}; cudf::test::dictionary_column_wrapper<int32_t> dict_col(elements.begin(), elements.end()); cudf::dictionary_column_view dict_col_view = dict_col; cudf::test::fixed_width_column_wrapper<int> keys_to_remove_col({2}); cudf::dictionary::remove_keys( dict_col_view, keys_to_remove_col, cudf::test::get_default_stream()); } TEST_F(DictionaryTest, RemoveUnsedKeys) { std::vector<int32_t> elements{0, 2, 2, 6, 6}; cudf::test::dictionary_column_wrapper<int32_t> dict_col(elements.begin(), elements.end()); cudf::dictionary_column_view dict_col_view = dict_col; cudf::dictionary::remove_unused_keys(dict_col_view, cudf::test::get_default_stream()); } TEST_F(DictionaryTest, SetKeys) { std::vector<int32_t> elements{0, 2, 2, 6, 6}; cudf::test::dictionary_column_wrapper<int32_t> dict_col(elements.begin(), elements.end()); cudf::dictionary_column_view dict_col_view = dict_col; cudf::test::fixed_width_column_wrapper<int> keys_col({2, 6}); cudf::dictionary::set_keys(dict_col_view, keys_col, cudf::test::get_default_stream()); } TEST_F(DictionaryTest, MatchDictionaries) { 
std::vector<int32_t> elements_a{0, 2, 2, 6, 6}; cudf::test::dictionary_column_wrapper<int32_t> dict_col_a(elements_a.begin(), elements_a.end()); cudf::dictionary_column_view dict_col_view_a = dict_col_a; std::vector<int32_t> elements_b{1, 3, 4, 5, 5}; cudf::test::dictionary_column_wrapper<int32_t> dict_col_b(elements_b.begin(), elements_b.end()); cudf::dictionary_column_view dict_col_view_b = dict_col_b; std::vector<cudf::dictionary_column_view> dicts = {dict_col_view_a, dict_col_view_b}; cudf::dictionary::match_dictionaries(dicts, cudf::test::get_default_stream()); }
0
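A hedged sketch, not part of the dumped dictionary_test.cpp above: assuming its includes and the DictionaryTest fixture, encode and decode can be chained on the test stream to show a round trip through the same APIs.

// Hypothetical round-trip case; reuses the DictionaryTest fixture and headers from the file above.
TEST_F(DictionaryTest, EncodeDecodeRoundTrip)
{
  cudf::test::fixed_width_column_wrapper<int32_t> col({1, 2, 2, 3, 3});
  // Encode with unsigned 32-bit indices, then decode the resulting dictionary column.
  auto encoded = cudf::dictionary::encode(
    col, cudf::data_type{cudf::type_id::UINT32}, cudf::test::get_default_stream());
  cudf::dictionary::decode(cudf::dictionary_column_view(encoded->view()),
                           cudf::test::get_default_stream());
}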
rapidsai_public_repos/cudf/cpp/tests
rapidsai_public_repos/cudf/cpp/tests/streams/binaryop_test.cpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <tests/binaryop/util/runtime_support.h> #include <cudf/binaryop.hpp> #include <cudf/column/column_view.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/default_stream.hpp> class BinaryopTest : public cudf::test::BaseFixture {}; TEST_F(BinaryopTest, ColumnColumn) { cudf::test::fixed_width_column_wrapper<int32_t> lhs{10, 20, 30, 40, 50}; cudf::test::fixed_width_column_wrapper<int32_t> rhs{15, 25, 35, 45, 55}; cudf::binary_operation(lhs, rhs, cudf::binary_operator::ADD, cudf::data_type(cudf::type_to_id<int32_t>()), cudf::test::get_default_stream()); } TEST_F(BinaryopTest, ColumnScalar) { cudf::test::fixed_width_column_wrapper<int32_t> lhs{10, 20, 30, 40, 50}; cudf::numeric_scalar<int32_t> rhs{23, true, cudf::test::get_default_stream()}; cudf::binary_operation(lhs, rhs, cudf::binary_operator::ADD, cudf::data_type(cudf::type_to_id<int32_t>()), cudf::test::get_default_stream()); } TEST_F(BinaryopTest, ScalarColumn) { cudf::numeric_scalar<int32_t> lhs{42, true, cudf::test::get_default_stream()}; cudf::test::fixed_width_column_wrapper<int32_t> rhs{15, 25, 35, 45, 55}; cudf::binary_operation(lhs, rhs, cudf::binary_operator::ADD, cudf::data_type(cudf::type_to_id<int32_t>()), cudf::test::get_default_stream()); } class BinaryopPTXTest : public BinaryopTest { protected: void SetUp() override { if (!can_do_runtime_jit()) { GTEST_SKIP() << "Skipping tests that require 11.5 runtime"; } } }; TEST_F(BinaryopPTXTest, ColumnColumnPTX) { cudf::test::fixed_width_column_wrapper<int32_t> lhs{10, 20, 30, 40, 50}; cudf::test::fixed_width_column_wrapper<int64_t> rhs{15, 25, 35, 45, 55}; // c = a*a*a + b*b char const* ptx = R"***( // // Generated by NVIDIA NVVM Compiler // // Compiler Build ID: CL-24817639 // Cuda compilation tools, release 10.0, V10.0.130 // Based on LLVM 3.4svn // .version 6.3 .target sm_70 .address_size 64 // .globl _ZN8__main__7add$241Eix .common .global .align 8 .u64 _ZN08NumbaEnv8__main__7add$241Eix; .common .global .align 8 .u64 _ZN08NumbaEnv5numba7targets7numbers14int_power_impl12$3clocals$3e13int_power$242Exx; .visible .func (.param .b32 func_retval0) _ZN8__main__7add$241Eix( .param .b64 _ZN8__main__7add$241Eix_param_0, .param .b32 _ZN8__main__7add$241Eix_param_1, .param .b64 _ZN8__main__7add$241Eix_param_2 ) { .reg .b32 %r<3>; .reg .b64 %rd<8>; ld.param.u64 %rd1, [_ZN8__main__7add$241Eix_param_0]; ld.param.u32 %r1, [_ZN8__main__7add$241Eix_param_1]; ld.param.u64 %rd2, [_ZN8__main__7add$241Eix_param_2]; cvt.s64.s32 %rd3, %r1; mul.wide.s32 %rd4, %r1, %r1; mul.lo.s64 %rd5, %rd4, %rd3; mul.lo.s64 %rd6, %rd2, %rd2; add.s64 %rd7, %rd6, %rd5; st.u64 [%rd1], %rd7; mov.u32 %r2, 0; st.param.b32 [func_retval0+0], %r2; ret; } )***"; cudf::binary_operation( lhs, rhs, ptx, cudf::data_type(cudf::type_to_id<int32_t>()), cudf::test::get_default_stream()); cudf::binary_operation(lhs, rhs, ptx, 
cudf::data_type(cudf::type_to_id<int64_t>()), cudf::test::get_default_stream()); }
0
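A hedged illustrative addition, not part of the dumped binaryop_test.cpp above: assuming its includes and the BinaryopTest fixture, the same binary_operation entry point accepts a comparison operator whose output type differs from the inputs.

// Hypothetical extra case; reuses the BinaryopTest fixture and headers from the file above.
TEST_F(BinaryopTest, ColumnColumnLess)
{
  cudf::test::fixed_width_column_wrapper<int32_t> lhs{10, 20, 30, 40, 50};
  cudf::test::fixed_width_column_wrapper<int32_t> rhs{15, 25, 35, 45, 55};
  // LESS produces BOOL8 output; the call still runs on the test stream.
  cudf::binary_operation(lhs,
                         rhs,
                         cudf::binary_operator::LESS,
                         cudf::data_type(cudf::type_id::BOOL8),
                         cudf::test::get_default_stream());
}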
rapidsai_public_repos/cudf/cpp/tests
rapidsai_public_repos/cudf/cpp/tests/streams/concatenate_test.cpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/concatenate.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/default_stream.hpp> class ConcatenateTest : public cudf::test::BaseFixture {}; TEST_F(ConcatenateTest, Column) { cudf::test::fixed_width_column_wrapper<int> const input1({0, 0, 0, 0, 0}); cudf::test::fixed_width_column_wrapper<int> const input2({1, 1, 1, 1, 1}); std::vector<cudf::column_view> views{input1, input2}; auto result = cudf::concatenate(views, cudf::test::get_default_stream()); } TEST_F(ConcatenateTest, Table) { cudf::test::fixed_width_column_wrapper<int> const input1({0, 0, 0, 0, 0}); cudf::test::fixed_width_column_wrapper<int> const input2({1, 1, 1, 1, 1}); cudf::table_view tbl1({input1, input2}); cudf::table_view tbl2({input2, input1}); std::vector<cudf::table_view> views{tbl1, tbl2}; auto result = cudf::concatenate(views, cudf::test::get_default_stream()); } TEST_F(ConcatenateTest, Masks) { cudf::test::fixed_width_column_wrapper<int> const input1( {{0, 0, 0, 0, 0}, {false, false, false, false, false}}); cudf::test::fixed_width_column_wrapper<int> const input2( {{0, 0, 0, 0, 0}, {true, true, true, true, true}}); std::vector<cudf::column_view> views{input1, input2}; auto result = cudf::concatenate_masks(views, cudf::test::get_default_stream()); }
0
rapidsai_public_repos/cudf/cpp/tests
rapidsai_public_repos/cudf/cpp/tests/streams/interop_test.cpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/interop.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/scalar/scalar_factories.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/default_stream.hpp> struct ArrowTest : public cudf::test::BaseFixture {}; TEST_F(ArrowTest, ToArrow) { int32_t const value{42}; auto col = cudf::test::fixed_width_column_wrapper<int32_t>{{value}}; cudf::table_view tbl{{col}}; std::vector<cudf::column_metadata> metadata{{""}}; cudf::to_arrow(tbl, metadata, cudf::test::get_default_stream()); } TEST_F(ArrowTest, FromArrow) { std::vector<int64_t> host_values = {1, 2, 3, 5, 6, 7, 8}; std::vector<bool> host_validity = {true, true, true, false, true, true, true}; arrow::Int64Builder builder; auto status = builder.AppendValues(host_values, host_validity); auto maybe_array = builder.Finish(); auto array = *maybe_array; auto field = arrow::field("", arrow::int32()); auto schema = arrow::schema({field}); auto table = arrow::Table::Make(schema, {array}); cudf::from_arrow(*table, cudf::test::get_default_stream()); } TEST_F(ArrowTest, ToArrowScalar) { int32_t const value{42}; auto cudf_scalar = cudf::make_fixed_width_scalar<int32_t>(value, cudf::test::get_default_stream()); cudf::column_metadata metadata{""}; cudf::to_arrow(*cudf_scalar, metadata, cudf::test::get_default_stream()); } TEST_F(ArrowTest, FromArrowScalar) { int32_t const value{42}; auto arrow_scalar = arrow::MakeScalar(value); cudf::from_arrow(*arrow_scalar, cudf::test::get_default_stream()); }
0
rapidsai_public_repos/cudf/cpp/tests
rapidsai_public_repos/cudf/cpp/tests/streams/lists_test.cpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/default_stream.hpp> #include <cudf/lists/combine.hpp> #include <cudf/lists/contains.hpp> #include <cudf/lists/count_elements.hpp> #include <cudf/lists/extract.hpp> #include <cudf/lists/filling.hpp> #include <cudf/lists/gather.hpp> #include <cudf/lists/reverse.hpp> #include <cudf/lists/set_operations.hpp> #include <cudf/lists/sorting.hpp> #include <cudf/lists/stream_compaction.hpp> class ListTest : public cudf::test::BaseFixture {}; TEST_F(ListTest, ConcatenateRows) { cudf::test::lists_column_wrapper<int> list_col_1{{0, 1}, {2, 3}, {4, 5}}; cudf::test::lists_column_wrapper<int> list_col_2{{0, 1}, {2, 3}, {4, 5}}; cudf::table_view lists_table({list_col_1, list_col_2}); cudf::lists::concatenate_rows( lists_table, cudf::lists::concatenate_null_policy::IGNORE, cudf::test::get_default_stream()); } TEST_F(ListTest, ConcatenateListElements) { cudf::test::lists_column_wrapper<int> ll_column{{{0, 1}, {2, 3}}, {{4, 5}, {6, 7}}}; cudf::lists::concatenate_list_elements( ll_column, cudf::lists::concatenate_null_policy::IGNORE, cudf::test::get_default_stream()); } TEST_F(ListTest, ContainsNulls) { cudf::test::lists_column_wrapper<int> list_col{{0, 1}, {2, 3}, {4, 5}}; cudf::lists::contains_nulls(list_col, cudf::test::get_default_stream()); } TEST_F(ListTest, ContainsSearchKey) { cudf::test::lists_column_wrapper<int> list_col{{0, 1}, {2, 3}, {4, 5}}; cudf::numeric_scalar<int32_t> search_key(2, true, cudf::test::get_default_stream()); cudf::lists::contains(list_col, search_key, cudf::test::get_default_stream()); } TEST_F(ListTest, ContainsSearchKeys) { cudf::test::lists_column_wrapper<int> list_col{{0, 1}, {2, 3}, {4, 5}}; cudf::test::fixed_width_column_wrapper<int> search_keys({1, 2, 3}); cudf::lists::contains(list_col, search_keys, cudf::test::get_default_stream()); } TEST_F(ListTest, IndexOfSearchKey) { cudf::test::lists_column_wrapper<int> list_col{{0, 1}, {2, 3}, {4, 5}}; cudf::numeric_scalar<int32_t> search_key(2, true, cudf::test::get_default_stream()); cudf::lists::index_of(list_col, search_key, cudf::lists::duplicate_find_option::FIND_FIRST, cudf::test::get_default_stream()); } TEST_F(ListTest, IndexOfSearchKeys) { cudf::test::lists_column_wrapper<int> list_col{{0, 1}, {2, 3}, {4, 5}}; cudf::test::fixed_width_column_wrapper<int> search_keys({1, 2, 3}); cudf::lists::index_of(list_col, search_keys, cudf::lists::duplicate_find_option::FIND_FIRST, cudf::test::get_default_stream()); } TEST_F(ListTest, CountElements) { cudf::test::lists_column_wrapper<int> list_col{{0, 1}, {2, 3, 7}, {4, 5}}; cudf::lists::count_elements(list_col, cudf::test::get_default_stream()); } TEST_F(ListTest, ExtractListElementFromIndex) { cudf::test::lists_column_wrapper<int> list_col{{0, 1}, {2, 3, 7}, {4, 5}}; cudf::lists::extract_list_element(list_col, -1, cudf::test::get_default_stream()); } TEST_F(ListTest, ExtractListElementFromIndices) { 
cudf::test::lists_column_wrapper<int> list_col{{0, 1}, {2, 3, 7}, {4, 5}}; cudf::test::fixed_width_column_wrapper<int> indices({-1, -2, -1}); cudf::lists::extract_list_element(list_col, indices, cudf::test::get_default_stream()); } TEST_F(ListTest, SegmentedGather) { cudf::test::lists_column_wrapper<int> list_col{{0, 1}, {2, 3, 7, 8}, {4, 5}}; cudf::test::lists_column_wrapper<int> gather_map_list{{0}, {1, 2}, {1}}; cudf::lists::segmented_gather(list_col, gather_map_list, cudf::out_of_bounds_policy::DONT_CHECK, cudf::test::get_default_stream()); } TEST_F(ListTest, Sequences) { cudf::test::fixed_width_column_wrapper<int> starts({0, 1, 2, 3, 4}); cudf::test::fixed_width_column_wrapper<int> sizes({0, 1, 2, 2, 1}); cudf::lists::sequences(starts, sizes, cudf::test::get_default_stream()); } TEST_F(ListTest, SequencesWithSteps) { cudf::test::fixed_width_column_wrapper<int> starts({0, 1, 2, 3, 4}); cudf::test::fixed_width_column_wrapper<int> steps({2, 1, 1, 1, -3}); cudf::test::fixed_width_column_wrapper<int> sizes({0, 1, 2, 2, 1}); cudf::lists::sequences(starts, steps, sizes, cudf::test::get_default_stream()); } TEST_F(ListTest, Reverse) { cudf::test::lists_column_wrapper<int> list_col{{0, 1}, {2, 3, 7, 8}, {4, 5}}; cudf::lists::reverse(list_col, cudf::test::get_default_stream()); } TEST_F(ListTest, SortLists) { cudf::test::lists_column_wrapper<int> list_col{{0, 1}, {2, 3, 7, 8}, {4, 5}}; cudf::lists::sort_lists( list_col, cudf::order::DESCENDING, cudf::null_order::AFTER, cudf::test::get_default_stream()); } TEST_F(ListTest, StableSortLists) { cudf::test::lists_column_wrapper<int> list_col{{0, 1}, {2, 3, 7, 8}, {4, 5}}; cudf::lists::stable_sort_lists( list_col, cudf::order::DESCENDING, cudf::null_order::AFTER, cudf::test::get_default_stream()); } TEST_F(ListTest, ApplyBooleanMask) { cudf::test::lists_column_wrapper<int> list_col{{0, 1}, {2, 3, 7, 8}, {4, 5}}; cudf::test::lists_column_wrapper<bool> boolean_mask{{0, 1}, {1, 1, 1, 0}, {0, 1}}; cudf::lists::apply_boolean_mask(list_col, boolean_mask, cudf::test::get_default_stream()); } TEST_F(ListTest, Distinct) { cudf::test::lists_column_wrapper<int> list_col{{0, 1}, {2, 3, 7, 8}, {4, 5}}; cudf::test::lists_column_wrapper<int> boolean_mask{{0, 1}, {1, 1, 1, 0}, {0, 1}}; cudf::lists::distinct(list_col, cudf::null_equality::EQUAL, cudf::nan_equality::ALL_EQUAL, cudf::test::get_default_stream()); } TEST_F(ListTest, DifferenceDistinct) { cudf::test::lists_column_wrapper<int> list_col_a{{0, 1}, {2, 3, 7, 8}, {4, 5}}; cudf::test::lists_column_wrapper<int> list_col_b{{0, 1}, {1, 3, 6, 8}, {5}}; cudf::lists::difference_distinct(list_col_a, list_col_b, cudf::null_equality::EQUAL, cudf::nan_equality::ALL_EQUAL, cudf::test::get_default_stream()); } TEST_F(ListTest, IntersectDistinct) { cudf::test::lists_column_wrapper<int> list_col_a{{0, 1}, {2, 3, 7, 8}, {4, 5}}; cudf::test::lists_column_wrapper<int> list_col_b{{0, 1}, {1, 3, 6, 8}, {5}}; cudf::lists::intersect_distinct(list_col_a, list_col_b, cudf::null_equality::EQUAL, cudf::nan_equality::ALL_EQUAL, cudf::test::get_default_stream()); } TEST_F(ListTest, UnionDistinct) { cudf::test::lists_column_wrapper<int> list_col_a{{0, 1}, {2, 3, 7, 8}, {4, 5}}; cudf::test::lists_column_wrapper<int> list_col_b{{0, 1}, {1, 3, 6, 8}, {5}}; cudf::lists::union_distinct(list_col_a, list_col_b, cudf::null_equality::EQUAL, cudf::nan_equality::ALL_EQUAL, cudf::test::get_default_stream()); } TEST_F(ListTest, HaveOverlap) { cudf::test::lists_column_wrapper<int> list_col_a{{0, 1}, {2, 3, 7, 8}, {4, 5}}; 
cudf::test::lists_column_wrapper<int> list_col_b{{0, 1}, {1, 3, 6, 8}, {5}}; cudf::lists::have_overlap(list_col_a, list_col_b, cudf::null_equality::EQUAL, cudf::nan_equality::ALL_EQUAL, cudf::test::get_default_stream()); }
0
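A hedged illustrative addition, not part of the dumped lists_test.cpp above: assuming its includes and the ListTest fixture, index_of can also search for the last occurrence of a key on the test stream.

// Hypothetical extra case; reuses the ListTest fixture and headers from the file above.
TEST_F(ListTest, IndexOfSearchKeyFindLast)
{
  cudf::test::lists_column_wrapper<int> list_col{{0, 1}, {2, 3, 2}, {4, 5}};
  cudf::numeric_scalar<int32_t> search_key(2, true, cudf::test::get_default_stream());
  // FIND_LAST returns the position of the last match within each list row.
  cudf::lists::index_of(list_col,
                        search_key,
                        cudf::lists::duplicate_find_option::FIND_LAST,
                        cudf::test::get_default_stream());
}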
rapidsai_public_repos/cudf/cpp/tests
rapidsai_public_repos/cudf/cpp/tests/streams/hash_test.cpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/hashing.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/default_stream.hpp> class HashTest : public cudf::test::BaseFixture {}; TEST_F(HashTest, MultiValue) { cudf::test::strings_column_wrapper const strings_col( {"", "The quick brown fox", "jumps over the lazy dog.", "All work and no play makes Jack a dull boy", R"(!"#$%&'()*+,-./0123456789:;<=>?@[\]^_`{|}~)"}); using limits = std::numeric_limits<int32_t>; cudf::test::fixed_width_column_wrapper<int32_t> const ints_col( {0, 100, -100, limits::min(), limits::max()}); // Different truth values should be equal cudf::test::fixed_width_column_wrapper<bool> const bools_col1({0, 1, 1, 1, 0}); cudf::test::fixed_width_column_wrapper<bool> const bools_col2({0, 1, 2, 255, 0}); using ts = cudf::timestamp_s; cudf::test::fixed_width_column_wrapper<ts, ts::duration> const secs_col( {ts::duration::zero(), static_cast<ts::duration>(100), static_cast<ts::duration>(-100), ts::duration::min(), ts::duration::max()}); auto const input1 = cudf::table_view({strings_col, ints_col, bools_col1, secs_col}); auto const output1 = cudf::hash( input1, cudf::hash_id::HASH_MURMUR3, cudf::DEFAULT_HASH_SEED, cudf::test::get_default_stream()); }
0
rapidsai_public_repos/cudf/cpp/tests
rapidsai_public_repos/cudf/cpp/tests/streams/copying_test.cpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/default_stream.hpp> #include <cudf_test/iterator_utilities.hpp> #include <cudf_test/type_lists.hpp> #include <cudf/copying.hpp> #include <cudf/detail/null_mask.hpp> #include <limits> class CopyingTest : public cudf::test::BaseFixture {}; TEST_F(CopyingTest, Gather) { constexpr cudf::size_type source_size{1000}; auto data = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i; }); cudf::test::fixed_width_column_wrapper<int32_t> source_column(data, data + source_size); cudf::test::fixed_width_column_wrapper<int32_t> gather_map(data, data + source_size); cudf::table_view source_table({source_column}); cudf::gather(source_table, gather_map, cudf::out_of_bounds_policy::DONT_CHECK, cudf::test::get_default_stream()); } TEST_F(CopyingTest, ReverseTable) { constexpr cudf::size_type num_values{10}; auto input = cudf::test::fixed_width_column_wrapper<int32_t, int32_t>( thrust::make_counting_iterator(0), thrust::make_counting_iterator(0) + num_values); auto input_table = cudf::table_view{{input}}; cudf::reverse(input_table, cudf::test::get_default_stream()); } TEST_F(CopyingTest, ReverseColumn) { constexpr cudf::size_type num_values{10}; auto input = cudf::test::fixed_width_column_wrapper<int32_t, int32_t>( thrust::make_counting_iterator(0), thrust::make_counting_iterator(0) + num_values); cudf::reverse(input, cudf::test::get_default_stream()); } TEST_F(CopyingTest, ScatterTable) { cudf::test::fixed_width_column_wrapper<int32_t> source({1, 2, 3, 4, 5, 6}); cudf::test::fixed_width_column_wrapper<int32_t> target({10, 20, 30, 40, 50, 60, 70, 80}); cudf::test::fixed_width_column_wrapper<int32_t> scatter_map({-3, 3, 1, -1}); auto const source_table = cudf::table_view({source, source}); auto const target_table = cudf::table_view({target, target}); cudf::scatter(source_table, scatter_map, target_table, cudf::test::get_default_stream()); } TEST_F(CopyingTest, ScatterScalars) { auto const source = cudf::scalar_type_t<int32_t>(100, true, cudf::test::get_default_stream()); std::reference_wrapper<const cudf::scalar> slr_ref{source}; std::vector<std::reference_wrapper<const cudf::scalar>> source_vector{slr_ref}; cudf::test::fixed_width_column_wrapper<int32_t> target({10, 20, 30, 40, 50, 60, 70, 80}); cudf::test::fixed_width_column_wrapper<int32_t> scatter_map({-3, 3, 1, -1}); auto const target_table = cudf::table_view({target}); cudf::scatter(source_vector, scatter_map, target_table, cudf::test::get_default_stream()); } TEST_F(CopyingTest, AllocateLike) { // For same size as input cudf::size_type size = 10; auto input = cudf::make_numeric_column(cudf::data_type{cudf::type_to_id<int32_t>()}, size, cudf::mask_state::UNALLOCATED, cudf::test::get_default_stream()); cudf::allocate_like( input->view(), cudf::mask_allocation_policy::RETAIN, cudf::test::get_default_stream()); } TEST_F(CopyingTest, AllocateLikeSize) { // For same 
size as input cudf::size_type size = 10; cudf::size_type new_size = 10; auto input = cudf::make_numeric_column(cudf::data_type{cudf::type_to_id<int32_t>()}, size, cudf::mask_state::UNALLOCATED, cudf::test::get_default_stream()); cudf::allocate_like(input->view(), new_size, cudf::mask_allocation_policy::RETAIN, cudf::test::get_default_stream()); } TEST_F(CopyingTest, CopyRangeInPlace) { constexpr cudf::size_type size{1000}; cudf::test::fixed_width_column_wrapper<int32_t, int32_t> target( thrust::make_counting_iterator(0), thrust::make_counting_iterator(0) + size); auto source_elements = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i * 2; }); cudf::test::fixed_width_column_wrapper<int32_t, typename decltype(source_elements)::value_type> source(source_elements, source_elements + size); cudf::mutable_column_view target_view{target}; constexpr cudf::size_type source_begin{9}; constexpr cudf::size_type source_end{size - 50}; constexpr cudf::size_type target_begin{30}; cudf::copy_range_in_place( source, target_view, source_begin, source_end, target_begin, cudf::test::get_default_stream()); } TEST_F(CopyingTest, CopyRange) { constexpr cudf::size_type size{1000}; cudf::test::fixed_width_column_wrapper<int32_t, int32_t> target( thrust::make_counting_iterator(0), thrust::make_counting_iterator(0) + size); auto source_elements = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i * 2; }); cudf::test::fixed_width_column_wrapper<int32_t, typename decltype(source_elements)::value_type> source(source_elements, source_elements + size); cudf::mutable_column_view target_view{target}; const cudf::column_view immutable_view{target_view}; cudf::size_type source_begin{9}; cudf::size_type source_end{size - 50}; cudf::size_type target_begin{30}; cudf::copy_range(source, immutable_view, source_begin, source_end, target_begin, cudf::test::get_default_stream()); } TEST_F(CopyingTest, Shift) { auto input = cudf::test::fixed_width_column_wrapper<int32_t>{std::numeric_limits<int32_t>::min(), cudf::test::make_type_param_scalar<int32_t>(1), cudf::test::make_type_param_scalar<int32_t>(2), cudf::test::make_type_param_scalar<int32_t>(3), cudf::test::make_type_param_scalar<int32_t>(4), cudf::test::make_type_param_scalar<int32_t>(5), std::numeric_limits<int32_t>::max()}; auto fill = cudf::scalar_type_t<int32_t>( cudf::test::make_type_param_scalar<int32_t>(7), true, cudf::test::get_default_stream()); cudf::shift(input, 2, fill, cudf::test::get_default_stream()); } TEST_F(CopyingTest, SliceColumn) { cudf::test::fixed_width_column_wrapper<int32_t> col = cudf::test::fixed_width_column_wrapper<int32_t>{0, 1, 2, 3, 4, 5}; std::vector<cudf::size_type> indices{1, 3, 2, 2, 2, 5}; cudf::slice(col, indices, cudf::test::get_default_stream()); cudf::slice(col, {1, 3, 2, 2, 2, 5}, cudf::test::get_default_stream()); } TEST_F(CopyingTest, SliceTable) { cudf::test::fixed_width_column_wrapper<int32_t> col = cudf::test::fixed_width_column_wrapper<int32_t>{0, 1, 2, 3, 4, 5}; std::vector<cudf::size_type> indices{1, 3, 2, 2, 2, 5}; cudf::table_view tbl({col}); cudf::slice(tbl, indices, cudf::test::get_default_stream()); cudf::slice(tbl, {1, 3, 2, 2, 2, 5}, cudf::test::get_default_stream()); } TEST_F(CopyingTest, SplitColumn) { cudf::test::fixed_width_column_wrapper<int32_t> col = cudf::test::fixed_width_column_wrapper<int32_t>{0, 1, 2, 3, 4, 5}; std::vector<cudf::size_type> indices{1, 3, 5}; cudf::split(col, indices, cudf::test::get_default_stream()); cudf::split(col, {1, 3, 5}, 
cudf::test::get_default_stream()); } TEST_F(CopyingTest, SplitTable) { cudf::test::fixed_width_column_wrapper<int32_t> col = cudf::test::fixed_width_column_wrapper<int32_t>{0, 1, 2, 3, 4, 5}; std::vector<cudf::size_type> indices{1, 3, 5}; cudf::table_view tbl({col}); cudf::split(tbl, indices, cudf::test::get_default_stream()); cudf::split(tbl, {1, 3, 5}, cudf::test::get_default_stream()); } TEST_F(CopyingTest, CopyIfElseColumnColumn) { cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 0, 0}; cudf::test::fixed_width_column_wrapper<int32_t, int32_t> lhs_w{5, 5, 5, 5}; cudf::test::fixed_width_column_wrapper<int32_t, int32_t> rhs_w{6, 6, 6, 6}; cudf::copy_if_else(lhs_w, rhs_w, mask_w, cudf::test::get_default_stream()); } TEST_F(CopyingTest, CopyIfElseScalarColumn) { auto scalar = cudf::scalar_type_t<int32_t>( cudf::test::make_type_param_scalar<int32_t>(7), true, cudf::test::get_default_stream()); cudf::test::fixed_width_column_wrapper<int32_t, int32_t> column{5, 5, 5, 5}; cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 0, 0}; cudf::copy_if_else(scalar, column, mask_w, cudf::test::get_default_stream()); } TEST_F(CopyingTest, CopyIfElseColumnScalar) { auto scalar = cudf::scalar_type_t<int32_t>( cudf::test::make_type_param_scalar<int32_t>(7), true, cudf::test::get_default_stream()); cudf::test::fixed_width_column_wrapper<int32_t, int32_t> column{5, 5, 5, 5}; cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 0, 0}; cudf::copy_if_else(column, scalar, mask_w, cudf::test::get_default_stream()); } TEST_F(CopyingTest, CopyIfElseScalarScalar) { auto lhs = cudf::scalar_type_t<int32_t>( cudf::test::make_type_param_scalar<int32_t>(7), true, cudf::test::get_default_stream()); auto rhs = cudf::scalar_type_t<int32_t>( cudf::test::make_type_param_scalar<int32_t>(6), true, cudf::test::get_default_stream()); cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 0, 0}; cudf::copy_if_else(lhs, rhs, mask_w, cudf::test::get_default_stream()); } TEST_F(CopyingTest, BooleanMaskScatter) { cudf::test::fixed_width_column_wrapper<int32_t, int32_t> source({1, 5, 6, 8, 9}); cudf::test::fixed_width_column_wrapper<int32_t, int32_t> target( {2, 2, 3, 4, 11, 12, 7, 7, 10, 10}); cudf::test::fixed_width_column_wrapper<bool> mask( {true, false, false, false, true, true, false, true, true, false}); auto source_table = cudf::table_view({source}); auto target_table = cudf::table_view({target}); cudf::boolean_mask_scatter(source_table, target_table, mask, cudf::test::get_default_stream()); } TEST_F(CopyingTest, BooleanMaskScatterScalars) { std::vector<std::reference_wrapper<const cudf::scalar>> scalars; auto s = cudf::scalar_type_t<int32_t>(1, true, cudf::test::get_default_stream()); scalars.emplace_back(s); cudf::test::fixed_width_column_wrapper<int32_t, int32_t> target( {2, 2, 3, 4, 11, 12, 7, 7, 10, 10}); cudf::test::fixed_width_column_wrapper<bool> mask( {true, false, false, false, true, true, false, true, true, false}); auto target_table = cudf::table_view({target}); cudf::boolean_mask_scatter(scalars, target_table, mask, cudf::test::get_default_stream()); } TEST_F(CopyingTest, GetElement) { cudf::test::fixed_width_column_wrapper<int32_t> _col{1, 2}; cudf::get_element(_col, 0, cudf::test::get_default_stream()); } TEST_F(CopyingTest, Sample) { cudf::size_type const table_size = 1024; auto const n_samples = 10; auto const multi_smpl = cudf::sample_with_replacement::FALSE; auto data = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i; }); 
cudf::test::fixed_width_column_wrapper<int16_t> col1(data, data + table_size); cudf::table_view input({col1}); cudf::sample(input, n_samples, multi_smpl, 0, cudf::test::get_default_stream()); } template <typename T> using LCW = cudf::test::lists_column_wrapper<T, int32_t>; TEST_F(CopyingTest, HasNonemptyNulls) { auto const input = LCW<int32_t>{{{{1, 2, 3, 4}, cudf::test::iterators::null_at(2)}, {5}, {}, {8, 9, 10}}, cudf::test::iterators::no_nulls()} .release(); cudf::has_nonempty_nulls(*input, cudf::test::get_default_stream()); } TEST_F(CopyingTest, PurgeNonEmptyNulls) { auto const input = LCW<int32_t>{{{{1, 2, 3, 4}, cudf::test::iterators::null_at(2)}, {5}, {6, 7}, // <--- Will be set to NULL. Unsanitized row. {8, 9, 10}}, cudf::test::iterators::no_nulls()} .release(); // Set nullmask, post construction. // TODO: Once set_null_mask's public API exposes a stream parameter, use that // instead of the detail API. cudf::detail::set_null_mask( input->mutable_view().null_mask(), 2, 3, false, cudf::test::get_default_stream()); input->set_null_count(1); cudf::purge_nonempty_nulls(*input, cudf::test::get_default_stream()); }
0
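A hedged illustrative addition, not part of the dumped copying_test.cpp above: assuming its includes and the CopyingTest fixture, gather can also be exercised with out-of-range map entries nullified rather than left unchecked.

// Hypothetical extra case; reuses the CopyingTest fixture and headers from the file above.
TEST_F(CopyingTest, GatherNullify)
{
  cudf::test::fixed_width_column_wrapper<int32_t> source_column({10, 20, 30, 40, 50});
  // The last map entry is out of range; NULLIFY turns it into a null output row.
  cudf::test::fixed_width_column_wrapper<int32_t> gather_map({0, 2, 99});
  cudf::table_view source_table({source_column});
  cudf::gather(source_table,
               gather_map,
               cudf::out_of_bounds_policy::NULLIFY,
               cudf::test::get_default_stream());
}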
rapidsai_public_repos/cudf/cpp/tests
rapidsai_public_repos/cudf/cpp/tests/streams/filling_test.cpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_view.hpp> #include <cudf/filling.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/default_stream.hpp> class FillingTest : public cudf::test::BaseFixture {}; TEST_F(FillingTest, FillInPlace) { cudf::test::fixed_width_column_wrapper<int> col({0, 0, 0, 0, 0}); auto scalar = cudf::numeric_scalar<int>(5, true, cudf::test::get_default_stream()); cudf::mutable_column_view mut_view = col; cudf::fill_in_place(mut_view, 0, 4, scalar, cudf::test::get_default_stream()); } TEST_F(FillingTest, Fill) { cudf::test::fixed_width_column_wrapper<int> const col({0, 0, 0, 0, 0}); auto scalar = cudf::numeric_scalar<int>(5, true, cudf::test::get_default_stream()); cudf::fill(col, 0, 4, scalar, cudf::test::get_default_stream()); } TEST_F(FillingTest, RepeatVariable) { cudf::test::fixed_width_column_wrapper<int> const col({0, 0, 0, 0, 0}); cudf::table_view const table({col}); cudf::test::fixed_width_column_wrapper<int> const counts({1, 2, 3, 4, 5}); cudf::repeat(table, counts, cudf::test::get_default_stream()); } TEST_F(FillingTest, RepeatConst) { cudf::test::fixed_width_column_wrapper<int> const col({0, 0, 0, 0, 0}); cudf::table_view const table({col}); cudf::repeat(table, 5, cudf::test::get_default_stream()); } TEST_F(FillingTest, SequenceStep) { auto init = cudf::numeric_scalar<int>(5, true, cudf::test::get_default_stream()); auto step = cudf::numeric_scalar<int>(2, true, cudf::test::get_default_stream()); cudf::sequence(10, init, step, cudf::test::get_default_stream()); } TEST_F(FillingTest, Sequence) { auto init = cudf::numeric_scalar<int>(5, true, cudf::test::get_default_stream()); cudf::sequence(10, init, cudf::test::get_default_stream()); } TEST_F(FillingTest, CalendricalMonthSequence) { cudf::timestamp_scalar<cudf::timestamp_s> init( 1629852896L, true, cudf::test::get_default_stream()); // 2021-08-25 00:54:56 GMT cudf::calendrical_month_sequence(10, init, 2, cudf::test::get_default_stream()); }
0
rapidsai_public_repos/cudf/cpp/tests
rapidsai_public_repos/cudf/cpp/tests/streams/replace_test.cpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_view.hpp> #include <cudf/replace.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/default_stream.hpp> #include <cudf_test/type_lists.hpp> class ReplaceTest : public cudf::test::BaseFixture {}; TEST_F(ReplaceTest, ReplaceNullsColumn) { cudf::test::fixed_width_column_wrapper<int> input({{0, 0, 0, 0, 0}, {0, 0, 1, 1, 1}}); cudf::test::fixed_width_column_wrapper<int> replacement({1, 1, 1, 1, 1}); cudf::replace_nulls(input, replacement, cudf::test::get_default_stream()); } TEST_F(ReplaceTest, ReplaceNullsScalar) { cudf::test::fixed_width_column_wrapper<int> input({{0, 0, 0, 0, 0}, {0, 0, 1, 1, 1}}); auto replacement = cudf::numeric_scalar<int>(1, true, cudf::test::get_default_stream()); cudf::replace_nulls(input, replacement, cudf::test::get_default_stream()); } TEST_F(ReplaceTest, ReplaceNullsPolicy) { cudf::test::fixed_width_column_wrapper<int> input({{0, 0, 0, 0, 0}, {0, 0, 1, 1, 1}}); cudf::replace_nulls(input, cudf::replace_policy::FOLLOWING, cudf::test::get_default_stream()); } TEST_F(ReplaceTest, ReplaceNansColumn) { auto nan = std::numeric_limits<double>::quiet_NaN(); auto input_column = cudf::test::make_type_param_vector<double>({0.0, 0.0, nan, nan, nan}); cudf::test::fixed_width_column_wrapper<double> input(input_column.begin(), input_column.end()); cudf::test::fixed_width_column_wrapper<double> replacement({0, 1, 2, 3, 4}); cudf::replace_nans(input, replacement, cudf::test::get_default_stream()); } TEST_F(ReplaceTest, ReplaceNansScalar) { auto nan = std::numeric_limits<double>::quiet_NaN(); auto input_column = cudf::test::make_type_param_vector<double>({0.0, 0.0, nan, nan, nan}); cudf::test::fixed_width_column_wrapper<double> input(input_column.begin(), input_column.end()); auto replacement = cudf::numeric_scalar<double>(4, true, cudf::test::get_default_stream()); cudf::replace_nans(input, replacement, cudf::test::get_default_stream()); } TEST_F(ReplaceTest, FindAndReplaceAll) { cudf::test::fixed_width_column_wrapper<int> input({0, 0, 0, 0, 0}); cudf::test::fixed_width_column_wrapper<int> values_to_replace({0, 0, 0, 0, 0}); cudf::test::fixed_width_column_wrapper<int> replacement_values({1, 1, 1, 1, 1}); cudf::find_and_replace_all( input, values_to_replace, replacement_values, cudf::test::get_default_stream()); } TEST_F(ReplaceTest, ClampWithReplace) { cudf::test::fixed_width_column_wrapper<int> input({0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); auto low = cudf::numeric_scalar<int>(3, true, cudf::test::get_default_stream()); auto low_replace = cudf::numeric_scalar<int>(5, true, cudf::test::get_default_stream()); auto high = cudf::numeric_scalar<int>(7, true, cudf::test::get_default_stream()); auto high_replace = cudf::numeric_scalar<int>(6, true, cudf::test::get_default_stream()); cudf::clamp(input, low, low_replace, high, high_replace, cudf::test::get_default_stream()); } TEST_F(ReplaceTest, Clamp) { 
cudf::test::fixed_width_column_wrapper<int> input({0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); auto low = cudf::numeric_scalar<int>(3, true, cudf::test::get_default_stream()); auto high = cudf::numeric_scalar<int>(7, true, cudf::test::get_default_stream()); cudf::clamp(input, low, high, cudf::test::get_default_stream()); } TEST_F(ReplaceTest, NormalizeNansAndZeros) { auto nan = std::numeric_limits<double>::quiet_NaN(); auto input_column = cudf::test::make_type_param_vector<double>({-0.0, 0.0, -nan, nan, nan}); cudf::test::fixed_width_column_wrapper<double> input(input_column.begin(), input_column.end()); cudf::normalize_nans_and_zeros(static_cast<cudf::column_view>(input), cudf::test::get_default_stream()); } TEST_F(ReplaceTest, NormalizeNansAndZerosMutable) { auto nan = std::numeric_limits<double>::quiet_NaN(); auto input_column = cudf::test::make_type_param_vector<double>({-0.0, 0.0, -nan, nan, nan}); cudf::test::fixed_width_column_wrapper<double> input(input_column.begin(), input_column.end()); cudf::normalize_nans_and_zeros(static_cast<cudf::mutable_column_view>(input), cudf::test::get_default_stream()); }
0
rapidsai_public_repos/cudf/cpp/tests
rapidsai_public_repos/cudf/cpp/tests/streams/search_test.cpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_view.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/search.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/default_stream.hpp> class SearchTest : public cudf::test::BaseFixture {}; TEST_F(SearchTest, LowerBound) { cudf::test::fixed_width_column_wrapper<int32_t> column{10, 20, 30, 40, 50}; cudf::test::fixed_width_column_wrapper<int32_t> values{0, 7, 10, 11, 30, 32, 40, 47, 50, 90}; cudf::test::fixed_width_column_wrapper<cudf::size_type> expect{0, 0, 0, 1, 2, 3, 3, 4, 4, 5}; cudf::lower_bound({cudf::table_view{{column}}}, {cudf::table_view{{values}}}, {cudf::order::ASCENDING}, {cudf::null_order::BEFORE}, cudf::test::get_default_stream()); } TEST_F(SearchTest, UpperBound) { cudf::test::fixed_width_column_wrapper<int32_t> column{10, 20, 30, 40, 50}; cudf::test::fixed_width_column_wrapper<int32_t> values{0, 7, 10, 11, 30, 32, 40, 47, 50, 90}; cudf::test::fixed_width_column_wrapper<cudf::size_type> expect{0, 0, 0, 1, 2, 3, 3, 4, 4, 5}; cudf::upper_bound({cudf::table_view{{column}}}, {cudf::table_view{{values}}}, {cudf::order::ASCENDING}, {cudf::null_order::BEFORE}, cudf::test::get_default_stream()); } TEST_F(SearchTest, ContainsScalar) { cudf::test::fixed_width_column_wrapper<int32_t> column{0, 1, 17, 19, 23, 29, 71}; cudf::numeric_scalar<int32_t> scalar{23, true, cudf::test::get_default_stream()}; cudf::contains(column, scalar, cudf::test::get_default_stream()); } TEST_F(SearchTest, ContainsColumn) { cudf::test::fixed_width_column_wrapper<int32_t> haystack{0, 1, 17, 19, 23, 29, 71}; cudf::test::fixed_width_column_wrapper<int32_t> needles{17, 19, 45, 72}; cudf::test::fixed_width_column_wrapper<bool> expect{1, 1, 0, 0}; cudf::contains(haystack, needles, cudf::test::get_default_stream()); }
0
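A hedged illustrative addition, not part of the dumped search_test.cpp above: assuming its includes and the SearchTest fixture, lower_bound works the same way over a descending haystack, still on the test stream.

// Hypothetical extra case; reuses the SearchTest fixture and headers from the file above.
TEST_F(SearchTest, LowerBoundDescending)
{
  cudf::test::fixed_width_column_wrapper<int32_t> column{50, 40, 30, 20, 10};
  cudf::test::fixed_width_column_wrapper<int32_t> values{45, 25, 5};
  cudf::lower_bound(cudf::table_view{{column}},
                    cudf::table_view{{values}},
                    {cudf::order::DESCENDING},
                    {cudf::null_order::AFTER},
                    cudf::test::get_default_stream());
}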
rapidsai_public_repos/cudf/cpp/tests
rapidsai_public_repos/cudf/cpp/tests/streams/null_mask_test.cpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <tests/binaryop/util/runtime_support.h> #include <cudf/column/column_view.hpp> #include <cudf/null_mask.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/default_stream.hpp> class NullMaskTest : public cudf::test::BaseFixture {}; TEST_F(NullMaskTest, CreateNullMask) { cudf::create_null_mask(10, cudf::mask_state::ALL_VALID, cudf::test::get_default_stream()); } TEST_F(NullMaskTest, SetNullMask) { cudf::test::fixed_width_column_wrapper<bool> col({0, 1, 0, 1, 1}, {true, false, true, false, false}); cudf::set_null_mask(static_cast<cudf::mutable_column_view>(col).null_mask(), 0, 3, false, cudf::test::get_default_stream()); } TEST_F(NullMaskTest, CopyBitmask) { cudf::test::fixed_width_column_wrapper<bool> const col({0, 1, 0, 1, 1}, {true, false, true, false, false}); cudf::copy_bitmask( static_cast<cudf::column_view>(col).null_mask(), 0, 3, cudf::test::get_default_stream()); } TEST_F(NullMaskTest, CopyBitmaskFromColumn) { cudf::test::fixed_width_column_wrapper<bool> const col({0, 1, 0, 1, 1}, {true, false, true, false, false}); cudf::copy_bitmask(col, cudf::test::get_default_stream()); } TEST_F(NullMaskTest, BitMaskAnd) { cudf::test::fixed_width_column_wrapper<bool> const col1({0, 1, 0, 1, 1}, {true, false, true, false, false}); cudf::test::fixed_width_column_wrapper<bool> const col2({0, 1, 0, 1, 1}, {true, true, false, false, true}); auto tbl = cudf::table_view{{col1, col2}}; cudf::bitmask_and(tbl, cudf::test::get_default_stream()); } TEST_F(NullMaskTest, BitMaskOr) { cudf::test::fixed_width_column_wrapper<bool> const col1({0, 1, 0, 1, 1}, {true, false, true, false, false}); cudf::test::fixed_width_column_wrapper<bool> const col2({0, 1, 0, 1, 1}, {true, true, false, false, true}); auto tbl = cudf::table_view{{col1, col2}}; cudf::bitmask_or(tbl, cudf::test::get_default_stream()); } TEST_F(NullMaskTest, NullCount) { cudf::test::fixed_width_column_wrapper<bool> const col({0, 1, 0, 1, 1}, {true, true, false, false, true}); cudf::null_count( static_cast<cudf::column_view>(col).null_mask(), 0, 4, cudf::test::get_default_stream()); }
0
rapidsai_public_repos/cudf/cpp/tests/streams
rapidsai_public_repos/cudf/cpp/tests/streams/strings/extract_test.cpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/default_stream.hpp> #include <cudf/strings/extract.hpp> #include <cudf/strings/regex/regex_program.hpp> #include <string> class StringsExtractTest : public cudf::test::BaseFixture {}; TEST_F(StringsExtractTest, Extract) { auto input = cudf::test::strings_column_wrapper({"Joe Schmoe", "John Smith", "Jane Smith"}); auto view = cudf::strings_column_view(input); auto const pattern = std::string("([A-Z][a-z]+)"); auto const prog = cudf::strings::regex_program::create(pattern); cudf::strings::extract(view, *prog, cudf::test::get_default_stream()); cudf::strings::extract_all_record(view, *prog, cudf::test::get_default_stream()); }
0
rapidsai_public_repos/cudf/cpp/tests/streams
rapidsai_public_repos/cudf/cpp/tests/streams/strings/filter_test.cpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/strings/char_types/char_types.hpp> #include <cudf/strings/translate.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/default_stream.hpp> #include <string> #include <vector> class StringsFilterTest : public cudf::test::BaseFixture {}; static std::pair<cudf::char_utf8, cudf::char_utf8> make_entry(char const* from, char const* to) { cudf::char_utf8 in = 0; cudf::char_utf8 out = 0; cudf::strings::detail::to_char_utf8(from, in); if (to) cudf::strings::detail::to_char_utf8(to, out); return std::pair(in, out); } TEST_F(StringsFilterTest, Translate) { auto input = cudf::test::strings_column_wrapper({" aBc ", " ", "aaaa ", "\tb"}); auto view = cudf::strings_column_view(input); std::vector<std::pair<cudf::char_utf8, cudf::char_utf8>> translate_table{ make_entry("b", 0), make_entry("a", "A"), make_entry(" ", "_")}; cudf::strings::translate(view, translate_table, cudf::test::get_default_stream()); } TEST_F(StringsFilterTest, Filter) { auto input = cudf::test::strings_column_wrapper({" aBc ", " ", "aaaa ", "\tb"}); auto view = cudf::strings_column_view(input); std::vector<std::pair<cudf::char_utf8, cudf::char_utf8>> filter_table{ make_entry("b", 0), make_entry("a", "A"), make_entry(" ", "_")}; auto const repl = cudf::string_scalar("X", true, cudf::test::get_default_stream()); auto const keep = cudf::strings::filter_type::KEEP; cudf::strings::filter_characters( view, filter_table, keep, repl, cudf::test::get_default_stream()); } TEST_F(StringsFilterTest, FilterTypes) { auto input = cudf::test::strings_column_wrapper({" aBc ", " ", "aaaa ", "\tb"}); auto view = cudf::strings_column_view(input); auto const verify_types = cudf::strings::string_character_types::LOWER | cudf::strings::string_character_types::UPPER; auto const all_types = cudf::strings::string_character_types::ALL_TYPES; cudf::strings::all_characters_of_type( view, verify_types, all_types, cudf::test::get_default_stream()); auto const repl = cudf::string_scalar("X", true, cudf::test::get_default_stream()); auto const space_types = cudf::strings::string_character_types::SPACE; cudf::strings::filter_characters_of_type( view, all_types, repl, space_types, cudf::test::get_default_stream()); }
0
rapidsai_public_repos/cudf/cpp/tests/streams
rapidsai_public_repos/cudf/cpp/tests/streams/strings/reverse_test.cpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/strings/reverse.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/default_stream.hpp> #include <string> #include <vector> class StringsReverseTest : public cudf::test::BaseFixture {}; TEST_F(StringsReverseTest, Reverse) { auto input = cudf::test::strings_column_wrapper({"aBcdef", " ", "12345"}); auto view = cudf::strings_column_view(input); cudf::strings::reverse(view, cudf::test::get_default_stream()); }
0
rapidsai_public_repos/cudf/cpp/tests/streams
rapidsai_public_repos/cudf/cpp/tests/streams/strings/combine_test.cpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/default_stream.hpp>

#include <cudf/strings/combine.hpp>
#include <cudf/strings/repeat_strings.hpp>

#include <string>

class StringsCombineTest : public cudf::test::BaseFixture {};

TEST_F(StringsCombineTest, Concatenate)
{
  auto input = cudf::test::strings_column_wrapper({"Héllo", "thesé", "tést"});
  auto view  = cudf::table_view({input, input});

  auto separators      = cudf::test::strings_column_wrapper({"_", ".", " "});
  auto separators_view = cudf::strings_column_view(separators);
  auto sep_on_null     = cudf::strings::separator_on_nulls::YES;

  auto const separator = cudf::string_scalar(" ", true, cudf::test::get_default_stream());
  auto const narep     = cudf::string_scalar("n/a", true, cudf::test::get_default_stream());
  cudf::strings::concatenate(
    view, separator, narep, sep_on_null, cudf::test::get_default_stream());
  cudf::strings::concatenate(
    view, separators_view, narep, narep, sep_on_null, cudf::test::get_default_stream());
}

TEST_F(StringsCombineTest, Join)
{
  auto input = cudf::test::strings_column_wrapper({"Héllo", "thesé", "tést"});
  auto view  = cudf::strings_column_view(input);

  auto const separator = cudf::string_scalar(" ", true, cudf::test::get_default_stream());
  auto const narep     = cudf::string_scalar("n/a", true, cudf::test::get_default_stream());
  cudf::strings::join_strings(view, separator, narep, cudf::test::get_default_stream());
}

TEST_F(StringsCombineTest, JoinLists)
{
  using STR_LISTS = cudf::test::lists_column_wrapper<cudf::string_view>;
  auto const input = STR_LISTS{
    STR_LISTS{"a", "bb", "ccc"}, STR_LISTS{"ddd", "efgh", "ijk"}, STR_LISTS{"zzz", "xxxxx"}};
  auto view = cudf::lists_column_view(input);

  auto separators      = cudf::test::strings_column_wrapper({"_", ".", " "});
  auto separators_view = cudf::strings_column_view(separators);
  auto sep_on_null     = cudf::strings::separator_on_nulls::YES;
  auto if_empty        = cudf::strings::output_if_empty_list::EMPTY_STRING;

  auto const separator = cudf::string_scalar(" ", true, cudf::test::get_default_stream());
  auto const narep     = cudf::string_scalar("n/a", true, cudf::test::get_default_stream());
  cudf::strings::join_list_elements(
    view, separator, narep, sep_on_null, if_empty, cudf::test::get_default_stream());
  cudf::strings::join_list_elements(
    view, separators_view, narep, narep, sep_on_null, if_empty, cudf::test::get_default_stream());
}

TEST_F(StringsCombineTest, Repeat)
{
  auto input = cudf::test::strings_column_wrapper({"Héllo", "thesé", "tést"});
  auto view  = cudf::strings_column_view(input);
  cudf::strings::repeat_strings(view, 0, cudf::test::get_default_stream());
  cudf::strings::repeat_strings(view, 1, cudf::test::get_default_stream());
  cudf::strings::repeat_strings(view, 10, cudf::test::get_default_stream());

  auto counts = cudf::test::fixed_width_column_wrapper<cudf::size_type>({9, 8, 7});
  cudf::strings::repeat_strings(view, counts, cudf::test::get_default_stream());

  auto const str = cudf::string_scalar("X", true, cudf::test::get_default_stream());
  cudf::strings::repeat_string(str, 0, cudf::test::get_default_stream());
  cudf::strings::repeat_string(str, 1, cudf::test::get_default_stream());
  cudf::strings::repeat_string(str, 10, cudf::test::get_default_stream());

  auto const invalid = cudf::string_scalar("", false, cudf::test::get_default_stream());
  cudf::strings::repeat_string(invalid, 10, cudf::test::get_default_stream());
}
0
rapidsai_public_repos/cudf/cpp/tests/streams
rapidsai_public_repos/cudf/cpp/tests/streams/strings/convert_test.cpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/default_stream.hpp>

#include <cudf/strings/convert/convert_booleans.hpp>
#include <cudf/strings/convert/convert_datetime.hpp>
#include <cudf/strings/convert/convert_durations.hpp>
#include <cudf/strings/convert/convert_fixed_point.hpp>
#include <cudf/strings/convert/convert_floats.hpp>
#include <cudf/strings/convert/convert_integers.hpp>
#include <cudf/strings/convert/convert_ipv4.hpp>
#include <cudf/strings/convert/convert_lists.hpp>
#include <cudf/strings/convert/convert_urls.hpp>

#include <string>

class StringsConvertTest : public cudf::test::BaseFixture {};

TEST_F(StringsConvertTest, Booleans)
{
  auto input = cudf::test::strings_column_wrapper({"true", "false", "True", ""});
  auto view  = cudf::strings_column_view(input);

  auto true_scalar  = cudf::string_scalar("true", true, cudf::test::get_default_stream());
  auto false_scalar = cudf::string_scalar("false", true, cudf::test::get_default_stream());

  auto bools = cudf::strings::to_booleans(view, true_scalar, cudf::test::get_default_stream());
  cudf::strings::from_booleans(
    bools->view(), true_scalar, false_scalar, cudf::test::get_default_stream());
}

TEST_F(StringsConvertTest, Timestamps)
{
  auto input = cudf::test::strings_column_wrapper({"2019-03-20T12:34:56Z", "2020-02-29T00:00:00Z"});
  auto view  = cudf::strings_column_view(input);

  std::string format = "%Y-%m-%dT%H:%M:%SZ";
  auto dtype         = cudf::data_type{cudf::type_id::TIMESTAMP_SECONDS};

  cudf::strings::is_timestamp(view, format, cudf::test::get_default_stream());
  auto timestamps =
    cudf::strings::to_timestamps(view, dtype, format, cudf::test::get_default_stream());

  auto empty = cudf::test::strings_column_wrapper();
  cudf::strings::from_timestamps(
    timestamps->view(), format, cudf::strings_column_view(empty), cudf::test::get_default_stream());
}

TEST_F(StringsConvertTest, Durations)
{
  auto input = cudf::test::strings_column_wrapper({"17975 days 12:34:56", "18321 days 00:00:00"});
  auto view  = cudf::strings_column_view(input);

  std::string format = "%D days %H:%M:%S";
  auto dtype         = cudf::data_type{cudf::type_id::DURATION_SECONDS};

  auto durations =
    cudf::strings::to_durations(view, dtype, format, cudf::test::get_default_stream());
  cudf::strings::from_durations(durations->view(), format, cudf::test::get_default_stream());
}

TEST_F(StringsConvertTest, FixedPoint)
{
  auto input = cudf::test::strings_column_wrapper({"1.234E3", "-876", "543.2"});
  auto view  = cudf::strings_column_view(input);

  auto dtype  = cudf::data_type{cudf::type_id::DECIMAL64, numeric::scale_type{-3}};
  auto values = cudf::strings::to_fixed_point(view, dtype, cudf::test::get_default_stream());
  cudf::strings::from_fixed_point(values->view(), cudf::test::get_default_stream());
}

TEST_F(StringsConvertTest, Floats)
{
  auto input = cudf::test::strings_column_wrapper({"1.234E3", "-876", "543.2"});
  auto view  = cudf::strings_column_view(input);

  auto dtype  = cudf::data_type{cudf::type_id::FLOAT32};
  auto values = cudf::strings::to_floats(view, dtype, cudf::test::get_default_stream());
  cudf::strings::from_floats(values->view(), cudf::test::get_default_stream());
  cudf::strings::is_float(view, cudf::test::get_default_stream());
}

TEST_F(StringsConvertTest, Integers)
{
  auto input = cudf::test::strings_column_wrapper({"1234", "-876", "5432"});
  auto view  = cudf::strings_column_view(input);

  auto dtype  = cudf::data_type{cudf::type_id::INT32};
  auto values = cudf::strings::to_integers(view, dtype, cudf::test::get_default_stream());
  cudf::strings::from_integers(values->view(), cudf::test::get_default_stream());
  cudf::strings::is_integer(view, cudf::test::get_default_stream());
  cudf::strings::is_hex(view, cudf::test::get_default_stream());
  cudf::strings::hex_to_integers(view, dtype, cudf::test::get_default_stream());
  cudf::strings::integers_to_hex(values->view(), cudf::test::get_default_stream());
}

TEST_F(StringsConvertTest, IPv4)
{
  auto input = cudf::test::strings_column_wrapper({"192.168.0.1", "10.0.0.1"});
  auto view  = cudf::strings_column_view(input);

  auto values = cudf::strings::ipv4_to_integers(view, cudf::test::get_default_stream());
  cudf::strings::integers_to_ipv4(values->view(), cudf::test::get_default_stream());
  cudf::strings::is_ipv4(view, cudf::test::get_default_stream());
}

TEST_F(StringsConvertTest, URLs)
{
  auto input = cudf::test::strings_column_wrapper({"www.nvidia.com/rapids?p=é", "/_file-7.txt"});
  auto view  = cudf::strings_column_view(input);

  auto values = cudf::strings::url_encode(view, cudf::test::get_default_stream());
  cudf::strings::url_decode(values->view(), cudf::test::get_default_stream());
}

TEST_F(StringsConvertTest, ListsFormat)
{
  using STR_LISTS = cudf::test::lists_column_wrapper<cudf::string_view>;
  auto const input =
    STR_LISTS{{STR_LISTS{"a", "bb", "ccc"}, STR_LISTS{}, STR_LISTS{"ddd", "ee", "f"}},
              {STR_LISTS{"gg", "hhh"}, STR_LISTS{"i", "", "", "jj"}}};
  auto view        = cudf::lists_column_view(input);
  auto null_scalar = cudf::string_scalar("NULL", true, cudf::test::get_default_stream());
  auto separators  = cudf::strings_column_view(cudf::test::strings_column_wrapper());
  cudf::strings::format_list_column(
    view, null_scalar, separators, cudf::test::get_default_stream());
}
0
rapidsai_public_repos/cudf/cpp/tests/streams
rapidsai_public_repos/cudf/cpp/tests/streams/strings/case_test.cpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cudf/strings/capitalize.hpp>
#include <cudf/strings/case.hpp>

#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/default_stream.hpp>

class StringsCaseTest : public cudf::test::BaseFixture {};

TEST_F(StringsCaseTest, LowerUpper)
{
  auto const input =
    cudf::test::strings_column_wrapper({"",
                                        "The quick brown fox",
                                        "jumps over the lazy dog.",
                                        "all work and no play makes Jack a dull boy",
                                        R"(!"#$%&'()*+,-./0123456789:;<=>?@[\]^_`{|}~)"});
  auto view = cudf::strings_column_view(input);

  cudf::strings::to_lower(view, cudf::test::get_default_stream());
  cudf::strings::to_upper(view, cudf::test::get_default_stream());
  cudf::strings::swapcase(view, cudf::test::get_default_stream());
}

TEST_F(StringsCaseTest, Capitalize)
{
  auto const input =
    cudf::test::strings_column_wrapper({"",
                                        "The Quick Brown Fox",
                                        "jumps over the lazy dog",
                                        "all work and no play makes Jack a dull boy"});
  auto view = cudf::strings_column_view(input);

  auto const delimiter = cudf::string_scalar(" ", true, cudf::test::get_default_stream());
  cudf::strings::capitalize(view, delimiter, cudf::test::get_default_stream());
  cudf::strings::is_title(view, cudf::test::get_default_stream());
  cudf::strings::title(
    view, cudf::strings::string_character_types::ALPHA, cudf::test::get_default_stream());
}
0