repo_id (string, lengths 21-96) | file_path (string, lengths 31-155) | content (string, lengths 1-92.9M) | __index_level_0__ (int64, 0)
---|---|---|---
rapidsai_public_repos/cudf/cpp/tests
|
rapidsai_public_repos/cudf/cpp/tests/text/bpe_tests.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nvtext/byte_pair_encoding.hpp>
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/iterator_utilities.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/strings/strings_column_view.hpp>
struct TextBytePairEncoding : public cudf::test::BaseFixture {};
TEST_F(TextBytePairEncoding, BytePairEncoding)
{
// partial table based on values from https://huggingface.co/gpt2/raw/main/merges.txt
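// (the trailing numbers appear to be each pair's line number, i.e. merge rank, in that file;
// in byte-pair encoding, lower-ranked pairs are merged first)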
auto mpt = cudf::test::strings_column_wrapper({
"e n", // 14
"i t", // 16
"i s", // 17
"e s", // 20
"en t", // 44
"c e", // 90
"es t", // 141
"en ce", // 340
"t h", // 146
"h i", // 5049
"th is", // 5407
"t est", // 9034
"s i", // 13142
"s ent" // 33832
});
auto merge_pairs = nvtext::load_merge_pairs(cudf::strings_column_view(mpt));
auto validity = cudf::test::iterators::null_at(4);
cudf::test::strings_column_wrapper input(
{"thisisit", "thisis test-sentence-1", "thisistestsentence-2", "this-istestsentence 3", "", ""},
validity);
auto sv = cudf::strings_column_view(input);
auto results = nvtext::byte_pair_encoding(sv, *merge_pairs);
auto expected = cudf::test::strings_column_wrapper({"this is it",
"this is test - sent ence - 1",
"this is test sent ence - 2",
"this - is test sent ence 3",
"",
""},
validity);
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(results->view(), expected);
auto sliced = cudf::slice(input, {1, 4}).front();
auto sliced_expected = cudf::slice(expected, {1, 4}).front();
sv = cudf::strings_column_view(sliced);
results = nvtext::byte_pair_encoding(sv, *merge_pairs);
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(results->view(), sliced_expected);
}
TEST_F(TextBytePairEncoding, BytePairEncodingSeparator)
{
auto mpt = cudf::test::strings_column_wrapper(
{"Ġ t", "Ġt he", "h e", "e n", "i t", "e s", "en t", "c e", "es t", "en ce", "t est", "s ent"});
auto merge_pairs = nvtext::load_merge_pairs(cudf::strings_column_view(mpt));
cudf::test::strings_column_wrapper input(
{"Ġthe test sentence", "test Ġthe sentence", "Ġthetest sentence", "testĠthesentence"});
auto sv = cudf::strings_column_view(input);
auto results = nvtext::byte_pair_encoding(sv, *merge_pairs, std::string("$"));
auto expected = cudf::test::strings_column_wrapper({"Ġthe$ $test$ $sent$ence",
"test$ $Ġthe$ $sent$ence",
"Ġthe$test$ $sent$ence",
"test$Ġthe$sent$ence"});
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(results->view(), expected);
}
TEST_F(TextBytePairEncoding, BPEAdjacentPairs)
{
auto mpt = cudf::test::strings_column_wrapper({
"▁ H", // 157
"m m", // 10742
"? !", // 50675
"▁H mm", // 174381
"mm m", // 262776
"?! !", // 352313
"? !?", // 352314
"mm mm", // 387733
"▁H m", // 471269
"?! ?!", // 506981
"?!? !", // 506982
});
auto merge_pairs = nvtext::load_merge_pairs(cudf::strings_column_view(mpt));
cudf::test::strings_column_wrapper input({"▁Hmmmmm", "?!?!?!"});
auto results = nvtext::byte_pair_encoding(cudf::strings_column_view(input), *merge_pairs);
auto expected = cudf::test::strings_column_wrapper({"▁Hmm mmm", "?!?! ?!"});
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(results->view(), expected);
}
TEST_F(TextBytePairEncoding, BPE_Empty)
{
auto mpt = cudf::test::strings_column_wrapper({"i s", "i t"});
auto merge_pairs = nvtext::load_merge_pairs(cudf::strings_column_view(mpt));
auto empty = cudf::make_empty_column(cudf::type_id::STRING);
auto results = nvtext::byte_pair_encoding(cudf::strings_column_view(empty->view()), *merge_pairs);
EXPECT_EQ(0, results->size());
}
TEST_F(TextBytePairEncoding, BPE_Error)
{
auto empty = cudf::make_empty_column(cudf::type_id::STRING);
EXPECT_THROW(nvtext::load_merge_pairs(cudf::strings_column_view(*empty)), cudf::logic_error);
auto null_pairs = cudf::test::strings_column_wrapper({"", ""}, {1, 0});
EXPECT_THROW(nvtext::load_merge_pairs(cudf::strings_column_view(null_pairs)), cudf::logic_error);
}
| 0 |
rapidsai_public_repos/cudf/cpp/tests
|
rapidsai_public_repos/cudf/cpp/tests/text/jaccard_tests.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <nvtext/jaccard.hpp>
#include <cudf/strings/strings_column_view.hpp>
struct JaccardTest : public cudf::test::BaseFixture {};
TEST_F(JaccardTest, Basic)
{
auto input1 =
cudf::test::strings_column_wrapper({"the quick brown fox", "jumped over the lazy dog."});
auto input2 =
cudf::test::strings_column_wrapper({"the slowest brown cat", "crawled under the jumping fox"});
auto view1 = cudf::strings_column_view(input1);
auto view2 = cudf::strings_column_view(input2);
auto results = nvtext::jaccard_index(view1, view2, 5);
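// The expected values are |intersection| / |union| of the distinct character 5-grams of each
// row pair: row 0 shares 3 of 29 distinct 5-grams (3/29 ~= 0.103448) and row 1 shares 3 of 43
// (3/43 ~= 0.069767).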
auto expected = cudf::test::fixed_width_column_wrapper<float>({0.103448279f, 0.0697674453f});
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(*results, expected);
expected = cudf::test::fixed_width_column_wrapper<float>({1.0f, 1.0f});
results = nvtext::jaccard_index(view1, view1, 5);
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(*results, expected);
results = nvtext::jaccard_index(view2, view2, 10);
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(*results, expected);
}
TEST_F(JaccardTest, WithNulls)
{
auto input1 =
cudf::test::strings_column_wrapper({"brown fox", "jumps over dog", "", ""}, {1, 1, 0, 1});
auto input2 =
cudf::test::strings_column_wrapper({"brown cat", "jumps on fox", "", ""}, {1, 1, 1, 0});
auto view1 = cudf::strings_column_view(input1);
auto view2 = cudf::strings_column_view(input2);
auto results = nvtext::jaccard_index(view1, view2, 5);
auto expected =
cudf::test::fixed_width_column_wrapper<float>({0.25f, 0.200000003f, 0.f, 0.f}, {1, 1, 0, 0});
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(*results, expected);
expected = cudf::test::fixed_width_column_wrapper<float>({1.0f, 1.0f, 0.f, 0.f}, {1, 1, 0, 1});
results = nvtext::jaccard_index(view1, view1, 7);
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(*results, expected);
}
TEST_F(JaccardTest, Errors)
{
auto input = cudf::test::strings_column_wrapper({"1", "2", "3"});
auto view = cudf::strings_column_view(input);
// invalid parameter value
EXPECT_THROW(nvtext::jaccard_index(view, view, 1), std::invalid_argument);
// invalid size
auto input2 = cudf::test::strings_column_wrapper({"1", "2"});
auto view2 = cudf::strings_column_view(input2);
EXPECT_THROW(nvtext::jaccard_index(view, view2, 5), std::invalid_argument);
}
| 0 |
rapidsai_public_repos/cudf/cpp/tests
|
rapidsai_public_repos/cudf/cpp/tests/text/replace_tests.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <nvtext/replace.hpp>
#include <thrust/iterator/transform_iterator.h>
#include <vector>
struct TextReplaceTest : public cudf::test::BaseFixture {};
TEST_F(TextReplaceTest, ReplaceTokens)
{
std::vector<char const*> h_strings{"the fox jumped over the dog",
"is theme of the thesis",
nullptr,
"",
"no change",
"thé is the cheese is"};
cudf::test::strings_column_wrapper strings(
h_strings.begin(),
h_strings.end(),
thrust::make_transform_iterator(h_strings.begin(), [](auto str) { return str != nullptr; }));
cudf::test::strings_column_wrapper targets({"is", "the"});
cudf::test::strings_column_wrapper repls({"___", ""});
std::vector<char const*> h_expected{" fox jumped over dog",
"___ theme of thesis",
nullptr,
"",
"no change",
"thé ___ cheese ___"};
cudf::test::strings_column_wrapper expected(
h_expected.begin(),
h_expected.end(),
thrust::make_transform_iterator(h_expected.begin(), [](auto str) { return str != nullptr; }));
auto results = nvtext::replace_tokens(cudf::strings_column_view(strings),
cudf::strings_column_view(targets),
cudf::strings_column_view(repls));
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
results = nvtext::replace_tokens(cudf::strings_column_view(strings),
cudf::strings_column_view(targets),
cudf::strings_column_view(repls),
cudf::string_scalar("o "));
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
TEST_F(TextReplaceTest, ReplaceTokensSingleRepl)
{
cudf::test::strings_column_wrapper strings({"this\t is that", "is then \tis", "us them is us"});
cudf::test::strings_column_wrapper targets({"is", "us"});
cudf::test::strings_column_wrapper repls({"_"});
cudf::test::strings_column_wrapper expected({"this\t _ that", "_ then \t_", "_ them _ _"});
auto results = nvtext::replace_tokens(cudf::strings_column_view(strings),
cudf::strings_column_view(targets),
cudf::strings_column_view(repls));
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
TEST_F(TextReplaceTest, ReplaceTokensEmptyTest)
{
auto strings = cudf::make_empty_column(cudf::data_type{cudf::type_id::STRING});
cudf::strings_column_view strings_view(strings->view());
auto const results = nvtext::replace_tokens(strings_view, strings_view, strings_view);
EXPECT_EQ(results->size(), 0);
EXPECT_EQ(results->has_nulls(), false);
}
TEST_F(TextReplaceTest, ReplaceTokensErrorTest)
{
auto strings = cudf::make_empty_column(cudf::data_type{cudf::type_id::STRING});
cudf::strings_column_view strings_view(strings->view());
cudf::test::strings_column_wrapper notnulls({"", "", ""});
cudf::strings_column_view notnulls_view(notnulls);
cudf::test::strings_column_wrapper nulls({"", ""}, {0, 0});
cudf::strings_column_view nulls_view(nulls);
EXPECT_THROW(nvtext::replace_tokens(strings_view, nulls_view, notnulls_view), cudf::logic_error);
EXPECT_THROW(nvtext::replace_tokens(strings_view, notnulls_view, nulls_view), cudf::logic_error);
EXPECT_THROW(nvtext::replace_tokens(notnulls_view, notnulls_view, strings_view),
cudf::logic_error);
EXPECT_THROW(
nvtext::replace_tokens(notnulls_view, nulls_view, strings_view, cudf::string_scalar("", false)),
cudf::logic_error);
}
TEST_F(TextReplaceTest, FilterTokens)
{
cudf::test::strings_column_wrapper strings({" one two three ", "four fivé six", "sevén eight"});
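// filter_tokens removes whitespace-delimited tokens shorter than the given minimum character
// length, optionally substituting a replacement string; note that "fivé" is four characters.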
auto results = nvtext::filter_tokens(cudf::strings_column_view(strings), 1);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, strings); // no change
{
auto results = nvtext::filter_tokens(cudf::strings_column_view(strings), 4);
cudf::test::strings_column_wrapper expected({" three ", "four fivé ", "sevén eight"});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
{
auto results = nvtext::filter_tokens(cudf::strings_column_view(strings), 5);
cudf::test::strings_column_wrapper expected({" three ", " ", "sevén eight"});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
{
auto results =
nvtext::filter_tokens(cudf::strings_column_view(strings), 4, cudf::string_scalar("--"));
cudf::test::strings_column_wrapper expected({" -- -- three ", "four fivé --", "sevén eight"});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
}
TEST_F(TextReplaceTest, FilterTokensEmptyTest)
{
auto strings = cudf::make_empty_column(cudf::data_type{cudf::type_id::STRING});
auto const results = nvtext::filter_tokens(cudf::strings_column_view(strings->view()), 7);
EXPECT_EQ(results->size(), 0);
}
TEST_F(TextReplaceTest, FilterTokensErrorTest)
{
auto strings = cudf::make_empty_column(cudf::data_type{cudf::type_id::STRING});
EXPECT_THROW(nvtext::filter_tokens(
cudf::strings_column_view(strings->view()), 1, cudf::string_scalar("", false)),
cudf::logic_error);
EXPECT_THROW(nvtext::filter_tokens(cudf::strings_column_view(strings->view()),
1,
cudf::string_scalar("-"),
cudf::string_scalar("", false)),
cudf::logic_error);
}
| 0 |
rapidsai_public_repos/cudf/cpp/tests
|
rapidsai_public_repos/cudf/cpp/tests/text/normalize_tests.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/copying.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <nvtext/normalize.hpp>
#include <thrust/iterator/transform_iterator.h>
#include <vector>
struct TextNormalizeTest : public cudf::test::BaseFixture {};
TEST_F(TextNormalizeTest, NormalizeSpaces)
{
std::vector<char const*> h_strings{"the\t fox jumped over the dog",
"the dog\f chased the cat\r",
" the cat chaséd the mouse\n",
nullptr,
"",
" \r\t\n",
"no change",
"the mousé ate the cheese"};
cudf::test::strings_column_wrapper strings(
h_strings.begin(),
h_strings.end(),
thrust::make_transform_iterator(h_strings.begin(), [](auto str) { return str != nullptr; }));
cudf::strings_column_view strings_view(strings);
std::vector<char const*> h_expected{"the fox jumped over the dog",
"the dog chased the cat",
"the cat chaséd the mouse",
nullptr,
"",
"",
"no change",
"the mousé ate the cheese"};
cudf::test::strings_column_wrapper expected(
h_expected.begin(),
h_expected.end(),
thrust::make_transform_iterator(h_expected.begin(), [](auto str) { return str != nullptr; }));
auto const results = nvtext::normalize_spaces(strings_view);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
TEST_F(TextNormalizeTest, NormalizeEmptyTest)
{
auto strings = cudf::make_empty_column(cudf::data_type{cudf::type_id::STRING});
cudf::strings_column_view strings_view(strings->view());
auto results = nvtext::normalize_spaces(strings_view);
EXPECT_EQ(results->size(), 0);
results = nvtext::normalize_characters(strings_view, true);
EXPECT_EQ(results->size(), 0);
results = nvtext::normalize_characters(strings_view, false);
EXPECT_EQ(results->size(), 0);
}
TEST_F(TextNormalizeTest, AllNullStrings)
{
cudf::test::strings_column_wrapper strings({"", "", ""}, {0, 0, 0});
cudf::strings_column_view strings_view(strings);
auto results = nvtext::normalize_spaces(strings_view);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, strings);
results = nvtext::normalize_characters(strings_view, false);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, strings);
}
TEST_F(TextNormalizeTest, SomeNullStrings)
{
cudf::test::strings_column_wrapper strings({"", ".", "a"}, {0, 1, 1});
cudf::strings_column_view strings_view(strings);
auto results = nvtext::normalize_characters(strings_view, false);
cudf::test::strings_column_wrapper expected({"", " . ", "a"}, {0, 1, 1});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
TEST_F(TextNormalizeTest, NormalizeCharacters)
{
// These include punctuation, accents, whitespace, and CJK characters
std::vector<char const*> h_strings{"abc£def",
nullptr,
"éè â îô\taeio",
"\tĂĆĖÑ Ü",
"ACEN U",
"P^NP",
"$41.07",
"[a,b]",
"丏丟",
""};
auto validity =
thrust::make_transform_iterator(h_strings.begin(), [](auto str) { return str != nullptr; });
cudf::test::strings_column_wrapper strings(h_strings.begin(), h_strings.end(), validity);
cudf::strings_column_view strings_view(strings);
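// With do_lower_case=true the output is lower-cased and accents are stripped in addition to
// the space padding inserted around punctuation and CJK characters; with false, case and
// accents are preserved and only the padding is applied.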
{
auto results = nvtext::normalize_characters(strings_view, true);
cudf::test::strings_column_wrapper expected({"abc£def",
"",
"ee a io aeio",
" acen u",
"acen u",
"p ^ np",
" $ 41 . 07",
" [ a , b ] ",
" 丏 丟 ",
""},
validity);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
{
auto results = nvtext::normalize_characters(strings_view, false);
cudf::test::strings_column_wrapper expected({"abc£def",
"",
"éè â îô aeio",
" ĂĆĖÑ Ü",
"ACEN U",
"P ^ NP",
" $ 41 . 07",
" [ a , b ] ",
" 丏 丟 ",
""},
validity);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
}
}
TEST_F(TextNormalizeTest, NormalizeSlicedColumn)
{
cudf::test::strings_column_wrapper strings(
{"abc£def", "éè â îô\taeio", "ACEN U", "P^NP", "$41.07", "[a,b]", "丏丟"});
std::vector<cudf::column_view> sliced = cudf::split(strings, {4});
auto results = nvtext::normalize_characters(cudf::strings_column_view(sliced.front()), true);
cudf::test::strings_column_wrapper expected({"abc£def", "ee a io aeio", "acen u", "p ^ np"});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected);
results = nvtext::normalize_characters(cudf::strings_column_view(sliced[1]), false);
cudf::test::strings_column_wrapper expected2({" $ 41 . 07", " [ a , b ] ", " 丏 丟 "});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected2);
}
| 0 |
rapidsai_public_repos/cudf/cpp/tests
|
rapidsai_public_repos/cudf/cpp/tests/io/file_io_test.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <src/io/utilities/file_io_utilities.hpp>
#include <type_traits>
// Base test fixture for tests
struct CuFileIOTest : public cudf::test::BaseFixture {};
TEST_F(CuFileIOTest, SliceSize)
{
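// Each test case is a (total file size, slice size limit) pair; make_file_io_slices is
// expected to produce contiguous slices covering [0, file size), or no slices for size 0.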
std::vector<std::pair<size_t, size_t>> test_cases{
{1 << 20, 1 << 18}, {1 << 18, 1 << 20}, {1 << 20, 3333}, {0, 1 << 18}, {0, 0}, {1 << 20, 0}};
for (auto const& test_case : test_cases) {
auto const slices = cudf::io::detail::make_file_io_slices(test_case.first, test_case.second);
if (slices.empty()) {
ASSERT_EQ(test_case.first, 0);
} else {
ASSERT_EQ(slices.front().offset, 0);
ASSERT_EQ(slices.back().offset + slices.back().size, test_case.first);
for (auto i = 1u; i < slices.size(); ++i) {
ASSERT_EQ(slices[i].offset, slices[i - 1].offset + slices[i - 1].size);
}
}
}
}
CUDF_TEST_PROGRAM_MAIN()
| 0 |
rapidsai_public_repos/cudf/cpp/tests
|
rapidsai_public_repos/cudf/cpp/tests/io/row_selection_test.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <src/io/utilities/row_selection.hpp>
#include <limits>
using cudf::io::detail::skip_rows_num_rows_from_options;
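// skip_rows_num_rows_from_options(skip_rows, num_rows, total_file_rows) resolves the reader
// options into an effective (skip, count) pair: num_rows defaults to the remaining rows when
// not provided, and both values are clamped to the rows available in the file, as the cases
// below exercise.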
// Base test fixture for tests
struct FromOptsTest : public cudf::test::BaseFixture {};
TEST_F(FromOptsTest, PassThrough)
{
// select all rows
{
auto [out_skip, out_num] = skip_rows_num_rows_from_options(0, 100, 100);
EXPECT_EQ(out_skip, 0);
EXPECT_EQ(out_num, 100);
}
// select all except first skip_rows
{
auto [out_skip, out_num] = skip_rows_num_rows_from_options(10, 90, 100);
EXPECT_EQ(out_skip, 10);
EXPECT_EQ(out_num, 90);
}
// select first num_rows
{
auto [out_skip, out_num] = skip_rows_num_rows_from_options(0, 60, 100);
EXPECT_EQ(out_skip, 0);
EXPECT_EQ(out_num, 60);
}
}
TEST_F(FromOptsTest, DefaultNumRows)
{
// no skip_rows
{
auto [out_skip, out_num] = skip_rows_num_rows_from_options(0, std::nullopt, 100);
EXPECT_EQ(out_skip, 0);
EXPECT_EQ(out_num, 100);
}
// with skip_rows
{
auto [out_skip, out_num] = skip_rows_num_rows_from_options(20, std::nullopt, 100);
EXPECT_EQ(out_skip, 20);
EXPECT_EQ(out_num, 80);
}
}
TEST_F(FromOptsTest, InputSize32BitOverflow)
{
// Input number of rows too large to fit into cudf::size_type
// Test that we can still select rows from such input
auto const too_large_for_32bit = std::numeric_limits<int64_t>::max();
// no num_rows
{
auto [out_skip, out_num] =
skip_rows_num_rows_from_options(too_large_for_32bit - 10, std::nullopt, too_large_for_32bit);
EXPECT_EQ(out_skip, too_large_for_32bit - 10);
EXPECT_EQ(out_num, 10);
}
// with num_rows
{
auto [out_skip, out_num] =
skip_rows_num_rows_from_options(too_large_for_32bit - 100, 30, too_large_for_32bit);
EXPECT_EQ(out_skip, too_large_for_32bit - 100);
EXPECT_EQ(out_num, 30);
}
}
TEST_F(FromOptsTest, LimitOptionsToFileRows)
{
// limit skip_rows without num_rows
{
auto [out_skip, out_num] = skip_rows_num_rows_from_options(1000, std::nullopt, 100);
EXPECT_EQ(out_skip, 100);
EXPECT_EQ(out_num, 0);
}
// limit skip_rows with num_rows
{
auto [out_skip, out_num] = skip_rows_num_rows_from_options(1000, 2, 100);
EXPECT_EQ(out_skip, 100);
EXPECT_EQ(out_num, 0);
}
// limit num_rows without skip_rows
{
auto [out_skip, out_num] = skip_rows_num_rows_from_options(0, 1000, 100);
EXPECT_EQ(out_skip, 0);
EXPECT_EQ(out_num, 100);
}
// limit num_rows with skip_rows
{
auto [out_skip, out_num] = skip_rows_num_rows_from_options(10, 1000, 100);
EXPECT_EQ(out_skip, 10);
EXPECT_EQ(out_num, 90);
}
}
TEST_F(FromOptsTest, OverFlowDetection)
{
auto const too_large_for_32bit = std::numeric_limits<int64_t>::max();
// Too many rows to read until the end of the file
EXPECT_THROW(skip_rows_num_rows_from_options(0, std::nullopt, too_large_for_32bit),
std::overflow_error);
// Should work fine with num_rows
EXPECT_NO_THROW(
skip_rows_num_rows_from_options(1000, too_large_for_32bit - 100, too_large_for_32bit));
}
CUDF_TEST_PROGRAM_MAIN()
| 0 |
rapidsai_public_repos/cudf/cpp/tests
|
rapidsai_public_repos/cudf/cpp/tests/io/json_type_cast_test.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/iterator_utilities.hpp>
#include <cudf_test/type_lists.hpp>
#include <io/utilities/string_parsing.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/io/datasource.hpp>
#include <cudf/io/json.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <rmm/exec_policy.hpp>
#include <algorithm>
#include <iterator>
#include <type_traits>
using namespace cudf::test::iterators;
struct JSONTypeCastTest : public cudf::test::BaseFixture {};
namespace {
struct offsets_to_length {
__device__ cudf::size_type operator()(thrust::tuple<cudf::size_type, cudf::size_type> const& p)
{
return thrust::get<1>(p) - thrust::get<0>(p);
}
};
/// Returns length of each string in the column
auto string_offset_to_length(cudf::strings_column_view const& column, rmm::cuda_stream_view stream)
{
auto offsets_begin = column.offsets_begin();
auto offsets_pair =
thrust::make_zip_iterator(thrust::make_tuple(offsets_begin, thrust::next(offsets_begin)));
rmm::device_uvector<cudf::size_type> svs_length(column.size(), stream);
thrust::transform(rmm::exec_policy(cudf::get_default_stream()),
offsets_pair,
offsets_pair + column.size(),
svs_length.begin(),
offsets_to_length{});
return svs_length;
}
} // namespace
auto default_json_options()
{
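// Minimal parse options: field delimiter, line terminator, quote character and decimal point
// (presumably in that order), plus serialized tries recognizing the true/false/null literals.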
auto parse_opts = cudf::io::parse_options{',', '\n', '\"', '.'};
auto const stream = cudf::get_default_stream();
parse_opts.trie_true = cudf::detail::create_serialized_trie({"true"}, stream);
parse_opts.trie_false = cudf::detail::create_serialized_trie({"false"}, stream);
parse_opts.trie_na = cudf::detail::create_serialized_trie({"", "null"}, stream);
return parse_opts;
}
TEST_F(JSONTypeCastTest, String)
{
auto const stream = cudf::get_default_stream();
auto mr = rmm::mr::get_current_device_resource();
auto const type = cudf::data_type{cudf::type_id::STRING};
auto in_valids = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i != 4; });
std::vector<char const*> input_values{"this", "is", "null", "of", "", "strings", R"("null")"};
cudf::test::strings_column_wrapper input(input_values.begin(), input_values.end(), in_valids);
auto column = cudf::strings_column_view(input);
rmm::device_uvector<cudf::size_type> svs_length = string_offset_to_length(column, stream);
auto null_mask_it = no_nulls();
auto null_mask =
std::get<0>(cudf::test::detail::make_null_mask(null_mask_it, null_mask_it + column.size()));
auto str_col = cudf::io::json::detail::parse_data(
column.chars().data<char>(),
thrust::make_zip_iterator(thrust::make_tuple(column.offsets_begin(), svs_length.begin())),
column.size(),
type,
std::move(null_mask),
0,
default_json_options().view(),
stream,
mr);
auto out_valids =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i != 2 and i != 4; });
std::vector<char const*> expected_values{"this", "is", "", "of", "", "strings", "null"};
cudf::test::strings_column_wrapper expected(
expected_values.begin(), expected_values.end(), out_valids);
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(str_col->view(), expected);
}
TEST_F(JSONTypeCastTest, Int)
{
auto const stream = cudf::get_default_stream();
auto mr = rmm::mr::get_current_device_resource();
auto const type = cudf::data_type{cudf::type_id::INT64};
cudf::test::strings_column_wrapper data({"1", "null", "3", "true", "5", "false"});
auto column = cudf::strings_column_view(data);
rmm::device_uvector<cudf::size_type> svs_length = string_offset_to_length(column, stream);
auto null_mask_it = no_nulls();
auto null_mask =
std::get<0>(cudf::test::detail::make_null_mask(null_mask_it, null_mask_it + column.size()));
auto col = cudf::io::json::detail::parse_data(
column.chars().data<char>(),
thrust::make_zip_iterator(thrust::make_tuple(column.offsets_begin(), svs_length.begin())),
column.size(),
type,
std::move(null_mask),
0,
default_json_options().view(),
stream,
mr);
auto expected =
cudf::test::fixed_width_column_wrapper<int64_t>{{1, 2, 3, 1, 5, 0}, {1, 0, 1, 1, 1, 1}};
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(col->view(), expected);
}
TEST_F(JSONTypeCastTest, StringEscapes)
{
auto const stream = cudf::get_default_stream();
auto mr = rmm::mr::get_current_device_resource();
auto const type = cudf::data_type{cudf::type_id::STRING};
cudf::test::strings_column_wrapper data({
R"("\uD83D\uDE80")",
R"("\uff21\ud83d\ude80\uff21\uff21")",
R"("invalid char being escaped escape char\-")",
R"("too few hex digits \u12")",
R"("too few hex digits for surrogate pair \uD83D\uDE")",
R"("\u005C")",
R"("\u27A9")",
R"("escape with nothing to escape \")",
R"("\"\\\/\b\f\n\r\t")",
});
auto column = cudf::strings_column_view(data);
rmm::device_uvector<cudf::size_type> svs_length = string_offset_to_length(column, stream);
auto null_mask_it = no_nulls();
auto null_mask =
std::get<0>(cudf::test::detail::make_null_mask(null_mask_it, null_mask_it + column.size()));
auto col = cudf::io::json::detail::parse_data(
column.chars().data<char>(),
thrust::make_zip_iterator(thrust::make_tuple(column.offsets_begin(), svs_length.begin())),
column.size(),
type,
std::move(null_mask),
0,
default_json_options().view(),
stream,
mr);
auto expected = cudf::test::strings_column_wrapper{
{"🚀", "A🚀AA", "", "", "", "\\", "➩", "", "\"\\/\b\f\n\r\t"},
{true, true, false, false, false, true, true, false, true}};
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(col->view(), expected);
}
TEST_F(JSONTypeCastTest, ErrorNulls)
{
auto const stream = cudf::get_default_stream();
auto mr = rmm::mr::get_current_device_resource();
auto const type = cudf::data_type{cudf::type_id::STRING};
// error in decoding
std::vector<char const*> input_values{R"("\"\a")",
R"("\u")",
R"("\u0")",
R"("\u0b")",
R"("\u00b")",
R"("\u00bz")",
R"("\t34567890123456\t9012345678901\ug0bc")",
R"("\t34567890123456\t90123456789012\u0hbc")",
R"("\t34567890123456\t90123456789012\u00ic")",
R"("\t34567890123456\t9012345678901\")",
R"("\t34567890123456\t90123456789012\")",
R"(null)"};
// Note: values without enclosing quotes are copied through without escape decoding
cudf::test::strings_column_wrapper input(input_values.begin(), input_values.end());
auto column = cudf::strings_column_view(input);
auto space_length = 128;
auto prepend_space = [&space_length](auto const& s) {
if (s[0] == '"') return "\"" + std::string(space_length, ' ') + std::string(s + 1);
return std::string(s);
};
std::vector<std::string> small_input;
std::transform(
input_values.begin(), input_values.end(), std::back_inserter(small_input), prepend_space);
cudf::test::strings_column_wrapper small_col(small_input.begin(), small_input.end());
std::vector<std::string> large_input;
space_length = 128 * 128;
std::transform(
input_values.begin(), input_values.end(), std::back_inserter(large_input), prepend_space);
cudf::test::strings_column_wrapper large_col(large_input.begin(), large_input.end());
std::vector<char const*> expected_values{"", "", "", "", "", "", "", "", "", "", "", ""};
cudf::test::strings_column_wrapper expected(
expected_values.begin(), expected_values.end(), cudf::test::iterators::all_nulls());
// Exercise the single-thread, warp, and block parsing paths via short, medium, and long strings.
for (auto const& column :
{column, cudf::strings_column_view(small_col), cudf::strings_column_view(large_col)}) {
rmm::device_uvector<cudf::size_type> svs_length = string_offset_to_length(column, stream);
auto null_mask_it = no_nulls();
auto null_mask =
std::get<0>(cudf::test::detail::make_null_mask(null_mask_it, null_mask_it + column.size()));
auto str_col = cudf::io::json::detail::parse_data(
column.chars().data<char>(),
thrust::make_zip_iterator(thrust::make_tuple(column.offsets_begin(), svs_length.begin())),
column.size(),
type,
std::move(null_mask),
0,
default_json_options().view(),
stream,
mr);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(str_col->view(), expected);
}
}
CUDF_TEST_PROGRAM_MAIN()
| 0 |
rapidsai_public_repos/cudf/cpp/tests
|
rapidsai_public_repos/cudf/cpp/tests/io/metadata_utilities.cpp
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_test/io_metadata_utilities.hpp>
#include <gmock/gmock.h>
namespace cudf::test {
void expect_metadata_equal(cudf::io::table_input_metadata in_meta,
cudf::io::table_metadata out_meta)
{
std::function<void(cudf::io::column_name_info, cudf::io::column_in_metadata)> compare_names =
[&](cudf::io::column_name_info out_col, cudf::io::column_in_metadata in_col) {
if (not in_col.get_name().empty()) { EXPECT_EQ(out_col.name, in_col.get_name()); }
ASSERT_EQ(out_col.children.size(), in_col.num_children());
for (size_t i = 0; i < out_col.children.size(); ++i) {
compare_names(out_col.children[i], in_col.child(i));
}
};
ASSERT_EQ(out_meta.schema_info.size(), in_meta.column_metadata.size());
for (size_t i = 0; i < out_meta.schema_info.size(); ++i) {
compare_names(out_meta.schema_info[i], in_meta.column_metadata[i]);
}
}
void expect_metadata_equal(cudf::io::table_metadata lhs_meta, cudf::io::table_metadata rhs_meta)
{
std::function<void(cudf::io::column_name_info, cudf::io::column_name_info)> compare_names =
[&](cudf::io::column_name_info lhs, cudf::io::column_name_info rhs) {
// Ensure column names match
EXPECT_EQ(lhs.name, rhs.name);
// Ensure number of child columns match
ASSERT_EQ(lhs.children.size(), rhs.children.size());
for (size_t i = 0; i < lhs.children.size(); ++i) {
compare_names(lhs.children[i], rhs.children[i]);
}
};
// Ensure the number of columns at the root level matches
ASSERT_EQ(lhs_meta.schema_info.size(), rhs_meta.schema_info.size());
// Recurse for each column making sure their names and descendants match
for (size_t i = 0; i < rhs_meta.schema_info.size(); ++i) {
compare_names(lhs_meta.schema_info[i], rhs_meta.schema_info[i]);
}
}
} // namespace cudf::test
| 0 |
rapidsai_public_repos/cudf/cpp/tests
|
rapidsai_public_repos/cudf/cpp/tests/io/json_writer.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/default_stream.hpp>
#include <cudf_test/iterator_utilities.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/io/json.hpp>
#include <cudf/io/types.hpp>
#include <cudf/types.hpp>
#include <cudf/unary.hpp>
#include <string>
#include <vector>
struct JsonWriterTest : public cudf::test::BaseFixture {};
TEST_F(JsonWriterTest, EmptyInput)
{
cudf::test::strings_column_wrapper col1;
cudf::test::strings_column_wrapper col2;
cudf::test::fixed_width_column_wrapper<int> col3;
cudf::test::fixed_width_column_wrapper<float> col4;
cudf::test::fixed_width_column_wrapper<int16_t> col5;
cudf::table_view tbl_view{{col1, col2, col3, col4}};
cudf::io::table_metadata mt{{{"col1"}, {"col2"}, {"int"}, {"float"}, {"int16"}}};
std::vector<char> out_buffer;
auto destination = cudf::io::sink_info(&out_buffer);
auto out_options = cudf::io::json_writer_options_builder(destination, tbl_view)
.include_nulls(true)
.metadata(mt)
.lines(false)
.na_rep("null")
.build();
// Empty columns in table
cudf::io::write_json(
out_options, cudf::test::get_default_stream(), rmm::mr::get_current_device_resource());
std::string const expected = R"([])";
EXPECT_EQ(expected, std::string(out_buffer.data(), out_buffer.size()));
// Empty columns in table - JSON Lines
out_buffer.clear();
out_options.enable_lines(true);
cudf::io::write_json(
out_options, cudf::test::get_default_stream(), rmm::mr::get_current_device_resource());
std::string const expected_lines = "\n";
EXPECT_EQ(expected_lines, std::string(out_buffer.data(), out_buffer.size()));
// Empty table - JSON Lines
cudf::table_view tbl_view2{};
out_options.set_table(tbl_view2);
out_buffer.clear();
cudf::io::write_json(
out_options, cudf::test::get_default_stream(), rmm::mr::get_current_device_resource());
EXPECT_EQ(expected_lines, std::string(out_buffer.data(), out_buffer.size()));
}
TEST_F(JsonWriterTest, ErrorCases)
{
cudf::test::strings_column_wrapper col1{"a", "b", "c"};
cudf::test::strings_column_wrapper col2{"d", "e", "f"};
cudf::test::fixed_width_column_wrapper<int> col3{1, 2, 3};
cudf::test::fixed_width_column_wrapper<float> col4{1.5, 2.5, 3.5};
cudf::test::fixed_width_column_wrapper<int16_t> col5{{1, 2, 3},
cudf::test::iterators::nulls_at({0, 2})};
cudf::table_view tbl_view{{col1, col2, col3, col4, col5}};
cudf::io::table_metadata mt{{{"col1"}, {"col2"}, {"int"}, {"float"}}};
std::vector<char> out_buffer;
auto destination = cudf::io::sink_info(&out_buffer);
auto out_options = cudf::io::json_writer_options_builder(destination, tbl_view)
.include_nulls(true)
.metadata(mt)
.lines(false)
.na_rep("null")
.build();
// not enough column names
EXPECT_THROW(
cudf::io::write_json(
out_options, cudf::test::get_default_stream(), rmm::mr::get_current_device_resource()),
cudf::logic_error);
mt.schema_info.emplace_back("int16");
out_options.set_metadata(mt);
EXPECT_NO_THROW(cudf::io::write_json(
out_options, cudf::test::get_default_stream(), rmm::mr::get_current_device_resource()));
// chunk_rows must be at least 8
out_options.set_rows_per_chunk(0);
EXPECT_THROW(
cudf::io::write_json(
out_options, cudf::test::get_default_stream(), rmm::mr::get_current_device_resource()),
cudf::logic_error);
}
TEST_F(JsonWriterTest, PlainTable)
{
cudf::test::strings_column_wrapper col1{"a", "b", "c"};
cudf::test::strings_column_wrapper col2{"d", "e", "f"};
cudf::test::fixed_width_column_wrapper<int> col3{1, 2, 3};
cudf::test::fixed_width_column_wrapper<float> col4{1.5, 2.5, 3.5};
cudf::test::fixed_width_column_wrapper<int16_t> col5{{1, 2, 3},
cudf::test::iterators::nulls_at({0, 2})};
cudf::table_view tbl_view{{col1, col2, col3, col4, col5}};
cudf::io::table_metadata mt{{{"col1"}, {"col2"}, {"int"}, {"float"}, {"int16"}}};
std::vector<char> out_buffer;
auto destination = cudf::io::sink_info(&out_buffer);
auto options_builder = cudf::io::json_writer_options_builder(destination, tbl_view)
.include_nulls(true)
.metadata(mt)
.lines(false)
.na_rep("null");
cudf::io::write_json(options_builder.build(),
cudf::test::get_default_stream(),
rmm::mr::get_current_device_resource());
std::string const expected =
R"([{"col1":"a","col2":"d","int":1,"float":1.5,"int16":null},{"col1":"b","col2":"e","int":2,"float":2.5,"int16":2},{"col1":"c","col2":"f","int":3,"float":3.5,"int16":null}])";
EXPECT_EQ(expected, std::string(out_buffer.data(), out_buffer.size()));
}
TEST_F(JsonWriterTest, SimpleNested)
{
std::string const data = R"(
{"a": 1, "b": 2, "c": {"d": 3 }, "f": 5.5, "g": [1]}
{"a": 6, "b": 7, "c": {"d": 8 }, "f": 10.5, "g": null}
{"a": 1, "b": 2, "c": { "e": 4}, "f": 5.5, "g": [2, null]}
{"a": 6, "b": 7, "c": { "e": 9}, "f": 10.5, "g": [3, 4, 5]} )";
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{data.data(), data.size()})
.lines(true);
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
cudf::table_view tbl_view = result.tbl->view();
cudf::io::table_metadata mt{result.metadata};
std::vector<char> out_buffer;
auto destination = cudf::io::sink_info(&out_buffer);
auto options_builder = cudf::io::json_writer_options_builder(destination, tbl_view)
.include_nulls(false)
.metadata(mt)
.lines(true)
.na_rep("null");
cudf::io::write_json(options_builder.build(),
cudf::test::get_default_stream(),
rmm::mr::get_current_device_resource());
std::string const expected = R"({"a":1,"b":2,"c":{"d":3},"f":5.5,"g":[1]}
{"a":6,"b":7,"c":{"d":8},"f":10.5}
{"a":1,"b":2,"c":{"e":4},"f":5.5,"g":[2,null]}
{"a":6,"b":7,"c":{"e":9},"f":10.5,"g":[3,4,5]}
)";
EXPECT_EQ(expected, std::string(out_buffer.data(), out_buffer.size()));
}
TEST_F(JsonWriterTest, MixedNested)
{
std::string const data = R"(
{"a": 1, "b": 2, "c": {"d": [3] }, "f": 5.5, "g": [ {"h": 1}]}
{"a": 6, "b": 7, "c": {"d": [8] }, "f": 10.5, "g": null}
{"a": 1, "b": 2, "c": { "e": 4}, "f": 5.5, "g": [{"h": 2}, null]}
{"a": 6, "b": 7, "c": { "e": 9}, "f": 10.5, "g": [{"h": 3}, {"h": 4}, {"h": 5}]} )";
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{data.data(), data.size()})
.lines(true);
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
cudf::table_view tbl_view = result.tbl->view();
cudf::io::table_metadata mt{result.metadata};
std::vector<char> out_buffer;
auto destination = cudf::io::sink_info(&out_buffer);
auto options_builder = cudf::io::json_writer_options_builder(destination, tbl_view)
.include_nulls(false)
.metadata(mt)
.lines(false)
.na_rep("null");
cudf::io::write_json(options_builder.build(),
cudf::test::get_default_stream(),
rmm::mr::get_current_device_resource());
std::string const expected =
R"([{"a":1,"b":2,"c":{"d":[3]},"f":5.5,"g":[{"h":1}]},)"
R"({"a":6,"b":7,"c":{"d":[8]},"f":10.5},)"
R"({"a":1,"b":2,"c":{"e":4},"f":5.5,"g":[{"h":2},null]},)"
R"({"a":6,"b":7,"c":{"e":9},"f":10.5,"g":[{"h":3},{"h":4},{"h":5}]}])";
EXPECT_EQ(expected, std::string(out_buffer.data(), out_buffer.size()));
}
TEST_F(JsonWriterTest, WriteReadNested)
{
using namespace cudf::test::iterators;
using LCW = cudf::test::lists_column_wrapper<int64_t>;
cudf::test::fixed_width_column_wrapper<int> a{1, 6, 1, 6};
cudf::test::fixed_width_column_wrapper<uint8_t> b{2, 7, 2, 7};
cudf::test::fixed_width_column_wrapper<int64_t> d{{3, 8, 0, 0}, nulls_at({2, 3})};
cudf::test::fixed_width_column_wrapper<int64_t> e{{0, 0, 4, 9}, nulls_at({0, 1})};
cudf::test::structs_column_wrapper c{{d, e}};
cudf::test::fixed_width_column_wrapper<float> f{5.5, 10.5, 5.5, 10.5};
LCW g{{LCW{1}, LCW{0}, LCW{{2, 0}, null_at(1)}, LCW{3, 4, 5}}, null_at(1)};
cudf::table_view tbl_view{{a, b, c, f, g}};
cudf::io::table_metadata mt{{{"a"}, {"b"}, {"c"}, {"f"}, {"g"}}};
mt.schema_info[2].children = {{"d"}, {"e"}};
std::vector<char> out_buffer;
auto destination = cudf::io::sink_info(&out_buffer);
auto out_options = cudf::io::json_writer_options_builder(destination, tbl_view)
.include_nulls(false)
.metadata(mt)
.lines(true)
.na_rep("null")
.build();
cudf::io::write_json(
out_options, cudf::test::get_default_stream(), rmm::mr::get_current_device_resource());
std::string const expected = R"({"a":1,"b":2,"c":{"d":3},"f":5.5,"g":[1]}
{"a":6,"b":7,"c":{"d":8},"f":10.5}
{"a":1,"b":2,"c":{"e":4},"f":5.5,"g":[2,null]}
{"a":6,"b":7,"c":{"e":9},"f":10.5,"g":[3,4,5]}
)";
auto const output_string = std::string(out_buffer.data(), out_buffer.size());
EXPECT_EQ(expected, output_string);
// Read back the written JSON, and compare with the original table
// Without type information
auto in_options = cudf::io::json_reader_options::builder(
cudf::io::source_info{output_string.data(), output_string.size()})
.lines(true)
.build();
auto result = cudf::io::read_json(in_options);
auto tbl_out = result.tbl->view();
auto const int64_dtype = cudf::data_type{cudf::type_id::INT64};
auto const double_dtype = cudf::data_type{cudf::type_id::FLOAT64};
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(*cudf::cast(a, int64_dtype), tbl_out.column(0));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(*cudf::cast(b, int64_dtype), tbl_out.column(1));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(c, tbl_out.column(2));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(*cudf::cast(f, double_dtype), tbl_out.column(3));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(g, tbl_out.column(4));
mt.schema_info[4].children = {{"offsets"}, {"element"}}; // list child column names
EXPECT_EQ(mt.schema_info.size(), result.metadata.schema_info.size());
for (auto i = 0UL; i < mt.schema_info.size(); i++) {
EXPECT_EQ(mt.schema_info[i].name, result.metadata.schema_info[i].name) << "[" << i << "]";
EXPECT_EQ(mt.schema_info[i].children.size(), result.metadata.schema_info[i].children.size())
<< "[" << i << "]";
for (auto j = 0UL; j < mt.schema_info[i].children.size(); j++) {
EXPECT_EQ(mt.schema_info[i].children[j].name, result.metadata.schema_info[i].children[j].name)
<< "[" << i << "][" << j << "]";
}
}
// Read with type information
std::map<std::string, cudf::io::schema_element> types;
types["a"] = cudf::io::schema_element{cudf::data_type{cudf::type_id::INT32}};
types["b"] = cudf::io::schema_element{cudf::data_type{cudf::type_id::UINT8}};
types["c"] = cudf::io::schema_element{cudf::data_type{cudf::type_id::STRUCT}};
types["c"].child_types["d"] = cudf::io::schema_element{cudf::data_type{cudf::type_id::INT64}};
types["c"].child_types["e"] = cudf::io::schema_element{cudf::data_type{cudf::type_id::INT64}};
types["f"] = cudf::io::schema_element{cudf::data_type{cudf::type_id::FLOAT32}};
types["g"] = cudf::io::schema_element{cudf::data_type{cudf::type_id::LIST}};
types["g"].child_types["element"] =
cudf::io::schema_element{cudf::data_type{cudf::type_id::INT64}};
in_options.set_dtypes(types);
result = cudf::io::read_json(in_options);
tbl_out = result.tbl->view();
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(a, tbl_out.column(0));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(b, tbl_out.column(1));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(c, tbl_out.column(2));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(f, tbl_out.column(3));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(g, tbl_out.column(4));
EXPECT_EQ(mt.schema_info.size(), result.metadata.schema_info.size());
for (auto i = 0UL; i < mt.schema_info.size(); i++) {
EXPECT_EQ(mt.schema_info[i].name, result.metadata.schema_info[i].name) << "[" << i << "]";
EXPECT_EQ(mt.schema_info[i].children.size(), result.metadata.schema_info[i].children.size())
<< "[" << i << "]";
for (auto j = 0UL; j < mt.schema_info[i].children.size(); j++) {
EXPECT_EQ(mt.schema_info[i].children[j].name, result.metadata.schema_info[i].children[j].name)
<< "[" << i << "][" << j << "]";
}
}
// Without children column names
mt.schema_info[2].children.clear();
out_options.set_metadata(mt);
out_buffer.clear();
cudf::io::write_json(
out_options, cudf::test::get_default_stream(), rmm::mr::get_current_device_resource());
in_options = cudf::io::json_reader_options::builder(
cudf::io::source_info{out_buffer.data(), out_buffer.size()})
.lines(true)
.build();
result = cudf::io::read_json(in_options);
mt.schema_info[2].children = {{"0"}, {"1"}};
EXPECT_EQ(mt.schema_info.size(), result.metadata.schema_info.size());
for (auto i = 0UL; i < mt.schema_info.size(); i++) {
EXPECT_EQ(mt.schema_info[i].name, result.metadata.schema_info[i].name) << "[" << i << "]";
EXPECT_EQ(mt.schema_info[i].children.size(), result.metadata.schema_info[i].children.size())
<< "[" << i << "]";
for (auto j = 0UL; j < mt.schema_info[i].children.size(); j++) {
EXPECT_EQ(mt.schema_info[i].children[j].name, result.metadata.schema_info[i].children[j].name)
<< "[" << i << "][" << j << "]";
}
}
// without column names
out_options.set_metadata(cudf::io::table_metadata{});
out_buffer.clear();
cudf::io::write_json(
out_options, cudf::test::get_default_stream(), rmm::mr::get_current_device_resource());
in_options = cudf::io::json_reader_options::builder(
cudf::io::source_info{out_buffer.data(), out_buffer.size()})
.lines(true)
.build();
result = cudf::io::read_json(in_options);
mt.schema_info = {{"0"}, {"1"}, {"2"}, {"3"}, {"4"}};
mt.schema_info[2].children = {{"0"}, {"1"}};
mt.schema_info[4].children = {{"offsets"}, {"element"}}; // list child column names
EXPECT_EQ(mt.schema_info.size(), result.metadata.schema_info.size());
for (auto i = 0UL; i < mt.schema_info.size(); i++) {
EXPECT_EQ(mt.schema_info[i].name, result.metadata.schema_info[i].name) << "[" << i << "]";
EXPECT_EQ(mt.schema_info[i].children.size(), result.metadata.schema_info[i].children.size())
<< "[" << i << "]";
for (auto j = 0UL; j < mt.schema_info[i].children.size(); j++) {
EXPECT_EQ(mt.schema_info[i].children[j].name, result.metadata.schema_info[i].children[j].name)
<< "[" << i << "][" << j << "]";
}
}
}
TEST_F(JsonWriterTest, SpecialChars)
{
cudf::test::fixed_width_column_wrapper<int> a{1, 6, 1, 6};
cudf::test::strings_column_wrapper b{"abcd", "b\b\f\n\r\t", "\"c\"", "/\\"};
cudf::table_view tbl_view{{a, b}};
cudf::io::table_metadata mt{{{"\"a\""}, {"\'b\'"}}};
std::vector<char> out_buffer;
auto destination = cudf::io::sink_info(&out_buffer);
auto out_options = cudf::io::json_writer_options_builder(destination, tbl_view)
.include_nulls(false)
.metadata(mt)
.lines(true)
.na_rep("null")
.build();
cudf::io::write_json(
out_options, cudf::test::get_default_stream(), rmm::mr::get_current_device_resource());
std::string const expected = R"({"\"a\"":1,"'b'":"abcd"}
{"\"a\"":6,"'b'":"b\b\f\n\r\t"}
{"\"a\"":1,"'b'":"\"c\""}
{"\"a\"":6,"'b'":"\/\\"}
)";
auto const output_string = std::string(out_buffer.data(), out_buffer.size());
EXPECT_EQ(expected, output_string);
}
TEST_F(JsonWriterTest, NullList)
{
std::string const data = R"(
{"a": [null], "b": [[1, 2, 3], [null], [null, null, null], [4, null, 5]]}
{"a": [2, null, null, 3] , "b": null}
{"a": [null, null, 4], "b": [[2, null], null]}
{"a": [5, null, null], "b": [null, [3, 4, 5]]} )";
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{data.data(), data.size()})
.lines(true);
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
cudf::table_view tbl_view = result.tbl->view();
cudf::io::table_metadata mt{result.metadata};
std::vector<char> out_buffer;
auto destination = cudf::io::sink_info(&out_buffer);
auto options_builder = cudf::io::json_writer_options_builder(destination, tbl_view)
.include_nulls(true)
.metadata(mt)
.lines(true)
.na_rep("null");
cudf::io::write_json(options_builder.build(),
cudf::test::get_default_stream(),
rmm::mr::get_current_device_resource());
std::string const expected = R"({"a":[null],"b":[[1,2,3],[null],[null,null,null],[4,null,5]]}
{"a":[2,null,null,3],"b":null}
{"a":[null,null,4],"b":[[2,null],null]}
{"a":[5,null,null],"b":[null,[3,4,5]]}
)";
EXPECT_EQ(expected, std::string(out_buffer.data(), out_buffer.size()));
}
TEST_F(JsonWriterTest, ChunkedNested)
{
std::string const data = R"(
{"a": 1, "b": -2, "c": { }, "e": [{"f": 1}]}
{"a": 2, "b": -2, "c": { }, "e": null}
{"a": 3, "b": -2, "c": {"d": 9}, "e": [{"f": 2}, null]}
{"a": 4, "b": -2, "c": {"d": 16}, "e": [{"f": 3}, {"f": 4}, {"f": 5}]}
{"a": 5, "b": -2, "c": { }, "e": []}
{"a": 6, "b": -2, "c": {"d": 36}, "e": [{"f": 6}]}
{"a": 7, "b": -2, "c": {"d": 49}, "e": [{"f": 7}]}
{"a": 8, "b": -2, "c": {"d": 64}, "e": [{"f": 8}]}
{"a": 9, "b": -2, "c": {"d": 81}, "e": [{"f": 9}]}
)";
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{data.data(), data.size()})
.lines(true);
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
cudf::table_view tbl_view = result.tbl->view();
cudf::io::table_metadata mt{result.metadata};
std::vector<char> out_buffer;
auto destination = cudf::io::sink_info(&out_buffer);
auto options_builder = cudf::io::json_writer_options_builder(destination, tbl_view)
.include_nulls(false)
.metadata(mt)
.lines(true)
.na_rep("null")
.rows_per_chunk(8);
cudf::io::write_json(options_builder.build(),
cudf::test::get_default_stream(),
rmm::mr::get_current_device_resource());
std::string const expected =
R"({"a":1,"b":-2,"c":{},"e":[{"f":1}]}
{"a":2,"b":-2,"c":{}}
{"a":3,"b":-2,"c":{"d":9},"e":[{"f":2},null]}
{"a":4,"b":-2,"c":{"d":16},"e":[{"f":3},{"f":4},{"f":5}]}
{"a":5,"b":-2,"c":{},"e":[]}
{"a":6,"b":-2,"c":{"d":36},"e":[{"f":6}]}
{"a":7,"b":-2,"c":{"d":49},"e":[{"f":7}]}
{"a":8,"b":-2,"c":{"d":64},"e":[{"f":8}]}
{"a":9,"b":-2,"c":{"d":81},"e":[{"f":9}]}
)";
EXPECT_EQ(expected, std::string(out_buffer.data(), out_buffer.size()));
}
TEST_F(JsonWriterTest, StructAllNullCombinations)
{
auto const_1_iter = thrust::make_constant_iterator(1);
auto col_a = cudf::test::fixed_width_column_wrapper<int>(
const_1_iter, const_1_iter + 32, cudf::detail::make_counting_transform_iterator(0, [](auto i) {
return i / 16;
}));
auto col_b = cudf::test::fixed_width_column_wrapper<int>(
const_1_iter, const_1_iter + 32, cudf::detail::make_counting_transform_iterator(0, [](auto i) {
return (i / 8) % 2;
}));
auto col_c = cudf::test::fixed_width_column_wrapper<int>(
const_1_iter, const_1_iter + 32, cudf::detail::make_counting_transform_iterator(0, [](auto i) {
return (i / 4) % 2;
}));
auto col_d = cudf::test::fixed_width_column_wrapper<int>(
const_1_iter, const_1_iter + 32, cudf::detail::make_counting_transform_iterator(0, [](auto i) {
return (i / 2) % 2;
}));
auto col_e = cudf::test::fixed_width_column_wrapper<int>(
const_1_iter, const_1_iter + 32, cudf::detail::make_counting_transform_iterator(0, [](auto i) {
return i % 2;
}));
// The table has 32 rows with validity from 00000 to 11111
cudf::table_view tbl_view = cudf::table_view({col_a, col_b, col_c, col_d, col_e});
cudf::io::table_metadata mt{{{"a"}, {"b"}, {"c"}, {"d"}, {"e"}}};
std::vector<char> out_buffer;
auto destination = cudf::io::sink_info(&out_buffer);
auto options_builder = cudf::io::json_writer_options_builder(destination, tbl_view)
.include_nulls(false)
.metadata(mt)
.lines(true)
.na_rep("null");
cudf::io::write_json(options_builder.build(),
cudf::test::get_default_stream(),
rmm::mr::get_current_device_resource());
std::string const expected = R"({}
{"e":1}
{"d":1}
{"d":1,"e":1}
{"c":1}
{"c":1,"e":1}
{"c":1,"d":1}
{"c":1,"d":1,"e":1}
{"b":1}
{"b":1,"e":1}
{"b":1,"d":1}
{"b":1,"d":1,"e":1}
{"b":1,"c":1}
{"b":1,"c":1,"e":1}
{"b":1,"c":1,"d":1}
{"b":1,"c":1,"d":1,"e":1}
{"a":1}
{"a":1,"e":1}
{"a":1,"d":1}
{"a":1,"d":1,"e":1}
{"a":1,"c":1}
{"a":1,"c":1,"e":1}
{"a":1,"c":1,"d":1}
{"a":1,"c":1,"d":1,"e":1}
{"a":1,"b":1}
{"a":1,"b":1,"e":1}
{"a":1,"b":1,"d":1}
{"a":1,"b":1,"d":1,"e":1}
{"a":1,"b":1,"c":1}
{"a":1,"b":1,"c":1,"e":1}
{"a":1,"b":1,"c":1,"d":1}
{"a":1,"b":1,"c":1,"d":1,"e":1}
)";
EXPECT_EQ(expected, std::string(out_buffer.data(), out_buffer.size()));
}
TEST_F(JsonWriterTest, Unicode)
{
// UTF-8, UTF-16
cudf::test::strings_column_wrapper col1{"\"\\/\b\f\n\r\t", "ராபிட்ஸ்", "$€𐐷𤭢", "C𝞵𝓓𝒻"};
// Unicode
// 0000-FFFF Basic Multilingual Plane
// 10000-10FFFF Supplementary Plane
cudf::test::strings_column_wrapper col2{
"CႮ≪ㇳ䍏凹沦王辿龸ꁗ믜스폶ﴠ", // 0000-FFFF
"𐀀𑿪𒐦𓃰𔙆 𖦆𗿿𘳕𚿾[↳] 𜽆𝓚𞤁🄰", // 10000-1FFFF
"𠘨𡥌𢗉𣇊𤊩𥅽𦉱𧴱𨁲𩁹𪐢𫇭𬬭𭺷𮊦屮", // 20000-2FFFF
"𰾑𱔈𲍉"}; // 30000-3FFFF
cudf::test::fixed_width_column_wrapper<int16_t> col3{{1, 2, 3, 4},
cudf::test::iterators::nulls_at({0, 2})};
cudf::table_view tbl_view{{col1, col2, col3}};
cudf::io::table_metadata mt{{{"col1"}, {"col2"}, {"int16"}}};
std::vector<char> out_buffer;
auto destination = cudf::io::sink_info(&out_buffer);
auto options_builder = cudf::io::json_writer_options_builder(destination, tbl_view)
.include_nulls(true)
.metadata(mt)
.lines(true)
.na_rep("null");
cudf::io::write_json(options_builder.build(),
cudf::test::get_default_stream(),
rmm::mr::get_current_device_resource());
std::string const expected =
R"({"col1":"\"\\\/\b\f\n\r\t","col2":"C\u10ae\u226a\u31f3\u434f\u51f9\u6ca6\u738b\u8fbf\u9fb8\ua057\ubbdc\uc2a4\ud3f6\ue4fe\ufd20","int16":null}
{"col1":"\u0bb0\u0bbe\u0baa\u0bbf\u0b9f\u0bcd\u0bb8\u0bcd","col2":"\ud800\udc00\ud807\udfea\ud809\udc26\ud80c\udcf0\ud811\ude46 \ud81a\udd86\ud81f\udfff\ud823\udcd5\ud82b\udffe[\u21b3] \ud833\udf46\ud835\udcda\ud83a\udd01\ud83c\udd30","int16":2}
{"col1":"$\u20ac\ud801\udc37\ud852\udf62","col2":"\ud841\ude28\ud846\udd4c\ud849\uddc9\ud84c\uddca\ud850\udea9\ud854\udd7d\ud858\ude71\ud85f\udd31\ud860\udc72\ud864\udc79\ud869\udc22\ud86c\udded\ud872\udf2d\ud877\udeb7\ud878\udea6\u5c6e","int16":null}
{"col1":"C\ud835\udfb5\ud835\udcd3\ud835\udcbb","col2":"\ud883\udf91\ud885\udd08\ud888\udf49","int16":4}
)";
EXPECT_EQ(expected, std::string(out_buffer.data(), out_buffer.size()));
}
CUDF_TEST_PROGRAM_MAIN()
| 0 |
rapidsai_public_repos/cudf/cpp/tests
|
rapidsai_public_repos/cudf/cpp/tests/io/parquet_chunked_reader_test.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/io_metadata_utilities.hpp>
#include <cudf_test/iterator_utilities.hpp>
#include <cudf_test/table_utilities.hpp>
#include <cudf_test/type_lists.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/concatenate.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/structs/utilities.hpp>
#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/io/data_sink.hpp>
#include <cudf/io/datasource.hpp>
#include <cudf/io/parquet.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/span.hpp>
#include <src/io/parquet/compact_protocol_reader.hpp>
#include <src/io/parquet/parquet.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <rmm/cuda_stream_view.hpp>
#include <fstream>
#include <type_traits>
namespace {
// Global environment for temporary files
auto const temp_env = static_cast<cudf::test::TempDirTestEnvironment*>(
::testing::AddGlobalTestEnvironment(new cudf::test::TempDirTestEnvironment));
using int32s_col = cudf::test::fixed_width_column_wrapper<int32_t>;
using int64s_col = cudf::test::fixed_width_column_wrapper<int64_t>;
using strings_col = cudf::test::strings_column_wrapper;
using structs_col = cudf::test::structs_column_wrapper;
using int32s_lists_col = cudf::test::lists_column_wrapper<int32_t>;
auto write_file(std::vector<std::unique_ptr<cudf::column>>& input_columns,
std::string const& filename,
bool nullable,
std::size_t max_page_size_bytes = cudf::io::default_max_page_size_bytes,
std::size_t max_page_size_rows = cudf::io::default_max_page_size_rows)
{
// Just shift nulls of the next column by one position to avoid having all nulls in the same
// table rows.
if (nullable) {
// Generate deterministic bitmask instead of random bitmask for easy computation of data size.
auto const valid_iter = cudf::detail::make_counting_transform_iterator(
0, [](cudf::size_type i) { return i % 4 != 3; });
cudf::size_type offset{0};
for (auto& col : input_columns) {
auto const [null_mask, null_count] =
cudf::test::detail::make_null_mask(valid_iter + offset, valid_iter + col->size() + offset);
col = cudf::structs::detail::superimpose_nulls(
static_cast<cudf::bitmask_type const*>(null_mask.data()),
null_count,
std::move(col),
cudf::get_default_stream(),
rmm::mr::get_current_device_resource());
}
}
auto input_table = std::make_unique<cudf::table>(std::move(input_columns));
auto filepath =
temp_env->get_temp_filepath(nullable ? filename + "_nullable.parquet" : filename + ".parquet");
auto const write_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, *input_table)
.max_page_size_bytes(max_page_size_bytes)
.max_page_size_rows(max_page_size_rows)
.max_page_fragment_size(cudf::io::default_max_page_fragment_size)
.build();
cudf::io::write_parquet(write_opts);
return std::pair{std::move(input_table), std::move(filepath)};
}
auto chunked_read(std::string const& filepath,
std::size_t output_limit,
std::size_t input_limit = 0)
{
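// Read the file back in chunks: `output_limit` caps the approximate byte size of each table
// returned by read_chunk(), while a nonzero `input_limit` is assumed to additionally bound how
// much input data the reader processes per pass; 0 means "no limit" for either value.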
auto const read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath}).build();
auto reader = cudf::io::chunked_parquet_reader(output_limit, input_limit, read_opts);
auto num_chunks = 0;
auto out_tables = std::vector<std::unique_ptr<cudf::table>>{};
do {
auto chunk = reader.read_chunk();
// If the input file is empty, the first call to `read_chunk` will return an empty table.
// Thus, we only check for a non-empty output table from the second call onward.
if (num_chunks > 0) {
CUDF_EXPECTS(chunk.tbl->num_rows() != 0, "Number of rows in the new chunk is zero.");
}
++num_chunks;
out_tables.emplace_back(std::move(chunk.tbl));
} while (reader.has_next());
auto out_tviews = std::vector<cudf::table_view>{};
for (auto const& tbl : out_tables) {
out_tviews.emplace_back(tbl->view());
}
return std::pair(cudf::concatenate(out_tviews), num_chunks);
}
} // namespace
struct ParquetChunkedReaderTest : public cudf::test::BaseFixture {};
TEST_F(ParquetChunkedReaderTest, TestChunkedReadNoData)
{
std::vector<std::unique_ptr<cudf::column>> input_columns;
input_columns.emplace_back(int32s_col{}.release());
input_columns.emplace_back(int64s_col{}.release());
auto const [expected, filepath] = write_file(input_columns, "chunked_read_empty", false);
auto const [result, num_chunks] = chunked_read(filepath, 1'000);
EXPECT_EQ(num_chunks, 1);
EXPECT_EQ(result->num_rows(), 0);
EXPECT_EQ(result->num_columns(), 2);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
TEST_F(ParquetChunkedReaderTest, TestChunkedReadSimpleData)
{
auto constexpr num_rows = 40'000;
auto const generate_input = [num_rows](bool nullable) {
std::vector<std::unique_ptr<cudf::column>> input_columns;
auto const value_iter = thrust::make_counting_iterator(0);
input_columns.emplace_back(int32s_col(value_iter, value_iter + num_rows).release());
input_columns.emplace_back(int64s_col(value_iter, value_iter + num_rows).release());
return write_file(input_columns, "chunked_read_simple", nullable);
};
{
auto const [expected, filepath] = generate_input(false);
auto const [result, num_chunks] = chunked_read(filepath, 240'000);
EXPECT_EQ(num_chunks, 2);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
{
auto const [expected, filepath] = generate_input(true);
auto const [result, num_chunks] = chunked_read(filepath, 240'000);
EXPECT_EQ(num_chunks, 2);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
}
TEST_F(ParquetChunkedReaderTest, TestChunkedReadBoundaryCases)
{
// Tests some specific boundary conditions in the split calculations.
auto constexpr num_rows = 40'000;
auto const [expected, filepath] = [num_rows]() {
std::vector<std::unique_ptr<cudf::column>> input_columns;
auto const value_iter = thrust::make_counting_iterator(0);
input_columns.emplace_back(int32s_col(value_iter, value_iter + num_rows).release());
return write_file(input_columns, "chunked_read_simple_boundary", false /*nullable*/);
}();
// Test with zero limit: everything will be read in one chunk
{
auto const [result, num_chunks] = chunked_read(filepath, 0);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
// Test with a very small limit: 1 byte
{
auto const [result, num_chunks] = chunked_read(filepath, 1);
EXPECT_EQ(num_chunks, 2);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
// Test with a very large limit
{
auto const [result, num_chunks] = chunked_read(filepath, 2L << 40);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
// Test with a limit slightly less than one page of data
{
auto const [result, num_chunks] = chunked_read(filepath, 79'000);
EXPECT_EQ(num_chunks, 2);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
// Test with a limit exactly the size of one page of data
{
auto const [result, num_chunks] = chunked_read(filepath, 80'000);
EXPECT_EQ(num_chunks, 2);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
// Test with a limit slightly more than the size of one page of data
{
auto const [result, num_chunks] = chunked_read(filepath, 81'000);
EXPECT_EQ(num_chunks, 2);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
// Test with a limit slightly less than two pages of data
{
auto const [result, num_chunks] = chunked_read(filepath, 159'000);
EXPECT_EQ(num_chunks, 2);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
// Test with a limit exactly the size of two pages of data minus one byte
{
auto const [result, num_chunks] = chunked_read(filepath, 159'999);
EXPECT_EQ(num_chunks, 2);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
// Test with a limit exactly the size of two pages of data
{
auto const [result, num_chunks] = chunked_read(filepath, 160'000);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
// Test with a limit slightly more than the size of two pages of data
{
auto const [result, num_chunks] = chunked_read(filepath, 161'000);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
}
TEST_F(ParquetChunkedReaderTest, TestChunkedReadWithString)
{
auto constexpr num_rows = 60'000;
auto const generate_input = [num_rows](bool nullable) {
std::vector<std::unique_ptr<cudf::column>> input_columns;
auto const value_iter = thrust::make_counting_iterator(0);
// ints Page total bytes cumulative bytes
// 20000 rows of 4 bytes each = A0 80000 80000
// 20000 rows of 4 bytes each = A1 80000 160000
// 20000 rows of 4 bytes each = A2 80000 240000
input_columns.emplace_back(int32s_col(value_iter, value_iter + num_rows).release());
// strings Page total bytes cumulative bytes
// 20000 rows of 1 char each (20000 + 80004) = B0 100004 100004
// 20000 rows of 4 chars each (80000 + 80004) = B1 160004 260008
// 20000 rows of 16 chars each (320000 + 80004) = B2 400004 660012
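// The extra 80004 bytes per string page above are presumably the 4-byte string offsets: 20001
// offsets for a 20000-row page, added on top of the character data.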
auto const strings = std::vector<std::string>{"a", "bbbb", "cccccccccccccccc"};
auto const str_iter = cudf::detail::make_counting_transform_iterator(0, [&](int32_t i) {
if (i < 20000) { return strings[0]; }
if (i < 40000) { return strings[1]; }
return strings[2];
});
input_columns.emplace_back(strings_col(str_iter, str_iter + num_rows).release());
// Cumulative sizes:
// A0 + B0 : 180004
// A1 + B1 : 420008
// A2 + B2 : 900012
// skip_rows / num_rows
// byte_limit==500000 should give 2 chunks: {0, 40000}, {40000, 20000}
// byte_limit==1000000 should give 1 chunk: {0, 60000}
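// The reader appears to split on page boundaries using the cumulative sizes above: the first two
// page pairs total 420008 bytes, which fits under a 500000-byte limit, while adding A2 + B2 would
// exceed it, hence the {0, 40000} / {40000, 20000} split.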
return write_file(input_columns,
"chunked_read_with_strings",
nullable,
512 * 1024, // 512KB per page
20000 // 20k rows per page
);
};
auto const [expected_no_null, filepath_no_null] = generate_input(false);
auto const [expected_with_nulls, filepath_with_nulls] = generate_input(true);
// Test with zero limit: everything will be read in one chunk
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 0);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_with_nulls, 0);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_with_nulls, *result);
}
// Test with a very small limit: 1 byte
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 1);
EXPECT_EQ(num_chunks, 3);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_with_nulls, 1);
EXPECT_EQ(num_chunks, 3);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_with_nulls, *result);
}
// Test with a very large limit
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 2L << 40);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_with_nulls, 2L << 40);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_with_nulls, *result);
}
// Other tests:
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 500'000);
EXPECT_EQ(num_chunks, 2);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_with_nulls, 500'000);
EXPECT_EQ(num_chunks, 2);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_with_nulls, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 1'000'000);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_with_nulls, 1'000'000);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_with_nulls, *result);
}
}
TEST_F(ParquetChunkedReaderTest, TestChunkedReadWithStringPrecise)
{
auto constexpr num_rows = 60'000;
auto const generate_input = [num_rows](bool nullable) {
std::vector<std::unique_ptr<cudf::column>> input_columns;
// strings Page total bytes cumulative
// 20000 rows alternating 1-4 chars each (50000 + 80004) A0 130004 130004
// 20000 rows alternating 1-4 chars each (50000 + 80004) A1 130004 260008
// ...
auto const strings = std::vector<std::string>{"a", "bbbb"};
auto const str_iter =
cudf::detail::make_counting_transform_iterator(0, [&](int32_t i) { return strings[i % 2]; });
input_columns.emplace_back(strings_col(str_iter, str_iter + num_rows).release());
// Cumulative sizes:
// A0 : 130004
// A1 : 260008
// A2 : 390012
return write_file(input_columns,
"chunked_read_with_strings_precise",
nullable,
512 * 1024, // 512KB per page
20000 // 20k rows per page
);
};
auto const [expected_no_null, filepath_no_null] = generate_input(false);
// a chunk limit of 1 byte less than 2 pages should force it to produce 3 chunks:
// each 1 page in size
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 260'007);
EXPECT_EQ(num_chunks, 3);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
// a chunk limit exactly equal to 2 pages should force it to produce 2 chunks
// pages 0-1 and page 2
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 260'008);
EXPECT_EQ(num_chunks, 2);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
}
TEST_F(ParquetChunkedReaderTest, TestChunkedReadWithStructs)
{
auto constexpr num_rows = 100'000;
auto const generate_input = [num_rows](bool nullable) {
std::vector<std::unique_ptr<cudf::column>> input_columns;
auto const int_iter = thrust::make_counting_iterator(0);
input_columns.emplace_back(int32s_col(int_iter, int_iter + num_rows).release());
input_columns.emplace_back([=] {
auto child1 = int32s_col(int_iter, int_iter + num_rows);
auto child2 = int32s_col(int_iter + num_rows, int_iter + num_rows * 2);
auto const str_iter = cudf::detail::make_counting_transform_iterator(
0, [&](int32_t i) { return std::to_string(i); });
auto child3 = strings_col{str_iter, str_iter + num_rows};
return structs_col{{child1, child2, child3}}.release();
}());
return write_file(input_columns,
"chunked_read_with_structs",
nullable,
512 * 1024, // 512KB per page
20000 // 20k rows per page
);
};
auto const [expected_no_null, filepath_no_null] = generate_input(false);
auto const [expected_with_nulls, filepath_with_nulls] = generate_input(true);
// Test with zero limit: everything will be read in one chunk
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 0);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_with_nulls, 0);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_with_nulls, *result);
}
// Test with a very small limit: 1 byte
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 1);
EXPECT_EQ(num_chunks, 5);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_with_nulls, 1);
EXPECT_EQ(num_chunks, 5);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_with_nulls, *result);
}
// Test with a very large limit
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 2L << 40);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_with_nulls, 2L << 40);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_with_nulls, *result);
}
// Other tests:
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 500'000);
EXPECT_EQ(num_chunks, 5);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_with_nulls, 500'000);
EXPECT_EQ(num_chunks, 5);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_with_nulls, *result);
}
}
TEST_F(ParquetChunkedReaderTest, TestChunkedReadWithListsNoNulls)
{
auto constexpr num_rows = 100'000;
auto const [expected, filepath] = [num_rows]() {
std::vector<std::unique_ptr<cudf::column>> input_columns;
// 20000 rows in 1 page consist of:
//
// 20001 offsets : 80004 bytes
// 30000 ints : 120000 bytes
// total : 200004 bytes
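// The 30000 ints per page follow from the gather pattern below: list sizes 0, 1, 2, 3 repeat,
// averaging 1.5 elements per row, and 1.5 * 20000 rows = 30000.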
auto const template_lists = int32s_lists_col{
int32s_lists_col{}, int32s_lists_col{0}, int32s_lists_col{1, 2}, int32s_lists_col{3, 4, 5}};
auto const gather_iter =
cudf::detail::make_counting_transform_iterator(0, [&](int32_t i) { return i % 4; });
auto const gather_map = int32s_col(gather_iter, gather_iter + num_rows);
input_columns.emplace_back(
std::move(cudf::gather(cudf::table_view{{template_lists}}, gather_map)->release().front()));
return write_file(input_columns,
"chunked_read_with_lists_no_null",
false /*nullable*/,
512 * 1024, // 512KB per page
20000 // 20k rows per page
);
}();
// Test with zero limit: everything will be read in one chunk
{
auto const [result, num_chunks] = chunked_read(filepath, 0);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
// Test with a very small limit: 1 byte
{
auto const [result, num_chunks] = chunked_read(filepath, 1);
EXPECT_EQ(num_chunks, 5);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
// Test with a very large limit
{
auto const [result, num_chunks] = chunked_read(filepath, 2L << 40);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
// chunk size slightly less than 1 page (forcing it to be at least 1 page per read)
{
auto const [result, num_chunks] = chunked_read(filepath, 200'000);
EXPECT_EQ(num_chunks, 5);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
// chunk size exactly 1 page
{
auto const [result, num_chunks] = chunked_read(filepath, 200'004);
EXPECT_EQ(num_chunks, 5);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
// chunk size 2 pages. 3 chunks (2 pages + 2 pages + 1 page)
{
auto const [result, num_chunks] = chunked_read(filepath, 400'008);
EXPECT_EQ(num_chunks, 3);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
// chunk size 2 pages minus one byte: each chunk will be just one page
{
auto const [result, num_chunks] = chunked_read(filepath, 400'007);
EXPECT_EQ(num_chunks, 5);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
}
TEST_F(ParquetChunkedReaderTest, TestChunkedReadWithListsHavingNulls)
{
auto constexpr num_rows = 100'000;
auto const [expected, filepath] = [num_rows]() {
std::vector<std::unique_ptr<cudf::column>> input_columns;
// 20000 rows in 1 page consist of:
//
// 625 validity words : 2500 bytes (a null every 4 rows: null at indices [3, 7, 11, ...])
// 20001 offsets : 80004 bytes
// 15000 ints : 60000 bytes
// total : 142504 bytes
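// With every fourth list nullified by write_file, each group of 4 rows should contribute only the
// 0 + 1 + 2 = 3 elements of its non-null lists, giving 3 * 5000 = 15000 ints per 20000-row page;
// the 625 validity words are 20000 rows / 32 bits per word.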
auto const template_lists =
int32s_lists_col{// these will all be null
int32s_lists_col{},
int32s_lists_col{0},
int32s_lists_col{1, 2},
int32s_lists_col{3, 4, 5, 6, 7, 8, 9} /* this list will be nullified out */};
auto const gather_iter =
cudf::detail::make_counting_transform_iterator(0, [&](int32_t i) { return i % 4; });
auto const gather_map = int32s_col(gather_iter, gather_iter + num_rows);
input_columns.emplace_back(
std::move(cudf::gather(cudf::table_view{{template_lists}}, gather_map)->release().front()));
return write_file(input_columns,
"chunked_read_with_lists_nulls",
true /*nullable*/,
512 * 1024, // 512KB per page
20000 // 20k rows per page
);
}();
// Test with zero limit: everything will be read in one chunk
{
auto const [result, num_chunks] = chunked_read(filepath, 0);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
// Test with a very small limit: 1 byte
{
auto const [result, num_chunks] = chunked_read(filepath, 1);
EXPECT_EQ(num_chunks, 5);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
// Test with a very large limit
{
auto const [result, num_chunks] = chunked_read(filepath, 2L << 40);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
// chunk size slightly less than 1 page (forcing it to be at least 1 page per read)
{
auto const [result, num_chunks] = chunked_read(filepath, 142'500);
EXPECT_EQ(num_chunks, 5);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
// chunk size exactly 1 page
{
auto const [result, num_chunks] = chunked_read(filepath, 142'504);
EXPECT_EQ(num_chunks, 5);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
// chunk size 2 pages. 3 chunks (2 pages + 2 pages + 1 page)
{
auto const [result, num_chunks] = chunked_read(filepath, 285'008);
EXPECT_EQ(num_chunks, 3);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
// chunk size 2 pages minus 1 byte: each chunk will be just one page
{
auto const [result, num_chunks] = chunked_read(filepath, 285'007);
EXPECT_EQ(num_chunks, 5);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected, *result);
}
}
TEST_F(ParquetChunkedReaderTest, TestChunkedReadWithStructsOfLists)
{
auto constexpr num_rows = 100'000;
auto const generate_input = [num_rows](bool nullable) {
std::vector<std::unique_ptr<cudf::column>> input_columns;
auto const int_iter = thrust::make_counting_iterator(0);
input_columns.emplace_back(int32s_col(int_iter, int_iter + num_rows).release());
input_columns.emplace_back([=] {
std::vector<std::unique_ptr<cudf::column>> child_columns;
child_columns.emplace_back(int32s_col(int_iter, int_iter + num_rows).release());
child_columns.emplace_back(
int32s_col(int_iter + num_rows, int_iter + num_rows * 2).release());
auto const str_iter = cudf::detail::make_counting_transform_iterator(0, [&](int32_t i) {
return std::to_string(i) + "++++++++++++++++++++" + std::to_string(i);
});
child_columns.emplace_back(strings_col{str_iter, str_iter + num_rows}.release());
auto const template_lists = int32s_lists_col{
int32s_lists_col{}, int32s_lists_col{0}, int32s_lists_col{0, 1}, int32s_lists_col{0, 1, 2}};
auto const gather_iter =
cudf::detail::make_counting_transform_iterator(0, [&](int32_t i) { return i % 4; });
auto const gather_map = int32s_col(gather_iter, gather_iter + num_rows);
child_columns.emplace_back(
std::move(cudf::gather(cudf::table_view{{template_lists}}, gather_map)->release().front()));
return structs_col(std::move(child_columns)).release();
}());
return write_file(input_columns,
"chunked_read_with_structs_of_lists",
nullable,
512 * 1024, // 512KB per page
20000 // 20k rows per page
);
};
auto const [expected_no_null, filepath_no_null] = generate_input(false);
auto const [expected_with_nulls, filepath_with_nulls] = generate_input(true);
// Test with zero limit: everything will be read in one chunk
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 0);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_with_nulls, 0);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_with_nulls, *result);
}
// Test with a very small limit: 1 byte
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 1);
EXPECT_EQ(num_chunks, 10);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_with_nulls, 1);
EXPECT_EQ(num_chunks, 5);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_with_nulls, *result);
}
// Test with a very large limit
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 2L << 40);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_with_nulls, 2L << 40);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_with_nulls, *result);
}
// Other tests:
// for these tests, different columns get written to different numbers of pages so it's a
// little tricky to describe the expected results by page counts. To get an idea of how
// these values are chosen, see the debug output from the call to print_cumulative_row_info() in
// reader_impl_preprocess.cu -> find_splits()
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 1'000'000);
EXPECT_EQ(num_chunks, 7);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 1'500'000);
EXPECT_EQ(num_chunks, 4);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 2'000'000);
EXPECT_EQ(num_chunks, 4);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 5'000'000);
EXPECT_EQ(num_chunks, 2);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_with_nulls, 1'000'000);
EXPECT_EQ(num_chunks, 5);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_with_nulls, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_with_nulls, 1'500'000);
EXPECT_EQ(num_chunks, 5);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_with_nulls, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_with_nulls, 2'000'000);
EXPECT_EQ(num_chunks, 3);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_with_nulls, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_with_nulls, 5'000'000);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_with_nulls, *result);
}
}
TEST_F(ParquetChunkedReaderTest, TestChunkedReadWithListsOfStructs)
{
auto constexpr num_rows = 100'000;
auto const generate_input = [num_rows](bool nullable) {
std::vector<std::unique_ptr<cudf::column>> input_columns;
auto const int_iter = thrust::make_counting_iterator(0);
input_columns.emplace_back(int32s_col(int_iter, int_iter + num_rows).release());
auto offsets = std::vector<cudf::size_type>{};
offsets.reserve(num_rows * 2);
cudf::size_type num_structs = 0;
for (int i = 0; i < num_rows; ++i) {
offsets.push_back(num_structs);
auto const new_list_size = i % 4;
num_structs += new_list_size;
}
offsets.push_back(num_structs);
auto const make_structs_col = [=] {
auto child1 = int32s_col(int_iter, int_iter + num_structs);
auto child2 = int32s_col(int_iter + num_structs, int_iter + num_structs * 2);
auto const str_iter = cudf::detail::make_counting_transform_iterator(
0, [&](int32_t i) { return std::to_string(i) + std::to_string(i) + std::to_string(i); });
auto child3 = strings_col{str_iter, str_iter + num_structs};
return structs_col{{child1, child2, child3}}.release();
};
input_columns.emplace_back(
cudf::make_lists_column(static_cast<cudf::size_type>(offsets.size() - 1),
int32s_col(offsets.begin(), offsets.end()).release(),
make_structs_col(),
0,
rmm::device_buffer{}));
return write_file(input_columns,
"chunked_read_with_lists_of_structs",
nullable,
512 * 1024, // 512KB per page
20000 // 20k rows per page
);
};
auto const [expected_no_null, filepath_no_null] = generate_input(false);
auto const [expected_with_nulls, filepath_with_nulls] = generate_input(true);
// Test with zero limit: everything will be read in one chunk
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 0);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_with_nulls, 0);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_with_nulls, *result);
}
// Test with a very small limit: 1 byte
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 1);
EXPECT_EQ(num_chunks, 10);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_with_nulls, 1);
EXPECT_EQ(num_chunks, 5);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_with_nulls, *result);
}
// Test with a very large limit
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 2L << 40);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_with_nulls, 2L << 40);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_with_nulls, *result);
}
// for these tests, different columns get written to different numbers of pages so it's a
// little tricky to describe the expected results by page counts. To get an idea of how
// these values are chosen, see the debug output from the call to print_cumulative_row_info() in
// reader_impl_preprocess.cu -> find_splits()
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 1'000'000);
EXPECT_EQ(num_chunks, 7);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 1'500'000);
EXPECT_EQ(num_chunks, 4);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 2'000'000);
EXPECT_EQ(num_chunks, 4);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_no_null, 5'000'000);
EXPECT_EQ(num_chunks, 2);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_no_null, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_with_nulls, 1'000'000);
EXPECT_EQ(num_chunks, 5);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_with_nulls, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_with_nulls, 1'500'000);
EXPECT_EQ(num_chunks, 4);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_with_nulls, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_with_nulls, 2'000'000);
EXPECT_EQ(num_chunks, 3);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_with_nulls, *result);
}
{
auto const [result, num_chunks] = chunked_read(filepath_with_nulls, 5'000'000);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_TABLES_EQUAL(*expected_with_nulls, *result);
}
}
TEST_F(ParquetChunkedReaderTest, TestChunkedReadNullCount)
{
auto constexpr num_rows = 100'000;
auto const sequence = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return 1; });
auto const validity =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 4 != 3; });
cudf::test::fixed_width_column_wrapper<int32_t> col{sequence, sequence + num_rows, validity};
std::vector<std::unique_ptr<cudf::column>> cols;
cols.push_back(col.release());
auto const expected = std::make_unique<cudf::table>(std::move(cols));
auto const filepath = temp_env->get_temp_filepath("chunked_reader_null_count.parquet");
auto const page_limit_rows = num_rows / 5;
auto const write_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, *expected)
.max_page_size_rows(page_limit_rows) // 20k rows per page
.build();
cudf::io::write_parquet(write_opts);
auto const byte_limit = page_limit_rows * sizeof(int);
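// One page holds 20k rows of 4-byte ints, so this limit should yield one page per chunk; with
// every fourth row null, each chunk is then expected to contain page_limit_rows / 4 nulls.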
auto const read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath}).build();
auto reader = cudf::io::chunked_parquet_reader(byte_limit, read_opts);
do {
// Every fourth row is null
EXPECT_EQ(reader.read_chunk().tbl->get_column(0).null_count(), page_limit_rows / 4);
} while (reader.has_next());
}
TEST_F(ParquetChunkedReaderTest, InputLimitSimple)
{
auto const filepath = temp_env->get_temp_filepath("input_limit_10_rowgroups.parquet");
// This results in 10 row groups, at 4001150 bytes per row group
constexpr int num_rows = 25'000'000;
auto value_iter = cudf::detail::make_counting_transform_iterator(0, [](int i) { return i; });
cudf::test::fixed_width_column_wrapper<int> expected(value_iter, value_iter + num_rows);
cudf::io::parquet_writer_options opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath},
cudf::table_view{{expected}})
// note: it is unnecessary to force compression to NONE here because the size we are using in
// the row group is the uncompressed data size. But forcing the dictionary policy to
// dictionary_policy::NEVER is necessary to prevent changes in the
// decompressed-but-not-yet-decoded data.
.dictionary_policy(cudf::io::dictionary_policy::NEVER);
cudf::io::write_parquet(opts);
{
// no chunking
auto const [result, num_chunks] = chunked_read(filepath, 0, 0);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->get_column(0));
}
{
// 25 chunks of 100k rows each
auto const [result, num_chunks] = chunked_read(filepath, 0, 1);
EXPECT_EQ(num_chunks, 25);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->get_column(0));
}
{
// 25 chunks of 100k rows each
auto const [result, num_chunks] = chunked_read(filepath, 0, 4000000);
EXPECT_EQ(num_chunks, 25);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->get_column(0));
}
{
// 25 chunks of 100k rows each
auto const [result, num_chunks] = chunked_read(filepath, 0, 4100000);
EXPECT_EQ(num_chunks, 25);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->get_column(0));
}
{
// 12 chunks of 200k rows each, plus 1 final chunk of 100k rows.
auto const [result, num_chunks] = chunked_read(filepath, 0, 8002301);
EXPECT_EQ(num_chunks, 13);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->get_column(0));
}
{
// 1 big chunk
auto const [result, num_chunks] = chunked_read(filepath, 0, size_t{1} * 1024 * 1024 * 1024);
EXPECT_EQ(num_chunks, 1);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->get_column(0));
}
}
| 0 |
rapidsai_public_repos/cudf/cpp/tests
|
rapidsai_public_repos/cudf/cpp/tests/io/csv_test.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/table_utilities.hpp>
#include <cudf_test/type_lists.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/io/arrow_io_source.hpp>
#include <cudf/io/csv.hpp>
#include <cudf/strings/convert/convert_datetime.hpp>
#include <cudf/strings/convert/convert_fixed_point.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/unary.hpp>
#include <arrow/io/api.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/find.h>
#include <thrust/iterator/counting_iterator.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <iterator>
#include <limits>
#include <numeric>
#include <sstream>
#include <string>
#include <vector>
using cudf::data_type;
using cudf::type_id;
using cudf::type_to_id;
template <typename T>
auto dtype()
{
return data_type{type_to_id<T>()};
}
template <typename T, typename SourceElementT = T>
using column_wrapper =
typename std::conditional<std::is_same_v<T, cudf::string_view>,
cudf::test::strings_column_wrapper,
cudf::test::fixed_width_column_wrapper<T, SourceElementT>>::type;
using column = cudf::column;
using table = cudf::table;
using table_view = cudf::table_view;
// Global environment for temporary files
auto const temp_env = static_cast<cudf::test::TempDirTestEnvironment*>(
::testing::AddGlobalTestEnvironment(new cudf::test::TempDirTestEnvironment));
// Base test fixture for tests
struct CsvWriterTest : public cudf::test::BaseFixture {};
template <typename T>
struct CsvFixedPointWriterTest : public CsvWriterTest {};
TYPED_TEST_SUITE(CsvFixedPointWriterTest, cudf::test::FixedPointTypes);
// Base test fixture for tests
struct CsvReaderTest : public cudf::test::BaseFixture {};
// Typed test fixture for timestamp type tests
template <typename T>
struct CsvReaderNumericTypeTest : public CsvReaderTest {};
// Declare typed test cases
using SupportedNumericTypes = cudf::test::Types<int64_t, double>;
TYPED_TEST_SUITE(CsvReaderNumericTypeTest, SupportedNumericTypes);
template <typename DecimalType>
struct CsvFixedPointReaderTest : public CsvReaderTest {
void run_tests(std::vector<std::string> const& reference_strings, numeric::scale_type scale)
{
cudf::test::strings_column_wrapper const strings(reference_strings.begin(),
reference_strings.end());
auto const expected = cudf::strings::to_fixed_point(
cudf::strings_column_view(strings), data_type{type_to_id<DecimalType>(), scale});
auto const buffer = std::accumulate(reference_strings.begin(),
reference_strings.end(),
std::string{},
[](std::string const& acc, std::string const& rhs) {
return acc.empty() ? rhs : (acc + "\n" + rhs);
});
cudf::io::csv_reader_options const in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.dtypes({data_type{type_to_id<DecimalType>(), scale}})
.header(-1);
auto const result = cudf::io::read_csv(in_opts);
auto const result_view = result.tbl->view();
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(*expected, result_view.column(0));
EXPECT_EQ(result_view.num_columns(), 1);
}
};
TYPED_TEST_SUITE(CsvFixedPointReaderTest, cudf::test::FixedPointTypes);
namespace {
// Generates a vector of uniform random values of type T
template <typename T>
inline auto random_values(size_t size)
{
std::vector<T> values(size);
using T1 = T;
using uniform_distribution =
typename std::conditional_t<std::is_same_v<T1, bool>,
std::bernoulli_distribution,
std::conditional_t<std::is_floating_point_v<T1>,
std::uniform_real_distribution<T1>,
std::uniform_int_distribution<T1>>>;
static constexpr auto seed = 0xf00d;
static std::mt19937 engine{seed};
static uniform_distribution dist{};
std::generate_n(values.begin(), size, [&]() { return T{dist(engine)}; });
return values;
}
MATCHER_P(FloatNearPointwise, tolerance, "Out-of-range")
{
return (std::get<0>(arg) > std::get<1>(arg) - tolerance &&
std::get<0>(arg) < std::get<1>(arg) + tolerance);
}
template <typename T>
using wrapper = cudf::test::fixed_width_column_wrapper<T>;
// temporary method to verify the float columns until
// CUDF_TEST_EXPECT_COLUMNS_EQUAL supports floating point
template <typename T, typename valid_t>
void check_float_column(cudf::column_view const& col_lhs,
cudf::column_view const& col_rhs,
T tol,
valid_t const& validity)
{
auto h_data = cudf::test::to_host<T>(col_rhs).first;
std::vector<T> data(h_data.size());
std::copy(h_data.begin(), h_data.end(), data.begin());
CUDF_TEST_EXPECT_COLUMN_PROPERTIES_EQUIVALENT(col_lhs,
(wrapper<T>{data.begin(), data.end(), validity}));
EXPECT_TRUE(col_lhs.null_count() == 0 and col_rhs.null_count() == 0);
EXPECT_THAT(cudf::test::to_host<T>(col_lhs).first,
::testing::Pointwise(FloatNearPointwise(tol), data));
}
// timestamp column checker within tolerance
// given by `tol_ms` (milliseconds)
void check_timestamp_column(cudf::column_view const& col_lhs,
cudf::column_view const& col_rhs,
long tol_ms = 1000l)
{
using T = cudf::timestamp_ms;
using namespace cuda::std::chrono;
auto h_lhs = cudf::test::to_host<T>(col_lhs).first;
auto h_rhs = cudf::test::to_host<T>(col_rhs).first;
cudf::size_type nrows = h_lhs.size();
EXPECT_TRUE(nrows == static_cast<cudf::size_type>(h_rhs.size()));
auto begin_count = thrust::make_counting_iterator<cudf::size_type>(0);
auto end_count = thrust::make_counting_iterator<cudf::size_type>(nrows);
auto* ptr_lhs = h_lhs.data(); // cannot capture host_vector in thrust,
// not even in host lambda
auto* ptr_rhs = h_rhs.data();
auto found = thrust::find_if(
thrust::host, begin_count, end_count, [ptr_lhs, ptr_rhs, tol_ms](auto row_index) {
auto delta_ms = cuda::std::chrono::duration_cast<cuda::std::chrono::milliseconds>(
ptr_lhs[row_index] - ptr_rhs[row_index]);
return delta_ms.count() >= tol_ms;
});
EXPECT_TRUE(found == end_count); // not found...
}
// helper to replace in `str` _all_ occurrences of `from` with `to`
std::string replace_all_helper(std::string str, std::string const& from, std::string const& to)
{
size_t start_pos = 0;
while ((start_pos = str.find(from, start_pos)) != std::string::npos) {
str.replace(start_pos, from.length(), to);
start_pos += to.length();
}
return str;
}
// Compare string columns, accounting for the CSV writer's special-character treatment:
// embedded double quotes ('"') are doubled, and the whole string is surrounded by double
// quotes if it contains a newline '\n', the <delimiter>, or a double quote.
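// For example, with the default ',' delimiter the cell
//   say "hi", world
// would be written as
//   "say ""hi"", world"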
void check_string_column(cudf::column_view const& col_lhs,
cudf::column_view const& col_rhs,
std::string const& delimiter = ",")
{
auto h_lhs = cudf::test::to_host<std::string>(col_lhs).first;
auto h_rhs = cudf::test::to_host<std::string>(col_rhs).first;
std::string newline("\n");
std::string quotes("\"");
std::string quotes_repl("\"\"");
std::vector<std::string> v_lhs;
std::transform(h_lhs.begin(),
h_lhs.end(),
std::back_inserter(v_lhs),
[delimiter, newline, quotes, quotes_repl](std::string const& str_row) {
auto found_quote = str_row.find(quotes);
auto found_newl = str_row.find(newline);
auto found_delim = str_row.find(delimiter);
bool flag_found_quotes = (found_quote != std::string::npos);
bool need_surround = flag_found_quotes || (found_newl != std::string::npos) ||
(found_delim != std::string::npos);
std::string str_repl;
if (flag_found_quotes) {
str_repl = replace_all_helper(str_row, quotes, quotes_repl);
} else {
str_repl = str_row;
}
return need_surround ? quotes + str_repl + quotes : str_row;
});
EXPECT_TRUE(std::equal(v_lhs.begin(), v_lhs.end(), h_rhs.begin()));
}
// Helper function to compare two floating-point column contents
template <typename T, std::enable_if_t<std::is_floating_point_v<T>>* = nullptr>
void expect_column_data_equal(std::vector<T> const& lhs, cudf::column_view const& rhs)
{
EXPECT_THAT(cudf::test::to_host<T>(rhs).first,
::testing::Pointwise(FloatNearPointwise(1e-6), lhs));
}
// Helper function to compare two column contents
template <typename T, std::enable_if_t<!std::is_floating_point_v<T>>* = nullptr>
void expect_column_data_equal(std::vector<T> const& lhs, cudf::column_view const& rhs)
{
EXPECT_THAT(cudf::test::to_host<T>(rhs).first, ::testing::ElementsAreArray(lhs));
}
void write_csv_helper(std::string const& filename,
cudf::table_view const& table,
std::vector<std::string> const& names = {})
{
cudf::io::csv_writer_options writer_options =
cudf::io::csv_writer_options::builder(cudf::io::sink_info(filename), table)
.include_header(not names.empty())
.names(names);
cudf::io::write_csv(writer_options);
}
template <typename T>
std::string assign(T input)
{
return std::to_string(input);
}
std::string assign(std::string input) { return input; }
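// Stringify each input value and pad it with `zero_count` leading zeros (inserted after the sign
// for negatives); e.g. prepend_zeros(std::vector<int>{12, -3}, 2) would produce {"0012", "-003"},
// and "+0012" for 12 when add_positive_sign is set.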
template <typename T>
std::vector<std::string> prepend_zeros(std::vector<T> const& input,
int zero_count = 0,
bool add_positive_sign = false)
{
std::vector<std::string> output(input.size());
std::transform(input.begin(), input.end(), output.begin(), [=](T const& num) {
auto str = assign(num);
bool is_negative = (str[0] == '-');
if (is_negative) {
str.insert(1, zero_count, '0');
return str;
} else if (add_positive_sign) {
return "+" + std::string(zero_count, '0') + str;
} else {
str.insert(0, zero_count, '0');
return str;
}
});
return output;
}
} // namespace
TYPED_TEST(CsvReaderNumericTypeTest, SingleColumn)
{
constexpr auto num_rows = 10;
auto sequence = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return static_cast<TypeParam>(i + 1000.50f); });
auto filepath = temp_env->get_temp_filepath("SingleColumn.csv");
{
std::ofstream out_file{filepath, std::ofstream::out};
std::ostream_iterator<TypeParam> output_iterator(out_file, "\n");
std::copy(sequence, sequence + num_rows, output_iterator);
}
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath}).header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
expect_column_data_equal(std::vector<TypeParam>(sequence, sequence + num_rows), view.column(0));
}
TYPED_TEST(CsvFixedPointReaderTest, SingleColumnNegativeScale)
{
this->run_tests({"1.23", "876e-2", "5.43e1", "-0.12", "0.25", "-0.23", "-0.27", "0.00", "0.00"},
numeric::scale_type{-2});
}
TYPED_TEST(CsvFixedPointReaderTest, SingleColumnNoScale)
{
this->run_tests({"123", "-87600e-2", "54.3e1", "-12", "25", "-23", "-27", "0", "0"},
numeric::scale_type{0});
}
TYPED_TEST(CsvFixedPointReaderTest, SingleColumnPositiveScale)
{
this->run_tests(
{"123000", "-87600000e-2", "54300e1", "-12000", "25000", "-23000", "-27000", "0000", "0000"},
numeric::scale_type{3});
}
TYPED_TEST(CsvFixedPointWriterTest, SingleColumnNegativeScale)
{
std::vector<std::string> reference_strings = {
"1.23", "-8.76", "5.43", "-0.12", "0.25", "-0.23", "-0.27", "0.00", "0.00"};
auto validity =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return (i % 2 == 0); });
cudf::test::strings_column_wrapper strings(
reference_strings.begin(), reference_strings.end(), validity);
std::vector<std::string> valid_reference_strings;
thrust::copy_if(thrust::host,
reference_strings.begin(),
reference_strings.end(),
thrust::make_counting_iterator(0),
std::back_inserter(valid_reference_strings),
validity.functor());
reference_strings = valid_reference_strings;
using DecimalType = TypeParam;
auto input_column =
cudf::strings::to_fixed_point(cudf::strings_column_view(strings),
data_type{type_to_id<DecimalType>(), numeric::scale_type{-2}});
auto input_table = cudf::table_view{std::vector<cudf::column_view>{*input_column}};
auto filepath = temp_env->get_temp_dir() + "FixedPointSingleColumnNegativeScale.csv";
cudf::io::csv_writer_options writer_options =
cudf::io::csv_writer_options::builder(cudf::io::sink_info(filepath), input_table)
.include_header(false);
cudf::io::write_csv(writer_options);
std::vector<std::string> result_strings;
result_strings.reserve(reference_strings.size());
std::ifstream read_result_file(filepath);
ASSERT_TRUE(read_result_file.is_open());
std::copy(std::istream_iterator<std::string>(read_result_file),
std::istream_iterator<std::string>(),
std::back_inserter(result_strings));
EXPECT_EQ(result_strings, reference_strings);
}
TYPED_TEST(CsvFixedPointWriterTest, SingleColumnPositiveScale)
{
std::vector<std::string> reference_strings = {
"123000", "-876000", "543000", "-12000", "25000", "-23000", "-27000", "0000", "0000"};
auto validity =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return (i % 2 == 0); });
cudf::test::strings_column_wrapper strings(
reference_strings.begin(), reference_strings.end(), validity);
std::vector<std::string> valid_reference_strings;
thrust::copy_if(thrust::host,
reference_strings.begin(),
reference_strings.end(),
thrust::make_counting_iterator(0),
std::back_inserter(valid_reference_strings),
validity.functor());
reference_strings = valid_reference_strings;
using DecimalType = TypeParam;
auto input_column =
cudf::strings::to_fixed_point(cudf::strings_column_view(strings),
data_type{type_to_id<DecimalType>(), numeric::scale_type{3}});
auto input_table = cudf::table_view{std::vector<cudf::column_view>{*input_column}};
auto filepath = temp_env->get_temp_dir() + "FixedPointSingleColumnPositiveScale.csv";
cudf::io::csv_writer_options writer_options =
cudf::io::csv_writer_options::builder(cudf::io::sink_info(filepath), input_table)
.include_header(false);
cudf::io::write_csv(writer_options);
std::vector<std::string> result_strings;
result_strings.reserve(reference_strings.size());
std::ifstream read_result_file(filepath);
ASSERT_TRUE(read_result_file.is_open());
std::copy(std::istream_iterator<std::string>(read_result_file),
std::istream_iterator<std::string>(),
std::back_inserter(result_strings));
EXPECT_EQ(result_strings, reference_strings);
}
void test_quoting_disabled_with_delimiter(char delimiter_char)
{
auto const delimiter = std::string{delimiter_char};
auto const input_strings = cudf::test::strings_column_wrapper{
std::string{"All"} + delimiter + "the" + delimiter + "leaves",
"are\"brown",
"and\nthe\nsky\nis\ngrey"};
auto const input_table = table_view{{input_strings}};
auto const filepath = temp_env->get_temp_dir() + "unquoted.csv";
auto w_options = cudf::io::csv_writer_options::builder(cudf::io::sink_info{filepath}, input_table)
.include_header(false)
.inter_column_delimiter(delimiter_char)
.quoting(cudf::io::quote_style::NONE);
cudf::io::write_csv(w_options.build());
auto r_options = cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.header(-1)
.delimiter(delimiter_char)
.quoting(cudf::io::quote_style::NONE);
auto r_table = cudf::io::read_csv(r_options.build());
auto const expected =
cudf::test::strings_column_wrapper{"All", "are\"brown", "and", "the", "sky", "is", "grey"};
CUDF_TEST_EXPECT_COLUMNS_EQUAL(r_table.tbl->view().column(0), expected);
}
TEST_F(CsvWriterTest, QuotingDisabled)
{
test_quoting_disabled_with_delimiter(',');
test_quoting_disabled_with_delimiter('\u0001');
}
TEST_F(CsvReaderTest, MultiColumn)
{
constexpr auto num_rows = 10;
auto int8_values = random_values<int8_t>(num_rows);
auto int16_values = random_values<int16_t>(num_rows);
auto int32_values = random_values<int32_t>(num_rows);
auto int64_values = random_values<int64_t>(num_rows);
auto uint8_values = random_values<uint8_t>(num_rows);
auto uint16_values = random_values<uint16_t>(num_rows);
auto uint32_values = random_values<uint32_t>(num_rows);
auto uint64_values = random_values<uint64_t>(num_rows);
auto float32_values = random_values<float>(num_rows);
auto float64_values = random_values<double>(num_rows);
auto filepath = temp_env->get_temp_dir() + "MultiColumn.csv";
{
std::ostringstream line;
for (int i = 0; i < num_rows; ++i) {
line << std::to_string(int8_values[i]) << "," << int16_values[i] << "," << int32_values[i]
<< "," << int64_values[i] << "," << std::to_string(uint8_values[i]) << ","
<< uint16_values[i] << "," << uint32_values[i] << "," << uint64_values[i] << ","
<< float32_values[i] << "," << float64_values[i] << "\n";
}
std::ofstream outfile(filepath, std::ofstream::out);
outfile << line.str();
}
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.header(-1)
.dtypes({dtype<int8_t>(),
dtype<int16_t>(),
dtype<int32_t>(),
dtype<int64_t>(),
dtype<uint8_t>(),
dtype<uint16_t>(),
dtype<uint32_t>(),
dtype<uint64_t>(),
dtype<float>(),
dtype<double>()});
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
expect_column_data_equal(int8_values, view.column(0));
expect_column_data_equal(int16_values, view.column(1));
expect_column_data_equal(int32_values, view.column(2));
expect_column_data_equal(int64_values, view.column(3));
expect_column_data_equal(uint8_values, view.column(4));
expect_column_data_equal(uint16_values, view.column(5));
expect_column_data_equal(uint32_values, view.column(6));
expect_column_data_equal(uint64_values, view.column(7));
expect_column_data_equal(float32_values, view.column(8));
expect_column_data_equal(float64_values, view.column(9));
}
TEST_F(CsvReaderTest, RepeatColumn)
{
constexpr auto num_rows = 10;
auto int16_values = random_values<int16_t>(num_rows);
auto int64_values = random_values<int64_t>(num_rows);
auto uint64_values = random_values<uint64_t>(num_rows);
auto float32_values = random_values<float>(num_rows);
auto filepath = temp_env->get_temp_dir() + "RepeatColumn.csv";
{
std::ostringstream line;
for (int i = 0; i < num_rows; ++i) {
line << int16_values[i] << "," << int64_values[i] << "," << uint64_values[i] << ","
<< float32_values[i] << "\n";
}
std::ofstream outfile(filepath, std::ofstream::out);
outfile << line.str();
}
// Repeats a column in both indexes and names, and omits one column.
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.dtypes({dtype<int16_t>(), dtype<int64_t>(), dtype<uint64_t>(), dtype<float>()})
.names({"A", "B", "C", "D"})
.use_cols_indexes({1, 0, 0})
.use_cols_names({"D", "B", "B"})
.header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
EXPECT_EQ(3, view.num_columns());
expect_column_data_equal(int16_values, view.column(0));
expect_column_data_equal(int64_values, view.column(1));
expect_column_data_equal(float32_values, view.column(2));
}
TEST_F(CsvReaderTest, Booleans)
{
auto filepath = temp_env->get_temp_dir() + "Booleans.csv";
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << "YES,1,bar,true\nno,2,FOO,true\nBar,3,yes,false\nNo,4,NO,"
"true\nYes,5,foo,false\n";
}
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A", "B", "C", "D"})
.dtypes({dtype<int32_t>(), dtype<int32_t>(), dtype<int16_t>(), dtype<bool>()})
.true_values({"yes", "Yes", "YES", "foo", "FOO"})
.false_values({"no", "No", "NO", "Bar", "bar"})
.header(-1);
auto result = cudf::io::read_csv(in_opts);
// Booleans are the same (integer) data type, but valued at 0 or 1
auto const view = result.tbl->view();
EXPECT_EQ(4, view.num_columns());
ASSERT_EQ(type_id::INT32, view.column(0).type().id());
ASSERT_EQ(type_id::INT32, view.column(1).type().id());
ASSERT_EQ(type_id::INT16, view.column(2).type().id());
ASSERT_EQ(type_id::BOOL8, view.column(3).type().id());
expect_column_data_equal(std::vector<int32_t>{1, 0, 0, 0, 1}, view.column(0));
expect_column_data_equal(std::vector<int16_t>{0, 1, 1, 0, 1}, view.column(2));
expect_column_data_equal(std::vector<bool>{true, true, false, true, false}, view.column(3));
}
TEST_F(CsvReaderTest, Dates)
{
auto filepath = temp_env->get_temp_dir() + "Dates.csv";
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << "05/03/2001\n31/10/2010\n20/10/1994\n18/10/1990\n1/1/1970\n";
outfile << "18/04/1995\n14/07/1994\n07/06/2006 11:20:30.400\n";
outfile << "16/09/2005T1:2:30.400PM\n2/2/1970\n";
}
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A"})
.dtypes({data_type{type_id::TIMESTAMP_MILLISECONDS}})
.dayfirst(true)
.header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
EXPECT_EQ(1, view.num_columns());
ASSERT_EQ(type_id::TIMESTAMP_MILLISECONDS, view.column(0).type().id());
using namespace cuda::std::chrono_literals;
expect_column_data_equal(std::vector<cudf::timestamp_ms>{cudf::timestamp_ms{983750400000ms},
cudf::timestamp_ms{1288483200000ms},
cudf::timestamp_ms{782611200000ms},
cudf::timestamp_ms{656208000000ms},
cudf::timestamp_ms{0ms},
cudf::timestamp_ms{798163200000ms},
cudf::timestamp_ms{774144000000ms},
cudf::timestamp_ms{1149679230400ms},
cudf::timestamp_ms{1126875750400ms},
cudf::timestamp_ms{2764800000ms}},
view.column(0));
}
TEST_F(CsvReaderTest, DatesCastToTimestampSeconds)
{
auto filepath = temp_env->get_temp_dir() + "DatesCastToTimestampS.csv";
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << "05/03/2001\n31/10/2010\n20/10/1994\n18/10/1990\n1/1/1970\n";
outfile << "18/04/1995\n14/07/1994\n07/06/2006 11:20:30.400\n";
outfile << "16/09/2005T1:2:30.400PM\n2/2/1970\n";
}
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A"})
.dtypes({data_type{type_id::TIMESTAMP_SECONDS}})
.dayfirst(true)
.header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
EXPECT_EQ(1, view.num_columns());
ASSERT_EQ(type_id::TIMESTAMP_SECONDS, view.column(0).type().id());
using namespace cuda::std::chrono_literals;
expect_column_data_equal(std::vector<cudf::timestamp_s>{cudf::timestamp_s{983750400s},
cudf::timestamp_s{1288483200s},
cudf::timestamp_s{782611200s},
cudf::timestamp_s{656208000s},
cudf::timestamp_s{0s},
cudf::timestamp_s{798163200s},
cudf::timestamp_s{774144000s},
cudf::timestamp_s{1149679230s},
cudf::timestamp_s{1126875750s},
cudf::timestamp_s{2764800s}},
view.column(0));
}
TEST_F(CsvReaderTest, DatesCastToTimestampMilliSeconds)
{
auto filepath = temp_env->get_temp_dir() + "DatesCastToTimestampMs.csv";
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << "05/03/2001\n31/10/2010\n20/10/1994\n18/10/1990\n1/1/1970\n";
outfile << "18/04/1995\n14/07/1994\n07/06/2006 11:20:30.400\n";
outfile << "16/09/2005T1:2:30.400PM\n2/2/1970\n";
}
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A"})
.dtypes({data_type{type_id::TIMESTAMP_MILLISECONDS}})
.dayfirst(true)
.header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
EXPECT_EQ(1, view.num_columns());
ASSERT_EQ(type_id::TIMESTAMP_MILLISECONDS, view.column(0).type().id());
using namespace cuda::std::chrono_literals;
expect_column_data_equal(std::vector<cudf::timestamp_ms>{cudf::timestamp_ms{983750400000ms},
cudf::timestamp_ms{1288483200000ms},
cudf::timestamp_ms{782611200000ms},
cudf::timestamp_ms{656208000000ms},
cudf::timestamp_ms{0ms},
cudf::timestamp_ms{798163200000ms},
cudf::timestamp_ms{774144000000ms},
cudf::timestamp_ms{1149679230400ms},
cudf::timestamp_ms{1126875750400ms},
cudf::timestamp_ms{2764800000ms}},
view.column(0));
}
TEST_F(CsvReaderTest, DatesCastToTimestampMicroSeconds)
{
auto filepath = temp_env->get_temp_dir() + "DatesCastToTimestampUs.csv";
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << "05/03/2001\n31/10/2010\n20/10/1994\n18/10/1990\n1/1/1970\n";
outfile << "18/04/1995\n14/07/1994\n07/06/2006 11:20:30.400\n";
outfile << "16/09/2005T1:2:30.400PM\n2/2/1970\n";
}
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A"})
.dtypes({data_type{type_id::TIMESTAMP_MICROSECONDS}})
.dayfirst(true)
.header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
EXPECT_EQ(1, view.num_columns());
ASSERT_EQ(type_id::TIMESTAMP_MICROSECONDS, view.column(0).type().id());
using namespace cuda::std::chrono_literals;
expect_column_data_equal(std::vector<cudf::timestamp_us>{cudf::timestamp_us{983750400000000us},
cudf::timestamp_us{1288483200000000us},
cudf::timestamp_us{782611200000000us},
cudf::timestamp_us{656208000000000us},
cudf::timestamp_us{0us},
cudf::timestamp_us{798163200000000us},
cudf::timestamp_us{774144000000000us},
cudf::timestamp_us{1149679230400000us},
cudf::timestamp_us{1126875750400000us},
cudf::timestamp_us{2764800000000us}},
view.column(0));
}
TEST_F(CsvReaderTest, DatesCastToTimestampNanoSeconds)
{
auto filepath = temp_env->get_temp_dir() + "DatesCastToTimestampNs.csv";
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << "05/03/2001\n31/10/2010\n20/10/1994\n18/10/1990\n1/1/1970\n";
outfile << "18/04/1995\n14/07/1994\n07/06/2006 11:20:30.400\n";
outfile << "16/09/2005T1:2:30.400PM\n2/2/1970\n";
}
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A"})
.dtypes({data_type{type_id::TIMESTAMP_NANOSECONDS}})
.dayfirst(true)
.header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
EXPECT_EQ(1, view.num_columns());
ASSERT_EQ(type_id::TIMESTAMP_NANOSECONDS, view.column(0).type().id());
using namespace cuda::std::chrono_literals;
expect_column_data_equal(
std::vector<cudf::timestamp_ns>{cudf::timestamp_ns{983750400000000000ns},
cudf::timestamp_ns{1288483200000000000ns},
cudf::timestamp_ns{782611200000000000ns},
cudf::timestamp_ns{656208000000000000ns},
cudf::timestamp_ns{0ns},
cudf::timestamp_ns{798163200000000000ns},
cudf::timestamp_ns{774144000000000000ns},
cudf::timestamp_ns{1149679230400000000ns},
cudf::timestamp_ns{1126875750400000000ns},
cudf::timestamp_ns{2764800000000000ns}},
view.column(0));
}
TEST_F(CsvReaderTest, IntegersCastToTimestampSeconds)
{
auto filepath = temp_env->get_temp_dir() + "IntegersCastToTimestampS.csv";
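  // Integer inputs are interpreted directly as counts of the requested
  // timestamp resolution (seconds here; the following tests cover milli-,
  // micro- and nanoseconds), so the values round-trip unchanged.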
std::vector<int64_t> input_vals{1, 10, 111, 2, 11, 112, 3, 12, 113, 43432423, 13342, 13243214};
auto expected_column =
column_wrapper<cudf::timestamp_s, cudf::timestamp_s::rep>(input_vals.begin(), input_vals.end());
{
std::ofstream outfile(filepath, std::ofstream::out);
for (auto v : input_vals) {
outfile << v << "\n";
}
}
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A"})
.dtypes({data_type{type_id::TIMESTAMP_SECONDS}})
.header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
EXPECT_EQ(1, view.num_columns());
ASSERT_EQ(type_id::TIMESTAMP_SECONDS, view.column(0).type().id());
using namespace cuda::std::chrono_literals;
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(expected_column, view.column(0));
}
TEST_F(CsvReaderTest, IntegersCastToTimestampMilliSeconds)
{
auto filepath = temp_env->get_temp_dir() + "IntegersCastToTimestampMs.csv";
std::vector<int64_t> input_vals{1, 10, 111, 2, 11, 112, 3, 12, 113, 43432423, 13342, 13243214};
auto expected_column = column_wrapper<cudf::timestamp_ms, cudf::timestamp_ms::rep>(
input_vals.begin(), input_vals.end());
{
std::ofstream outfile(filepath, std::ofstream::out);
for (auto v : input_vals) {
outfile << v << "\n";
}
}
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A"})
.dtypes({data_type{type_id::TIMESTAMP_MILLISECONDS}})
.header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
EXPECT_EQ(1, view.num_columns());
ASSERT_EQ(type_id::TIMESTAMP_MILLISECONDS, view.column(0).type().id());
using namespace cuda::std::chrono_literals;
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(expected_column, view.column(0));
}
TEST_F(CsvReaderTest, IntegersCastToTimestampMicroSeconds)
{
auto filepath = temp_env->get_temp_dir() + "IntegersCastToTimestampUs.csv";
std::vector<int64_t> input_vals{1, 10, 111, 2, 11, 112, 3, 12, 113, 43432423, 13342, 13243214};
auto expected_column = column_wrapper<cudf::timestamp_us, cudf::timestamp_us::rep>(
input_vals.begin(), input_vals.end());
{
std::ofstream outfile(filepath, std::ofstream::out);
for (auto v : input_vals) {
outfile << v << "\n";
}
}
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A"})
.dtypes({data_type{type_id::TIMESTAMP_MICROSECONDS}})
.header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
EXPECT_EQ(1, view.num_columns());
ASSERT_EQ(type_id::TIMESTAMP_MICROSECONDS, view.column(0).type().id());
using namespace cuda::std::chrono_literals;
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(expected_column, view.column(0));
}
TEST_F(CsvReaderTest, IntegersCastToTimestampNanoSeconds)
{
auto filepath = temp_env->get_temp_dir() + "IntegersCastToTimestampNs.csv";
std::vector<int64_t> input_vals{1, 10, 111, 2, 11, 112, 3, 12, 113, 43432423, 13342, 13243214};
auto expected_column = column_wrapper<cudf::timestamp_ns, cudf::timestamp_ns::rep>(
input_vals.begin(), input_vals.end());
{
std::ofstream outfile(filepath, std::ofstream::out);
for (auto v : input_vals) {
outfile << v << "\n";
}
}
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A"})
.dtypes({data_type{type_id::TIMESTAMP_NANOSECONDS}})
.header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
EXPECT_EQ(1, view.num_columns());
ASSERT_EQ(type_id::TIMESTAMP_NANOSECONDS, view.column(0).type().id());
using namespace cuda::std::chrono_literals;
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(expected_column, view.column(0));
}
TEST_F(CsvReaderTest, FloatingPoint)
{
auto filepath = temp_env->get_temp_dir() + "FloatingPoint.csv";
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << "5.6;0.5679e2;1.2e10;0.07e1;3000e-3;12.34e0;3.1e-001;-73."
"98007199999998;";
}
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A"})
.dtypes({dtype<float>()})
.lineterminator(';')
.header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
EXPECT_EQ(1, view.num_columns());
ASSERT_EQ(type_id::FLOAT32, view.column(0).type().id());
auto const ref_vals =
std::vector<float>{5.6, 56.79, 12000000000, 0.7, 3.000, 12.34, 0.31, -73.98007199999998};
expect_column_data_equal(ref_vals, view.column(0));
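  // All eight values parse successfully, so the first eight bits of the
  // bitmask word are set.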
auto const bitmask = cudf::test::bitmask_to_host(view.column(0));
ASSERT_EQ((1u << ref_vals.size()) - 1, bitmask[0]);
}
TEST_F(CsvReaderTest, Strings)
{
std::vector<std::string> names{"line", "verse"};
auto filepath = temp_env->get_temp_dir() + "Strings.csv";
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << names[0] << ',' << names[1] << '\n';
outfile << "10,abc def ghi" << '\n';
outfile << "20,\"jkl mno pqr\"" << '\n';
outfile << "30,stu \"\"vwx\"\" yz" << '\n';
}
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names(names)
.dtypes(std::vector<data_type>{dtype<int32_t>(), dtype<cudf::string_view>()})
.quoting(cudf::io::quote_style::NONE);
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
EXPECT_EQ(2, view.num_columns());
ASSERT_EQ(type_id::INT32, view.column(0).type().id());
ASSERT_EQ(type_id::STRING, view.column(1).type().id());
expect_column_data_equal(
std::vector<std::string>{"abc def ghi", "\"jkl mno pqr\"", "stu \"\"vwx\"\" yz"},
view.column(1));
}
TEST_F(CsvReaderTest, StringsQuotes)
{
std::vector<std::string> names{"line", "verse"};
auto filepath = temp_env->get_temp_dir() + "StringsQuotes.csv";
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << names[0] << ',' << names[1] << '\n';
outfile << "10,`abc,\ndef, ghi`" << '\n';
outfile << "20,`jkl, ``mno``, pqr`" << '\n';
outfile << "30,stu `vwx` yz" << '\n';
}
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names(names)
.dtypes(std::vector<data_type>{dtype<int32_t>(), dtype<cudf::string_view>()})
.quotechar('`');
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
EXPECT_EQ(2, view.num_columns());
ASSERT_EQ(type_id::INT32, view.column(0).type().id());
ASSERT_EQ(type_id::STRING, view.column(1).type().id());
expect_column_data_equal(
std::vector<std::string>{"abc,\ndef, ghi", "jkl, `mno`, pqr", "stu `vwx` yz"}, view.column(1));
}
TEST_F(CsvReaderTest, StringsQuotesIgnored)
{
std::vector<std::string> names{"line", "verse"};
auto filepath = temp_env->get_temp_dir() + "StringsQuotesIgnored.csv";
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << names[0] << ',' << names[1] << '\n';
outfile << "10,\"abcdef ghi\"" << '\n';
outfile << "20,\"jkl \"\"mno\"\" pqr\"" << '\n';
outfile << "30,stu \"vwx\" yz" << '\n';
}
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names(names)
.dtypes(std::vector<data_type>{dtype<int32_t>(), dtype<cudf::string_view>()})
.quoting(cudf::io::quote_style::NONE)
.doublequote(false);
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
EXPECT_EQ(2, view.num_columns());
ASSERT_EQ(type_id::INT32, view.column(0).type().id());
ASSERT_EQ(type_id::STRING, view.column(1).type().id());
expect_column_data_equal(
std::vector<std::string>{"\"abcdef ghi\"", "\"jkl \"\"mno\"\" pqr\"", "stu \"vwx\" yz"},
view.column(1));
}
TEST_F(CsvReaderTest, SkiprowsNrows)
{
auto filepath = temp_env->get_temp_dir() + "SkiprowsNrows.csv";
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << "1\n2\n3\n4\n5\n6\n7\n8\n9\n";
}
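  // skiprows(2) drops "1" and "2"; header(1) then treats "4" (row index 1 of
  // the remaining rows) as the header, so data starts at "5" and nrows(2)
  // keeps "5" and "6".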
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A"})
.dtypes({dtype<int32_t>()})
.header(1)
.skiprows(2)
.nrows(2);
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
EXPECT_EQ(1, view.num_columns());
ASSERT_EQ(type_id::INT32, view.column(0).type().id());
expect_column_data_equal(std::vector<int32_t>{5, 6}, view.column(0));
}
TEST_F(CsvReaderTest, ByteRange)
{
auto filepath = temp_env->get_temp_dir() + "ByteRange.csv";
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << "1000\n2000\n3000\n4000\n5000\n6000\n7000\n8000\n9000\n";
}
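  // The file holds nine 5-byte records ("1000\n" ... "9000\n"). Offset 11
  // falls inside the "3000" record, so parsing resumes at the next record
  // boundary; records starting within bytes [11, 26) are 4000, 5000 and 6000.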
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A"})
.dtypes({dtype<int32_t>()})
.header(-1)
.byte_range_offset(11)
.byte_range_size(15);
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
EXPECT_EQ(1, view.num_columns());
ASSERT_EQ(type_id::INT32, view.column(0).type().id());
expect_column_data_equal(std::vector<int32_t>{4000, 5000, 6000}, view.column(0));
}
TEST_F(CsvReaderTest, ByteRangeStrings)
{
std::string input = "\"a\"\n\"b\"\n\"c\"";
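  // A non-zero byte_range_offset is not assumed to fall on a record boundary,
  // so the reader skips ahead to the next line terminator; with offset 4 only
  // the final row ("c") is parsed.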
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{input.c_str(), input.size()})
.names({"A"})
.dtypes({dtype<cudf::string_view>()})
.header(-1)
.byte_range_offset(4);
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
EXPECT_EQ(1, view.num_columns());
ASSERT_EQ(type_id::STRING, view.column(0).type().id());
expect_column_data_equal(std::vector<std::string>{"c"}, view.column(0));
}
TEST_F(CsvReaderTest, BlanksAndComments)
{
auto filepath = temp_env->get_temp_dir() + "BlanksAndComments.csv";
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << "1\n#blank\n3\n4\n5\n#blank\n\n\n8\n9\n";
}
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A"})
.dtypes({dtype<int32_t>()})
.header(-1)
.comment('#');
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
EXPECT_EQ(1, view.num_columns());
ASSERT_EQ(type_id::INT32, view.column(0).type().id());
expect_column_data_equal(std::vector<int32_t>{1, 3, 4, 5, 8, 9}, view.column(0));
}
TEST_F(CsvReaderTest, EmptyFile)
{
auto filepath = temp_env->get_temp_dir() + "EmptyFile.csv";
{
std::ofstream outfile{filepath, std::ofstream::out};
outfile << "";
}
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
EXPECT_EQ(0, view.num_columns());
}
TEST_F(CsvReaderTest, NoDataFile)
{
auto filepath = temp_env->get_temp_dir() + "NoDataFile.csv";
{
std::ofstream outfile{filepath, std::ofstream::out};
outfile << "\n\n";
}
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
EXPECT_EQ(0, view.num_columns());
}
TEST_F(CsvReaderTest, HeaderOnlyFile)
{
auto filepath = temp_env->get_temp_dir() + "HeaderOnlyFile.csv";
{
std::ofstream outfile{filepath, std::ofstream::out};
outfile << "\"a\",\"b\",\"c\"\n\n";
}
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
EXPECT_EQ(0, view.num_rows());
EXPECT_EQ(3, view.num_columns());
}
TEST_F(CsvReaderTest, ArrowFileSource)
{
auto filepath = temp_env->get_temp_dir() + "ArrowFileSource.csv";
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << "A\n9\n8\n7\n6\n5\n4\n3\n2\n";
}
std::shared_ptr<arrow::io::ReadableFile> infile;
ASSERT_TRUE(arrow::io::ReadableFile::Open(filepath).Value(&infile).ok());
auto arrow_source = cudf::io::arrow_io_source{infile};
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{&arrow_source})
.dtypes({dtype<int8_t>()});
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
EXPECT_EQ(1, view.num_columns());
ASSERT_EQ(type_id::INT8, view.column(0).type().id());
expect_column_data_equal(std::vector<int8_t>{9, 8, 7, 6, 5, 4, 3, 2}, view.column(0));
}
TEST_F(CsvReaderTest, InvalidFloatingPoint)
{
auto const filepath = temp_env->get_temp_dir() + "InvalidFloatingPoint.csv";
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << "1.2e1+\n3.4e2-\n5.6e3e\n7.8e3A\n9.0Be1\n1C.2";
}
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A"})
.dtypes({dtype<float>()})
.header(-1);
auto const result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
EXPECT_EQ(1, view.num_columns());
ASSERT_EQ(type_id::FLOAT32, view.column(0).type().id());
  // none of the rows parse as valid floats, so every value is null
  ASSERT_EQ(6u, result.tbl->view().column(0).null_count());
}
TEST_F(CsvReaderTest, StringInference)
{
std::string buffer = "\"-1\"\n";
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.header(-1);
auto const result = cudf::io::read_csv(in_opts);
EXPECT_EQ(result.tbl->num_columns(), 1);
EXPECT_EQ(result.tbl->get_column(0).type().id(), type_id::STRING);
}
TEST_F(CsvReaderTest, TypeInferenceThousands)
{
std::string buffer = "1`400,123,1`234.56\n123`456,123456,12.34";
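  // With '`' as the thousands separator, "1`400" and "123`456" parse as
  // integers and "1`234.56" as a double; the separator is ignored when
  // inferring and parsing values.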
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.header(-1)
.thousands('`');
auto const result = cudf::io::read_csv(in_opts);
auto const result_view = result.tbl->view();
EXPECT_EQ(result_view.num_columns(), 3);
EXPECT_EQ(result_view.column(0).type().id(), type_id::INT64);
EXPECT_EQ(result_view.column(1).type().id(), type_id::INT64);
EXPECT_EQ(result_view.column(2).type().id(), type_id::FLOAT64);
auto tsnd_sep_col = std::vector<int64_t>{1400L, 123456L};
auto int_col = std::vector<int64_t>{123L, 123456L};
auto dbl_col = std::vector<double>{1234.56, 12.34};
expect_column_data_equal(tsnd_sep_col, result_view.column(0));
expect_column_data_equal(int_col, result_view.column(1));
expect_column_data_equal(dbl_col, result_view.column(2));
}
TEST_F(CsvReaderTest, TypeInferenceWithDecimal)
{
  // With '`' as the thousands separator and ';' as the decimal point, we expect:
// col#0 => INT64 (column contains only digits & thousands sep)
// col#1 => STRING (contains digits and period character, which is NOT the decimal point here)
// col#2 => FLOAT64 (column contains digits and decimal point (i.e., ';'))
std::string buffer = "1`400,1.23,1`234;56\n123`456,123.456,12;34";
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.header(-1)
.thousands('`')
.decimal(';');
auto const result = cudf::io::read_csv(in_opts);
auto const result_view = result.tbl->view();
EXPECT_EQ(result_view.num_columns(), 3);
EXPECT_EQ(result_view.column(0).type().id(), type_id::INT64);
EXPECT_EQ(result_view.column(1).type().id(), type_id::STRING);
EXPECT_EQ(result_view.column(2).type().id(), type_id::FLOAT64);
auto int_col = std::vector<int64_t>{1400L, 123456L};
auto str_col = std::vector<std::string>{"1.23", "123.456"};
auto dbl_col = std::vector<double>{1234.56, 12.34};
expect_column_data_equal(int_col, result_view.column(0));
expect_column_data_equal(str_col, result_view.column(1));
expect_column_data_equal(dbl_col, result_view.column(2));
}
TEST_F(CsvReaderTest, SkipRowsXorSkipFooter)
{
std::string buffer = "1,2,3";
cudf::io::csv_reader_options skiprows_options =
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.header(-1)
.skiprows(1);
EXPECT_NO_THROW(cudf::io::read_csv(skiprows_options));
cudf::io::csv_reader_options skipfooter_options =
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.header(-1)
.skipfooter(1);
EXPECT_NO_THROW(cudf::io::read_csv(skipfooter_options));
}
TEST_F(CsvReaderTest, nullHandling)
{
auto const filepath = temp_env->get_temp_dir() + "NullValues.csv";
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << "NULL\n\nnull\nn/a\nNull\nNA\nnan";
}
// Test disabling na_filter
{
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.na_filter(false)
.dtypes({dtype<cudf::string_view>()})
.header(-1)
.skip_blank_lines(false);
auto const result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
auto expect =
cudf::test::strings_column_wrapper({"NULL", "", "null", "n/a", "Null", "NA", "nan"});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expect, view.column(0));
}
// Test enabling na_filter
{
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.dtypes({dtype<cudf::string_view>()})
.header(-1)
.skip_blank_lines(false);
auto const result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
auto expect =
cudf::test::strings_column_wrapper({"NULL", "", "null", "n/a", "Null", "NA", "nan"},
{false, false, false, false, true, false, false});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expect, view.column(0));
}
// Setting na_values with default values
{
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.na_values({"Null"})
.dtypes({dtype<cudf::string_view>()})
.header(-1)
.skip_blank_lines(false);
auto const result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
auto expect =
cudf::test::strings_column_wrapper({"NULL", "", "null", "n/a", "Null", "NA", "nan"},
{false, false, false, false, false, false, false});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expect, view.column(0));
}
// Setting na_values without default values
{
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.keep_default_na(false)
.na_values({"Null"})
.dtypes({dtype<cudf::string_view>()})
.header(-1)
.skip_blank_lines(false);
auto const result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
auto expect =
cudf::test::strings_column_wrapper({"NULL", "", "null", "n/a", "Null", "NA", "nan"},
                                         {true, true, true, true, false, true, true});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expect, view.column(0));
}
// Filter enabled, but no NA values
{
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.keep_default_na(false)
.dtypes({dtype<cudf::string_view>()})
.header(-1)
.skip_blank_lines(false);
auto const result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
auto expect =
cudf::test::strings_column_wrapper({"NULL", "", "null", "n/a", "Null", "NA", "nan"});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expect, view.column(0));
}
}
TEST_F(CsvReaderTest, FailCases)
{
std::string buffer = "1,2,3";
{
EXPECT_THROW(
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.byte_range_offset(4)
.skiprows(1),
std::invalid_argument);
}
{
EXPECT_THROW(
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.byte_range_offset(4)
.skipfooter(1),
std::invalid_argument);
}
{
EXPECT_THROW(
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.byte_range_offset(4)
.nrows(1),
cudf::logic_error);
}
{
EXPECT_THROW(
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.byte_range_size(4)
.skiprows(1),
std::invalid_argument);
}
{
EXPECT_THROW(
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.byte_range_size(4)
.skipfooter(1),
std::invalid_argument);
}
{
EXPECT_THROW(
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.byte_range_size(4)
.nrows(1),
cudf::logic_error);
}
{
EXPECT_THROW(
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.skiprows(1)
.byte_range_offset(4),
cudf::logic_error);
}
{
EXPECT_THROW(
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.skipfooter(1)
.byte_range_offset(4),
cudf::logic_error);
}
{
EXPECT_THROW(
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.nrows(1)
.byte_range_offset(4),
cudf::logic_error);
}
{
EXPECT_THROW(
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.skiprows(1)
.byte_range_size(4),
cudf::logic_error);
}
{
EXPECT_THROW(
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.skipfooter(1)
.byte_range_size(4),
cudf::logic_error);
}
{
EXPECT_THROW(
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.nrows(1)
.byte_range_size(4),
cudf::logic_error);
}
{
EXPECT_THROW(
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.nrows(1)
.skipfooter(1),
std::invalid_argument);
}
{
EXPECT_THROW(
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.skipfooter(1)
.nrows(1),
cudf::logic_error);
}
{
EXPECT_THROW(
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.na_filter(false)
.na_values({"Null"}),
cudf::logic_error);
}
}
TEST_F(CsvReaderTest, HexTest)
{
auto filepath = temp_env->get_temp_filepath("Hexadecimal.csv");
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << "0x0\n-0x1000\n0xfedcba\n0xABCDEF\n0xaBcDeF\n9512c20b\n";
}
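  // Hex parsing accepts an optional sign, an optional "0x" prefix and
  // mixed-case digits; the unprefixed "9512c20b" is 2501034507 in decimal.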
// specify hex columns by name
{
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A"})
.dtypes({dtype<int64_t>()})
.header(-1)
.parse_hex({"A"});
auto result = cudf::io::read_csv(in_opts);
expect_column_data_equal(
std::vector<int64_t>{0, -4096, 16702650, 11259375, 11259375, 2501034507},
result.tbl->view().column(0));
}
// specify hex columns by index
{
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A"})
.dtypes({dtype<int64_t>()})
.header(-1)
.parse_hex(std::vector<int>{0});
auto result = cudf::io::read_csv(in_opts);
expect_column_data_equal(
std::vector<int64_t>{0, -4096, 16702650, 11259375, 11259375, 2501034507},
result.tbl->view().column(0));
}
}
TYPED_TEST(CsvReaderNumericTypeTest, SingleColumnWithWriter)
{
constexpr auto num_rows = 10;
auto sequence = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return static_cast<TypeParam>(i + 1000.50f); });
auto input_column = column_wrapper<TypeParam>(sequence, sequence + num_rows);
auto input_table = cudf::table_view{std::vector<cudf::column_view>{input_column}};
auto filepath = temp_env->get_temp_filepath("SingleColumnWithWriter.csv");
write_csv_helper(filepath, input_table);
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath}).header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const result_table = result.tbl->view();
CUDF_TEST_EXPECT_TABLES_EQUIVALENT(input_table, result_table);
}
TEST_F(CsvReaderTest, MultiColumnWithWriter)
{
constexpr auto num_rows = 10;
auto int8_column = []() {
auto values = random_values<int8_t>(num_rows);
return column_wrapper<int8_t>(values.begin(), values.end());
}();
auto int16_column = []() {
auto values = random_values<int16_t>(num_rows);
return column_wrapper<int16_t>(values.begin(), values.end());
}();
auto int32_column = []() {
auto values = random_values<int32_t>(num_rows);
return column_wrapper<int32_t>(values.begin(), values.end());
}();
auto int64_column = []() {
auto values = random_values<int64_t>(num_rows);
return column_wrapper<int64_t>(values.begin(), values.end());
}();
auto uint8_column = []() {
auto values = random_values<uint8_t>(num_rows);
return column_wrapper<uint8_t>(values.begin(), values.end());
}();
auto uint16_column = []() {
auto values = random_values<uint16_t>(num_rows);
return column_wrapper<uint16_t>(values.begin(), values.end());
}();
auto uint32_column = []() {
auto values = random_values<uint32_t>(num_rows);
return column_wrapper<uint32_t>(values.begin(), values.end());
}();
auto uint64_column = []() {
auto values = random_values<uint64_t>(num_rows);
return column_wrapper<uint64_t>(values.begin(), values.end());
}();
auto float32_column = []() {
auto values = random_values<float>(num_rows);
return column_wrapper<float>(values.begin(), values.end());
}();
auto float64_column = []() {
auto values = random_values<double>(num_rows);
return column_wrapper<double>(values.begin(), values.end());
}();
std::vector<cudf::column_view> input_columns{int8_column,
int16_column,
int32_column,
int64_column,
uint8_column,
uint16_column,
uint32_column,
uint64_column,
float32_column,
float64_column};
cudf::table_view input_table{input_columns};
auto filepath = temp_env->get_temp_dir() + "MultiColumnWithWriter.csv";
write_csv_helper(filepath, input_table);
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.header(-1)
.dtypes({dtype<int8_t>(),
dtype<int16_t>(),
dtype<int32_t>(),
dtype<int64_t>(),
dtype<uint8_t>(),
dtype<uint16_t>(),
dtype<uint32_t>(),
dtype<uint64_t>(),
dtype<float>(),
dtype<double>()});
auto result = cudf::io::read_csv(in_opts);
auto const result_table = result.tbl->view();
std::vector<cudf::size_type> non_float64s{0, 1, 2, 3, 4, 5, 6, 7, 8};
auto const input_sliced_view = input_table.select(non_float64s);
auto const result_sliced_view = result_table.select(non_float64s);
CUDF_TEST_EXPECT_TABLES_EQUIVALENT(input_sliced_view, result_sliced_view);
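  // The float64 column can lose exactness in the text round trip, so it is
  // compared with a small tolerance instead of exact equality.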
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
double tol{1.0e-6};
auto float64_col_idx = non_float64s.size();
check_float_column(
input_table.column(float64_col_idx), result_table.column(float64_col_idx), tol, validity);
}
TEST_F(CsvReaderTest, DatesWithWriter)
{
auto filepath = temp_env->get_temp_dir() + "DatesWithWriter.csv";
auto input_column = column_wrapper<cudf::timestamp_ms, cudf::timestamp_ms::rep>{983750400000,
1288483200000,
782611200000,
656208000000,
0L,
798163200000,
774144000000,
1149679230400,
1126875750400,
2764800000};
cudf::table_view input_table(std::vector<cudf::column_view>{input_column});
// TODO need to add a dayfirst flag?
write_csv_helper(filepath, input_table);
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A"})
.dtypes({data_type{type_id::TIMESTAMP_MILLISECONDS}})
.dayfirst(true)
.header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const result_table = result.tbl->view();
check_timestamp_column(input_table.column(0), result_table.column(0));
}
TEST_F(CsvReaderTest, DatesStringWithWriter)
{
{
auto filepath = temp_env->get_temp_dir() + "DatesStringWithWriter_D.csv";
auto input_column = column_wrapper<cudf::timestamp_D, cudf::timestamp_D::rep>{-106751, 106751};
auto expected_column = column_wrapper<cudf::string_view>{"1677-09-22", "2262-04-11"};
cudf::table_view input_table(std::vector<cudf::column_view>{input_column});
write_csv_helper(filepath, input_table);
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A"})
.header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const result_table = result.tbl->view();
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected_column, result_table.column(0));
}
{
auto filepath = temp_env->get_temp_dir() + "DatesStringWithWriter_s.csv";
auto input_column =
column_wrapper<cudf::timestamp_s, cudf::timestamp_s::rep>{-9223372036, 9223372036};
auto expected_column =
column_wrapper<cudf::string_view>{"1677-09-21T00:12:44Z", "2262-04-11T23:47:16Z"};
cudf::table_view input_table(std::vector<cudf::column_view>{input_column});
write_csv_helper(filepath, input_table);
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A"})
.header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const result_table = result.tbl->view();
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected_column, result_table.column(0));
}
{
auto filepath = temp_env->get_temp_dir() + "DatesStringWithWriter_ms.csv";
auto input_column =
column_wrapper<cudf::timestamp_ms, cudf::timestamp_ms::rep>{-9223372036854, 9223372036854};
auto expected_column =
column_wrapper<cudf::string_view>{"1677-09-21T00:12:43.146Z", "2262-04-11T23:47:16.854Z"};
cudf::table_view input_table(std::vector<cudf::column_view>{input_column});
write_csv_helper(filepath, input_table);
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A"})
.header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const result_table = result.tbl->view();
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected_column, result_table.column(0));
}
{
auto filepath = temp_env->get_temp_dir() + "DatesStringWithWriter_us.csv";
auto input_column = column_wrapper<cudf::timestamp_us, cudf::timestamp_us::rep>{
-9223372036854775, 9223372036854775};
auto cast_column = cudf::strings::from_timestamps(input_column, "%Y-%m-%dT%H:%M:%S.%fZ");
auto expected_column = column_wrapper<cudf::string_view>{"1677-09-21T00:12:43.145225Z",
"2262-04-11T23:47:16.854775Z"};
cudf::table_view input_table(std::vector<cudf::column_view>{input_column});
write_csv_helper(filepath, input_table);
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A"})
.header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const result_table = result.tbl->view();
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected_column, result_table.column(0));
}
{
auto filepath = temp_env->get_temp_dir() + "DatesStringWithWriter_ns.csv";
auto input_column = column_wrapper<cudf::timestamp_ns, cudf::timestamp_ns::rep>{
-9223372036854775807, 9223372036854775807};
auto expected_column = column_wrapper<cudf::string_view>{"1677-09-21T00:12:43.145224193Z",
"2262-04-11T23:47:16.854775807Z"};
cudf::table_view input_table(std::vector<cudf::column_view>{input_column});
write_csv_helper(filepath, input_table);
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A"})
.header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const result_table = result.tbl->view();
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected_column, result_table.column(0));
}
}
TEST_F(CsvReaderTest, FloatingPointWithWriter)
{
auto filepath = temp_env->get_temp_dir() + "FloatingPointWithWriter.csv";
auto input_column =
column_wrapper<double>{5.6, 56.79, 12000000000., 0.7, 3.000, 12.34, 0.31, -73.98007199999998};
cudf::table_view input_table(std::vector<cudf::column_view>{input_column});
// TODO add lineterminator=";"
write_csv_helper(filepath, input_table);
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names({"A"})
.dtypes({dtype<double>()})
.header(-1);
// in_opts.lineterminator = ';';
auto result = cudf::io::read_csv(in_opts);
auto const result_table = result.tbl->view();
CUDF_TEST_EXPECT_TABLES_EQUIVALENT(input_table, result_table);
}
TEST_F(CsvReaderTest, StringsWithWriter)
{
std::vector<std::string> names{"line", "verse"};
auto filepath = temp_env->get_temp_dir() + "StringsWithWriter.csv";
auto int_column = column_wrapper<int32_t>{10, 20, 30};
auto string_column =
column_wrapper<cudf::string_view>{"abc def ghi", "\"jkl mno pqr\"", "stu \"\"vwx\"\" yz"};
cudf::table_view input_table(std::vector<cudf::column_view>{int_column, string_column});
// TODO add quoting style flag?
write_csv_helper(filepath, input_table, names);
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.dtypes(std::vector<data_type>{dtype<int32_t>(), dtype<cudf::string_view>()})
.quoting(cudf::io::quote_style::NONE);
auto result = cudf::io::read_csv(in_opts);
auto const result_table = result.tbl->view();
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_table.column(0), result_table.column(0));
check_string_column(input_table.column(1), result_table.column(1));
ASSERT_EQ(result.metadata.schema_info.size(), names.size());
for (auto i = 0ul; i < names.size(); ++i)
EXPECT_EQ(names[i], result.metadata.schema_info[i].name);
}
TEST_F(CsvReaderTest, StringsWithWriterSimple)
{
std::vector<std::string> names{"line", "verse"};
auto filepath = temp_env->get_temp_dir() + "StringsWithWriterSimple.csv";
auto int_column = column_wrapper<int32_t>{10, 20, 30};
auto string_column = column_wrapper<cudf::string_view>{"abc def ghi", "jkl mno pq", "stu vwx y"};
cudf::table_view input_table(std::vector<cudf::column_view>{int_column, string_column});
// TODO add quoting style flag?
write_csv_helper(filepath, input_table, names);
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.dtypes(std::vector<data_type>{dtype<int32_t>(), dtype<cudf::string_view>()})
.quoting(cudf::io::quote_style::NONE);
auto result = cudf::io::read_csv(in_opts);
auto const result_table = result.tbl->view();
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_table.column(0), result_table.column(0));
check_string_column(input_table.column(1), result_table.column(1));
ASSERT_EQ(result.metadata.schema_info.size(), names.size());
for (auto i = 0ul; i < names.size(); ++i)
EXPECT_EQ(names[i], result.metadata.schema_info[i].name);
}
TEST_F(CsvReaderTest, StringsEmbeddedDelimiter)
{
std::vector<std::string> names{"line", "verse"};
auto filepath = temp_env->get_temp_dir() + "StringsWithWriterSimple.csv";
auto int_column = column_wrapper<int32_t>{10, 20, 30};
auto string_column = column_wrapper<cudf::string_view>{"abc def ghi", "jkl,mno,pq", "stu vwx y"};
cudf::table_view input_table(std::vector<cudf::column_view>{int_column, string_column});
write_csv_helper(filepath, input_table, names);
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.dtypes(std::vector<data_type>{dtype<int32_t>(), dtype<cudf::string_view>()});
auto result = cudf::io::read_csv(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUIVALENT(input_table, result.tbl->view());
ASSERT_EQ(result.metadata.schema_info.size(), names.size());
for (auto i = 0ul; i < names.size(); ++i)
EXPECT_EQ(names[i], result.metadata.schema_info[i].name);
}
TEST_F(CsvReaderTest, HeaderEmbeddedDelimiter)
{
std::vector<std::string> names{
"header1", "header,2", "quote\"embedded", "new\nline", "\"quoted\""};
auto filepath = temp_env->get_temp_dir() + "HeaderEmbeddedDelimiter.csv";
auto int_column = column_wrapper<int32_t>{10, 20, 30};
auto string_column = column_wrapper<cudf::string_view>{"abc", "jkl,mno", "xyz"};
cudf::table_view input_table(
std::vector<cudf::column_view>{int_column, string_column, int_column, int_column, int_column});
write_csv_helper(filepath, input_table, names);
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names(names)
.dtypes({dtype<int32_t>(),
dtype<cudf::string_view>(),
dtype<int32_t>(),
dtype<int32_t>(),
dtype<int32_t>()});
auto result = cudf::io::read_csv(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUIVALENT(input_table, result.tbl->view());
ASSERT_EQ(result.metadata.schema_info.size(), names.size());
for (auto i = 0ul; i < names.size(); ++i)
EXPECT_EQ(names[i], result.metadata.schema_info[i].name);
}
TEST_F(CsvReaderTest, EmptyFileWithWriter)
{
auto filepath = temp_env->get_temp_dir() + "EmptyFileWithWriter.csv";
cudf::table_view empty_table;
write_csv_helper(filepath, empty_table);
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_csv(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUIVALENT(empty_table, result.tbl->view());
}
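// Minimal in-memory datasource used by the UserImplementedSource test:
// host_read() serves byte ranges of a std::string and clamps reads that would
// run past the end of the buffer.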
class TestSource : public cudf::io::datasource {
public:
std::string const str;
TestSource(std::string s) : str(std::move(s)) {}
std::unique_ptr<buffer> host_read(size_t offset, size_t size) override
{
size = std::min(size, str.size() - offset);
return std::make_unique<non_owning_buffer>((uint8_t*)str.data() + offset, size);
}
size_t host_read(size_t offset, size_t size, uint8_t* dst) override
{
auto const read_size = std::min(size, str.size() - offset);
    memcpy(dst, str.data() + offset, read_size);
return read_size;
}
[[nodiscard]] size_t size() const override { return str.size(); }
};
TEST_F(CsvReaderTest, UserImplementedSource)
{
constexpr auto num_rows = 10;
auto int8_values = random_values<int8_t>(num_rows);
auto int16_values = random_values<int16_t>(num_rows);
auto int32_values = random_values<int32_t>(num_rows);
std::ostringstream csv_data;
for (int i = 0; i < num_rows; ++i) {
csv_data << std::to_string(int8_values[i]) << "," << int16_values[i] << "," << int32_values[i]
<< "\n";
}
TestSource source{csv_data.str()};
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{&source})
.dtypes({dtype<int8_t>(), dtype<int16_t>(), dtype<int32_t>()})
.header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
expect_column_data_equal(int8_values, view.column(0));
expect_column_data_equal(int16_values, view.column(1));
expect_column_data_equal(int32_values, view.column(2));
}
TEST_F(CsvReaderTest, DurationsWithWriter)
{
auto filepath = temp_env->get_temp_dir() + "DurationsWithWriter.csv";
constexpr long max_value_d = std::numeric_limits<cudf::duration_D::rep>::max();
constexpr long min_value_d = std::numeric_limits<cudf::duration_D::rep>::min();
constexpr long max_value_ns = std::numeric_limits<cudf::duration_s::rep>::max();
constexpr long min_value_ns = std::numeric_limits<cudf::duration_s::rep>::min();
column_wrapper<cudf::duration_D, cudf::duration_D::rep> durations_D{
{-86400L, -3600L, -2L, -1L, 0L, 1L, 2L, min_value_d, max_value_d}};
column_wrapper<cudf::duration_s, int64_t> durations_s{{-86400L,
-3600L,
-2L,
-1L,
0L,
1L,
2L,
min_value_ns / 1000000000 + 1,
max_value_ns / 1000000000}};
column_wrapper<cudf::duration_ms, int64_t> durations_ms{
{-86400L, -3600L, -2L, -1L, 0L, 1L, 2L, min_value_ns / 1000000 + 1, max_value_ns / 1000000}};
column_wrapper<cudf::duration_us, int64_t> durations_us{
{-86400L, -3600L, -2L, -1L, 0L, 1L, 2L, min_value_ns / 1000 + 1, max_value_ns / 1000}};
column_wrapper<cudf::duration_ns, int64_t> durations_ns{
{-86400L, -3600L, -2L, -1L, 0L, 1L, 2L, min_value_ns, max_value_ns}};
cudf::table_view input_table(std::vector<cudf::column_view>{
durations_D, durations_s, durations_ms, durations_us, durations_ns});
std::vector<std::string> names{"D", "s", "ms", "us", "ns"};
write_csv_helper(filepath, input_table, names);
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.names(names)
.dtypes({data_type{type_id::DURATION_DAYS},
data_type{type_id::DURATION_SECONDS},
data_type{type_id::DURATION_MILLISECONDS},
data_type{type_id::DURATION_MICROSECONDS},
data_type{type_id::DURATION_NANOSECONDS}});
auto result = cudf::io::read_csv(in_opts);
auto const result_table = result.tbl->view();
CUDF_TEST_EXPECT_TABLES_EQUIVALENT(input_table, result_table);
ASSERT_EQ(result.metadata.schema_info.size(), names.size());
for (auto i = 0ul; i < names.size(); ++i)
EXPECT_EQ(names[i], result.metadata.schema_info[i].name);
}
TEST_F(CsvReaderTest, ParseInRangeIntegers)
{
std::vector<int64_t> small_int = {0, -10, 20, -30};
std::vector<int64_t> less_equal_int64_max = {std::numeric_limits<int64_t>::max() - 3,
std::numeric_limits<int64_t>::max() - 2,
std::numeric_limits<int64_t>::max() - 1,
std::numeric_limits<int64_t>::max()};
std::vector<int64_t> greater_equal_int64_min = {std::numeric_limits<int64_t>::min() + 3,
std::numeric_limits<int64_t>::min() + 2,
std::numeric_limits<int64_t>::min() + 1,
std::numeric_limits<int64_t>::min()};
std::vector<uint64_t> greater_int64_max = {uint64_t{std::numeric_limits<int64_t>::max()} - 1,
uint64_t{std::numeric_limits<int64_t>::max()},
uint64_t{std::numeric_limits<int64_t>::max()} + 1,
uint64_t{std::numeric_limits<int64_t>::max()} + 2};
std::vector<uint64_t> less_equal_uint64_max = {std::numeric_limits<uint64_t>::max() - 3,
std::numeric_limits<uint64_t>::max() - 2,
std::numeric_limits<uint64_t>::max() - 1,
std::numeric_limits<uint64_t>::max()};
auto input_small_int = column_wrapper<int64_t>(small_int.begin(), small_int.end());
auto input_less_equal_int64_max =
column_wrapper<int64_t>(less_equal_int64_max.begin(), less_equal_int64_max.end());
auto input_greater_equal_int64_min =
column_wrapper<int64_t>(greater_equal_int64_min.begin(), greater_equal_int64_min.end());
auto input_greater_int64_max =
column_wrapper<uint64_t>(greater_int64_max.begin(), greater_int64_max.end());
auto input_less_equal_uint64_max =
column_wrapper<uint64_t>(less_equal_uint64_max.begin(), less_equal_uint64_max.end());
auto small_int_append_zeros = prepend_zeros(small_int, 32, true);
auto less_equal_int64_max_append_zeros = prepend_zeros(less_equal_int64_max, 32, true);
auto greater_equal_int64_min_append_zeros = prepend_zeros(greater_equal_int64_min, 17);
auto greater_int64_max_append_zeros = prepend_zeros(greater_int64_max, 5);
auto less_equal_uint64_max_append_zeros = prepend_zeros(less_equal_uint64_max, 8, true);
auto input_small_int_append =
column_wrapper<cudf::string_view>(small_int_append_zeros.begin(), small_int_append_zeros.end());
auto input_less_equal_int64_max_append = column_wrapper<cudf::string_view>(
less_equal_int64_max_append_zeros.begin(), less_equal_int64_max_append_zeros.end());
auto input_greater_equal_int64_min_append = column_wrapper<cudf::string_view>(
greater_equal_int64_min_append_zeros.begin(), greater_equal_int64_min_append_zeros.end());
auto input_greater_int64_max_append = column_wrapper<cudf::string_view>(
greater_int64_max_append_zeros.begin(), greater_int64_max_append_zeros.end());
auto input_less_equal_uint64_max_append = column_wrapper<cudf::string_view>(
less_equal_uint64_max_append_zeros.begin(), less_equal_uint64_max_append_zeros.end());
std::vector<cudf::column_view> input_columns{input_small_int,
input_less_equal_int64_max,
input_greater_equal_int64_min,
input_greater_int64_max,
input_less_equal_uint64_max,
input_small_int_append,
input_less_equal_int64_max_append,
input_greater_equal_int64_min_append,
input_greater_int64_max_append,
input_less_equal_uint64_max_append};
cudf::table_view input_table{input_columns};
auto filepath = temp_env->get_temp_filepath("ParseInRangeIntegers.csv");
write_csv_helper(filepath, input_table);
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath}).header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_small_int, view.column(0));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_less_equal_int64_max, view.column(1));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_greater_equal_int64_min, view.column(2));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_greater_int64_max, view.column(3));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_less_equal_uint64_max, view.column(4));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_small_int, view.column(5));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_less_equal_int64_max, view.column(6));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_greater_equal_int64_min, view.column(7));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_greater_int64_max, view.column(8));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_less_equal_uint64_max, view.column(9));
}
TEST_F(CsvReaderTest, ParseOutOfRangeIntegers)
{
std::vector<std::string> out_of_range_positive = {"111111111111111111111",
"2222222222222222222222",
"33333333333333333333333",
"444444444444444444444444"};
std::vector<std::string> out_of_range_negative = {"-111111111111111111111",
"-2222222222222222222222",
"-33333333333333333333333",
"-444444444444444444444444"};
std::vector<std::string> greater_uint64_max = {
"18446744073709551615", "18446744073709551616", "18446744073709551617", "18446744073709551618"};
std::vector<std::string> less_int64_min = {
"-9223372036854775807", "-9223372036854775808", "-9223372036854775809", "-9223372036854775810"};
std::vector<std::string> mixed_range = {
"18446744073709551613", "18446744073709551614", "18446744073709551615", "-5"};
auto input_out_of_range_positive =
column_wrapper<cudf::string_view>(out_of_range_positive.begin(), out_of_range_positive.end());
auto input_out_of_range_negative =
column_wrapper<cudf::string_view>(out_of_range_negative.begin(), out_of_range_negative.end());
auto input_greater_uint64_max =
column_wrapper<cudf::string_view>(greater_uint64_max.begin(), greater_uint64_max.end());
auto input_less_int64_min =
column_wrapper<cudf::string_view>(less_int64_min.begin(), less_int64_min.end());
auto input_mixed_range =
column_wrapper<cudf::string_view>(mixed_range.begin(), mixed_range.end());
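  // No single integer type can hold every value in these columns (they
  // overflow int64 and/or uint64, or mix uint64-sized magnitudes with a
  // negative), so type inference keeps them as STRING.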
auto out_of_range_positive_append_zeros = prepend_zeros(out_of_range_positive, 32, true);
auto out_of_range_negative_append_zeros = prepend_zeros(out_of_range_negative, 5);
auto greater_uint64_max_append_zeros = prepend_zeros(greater_uint64_max, 8, true);
auto less_int64_min_append_zeros = prepend_zeros(less_int64_min, 17);
auto mixed_range_append_zeros = prepend_zeros(mixed_range, 2, true);
auto input_out_of_range_positive_append = column_wrapper<cudf::string_view>(
out_of_range_positive_append_zeros.begin(), out_of_range_positive_append_zeros.end());
auto input_out_of_range_negative_append = column_wrapper<cudf::string_view>(
out_of_range_negative_append_zeros.begin(), out_of_range_negative_append_zeros.end());
auto input_greater_uint64_max_append = column_wrapper<cudf::string_view>(
greater_uint64_max_append_zeros.begin(), greater_uint64_max_append_zeros.end());
auto input_less_int64_min_append = column_wrapper<cudf::string_view>(
less_int64_min_append_zeros.begin(), less_int64_min_append_zeros.end());
auto input_mixed_range_append = column_wrapper<cudf::string_view>(
mixed_range_append_zeros.begin(), mixed_range_append_zeros.end());
std::vector<cudf::column_view> input_columns{input_out_of_range_positive,
input_out_of_range_negative,
input_greater_uint64_max,
input_less_int64_min,
input_mixed_range,
input_out_of_range_positive_append,
input_out_of_range_negative_append,
input_greater_uint64_max_append,
input_less_int64_min_append,
input_mixed_range_append};
cudf::table_view input_table{input_columns};
auto filepath = temp_env->get_temp_filepath("ParseOutOfRangeIntegers.csv");
write_csv_helper(filepath, input_table);
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath}).header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_out_of_range_positive, view.column(0));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_out_of_range_negative, view.column(1));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_greater_uint64_max, view.column(2));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_less_int64_min, view.column(3));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_mixed_range, view.column(4));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_out_of_range_positive_append, view.column(5));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_out_of_range_negative_append, view.column(6));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_greater_uint64_max_append, view.column(7));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_less_int64_min_append, view.column(8));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_mixed_range_append, view.column(9));
}
TEST_F(CsvReaderTest, ReadMaxNumericValue)
{
constexpr auto num_rows = 10;
auto sequence = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return std::numeric_limits<uint64_t>::max() - i; });
auto filepath = temp_env->get_temp_filepath("ReadMaxNumericValue.csv");
{
std::ofstream out_file{filepath, std::ofstream::out};
std::ostream_iterator<uint64_t> output_iterator(out_file, "\n");
std::copy(sequence, sequence + num_rows, output_iterator);
}
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath}).header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
expect_column_data_equal(std::vector<uint64_t>(sequence, sequence + num_rows), view.column(0));
}
TEST_F(CsvReaderTest, DefaultWriteChunkSize)
{
for (auto num_rows : {1, 20, 100, 1000}) {
auto sequence = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return static_cast<int32_t>(i + 1000.50f); });
auto input_column = column_wrapper<int32_t>(sequence, sequence + num_rows);
auto input_table = cudf::table_view{std::vector<cudf::column_view>{input_column}};
cudf::io::csv_writer_options opts =
cudf::io::csv_writer_options::builder(cudf::io::sink_info{"unused.path"}, input_table);
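    // For tables of these sizes the builder's default rows_per_chunk matches
    // the number of rows in the table being written.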
ASSERT_EQ(num_rows, opts.get_rows_per_chunk());
}
}
TEST_F(CsvReaderTest, DtypesMap)
{
std::string csv_in{"12,9\n34,8\n56,7"};
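  // dtypes supplied as a name->type map are matched to columns by name, not by
  // the order of the map entries ("B" is listed first but remains column 1).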
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{csv_in.c_str(), csv_in.size()})
.names({"A", "B"})
.dtypes({{"B", dtype<int16_t>()}, {"A", dtype<int32_t>()}})
.header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const result_table = result.tbl->view();
ASSERT_EQ(result_table.num_columns(), 2);
ASSERT_EQ(result_table.column(0).type(), data_type{type_id::INT32});
ASSERT_EQ(result_table.column(1).type(), data_type{type_id::INT16});
expect_column_data_equal(std::vector<int32_t>{12, 34, 56}, result_table.column(0));
expect_column_data_equal(std::vector<int16_t>{9, 8, 7}, result_table.column(1));
}
TEST_F(CsvReaderTest, DtypesMapPartial)
{
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{nullptr, 0})
.names({"A", "B"})
.dtypes({{"A", dtype<int16_t>()}});
{
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
ASSERT_EQ(type_id::INT16, view.column(0).type().id());
// Default to String if there's no data
ASSERT_EQ(type_id::STRING, view.column(1).type().id());
}
in_opts.set_dtypes({{"B", dtype<uint32_t>()}});
{
auto result = cudf::io::read_csv(in_opts);
auto const view = result.tbl->view();
ASSERT_EQ(type_id::STRING, view.column(0).type().id());
ASSERT_EQ(type_id::UINT32, view.column(1).type().id());
}
}
TEST_F(CsvReaderTest, DtypesArrayInvalid)
{
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{nullptr, 0})
.names({"A", "B", "C"})
.dtypes(std::vector<cudf::data_type>{dtype<int16_t>(), dtype<int8_t>()});
EXPECT_THROW(cudf::io::read_csv(in_opts), cudf::logic_error);
}
TEST_F(CsvReaderTest, CsvDefaultOptionsWriteReadMatch)
{
auto const filepath = temp_env->get_temp_dir() + "issue.csv";
// make up some kind of dataframe
auto int_column = column_wrapper<int32_t>{10, 20, 30};
auto str_column = column_wrapper<cudf::string_view>{"abc", "mno", "xyz"};
cudf::table_view input_table(std::vector<cudf::column_view>{int_column, str_column});
// write that dataframe to a csv using default options to some temporary file
cudf::io::csv_writer_options writer_options =
cudf::io::csv_writer_options::builder(cudf::io::sink_info{filepath}, input_table);
cudf::io::write_csv(writer_options);
// read the temp csv file using default options
cudf::io::csv_reader_options read_options =
cudf::io::csv_reader_options::builder(cudf::io::source_info{filepath})
.dtypes(std::vector<data_type>{dtype<int32_t>(), dtype<cudf::string_view>()});
cudf::io::table_with_metadata new_table_and_metadata = cudf::io::read_csv(read_options);
// verify that the tables are identical, or as identical as expected.
auto const new_table_view = new_table_and_metadata.tbl->view();
CUDF_TEST_EXPECT_TABLES_EQUIVALENT(input_table, new_table_view);
EXPECT_EQ(new_table_and_metadata.metadata.schema_info[0].name, "0");
EXPECT_EQ(new_table_and_metadata.metadata.schema_info[1].name, "1");
}
TEST_F(CsvReaderTest, UseColsValidation)
{
const std::string buffer = "1,2,3";
const cudf::io::csv_reader_options idx_cnt_options =
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.names({"a", "b"})
.use_cols_indexes({0});
EXPECT_THROW(cudf::io::read_csv(idx_cnt_options), cudf::logic_error);
cudf::io::csv_reader_options unique_idx_cnt_options =
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.names({"a", "b"})
.use_cols_indexes({0, 0});
EXPECT_THROW(cudf::io::read_csv(unique_idx_cnt_options), cudf::logic_error);
cudf::io::csv_reader_options bad_name_options =
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.names({"a", "b", "c"})
.use_cols_names({"nonexistent_name"});
EXPECT_THROW(cudf::io::read_csv(bad_name_options), cudf::logic_error);
}
TEST_F(CsvReaderTest, CropColumns)
{
const std::string csv_in{"12,9., 10\n34,8., 20\n56,7., 30"};
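  // Three columns are present in the data but only two names/dtypes are given,
  // so the trailing column is dropped from the result.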
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{csv_in.c_str(), csv_in.size()})
.dtypes(std::vector<data_type>{dtype<int32_t>(), dtype<float>()})
.names({"a", "b"})
.header(-1);
auto const result = cudf::io::read_csv(in_opts);
auto const result_table = result.tbl->view();
ASSERT_EQ(result_table.num_columns(), 2);
ASSERT_EQ(result_table.column(0).type(), data_type{type_id::INT32});
ASSERT_EQ(result_table.column(1).type(), data_type{type_id::FLOAT32});
expect_column_data_equal(std::vector<int32_t>{12, 34, 56}, result_table.column(0));
expect_column_data_equal(std::vector<float>{9., 8., 7.}, result_table.column(1));
}
TEST_F(CsvReaderTest, CropColumnsUseColsNames)
{
std::string csv_in{"12,9., 10\n34,8., 20\n56,7., 30"};
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{csv_in.c_str(), csv_in.size()})
.dtypes(std::vector<data_type>{dtype<int32_t>(), dtype<float>()})
.names({"a", "b"})
.use_cols_names({"b"})
.header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const result_table = result.tbl->view();
ASSERT_EQ(result_table.num_columns(), 1);
ASSERT_EQ(result_table.column(0).type(), data_type{type_id::FLOAT32});
expect_column_data_equal(std::vector<float>{9., 8., 7.}, result_table.column(0));
}
TEST_F(CsvReaderTest, ExtraColumns)
{
std::string csv_in{"12,9., 10\n34,8., 20\n56,7., 30"};
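  // The data has only three columns; the fourth requested column ("d") is created as an all-null
  // column (INT8 by default, or the requested dtype when dtypes are given).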
{
cudf::io::csv_reader_options opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{csv_in.c_str(), csv_in.size()})
.names({"a", "b", "c", "d"})
.header(-1);
auto result = cudf::io::read_csv(opts);
auto const result_table = result.tbl->view();
ASSERT_EQ(result_table.num_columns(), 4);
ASSERT_EQ(result_table.column(3).type(), data_type{type_id::INT8});
ASSERT_EQ(result_table.column(3).null_count(), 3);
}
{
cudf::io::csv_reader_options with_dtypes_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{csv_in.c_str(), csv_in.size()})
.names({"a", "b", "c", "d"})
.dtypes({dtype<int32_t>(), dtype<int32_t>(), dtype<int32_t>(), dtype<float>()})
.header(-1);
auto result = cudf::io::read_csv(with_dtypes_opts);
auto const result_table = result.tbl->view();
ASSERT_EQ(result_table.num_columns(), 4);
ASSERT_EQ(result_table.column(3).type(), data_type{type_id::FLOAT32});
ASSERT_EQ(result_table.column(3).null_count(), 3);
}
}
TEST_F(CsvReaderTest, ExtraColumnsUseCols)
{
std::string csv_in{"12,9., 10\n34,8., 20\n56,7., 30"};
{
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{csv_in.c_str(), csv_in.size()})
.names({"a", "b", "c", "d"})
.use_cols_names({"b", "d"})
.header(-1);
auto result = cudf::io::read_csv(in_opts);
auto const result_table = result.tbl->view();
ASSERT_EQ(result_table.num_columns(), 2);
ASSERT_EQ(result_table.column(1).type(), data_type{type_id::INT8});
ASSERT_EQ(result_table.column(1).null_count(), 3);
}
{
cudf::io::csv_reader_options with_dtypes_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{csv_in.c_str(), csv_in.size()})
.names({"a", "b", "c", "d"})
.use_cols_names({"b", "d"})
.dtypes({dtype<int32_t>(), dtype<int32_t>(), dtype<int32_t>(), dtype<cudf::string_view>()})
.header(-1);
auto result = cudf::io::read_csv(with_dtypes_opts);
auto const result_table = result.tbl->view();
ASSERT_EQ(result_table.num_columns(), 2);
ASSERT_EQ(result_table.column(1).type(), data_type{type_id::STRING});
ASSERT_EQ(result_table.column(1).null_count(), 3);
}
}
TEST_F(CsvReaderTest, EmptyColumns)
{
  // The first column only has empty fields; the second column contains only "null" literals
std::string csv_in{",null\n,null"};
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{csv_in.c_str(), csv_in.size()})
.names({"a", "b", "c", "d"})
.header(-1);
// More elements in `names` than in the file; additional columns are filled with nulls
auto result = cudf::io::read_csv(in_opts);
auto const result_table = result.tbl->view();
EXPECT_EQ(result_table.num_columns(), 4);
// All columns should contain only nulls; expect INT8 type to use as little memory as possible
for (auto& column : result_table) {
EXPECT_EQ(column.type(), data_type{type_id::INT8});
EXPECT_EQ(column.null_count(), 2);
}
}
TEST_F(CsvReaderTest, BlankLineAfterFirstRow)
{
std::string csv_in{"12,9., 10\n\n"};
{
cudf::io::csv_reader_options no_header_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{csv_in.c_str(), csv_in.size()})
.header(-1);
// No header, getting column names/count from first row
auto result = cudf::io::read_csv(no_header_opts);
auto const result_table = result.tbl->view();
ASSERT_EQ(result_table.num_columns(), 3);
}
{
cudf::io::csv_reader_options header_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{csv_in.c_str(), csv_in.size()});
// Getting column names/count from header
auto result = cudf::io::read_csv(header_opts);
auto const result_table = result.tbl->view();
ASSERT_EQ(result_table.num_columns(), 3);
}
}
TEST_F(CsvReaderTest, NullCount)
{
std::string buffer = "0,,\n1,1.,\n2,,\n3,,\n4,4.,\n5,5.,\n6,6.,\n7,7.,\n";
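  // Column 0 is fully populated, column 1 is empty in rows 0, 2 and 3, and column 2 is empty in
  // every row.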
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.header(-1);
auto const result = cudf::io::read_csv(in_opts);
auto const result_view = result.tbl->view();
EXPECT_EQ(result_view.num_rows(), 8);
EXPECT_EQ(result_view.column(0).null_count(), 0);
EXPECT_EQ(result_view.column(1).null_count(), 3);
EXPECT_EQ(result_view.column(2).null_count(), 8);
}
TEST_F(CsvReaderTest, UTF8BOM)
{
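  // The buffer starts with the UTF-8 byte order mark (0xEF 0xBB 0xBF), which the reader is
  // expected to strip before parsing the header row.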
std::string buffer = "\xEF\xBB\xBFMonth,Day,Year\nJune,6,2023\nAugust,25,1990\nMay,1,2000\n";
cudf::io::csv_reader_options in_opts =
cudf::io::csv_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()});
auto const result = cudf::io::read_csv(in_opts);
auto const result_view = result.tbl->view();
EXPECT_EQ(result_view.num_rows(), 3);
EXPECT_EQ(result.metadata.schema_info.front().name, "Month");
auto col1 = cudf::test::strings_column_wrapper({"June", "August", "May"});
auto col2 = cudf::test::fixed_width_column_wrapper<int64_t>({6, 25, 1});
auto col3 = cudf::test::fixed_width_column_wrapper<int64_t>({2023, 1990, 2000});
auto expected = cudf::table_view({col1, col2, col3});
CUDF_TEST_EXPECT_TABLES_EQUIVALENT(result_view, expected);
}
CUDF_TEST_PROGRAM_MAIN()
| 0 |
rapidsai_public_repos/cudf/cpp/tests
|
rapidsai_public_repos/cudf/cpp/tests/io/type_inference_test.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <io/utilities/string_parsing.hpp>
#include <io/utilities/trie.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf_test/base_fixture.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>
#include <cstddef>
#include <string>
#include <vector>
using cudf::io::parse_options;
using cudf::io::detail::infer_data_type;
// Base test fixture for tests
struct TypeInference : public cudf::test::BaseFixture {};
TEST_F(TypeInference, Basic)
{
auto const stream = cudf::get_default_stream();
auto options = parse_options{',', '\n', '\"'};
options.trie_true = cudf::detail::create_serialized_trie({"true"}, stream);
options.trie_false = cudf::detail::create_serialized_trie({"false"}, stream);
options.trie_na = cudf::detail::create_serialized_trie({"", "null"}, stream);
std::string data = R"json([42,52,5])json";
auto d_data = cudf::make_string_scalar(data);
auto& d_string_scalar = static_cast<cudf::string_scalar&>(*d_data);
auto const string_offset = std::vector<cudf::size_type>{1, 4, 7};
auto const string_length = std::vector<cudf::size_type>{2, 2, 1};
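  // The offset/length pairs select the substrings "42", "52" and "5" from the buffer above.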
auto const d_string_offset = cudf::detail::make_device_uvector_async(
string_offset, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
auto const d_string_length = cudf::detail::make_device_uvector_async(
string_length, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
auto d_col_strings =
thrust::make_zip_iterator(thrust::make_tuple(d_string_offset.begin(), d_string_length.begin()));
auto res_type =
infer_data_type(options.json_view(),
{d_string_scalar.data(), static_cast<std::size_t>(d_string_scalar.size())},
d_col_strings,
string_offset.size(),
stream);
EXPECT_EQ(res_type, cudf::data_type{cudf::type_id::INT64});
}
TEST_F(TypeInference, Null)
{
auto const stream = cudf::get_default_stream();
auto options = parse_options{',', '\n', '\"'};
options.trie_true = cudf::detail::create_serialized_trie({"true"}, stream);
options.trie_false = cudf::detail::create_serialized_trie({"false"}, stream);
options.trie_na = cudf::detail::create_serialized_trie({"", "null"}, stream);
std::string data = R"json([52,5])json";
auto d_data = cudf::make_string_scalar(data);
auto& d_string_scalar = static_cast<cudf::string_scalar&>(*d_data);
auto const string_offset = std::vector<cudf::size_type>{1, 1, 4};
auto const string_length = std::vector<cudf::size_type>{0, 2, 1};
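  // The first entry has zero length (a null field); the remaining entries select "52" and "5".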
auto const d_string_offset = cudf::detail::make_device_uvector_async(
string_offset, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
auto const d_string_length = cudf::detail::make_device_uvector_async(
string_length, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
auto d_col_strings =
thrust::make_zip_iterator(thrust::make_tuple(d_string_offset.begin(), d_string_length.begin()));
auto res_type =
infer_data_type(options.json_view(),
{d_string_scalar.data(), static_cast<std::size_t>(d_string_scalar.size())},
d_col_strings,
string_offset.size(),
stream);
EXPECT_EQ(res_type, cudf::data_type{cudf::type_id::INT64});
}
TEST_F(TypeInference, AllNull)
{
auto const stream = cudf::get_default_stream();
auto options = parse_options{',', '\n', '\"'};
options.trie_true = cudf::detail::create_serialized_trie({"true"}, stream);
options.trie_false = cudf::detail::create_serialized_trie({"false"}, stream);
options.trie_na = cudf::detail::create_serialized_trie({"", "null"}, stream);
std::string data = R"json([null])json";
auto d_data = cudf::make_string_scalar(data);
auto& d_string_scalar = static_cast<cudf::string_scalar&>(*d_data);
auto const string_offset = std::vector<cudf::size_type>{1, 1, 1};
auto const string_length = std::vector<cudf::size_type>{0, 0, 4};
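  // Two zero-length (null) entries plus the "null" literal; with no valid values the inferred
  // type falls back to INT8.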
auto const d_string_offset = cudf::detail::make_device_uvector_async(
string_offset, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
auto const d_string_length = cudf::detail::make_device_uvector_async(
string_length, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
auto d_col_strings =
thrust::make_zip_iterator(thrust::make_tuple(d_string_offset.begin(), d_string_length.begin()));
auto res_type =
infer_data_type(options.json_view(),
{d_string_scalar.data(), static_cast<std::size_t>(d_string_scalar.size())},
d_col_strings,
string_offset.size(),
stream);
EXPECT_EQ(res_type, cudf::data_type{cudf::type_id::INT8}); // INT8 if all nulls
}
TEST_F(TypeInference, String)
{
auto const stream = cudf::get_default_stream();
auto options = parse_options{',', '\n', '\"'};
options.trie_true = cudf::detail::create_serialized_trie({"true"}, stream);
options.trie_false = cudf::detail::create_serialized_trie({"false"}, stream);
options.trie_na = cudf::detail::create_serialized_trie({"", "null"}, stream);
std::string data = R"json(["1990","8","25"])json";
auto d_data = cudf::make_string_scalar(data);
auto& d_string_scalar = static_cast<cudf::string_scalar&>(*d_data);
auto const string_offset = std::vector<cudf::size_type>{1, 8, 12};
auto const string_length = std::vector<cudf::size_type>{6, 3, 4};
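  // Each entry includes its surrounding quotes ("1990", "8", "25"), so the column is inferred as
  // STRING.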
auto const d_string_offset = cudf::detail::make_device_uvector_async(
string_offset, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
auto const d_string_length = cudf::detail::make_device_uvector_async(
string_length, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
auto d_col_strings =
thrust::make_zip_iterator(thrust::make_tuple(d_string_offset.begin(), d_string_length.begin()));
auto res_type =
infer_data_type(options.json_view(),
{d_string_scalar.data(), static_cast<std::size_t>(d_string_scalar.size())},
d_col_strings,
string_offset.size(),
stream);
EXPECT_EQ(res_type, cudf::data_type{cudf::type_id::STRING});
}
TEST_F(TypeInference, Bool)
{
auto const stream = cudf::get_default_stream();
auto options = parse_options{',', '\n', '\"'};
options.trie_true = cudf::detail::create_serialized_trie({"true"}, stream);
options.trie_false = cudf::detail::create_serialized_trie({"false"}, stream);
options.trie_na = cudf::detail::create_serialized_trie({"", "null"}, stream);
std::string data = R"json([true,false,false])json";
auto d_data = cudf::make_string_scalar(data);
auto& d_string_scalar = static_cast<cudf::string_scalar&>(*d_data);
auto const string_offset = std::vector<cudf::size_type>{1, 6, 12};
auto const string_length = std::vector<cudf::size_type>{4, 5, 5};
auto const d_string_offset = cudf::detail::make_device_uvector_async(
string_offset, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
auto const d_string_length = cudf::detail::make_device_uvector_async(
string_length, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
auto d_col_strings =
thrust::make_zip_iterator(thrust::make_tuple(d_string_offset.begin(), d_string_length.begin()));
auto res_type =
infer_data_type(options.json_view(),
{d_string_scalar.data(), static_cast<std::size_t>(d_string_scalar.size())},
d_col_strings,
string_offset.size(),
stream);
EXPECT_EQ(res_type, cudf::data_type{cudf::type_id::BOOL8});
}
TEST_F(TypeInference, Timestamp)
{
auto const stream = cudf::get_default_stream();
auto options = parse_options{',', '\n', '\"'};
options.trie_true = cudf::detail::create_serialized_trie({"true"}, stream);
options.trie_false = cudf::detail::create_serialized_trie({"false"}, stream);
options.trie_na = cudf::detail::create_serialized_trie({"", "null"}, stream);
std::string data = R"json([1970/2/5,1970/8/25])json";
auto d_data = cudf::make_string_scalar(data);
auto& d_string_scalar = static_cast<cudf::string_scalar&>(*d_data);
auto const string_offset = std::vector<cudf::size_type>{1, 10};
auto const string_length = std::vector<cudf::size_type>{8, 9};
auto const d_string_offset = cudf::detail::make_device_uvector_async(
string_offset, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
auto const d_string_length = cudf::detail::make_device_uvector_async(
string_length, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
auto d_col_strings =
thrust::make_zip_iterator(thrust::make_tuple(d_string_offset.begin(), d_string_length.begin()));
auto res_type =
infer_data_type(options.json_view(),
{d_string_scalar.data(), static_cast<std::size_t>(d_string_scalar.size())},
d_col_strings,
string_offset.size(),
stream);
  // All date/time values (quoted and unquoted) are inferred as strings for now
EXPECT_EQ(res_type, cudf::data_type{cudf::type_id::STRING});
}
TEST_F(TypeInference, InvalidInput)
{
auto const stream = cudf::get_default_stream();
auto options = parse_options{',', '\n', '\"'};
options.trie_true = cudf::detail::create_serialized_trie({"true"}, stream);
options.trie_false = cudf::detail::create_serialized_trie({"false"}, stream);
options.trie_na = cudf::detail::create_serialized_trie({"", "null"}, stream);
std::string data = R"json([1,2,3,a,5])json";
auto d_data = cudf::make_string_scalar(data);
auto& d_string_scalar = static_cast<cudf::string_scalar&>(*d_data);
auto const string_offset = std::vector<cudf::size_type>{1, 3, 5, 7, 9};
auto const string_length = std::vector<cudf::size_type>{1, 1, 1, 1, 1};
auto const d_string_offset = cudf::detail::make_device_uvector_async(
string_offset, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
auto const d_string_length = cudf::detail::make_device_uvector_async(
string_length, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
auto d_col_strings =
thrust::make_zip_iterator(thrust::make_tuple(d_string_offset.begin(), d_string_length.begin()));
auto res_type =
infer_data_type(options.json_view(),
{d_string_scalar.data(), static_cast<std::size_t>(d_string_scalar.size())},
d_col_strings,
string_offset.size(),
stream);
// Invalid input is inferred as string for now
EXPECT_EQ(res_type, cudf::data_type{cudf::type_id::STRING});
}
CUDF_TEST_PROGRAM_MAIN()
| 0 |
rapidsai_public_repos/cudf/cpp/tests
|
rapidsai_public_repos/cudf/cpp/tests/io/json_tree.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <io/json/nested_json.hpp>
#include <io/utilities/hostdevice_vector.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/hashing/detail/hashing.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <rmm/cuda_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <cstdio>
#include <iostream>
#include <numeric>
#include <stack>
#include <string>
#include <unordered_map>
namespace cuio_json = cudf::io::json;
// Host copy of tree_meta_t
struct tree_meta_t2 {
std::vector<cuio_json::NodeT> node_categories;
std::vector<cuio_json::NodeIndexT> parent_node_ids;
std::vector<cuio_json::TreeDepthT> node_levels;
std::vector<cuio_json::SymbolOffsetT> node_range_begin;
std::vector<cuio_json::SymbolOffsetT> node_range_end;
};
namespace {
std::string get_node_string(std::size_t const node_id,
tree_meta_t2 const& tree_rep,
std::string const& json_input)
{
auto node_to_str = [](cuio_json::PdaTokenT const token) {
switch (token) {
case cuio_json::NC_STRUCT: return "STRUCT";
case cuio_json::NC_LIST: return "LIST";
case cuio_json::NC_FN: return "FN";
case cuio_json::NC_STR: return "STR";
case cuio_json::NC_VAL: return "VAL";
case cuio_json::NC_ERR: return "ERR";
default: return "N/A";
};
};
return "<" + std::to_string(node_id) + ":" + node_to_str(tree_rep.node_categories[node_id]) +
":[" + std::to_string(tree_rep.node_range_begin[node_id]) + ", " +
std::to_string(tree_rep.node_range_end[node_id]) + ") '" +
json_input.substr(tree_rep.node_range_begin[node_id],
tree_rep.node_range_end[node_id] - tree_rep.node_range_begin[node_id]) +
"'>";
}
void print_tree_representation(std::string const& json_input, tree_meta_t2 const& tree_rep)
{
for (std::size_t i = 0; i < tree_rep.node_categories.size(); i++) {
auto parent_id = tree_rep.parent_node_ids[i];
std::stack<std::size_t> path;
path.push(i);
while (parent_id != cuio_json::parent_node_sentinel) {
path.push(parent_id);
parent_id = tree_rep.parent_node_ids[parent_id];
}
while (path.size()) {
auto const node_id = path.top();
std::cout << get_node_string(node_id, tree_rep, json_input)
<< (path.size() > 1 ? " -> " : "");
path.pop();
}
std::cout << "\n";
}
}
tree_meta_t2 to_cpu_tree(cuio_json::tree_meta_t const& d_value, rmm::cuda_stream_view stream)
{
return {cudf::detail::make_std_vector_async(d_value.node_categories, stream),
cudf::detail::make_std_vector_async(d_value.parent_node_ids, stream),
cudf::detail::make_std_vector_async(d_value.node_levels, stream),
cudf::detail::make_std_vector_async(d_value.node_range_begin, stream),
cudf::detail::make_std_vector_async(d_value.node_range_end, stream)};
}
// DEBUG prints
auto to_cat = [](auto v) -> std::string {
switch (v) {
case cuio_json::NC_STRUCT: return " S";
case cuio_json::NC_LIST: return " L";
case cuio_json::NC_STR: return " \"";
case cuio_json::NC_VAL: return " V";
case cuio_json::NC_FN: return " F";
case cuio_json::NC_ERR: return "ER";
default: return "UN";
};
};
auto to_int = [](auto v) { return std::to_string(static_cast<int>(v)); };
auto print_vec = [](auto const& cpu, auto const name, auto converter) {
for (auto const& v : cpu)
printf("%3s,", converter(v).c_str());
std::cout << name << std::endl;
};
void print_tree(tree_meta_t2 const& cpu_tree)
{
print_vec(cpu_tree.node_categories, "node_categories", to_cat);
print_vec(cpu_tree.parent_node_ids, "parent_node_ids", to_int);
print_vec(cpu_tree.node_levels, "node_levels", to_int);
print_vec(cpu_tree.node_range_begin, "node_range_begin", to_int);
print_vec(cpu_tree.node_range_end, "node_range_end", to_int);
}
void print_tree(cuio_json::tree_meta_t const& d_gpu_tree)
{
auto const cpu_tree = to_cpu_tree(d_gpu_tree, cudf::get_default_stream());
print_tree(cpu_tree);
}
template <typename T>
bool compare_vector(std::vector<T> const& cpu_vec,
std::vector<T> const& gpu_vec,
std::string const& name)
{
EXPECT_EQ(cpu_vec.size(), gpu_vec.size());
bool mismatch = false;
if (!std::equal(cpu_vec.begin(), cpu_vec.end(), gpu_vec.begin())) {
print_vec(cpu_vec, name + "(cpu)", to_int);
print_vec(gpu_vec, name + "(gpu)", to_int);
for (auto i = 0lu; i < cpu_vec.size(); i++) {
mismatch |= (cpu_vec[i] != gpu_vec[i]);
printf("%3s,", (cpu_vec[i] == gpu_vec[i] ? " " : "x"));
}
printf("\n");
}
EXPECT_FALSE(mismatch);
return mismatch;
}
template <typename T>
bool compare_vector(std::vector<T> const& cpu_vec,
rmm::device_uvector<T> const& d_vec,
std::string const& name)
{
auto gpu_vec = cudf::detail::make_std_vector_async(d_vec, cudf::get_default_stream());
return compare_vector(cpu_vec, gpu_vec, name);
}
void compare_trees(tree_meta_t2 const& cpu_tree,
cuio_json::tree_meta_t const& d_gpu_tree,
bool print = false)
{
auto cpu_num_nodes = cpu_tree.node_categories.size();
EXPECT_EQ(cpu_num_nodes, d_gpu_tree.node_categories.size());
EXPECT_EQ(cpu_num_nodes, d_gpu_tree.parent_node_ids.size());
EXPECT_EQ(cpu_num_nodes, d_gpu_tree.node_levels.size());
EXPECT_EQ(cpu_num_nodes, d_gpu_tree.node_range_begin.size());
EXPECT_EQ(cpu_num_nodes, d_gpu_tree.node_range_end.size());
auto gpu_tree = to_cpu_tree(d_gpu_tree, cudf::get_default_stream());
bool mismatch = false;
#define COMPARE_MEMBER(member) \
for (std::size_t i = 0; i < cpu_num_nodes; i++) { \
EXPECT_EQ(cpu_tree.member[i], gpu_tree.member[i]) << #member << "[" << i << "]"; \
}
COMPARE_MEMBER(node_categories);
COMPARE_MEMBER(parent_node_ids);
COMPARE_MEMBER(node_levels);
COMPARE_MEMBER(node_range_begin);
COMPARE_MEMBER(node_range_end);
#undef COMPARE_MEMBER
#define PRINT_VEC(vec, conv) print_vec(vec, #vec, conv);
#define PRINT_COMPARISON(vec, conv) \
PRINT_VEC(cpu_tree.vec, conv); \
PRINT_VEC(gpu_tree.vec, conv); \
if (!std::equal(cpu_tree.vec.begin(), cpu_tree.vec.end(), gpu_tree.vec.begin())) { \
for (auto i = 0lu; i < cpu_tree.vec.size(); i++) { \
mismatch |= (gpu_tree.vec[i] != cpu_tree.vec[i]); \
printf("%3s,", (gpu_tree.vec[i] == cpu_tree.vec[i] ? " " : "x")); \
} \
printf("\n"); \
}
if (print) {
for (int i = 0; i < int(cpu_num_nodes); i++)
printf("%3d,", i);
printf(" node_id\n");
PRINT_COMPARISON(node_categories, to_cat); // Works
PRINT_COMPARISON(node_levels, to_int); // Works
PRINT_COMPARISON(node_range_begin, to_int); // Works
PRINT_COMPARISON(node_range_end, to_int); // Works
PRINT_COMPARISON(parent_node_ids, to_int); // Works
EXPECT_FALSE(mismatch);
}
#undef PRINT_VEC
#undef PRINT_COMPARISON
}
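// Remaps column ids to a dense 0-based sequence in order of first appearance so that CPU and GPU
// results can be compared regardless of the actual id values produced.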
template <typename T>
auto translate_col_id(T const& col_id)
{
using value_type = typename T::value_type;
std::unordered_map<value_type, value_type> col_id_map;
std::vector<value_type> new_col_ids(col_id.size());
value_type unique_id = 0;
for (auto id : col_id) {
if (col_id_map.count(id) == 0) { col_id_map[id] = unique_id++; }
}
for (size_t i = 0; i < col_id.size(); i++) {
new_col_ids[i] = col_id_map[col_id[i]];
}
return new_col_ids;
}
tree_meta_t2 get_tree_representation_cpu(
cudf::device_span<cuio_json::PdaTokenT const> tokens_gpu,
cudf::device_span<cuio_json::SymbolOffsetT const> token_indices_gpu1,
cudf::io::json_reader_options const& options,
rmm::cuda_stream_view stream)
{
constexpr bool include_quote_char = true;
// Copy the JSON tokens to the host
thrust::host_vector<cuio_json::PdaTokenT> tokens =
cudf::detail::make_host_vector_async(tokens_gpu, stream);
thrust::host_vector<cuio_json::SymbolOffsetT> token_indices =
cudf::detail::make_host_vector_async(token_indices_gpu1, stream);
// Make sure tokens have been copied to the host
stream.synchronize();
// DEBUG print
[[maybe_unused]] auto to_token_str = [](cuio_json::PdaTokenT token) {
switch (token) {
case cuio_json::token_t::StructBegin: return " {";
case cuio_json::token_t::StructEnd: return " }";
case cuio_json::token_t::ListBegin: return " [";
case cuio_json::token_t::ListEnd: return " ]";
case cuio_json::token_t::FieldNameBegin: return "FB";
case cuio_json::token_t::FieldNameEnd: return "FE";
case cuio_json::token_t::StringBegin: return "SB";
case cuio_json::token_t::StringEnd: return "SE";
case cuio_json::token_t::ErrorBegin: return "er";
case cuio_json::token_t::ValueBegin: return "VB";
case cuio_json::token_t::ValueEnd: return "VE";
case cuio_json::token_t::StructMemberBegin: return " <";
case cuio_json::token_t::StructMemberEnd: return " >";
case cuio_json::token_t::LineEnd: return ";";
default: return ".";
}
};
if (std::getenv("NJP_DEBUG_DUMP") != nullptr) {
std::cout << "Tokens: \n";
for (auto i = 0u; i < tokens.size(); i++) {
std::cout << to_token_str(tokens[i]) << " ";
}
std::cout << std::endl;
}
  // Whether a token represents a node in the tree representation
auto is_node = [](cuio_json::PdaTokenT const token) {
switch (token) {
case cuio_json::token_t::StructBegin:
case cuio_json::token_t::ListBegin:
case cuio_json::token_t::StringBegin:
case cuio_json::token_t::ValueBegin:
case cuio_json::token_t::FieldNameBegin:
case cuio_json::token_t::ErrorBegin: return true;
default: return false;
};
};
// The node that a token represents
auto token_to_node = [](cuio_json::PdaTokenT const token) {
switch (token) {
case cuio_json::token_t::StructBegin: return cuio_json::NC_STRUCT;
case cuio_json::token_t::ListBegin: return cuio_json::NC_LIST;
case cuio_json::token_t::StringBegin: return cuio_json::NC_STR;
case cuio_json::token_t::ValueBegin: return cuio_json::NC_STR; // NC_VAL;
case cuio_json::token_t::FieldNameBegin: return cuio_json::NC_FN;
default: return cuio_json::NC_ERR;
};
};
  // Includes the quote char for an end-of-string token, or skips the quote char for a
  // beginning-of-field-name token
auto get_token_index = [include_quote_char](cuio_json::PdaTokenT const token,
cuio_json::SymbolOffsetT const token_index) {
constexpr cuio_json::SymbolOffsetT quote_char_size = 1;
switch (token) {
// Strip off or include quote char for StringBegin
case cuio_json::token_t::StringBegin:
return token_index + (include_quote_char ? 0 : quote_char_size);
// Strip off or Include trailing quote char for string values for StringEnd
case cuio_json::token_t::StringEnd:
return token_index + (include_quote_char ? quote_char_size : 0);
// Strip off quote char included for FieldNameBegin
case cuio_json::token_t::FieldNameBegin: return token_index + quote_char_size;
default: return token_index;
};
};
// Whether a token expects to be followed by its respective end-of-* token partner
auto is_begin_of_section = [](cuio_json::PdaTokenT const token) {
switch (token) {
case cuio_json::token_t::StringBegin:
case cuio_json::token_t::ValueBegin:
case cuio_json::token_t::FieldNameBegin: return true;
default: return false;
};
};
// The end-of-* partner token for a given beginning-of-* token
auto end_of_partner = [](cuio_json::PdaTokenT const token) {
switch (token) {
case cuio_json::token_t::StringBegin: return cuio_json::token_t::StringEnd;
case cuio_json::token_t::ValueBegin: return cuio_json::token_t::ValueEnd;
case cuio_json::token_t::FieldNameBegin: return cuio_json::token_t::FieldNameEnd;
default: return cuio_json::token_t::ErrorBegin;
};
};
// Whether the token pops from the parent node stack
auto does_pop = [](cuio_json::PdaTokenT const token) {
switch (token) {
case cuio_json::token_t::StructEnd:
case cuio_json::token_t::ListEnd: return true;
default: return false;
};
};
// Whether the token pushes onto the parent node stack
auto does_push = [](cuio_json::PdaTokenT const token) {
switch (token) {
case cuio_json::token_t::StructBegin:
case cuio_json::token_t::ListBegin: return true;
default: return false;
};
};
// The node id sitting on top of the stack becomes the node's parent
// The full stack represents the path from the root to the current node
std::stack<std::pair<cuio_json::NodeIndexT, bool>> parent_stack;
constexpr bool field_name_node = true;
constexpr bool no_field_name_node = false;
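  // Each stack entry pairs a candidate parent node id with a flag marking field-name nodes;
  // a field-name entry parents exactly one value and is popped once that value (including any
  // nested structure) has been fully processed.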
std::vector<cuio_json::NodeT> node_categories;
std::vector<cuio_json::NodeIndexT> parent_node_ids;
std::vector<cuio_json::TreeDepthT> node_levels;
std::vector<cuio_json::SymbolOffsetT> node_range_begin;
std::vector<cuio_json::SymbolOffsetT> node_range_end;
std::size_t node_id = 0;
for (std::size_t i = 0; i < tokens.size(); i++) {
auto token = tokens[i];
// The section from the original JSON input that this token demarcates
std::size_t range_begin = get_token_index(token, token_indices[i]);
std::size_t range_end = range_begin + 1;
// Identify this node's parent node id
std::size_t parent_node_id =
(parent_stack.size() > 0) ? parent_stack.top().first : cuio_json::parent_node_sentinel;
// If this token is the beginning-of-{value, string, field name}, also consume the next end-of-*
// token
if (is_begin_of_section(token)) {
if ((i + 1) < tokens.size() && end_of_partner(token) == tokens[i + 1]) {
// Update the range_end for this pair of tokens
range_end = get_token_index(tokens[i + 1], token_indices[i + 1]);
// We can skip the subsequent end-of-* token
i++;
}
}
// Emit node if this token becomes a node in the tree
if (is_node(token)) {
node_categories.push_back(token_to_node(token));
parent_node_ids.push_back(parent_node_id);
node_levels.push_back(parent_stack.size());
node_range_begin.push_back(range_begin);
node_range_end.push_back(range_end);
}
// Modify the stack if needed
if (token == cuio_json::token_t::FieldNameBegin) {
parent_stack.push({node_id, field_name_node});
} else {
if (does_push(token)) {
parent_stack.push({node_id, no_field_name_node});
} else if (does_pop(token)) {
CUDF_EXPECTS(parent_stack.size() >= 1, "Invalid JSON input.");
parent_stack.pop();
}
// If what we're left with is a field name on top of stack, we need to pop it
if (parent_stack.size() >= 1 && parent_stack.top().second == field_name_node) {
parent_stack.pop();
}
}
// Update node_id
if (is_node(token)) { node_id++; }
}
return {std::move(node_categories),
std::move(parent_node_ids),
std::move(node_levels),
std::move(node_range_begin),
std::move(node_range_end)};
}
std::tuple<std::vector<cuio_json::NodeIndexT>, std::vector<cudf::size_type>>
records_orient_tree_traversal_cpu(cudf::host_span<cuio_json::SymbolT const> input,
tree_meta_t2 const& tree,
bool is_array_of_arrays,
bool is_enabled_lines,
rmm::cuda_stream_view stream)
{
std::vector<cuio_json::NodeIndexT> node_ids(tree.parent_node_ids.size());
std::iota(node_ids.begin(), node_ids.end(), 0);
const cuio_json::NodeIndexT row_array_children_level = is_enabled_lines ? 1 : 2;
std::unordered_map<cuio_json::NodeIndexT, cuio_json::NodeIndexT> list_indices;
if (is_array_of_arrays) {
cuio_json::NodeIndexT parent_node = -1, child_index = 0;
for (size_t i = 0; i < tree.node_levels.size(); i++) {
if (tree.node_levels[i] == row_array_children_level) {
if (tree.parent_node_ids[i] != parent_node) {
parent_node = tree.parent_node_ids[i];
child_index = 0;
}
list_indices[i] = child_index++;
}
}
}
if (std::getenv("NJP_DEBUG_DUMP") != nullptr) {
for (int i = 0; i < int(tree.node_range_begin.size()); i++) {
printf("%3s ",
std::string(input.data() + tree.node_range_begin[i],
tree.node_range_end[i] - tree.node_range_begin[i])
.c_str());
}
printf(" (JSON)\n");
print_vec(tree.node_categories, "node_categories", to_cat);
print_vec(node_ids, "cpu.node_ids", to_int);
}
// print_vec(tree.parent_node_ids, "tree.parent_node_ids (before)");
constexpr cuio_json::NodeIndexT top_node = -1;
// CPU version of the algorithm
// Calculate row offsets too.
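  // Column identity is a node's path to the root: depth, node category and field name at every
  // level, plus the list index at the row-array level for array-of-arrays input. hash_path and
  // equal_path encode this so the unordered_map below assigns one column id per unique path.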
auto hash_path = [&](auto node_id) {
size_t seed = 0;
while (node_id != top_node) {
seed = cudf::hashing::detail::hash_combine(
seed, std::hash<cuio_json::TreeDepthT>{}(tree.node_levels[node_id]));
seed = cudf::hashing::detail::hash_combine(
seed, std::hash<cuio_json::NodeT>{}(tree.node_categories[node_id]));
if (tree.node_categories[node_id] == cuio_json::node_t::NC_FN) {
auto field_name =
std::string_view(input.data() + tree.node_range_begin[node_id],
tree.node_range_end[node_id] - tree.node_range_begin[node_id]);
seed = cudf::hashing::detail::hash_combine(seed, std::hash<std::string_view>{}(field_name));
}
if (is_array_of_arrays and tree.node_levels[node_id] == row_array_children_level)
seed = cudf::hashing::detail::hash_combine(seed, list_indices[node_id]);
node_id = tree.parent_node_ids[node_id];
}
return seed;
};
auto equal_path = [&](auto node_id1, auto node_id2) {
bool is_equal = true;
while (is_equal and node_id1 != top_node and node_id2 != top_node) {
is_equal &= tree.node_levels[node_id1] == tree.node_levels[node_id2];
is_equal &= tree.node_categories[node_id1] == tree.node_categories[node_id2];
if (is_equal and tree.node_categories[node_id1] == cuio_json::node_t::NC_FN) {
auto field_name1 =
std::string_view(input.data() + tree.node_range_begin[node_id1],
tree.node_range_end[node_id1] - tree.node_range_begin[node_id1]);
auto field_name2 =
std::string_view(input.data() + tree.node_range_begin[node_id2],
tree.node_range_end[node_id2] - tree.node_range_begin[node_id2]);
is_equal &= field_name1 == field_name2;
}
if (is_array_of_arrays and is_equal and
tree.node_levels[node_id1] == row_array_children_level) {
is_equal &= list_indices[node_id1] == list_indices[node_id2];
}
node_id1 = tree.parent_node_ids[node_id1];
node_id2 = tree.parent_node_ids[node_id2];
}
return is_equal and node_id1 == top_node and node_id2 == top_node;
};
std::unordered_map<cuio_json::NodeIndexT, int, decltype(hash_path), decltype(equal_path)>
node_id_map(10, hash_path, equal_path);
auto unique_col_id = 0;
for (auto& node_idx : node_ids) {
if (node_id_map.count(node_idx) == 0) {
node_id_map[node_idx] = unique_col_id++; // node_idx;
node_idx = node_id_map[node_idx];
} else {
node_idx = node_id_map[node_idx];
}
}
// Translate parent_node_ids
auto parent_col_ids(tree.parent_node_ids);
for (auto& parent_node_id : parent_col_ids) {
if (parent_node_id != top_node) parent_node_id = node_ids[parent_node_id];
}
if (std::getenv("NJP_DEBUG_DUMP") != nullptr) {
print_vec(node_ids, "cpu.node_ids (after)", to_int);
print_vec(tree.parent_node_ids, "cpu.parent_node_ids (after)", to_int);
}
// row_offsets
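  // Nodes whose parent is a list (and top-level nodes) advance their column's counter and start a
  // new row entry; all other nodes, including array-of-arrays row-level children, inherit the
  // current offset of their parent's column.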
std::vector<int> row_offsets(tree.parent_node_ids.size(), 0);
std::unordered_map<int, int> col_id_current_offset;
for (std::size_t i = 0; i < tree.parent_node_ids.size(); i++) {
auto current_col_id = node_ids[i];
auto parent_col_id = parent_col_ids[i];
auto parent_node_id = tree.parent_node_ids[i];
if (parent_col_id == top_node) {
// row_offsets[current_col_id] = 0; // JSON lines treats top node as list.
col_id_current_offset[current_col_id]++;
row_offsets[i] = col_id_current_offset[current_col_id] - 1;
} else {
if (tree.node_categories[parent_node_id] == cuio_json::node_t::NC_LIST and
!(is_array_of_arrays and tree.node_levels[i] == row_array_children_level)) {
col_id_current_offset[current_col_id]++;
row_offsets[i] = col_id_current_offset[current_col_id] - 1;
} else {
row_offsets[i] = col_id_current_offset[parent_col_id] - 1;
col_id_current_offset[current_col_id] = col_id_current_offset[parent_col_id];
}
}
}
if (std::getenv("NJP_DEBUG_DUMP") != nullptr) {
print_vec(row_offsets, "cpu.row_offsets (generated)", to_int);
}
return {std::move(node_ids), std::move(row_offsets)};
}
} // namespace
// Base test fixture for tests
struct JsonTest : public cudf::test::BaseFixture {};
TEST_F(JsonTest, TreeRepresentation)
{
auto const stream = cudf::get_default_stream();
// Test input
std::string const input = R"( [{)"
R"("category": "reference",)"
R"("index:": [4,12,42],)"
R"("author": "Nigel Rees",)"
R"("title": "[Sayings of the Century]",)"
R"("price": 8.95)"
R"(}, )"
R"({)"
R"("category": "reference",)"
R"("index": [4,{},null,{"a":[{ }, {}] } ],)"
R"("author": "Nigel Rees",)"
R"("title": "{}[], <=semantic-symbols-string",)"
R"("price": 8.95)"
R"(}] )";
// Prepare input & output buffers
cudf::string_scalar const d_scalar(input, true, stream);
auto const d_input = cudf::device_span<cuio_json::SymbolT const>{
d_scalar.data(), static_cast<size_t>(d_scalar.size())};
cudf::io::json_reader_options const options{};
// Parse the JSON and get the token stream
auto const [tokens_gpu, token_indices_gpu] = cudf::io::json::detail::get_token_stream(
d_input, options, stream, rmm::mr::get_current_device_resource());
// Get the JSON's tree representation
auto gpu_tree = cuio_json::detail::get_tree_representation(
tokens_gpu, token_indices_gpu, stream, rmm::mr::get_current_device_resource());
// host tree generation
auto cpu_tree = get_tree_representation_cpu(tokens_gpu, token_indices_gpu, options, stream);
compare_trees(cpu_tree, gpu_tree);
// Print tree representation
if (std::getenv("NJP_DEBUG_DUMP") != nullptr) { print_tree_representation(input, cpu_tree); }
// Golden sample of node categories
std::vector<cuio_json::node_t> golden_node_categories = {
cuio_json::NC_LIST, cuio_json::NC_STRUCT, cuio_json::NC_FN, cuio_json::NC_STR,
cuio_json::NC_FN, cuio_json::NC_LIST, cuio_json::NC_STR, cuio_json::NC_STR,
cuio_json::NC_STR, cuio_json::NC_FN, cuio_json::NC_STR, cuio_json::NC_FN,
cuio_json::NC_STR, cuio_json::NC_FN, cuio_json::NC_STR, cuio_json::NC_STRUCT,
cuio_json::NC_FN, cuio_json::NC_STR, cuio_json::NC_FN, cuio_json::NC_LIST,
cuio_json::NC_STR, cuio_json::NC_STRUCT, cuio_json::NC_STR, cuio_json::NC_STRUCT,
cuio_json::NC_FN, cuio_json::NC_LIST, cuio_json::NC_STRUCT, cuio_json::NC_STRUCT,
cuio_json::NC_FN, cuio_json::NC_STR, cuio_json::NC_FN, cuio_json::NC_STR,
cuio_json::NC_FN, cuio_json::NC_STR};
// Golden sample of node ids
// clang-format off
std::vector<cuio_json::NodeIndexT> golden_parent_node_ids = {
cuio_json::parent_node_sentinel, 0, 1, 2,
1, 4, 5, 5,
5, 1, 9, 1,
11, 1, 13, 0,
15, 16, 15, 18,
19, 19, 19, 19,
23, 24, 25, 25,
15, 28, 15, 30,
15, 32};
// clang-format on
// Golden sample of node levels
std::vector<cuio_json::TreeDepthT> golden_node_levels = {0, 1, 2, 3, 2, 3, 4, 4, 4, 2, 3, 2,
3, 2, 3, 1, 2, 3, 2, 3, 4, 4, 4, 4,
5, 6, 7, 7, 2, 3, 2, 3, 2, 3};
// Golden sample of the character-ranges from the original input that each node demarcates
std::vector<std::size_t> golden_node_range_begin = {
2, 3, 5, 16, 29, 38, 39, 41, 44, 49, 58, 72, 80, 108, 116, 124, 126,
137, 150, 158, 159, 161, 164, 169, 171, 174, 175, 180, 189, 198, 212, 220, 255, 263};
// Golden sample of the character-ranges from the original input that each node demarcates
std::vector<std::size_t> golden_node_range_end = {
3, 4, 13, 27, 35, 39, 40, 43, 46, 55, 70, 77, 106, 113, 120, 125, 134,
148, 155, 159, 160, 162, 168, 170, 172, 175, 176, 181, 195, 210, 217, 253, 260, 267};
// Check results against golden samples
ASSERT_EQ(golden_node_categories.size(), cpu_tree.node_categories.size());
ASSERT_EQ(golden_parent_node_ids.size(), cpu_tree.parent_node_ids.size());
ASSERT_EQ(golden_node_levels.size(), cpu_tree.node_levels.size());
ASSERT_EQ(golden_node_range_begin.size(), cpu_tree.node_range_begin.size());
ASSERT_EQ(golden_node_range_end.size(), cpu_tree.node_range_end.size());
for (std::size_t i = 0; i < golden_node_categories.size(); i++) {
ASSERT_EQ(golden_node_categories[i], cpu_tree.node_categories[i]) << "[" << i << "]";
ASSERT_EQ(golden_parent_node_ids[i], cpu_tree.parent_node_ids[i]) << "[" << i << "]";
ASSERT_EQ(golden_node_levels[i], cpu_tree.node_levels[i]) << "[" << i << "]";
ASSERT_EQ(golden_node_range_begin[i], cpu_tree.node_range_begin[i]) << "[" << i << "]";
ASSERT_EQ(golden_node_range_end[i], cpu_tree.node_range_end[i]) << "[" << i << "]";
}
}
TEST_F(JsonTest, TreeRepresentation2)
{
auto const stream = cudf::get_default_stream();
  // Test input: values that end with a comma, a space, or a closing brace ", }"
std::string const input =
// 0 1 2 3 4 5 6 7 8 9
// 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
R"([ {}, { "a": { "y" : 6, "z": [] }}, { "a" : { "x" : 8, "y": 9 }, "b" : {"x": 10 , "z": 11)"
"\n}}]";
// Prepare input & output buffers
cudf::string_scalar d_scalar(input, true, stream);
auto d_input = cudf::device_span<cuio_json::SymbolT const>{d_scalar.data(),
static_cast<size_t>(d_scalar.size())};
cudf::io::json_reader_options const options{};
// Parse the JSON and get the token stream
auto const [tokens_gpu, token_indices_gpu] = cudf::io::json::detail::get_token_stream(
d_input, options, stream, rmm::mr::get_current_device_resource());
// Get the JSON's tree representation
auto gpu_tree = cuio_json::detail::get_tree_representation(
tokens_gpu, token_indices_gpu, stream, rmm::mr::get_current_device_resource());
// host tree generation
auto cpu_tree = get_tree_representation_cpu(tokens_gpu, token_indices_gpu, options, stream);
compare_trees(cpu_tree, gpu_tree);
// Print tree representation
if (std::getenv("NJP_DEBUG_DUMP") != nullptr) { print_tree_representation(input, cpu_tree); }
// Golden sample of node categories
// clang-format off
std::vector<cuio_json::node_t> golden_node_categories = {
cuio_json::NC_LIST, cuio_json::NC_STRUCT,
cuio_json::NC_STRUCT, cuio_json::NC_FN, cuio_json::NC_STRUCT, cuio_json::NC_FN, cuio_json::NC_STR, cuio_json::NC_FN, cuio_json::NC_LIST,
cuio_json::NC_STRUCT, cuio_json::NC_FN, cuio_json::NC_STRUCT, cuio_json::NC_FN, cuio_json::NC_STR, cuio_json::NC_FN, cuio_json::NC_STR,
cuio_json::NC_FN, cuio_json::NC_STRUCT, cuio_json::NC_FN, cuio_json::NC_STR, cuio_json::NC_FN, cuio_json::NC_STR};
// Golden sample of node ids
std::vector<cuio_json::NodeIndexT> golden_parent_node_ids = {
cuio_json::parent_node_sentinel, 0,
0, 2, 3, 4, 5, 4, 7,
0, 9, 10, 11, 12, 11, 14,
9, 16, 17, 18, 17, 20};
// clang-format on
// Golden sample of node levels
std::vector<cuio_json::TreeDepthT> golden_node_levels = {
0, 1, 1, 2, 3, 4, 5, 4, 5, 1, 2, 3, 4, 5, 4, 5, 2, 3, 4, 5, 4, 5,
};
// Golden sample of the character-ranges from the original input that each node demarcates
std::vector<std::size_t> golden_node_range_begin = {0, 2, 6, 9, 13, 16, 21, 25, 29, 36, 39,
44, 47, 52, 56, 60, 66, 71, 73, 77, 83, 87};
// Golden sample of the character-ranges from the original input that each node demarcates
std::vector<std::size_t> golden_node_range_end = {1, 3, 7, 10, 14, 17, 22, 26, 30, 37, 40,
45, 48, 53, 57, 61, 67, 72, 74, 79, 84, 89};
// Check results against golden samples
ASSERT_EQ(golden_node_categories.size(), cpu_tree.node_categories.size());
ASSERT_EQ(golden_parent_node_ids.size(), cpu_tree.parent_node_ids.size());
ASSERT_EQ(golden_node_levels.size(), cpu_tree.node_levels.size());
ASSERT_EQ(golden_node_range_begin.size(), cpu_tree.node_range_begin.size());
ASSERT_EQ(golden_node_range_end.size(), cpu_tree.node_range_end.size());
for (std::size_t i = 0; i < golden_node_categories.size(); i++) {
ASSERT_EQ(golden_node_categories[i], cpu_tree.node_categories[i]);
ASSERT_EQ(golden_parent_node_ids[i], cpu_tree.parent_node_ids[i]);
ASSERT_EQ(golden_node_levels[i], cpu_tree.node_levels[i]);
ASSERT_EQ(golden_node_range_begin[i], cpu_tree.node_range_begin[i]);
ASSERT_EQ(golden_node_range_end[i], cpu_tree.node_range_end[i]);
}
}
TEST_F(JsonTest, TreeRepresentation3)
{
auto const stream = cudf::get_default_stream();
  // Test input: JSON lines with the same content as the TreeRepresentation2 input
std::string const input =
R"( {}
{ "a": { "y" : 6, "z": [] }}
{ "a" : { "x" : 8, "y": 9 }, "b" : {"x": 10 , "z": 11 }} )"; // Prepare input & output buffers
cudf::string_scalar d_scalar(input, true, stream);
auto d_input = cudf::device_span<cuio_json::SymbolT const>{d_scalar.data(),
static_cast<size_t>(d_scalar.size())};
cudf::io::json_reader_options options{};
options.enable_lines(true);
// Parse the JSON and get the token stream
auto const [tokens_gpu, token_indices_gpu] = cudf::io::json::detail::get_token_stream(
d_input, options, stream, rmm::mr::get_current_device_resource());
// Get the JSON's tree representation
auto gpu_tree = cuio_json::detail::get_tree_representation(
tokens_gpu, token_indices_gpu, stream, rmm::mr::get_current_device_resource());
// host tree generation
auto cpu_tree = get_tree_representation_cpu(tokens_gpu, token_indices_gpu, options, stream);
compare_trees(cpu_tree, gpu_tree);
// Print tree representation
if (std::getenv("NJP_DEBUG_DUMP") != nullptr) { print_tree_representation(input, cpu_tree); }
}
TEST_F(JsonTest, TreeRepresentationError)
{
auto const stream = cudf::get_default_stream();
std::string const input = R"([ {}, }{])";
// Prepare input & output buffers
cudf::string_scalar const d_scalar(input, true, stream);
auto const d_input = cudf::device_span<cuio_json::SymbolT const>{
d_scalar.data(), static_cast<size_t>(d_scalar.size())};
cudf::io::json_reader_options const options{};
// Parse the JSON and get the token stream
auto const [tokens_gpu, token_indices_gpu] = cudf::io::json::detail::get_token_stream(
d_input, options, stream, rmm::mr::get_current_device_resource());
// Get the JSON's tree representation
// This JSON is invalid and will raise an exception.
EXPECT_THROW(cuio_json::detail::get_tree_representation(
tokens_gpu, token_indices_gpu, stream, rmm::mr::get_current_device_resource()),
cudf::logic_error);
}
/**
* @brief Test fixture for parametrized JSON tree traversal tests
*/
struct JsonTreeTraversalTest : public cudf::test::BaseFixture,
public testing::WithParamInterface<std::tuple<bool, std::string>> {};
//
std::vector<std::string> json_list = {
"[]",
"value",
"\"string\"",
"[1, 2, 3]",
R"({"a": 1, "b": 2, "c": 3})",
// input a: {x:i, y:i, z:[]}, b: {x:i, z:i}
R"([ {}, { "a": { "y" : 6, "z": [] }}, { "a" : { "x" : 8, "y": 9}, "b" : {"x": 10, "z": 11}}])",
// input a: {x:i, y:i, z:[]}, b: {x:i, z: {p: i, q: i}}
R"([ {}, { "a": { "y" : 1, "z": [] }},
{ "a": { "x" : 2, "y": 3}, "b" : {"x": 4, "z": [ {"p": 1, "q": 2}]}},
{ "a": { "y" : 6, "z": [7, 8, 9]}, "b": {"x": 10, "z": [{}, {"q": 3}, {"p": 4}]}},
{ "a": { "z": [12, 13, 14, 15]}},
{ "a": { "z": [16], "x": 2}}
])",
//^row offset a a.x a.y a.z b b.x b.z
// 1 1 1
// 2 2 2 2 2 2 b.z[] 0 b.z.p 0, b.z.q 0
// 3 3 3 3 3 3 a.z[] 0, 1, 2 b.z[] 1, 2, 3 b.z.q 2, b.z.p 3
// 4 4 a.z[] 3, 4, 5, 6
// 5 5 5 a.z[] 7
R"([[1, 2, 3], [4, 5, 6], [7, 8, 9]])",
R"([[1, 2, 3], [4, 5], [7]])",
R"([[1], [4, 5, 6], [7, 8]])",
};
std::vector<std::string> json_lines_list = {
// Test input a: {x:i, y:i, z:[]}, b: {x:i, z:i} with JSON-lines
"",
R"( {}
{ "a": { "y" : 6, "z": [] }}
{ "a": { "y" : 6, "z": [2, 3, 4, 5] }}
{ "a": { "z": [4], "y" : 6 }}
{ "a" : { "x" : 8, "y": 9 }, "b" : {"x": 10 , "z": 11 }} )",
  // rows with an empty list, an empty struct, or both.
R"( {"a" : [], "b" : {}}
{"a" : []}
{"b" : {}})",
R"([1, 2, 3]
[4, 5, 6])",
R"([1]
[4, [5], 6]
[7, [8]])"};
INSTANTIATE_TEST_SUITE_P(Mixed_And_Records,
JsonTreeTraversalTest,
::testing::Combine(::testing::Values(false),
::testing::ValuesIn(json_list)));
INSTANTIATE_TEST_SUITE_P(JsonLines,
JsonTreeTraversalTest,
::testing::Combine(::testing::Values(true),
::testing::ValuesIn(json_lines_list)));
TEST_P(JsonTreeTraversalTest, CPUvsGPUTraversal)
{
auto [json_lines, input] = GetParam();
auto stream = cudf::get_default_stream();
cudf::io::json_reader_options options{};
options.enable_lines(json_lines);
// std::cout << json_lines << input << std::endl;
cudf::string_scalar d_scalar(input, true, stream);
auto d_input = cudf::device_span<cuio_json::SymbolT const>{d_scalar.data(),
static_cast<size_t>(d_scalar.size())};
// Parse the JSON and get the token stream
auto const [tokens_gpu, token_indices_gpu] = cudf::io::json::detail::get_token_stream(
d_input, options, stream, rmm::mr::get_current_device_resource());
// host tree generation
auto cpu_tree = get_tree_representation_cpu(tokens_gpu, token_indices_gpu, options, stream);
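  // The input is treated as array-of-arrays when the first node is a list and, for non-lines
  // input, the second node is a list as well.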
bool const is_array_of_arrays =
(cpu_tree.node_categories.size() > 0 and
cpu_tree.node_categories[0] == cudf::io::json::NC_LIST) and
(json_lines or (cpu_tree.node_categories.size() > 1 and
cpu_tree.node_categories[1] == cudf::io::json::NC_LIST));
// host tree traversal
auto [cpu_col_id, cpu_row_offsets] =
records_orient_tree_traversal_cpu(input, cpu_tree, is_array_of_arrays, json_lines, stream);
// gpu tree generation
auto gpu_tree = cuio_json::detail::get_tree_representation(
tokens_gpu, token_indices_gpu, stream, rmm::mr::get_current_device_resource());
// Print tree representation
if (std::getenv("NJP_DEBUG_DUMP") != nullptr) {
printf("BEFORE traversal (gpu_tree):\n");
print_tree(gpu_tree);
}
// gpu tree traversal
auto [gpu_col_id, gpu_row_offsets] =
cuio_json::detail::records_orient_tree_traversal(d_input,
gpu_tree,
is_array_of_arrays,
json_lines,
stream,
rmm::mr::get_current_device_resource());
// Print tree representation
if (std::getenv("NJP_DEBUG_DUMP") != nullptr) {
printf("AFTER traversal (gpu_tree):\n");
print_tree(gpu_tree);
}
  // Convert to a dense sequence because the GPU column ids may be arbitrary
auto gpu_col_id2 = translate_col_id(cudf::detail::make_std_vector_async(gpu_col_id, stream));
EXPECT_FALSE(compare_vector(cpu_col_id, gpu_col_id2, "col_id"));
EXPECT_FALSE(compare_vector(cpu_row_offsets, gpu_row_offsets, "row_offsets"));
}
| 0 |
rapidsai_public_repos/cudf/cpp/tests
|
rapidsai_public_repos/cudf/cpp/tests/io/orc_test.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/io_metadata_utilities.hpp>
#include <cudf_test/iterator_utilities.hpp>
#include <cudf_test/table_utilities.hpp>
#include <cudf_test/type_lists.hpp>
#include <cudf/concatenate.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/io/orc.hpp>
#include <cudf/io/orc_metadata.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/span.hpp>
#include <src/io/comp/nvcomp_adapter.hpp>
#include <algorithm>
#include <cstdlib>
#include <random>
#include <type_traits>
template <typename T, typename SourceElementT = T>
using column_wrapper =
typename std::conditional<std::is_same_v<T, cudf::string_view>,
cudf::test::strings_column_wrapper,
cudf::test::fixed_width_column_wrapper<T, SourceElementT>>::type;
using str_col = column_wrapper<cudf::string_view>;
using bool_col = column_wrapper<bool>;
using int8_col = column_wrapper<int8_t>;
using int16_col = column_wrapper<int16_t>;
using int32_col = column_wrapper<int32_t>;
using int64_col = column_wrapper<int64_t>;
using float32_col = column_wrapper<float>;
using float64_col = column_wrapper<double>;
using dec32_col = column_wrapper<numeric::decimal32>;
using dec64_col = column_wrapper<numeric::decimal64>;
using dec128_col = column_wrapper<numeric::decimal128>;
using struct_col = cudf::test::structs_column_wrapper;
template <typename T>
using list_col = cudf::test::lists_column_wrapper<T>;
using column = cudf::column;
using table = cudf::table;
using table_view = cudf::table_view;
// Global environment for temporary files
auto const temp_env = static_cast<cudf::test::TempDirTestEnvironment*>(
::testing::AddGlobalTestEnvironment(new cudf::test::TempDirTestEnvironment));
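// Creates a table of num_columns columns of type T, each with num_rows random values; when
// include_validity is true, every odd row is null.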
template <typename T>
std::unique_ptr<cudf::table> create_random_fixed_table(cudf::size_type num_columns,
cudf::size_type num_rows,
bool include_validity)
{
auto valids =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2 == 0; });
std::vector<column_wrapper<T>> src_cols(num_columns);
for (int idx = 0; idx < num_columns; idx++) {
auto rand_elements =
cudf::detail::make_counting_transform_iterator(0, [](T i) { return rand(); });
if (include_validity) {
src_cols[idx] = column_wrapper<T>(rand_elements, rand_elements + num_rows, valids);
} else {
src_cols[idx] = column_wrapper<T>(rand_elements, rand_elements + num_rows);
}
}
std::vector<std::unique_ptr<cudf::column>> columns(num_columns);
std::transform(src_cols.begin(), src_cols.end(), columns.begin(), [](column_wrapper<T>& in) {
auto ret = in.release();
[[maybe_unused]] auto nulls = ret->has_nulls(); // pre-cache the null count
return ret;
});
return std::make_unique<cudf::table>(std::move(columns));
}
// Base test fixture for tests
struct OrcWriterTest : public cudf::test::BaseFixture {};
// Typed test fixture for numeric type tests
template <typename T>
struct OrcWriterNumericTypeTest : public OrcWriterTest {
auto type() { return cudf::data_type{cudf::type_to_id<T>()}; }
};
// Typed test fixture for timestamp type tests
template <typename T>
struct OrcWriterTimestampTypeTest : public OrcWriterTest {
auto type() { return cudf::data_type{cudf::type_to_id<T>()}; }
};
// Declare typed test cases
// TODO: Replace with `NumericTypes` when unsigned support is added. Issue #5351
using SupportedTypes = cudf::test::Types<int8_t, int16_t, int32_t, int64_t, bool, float, double>;
TYPED_TEST_SUITE(OrcWriterNumericTypeTest, SupportedTypes);
using SupportedTimestampTypes =
cudf::test::RemoveIf<cudf::test::ContainedIn<cudf::test::Types<cudf::timestamp_D>>,
cudf::test::TimestampTypes>;
TYPED_TEST_SUITE(OrcWriterTimestampTypeTest, SupportedTimestampTypes);
// Base test fixture for chunked writer tests
struct OrcChunkedWriterTest : public cudf::test::BaseFixture {};
// Typed test fixture for numeric type tests
template <typename T>
struct OrcChunkedWriterNumericTypeTest : public OrcChunkedWriterTest {
auto type() { return cudf::data_type{cudf::type_to_id<T>()}; }
};
// Declare typed test cases
TYPED_TEST_SUITE(OrcChunkedWriterNumericTypeTest, SupportedTypes);
// Test fixture for reader tests
struct OrcReaderTest : public cudf::test::BaseFixture {};
// Test fixture for statistics tests
struct OrcStatisticsTest : public cudf::test::BaseFixture {};
// Test fixture for metadata tests
struct OrcMetadataReaderTest : public cudf::test::BaseFixture {};
namespace {
// Generates a vector of uniform random values of type T
template <typename T>
inline auto random_values(size_t size)
{
std::vector<T> values(size);
using T1 = T;
using uniform_distribution =
typename std::conditional_t<std::is_same_v<T1, bool>,
std::bernoulli_distribution,
std::conditional_t<std::is_floating_point_v<T1>,
std::uniform_real_distribution<T1>,
std::uniform_int_distribution<T1>>>;
static constexpr auto seed = 0xf00d;
static std::mt19937 engine{seed};
static uniform_distribution dist{};
std::generate_n(values.begin(), size, [&]() { return T{dist(engine)}; });
return values;
}
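// Writes a sequential int32 column to an ORC file and verifies that reading it back with
// skip_rows (and optionally num_rows) returns the expected slice of the sequence.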
struct SkipRowTest {
int test_calls{0};
SkipRowTest() {}
std::unique_ptr<table> get_expected_result(std::string const& filepath,
int skip_rows,
int file_num_rows,
int read_num_rows)
{
auto sequence = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i; });
column_wrapper<int32_t, typename decltype(sequence)::value_type> input_col(
sequence, sequence + file_num_rows);
table_view input_table({input_col});
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, input_table);
cudf::io::write_orc(out_opts);
auto begin_sequence = sequence, end_sequence = sequence;
if (skip_rows < file_num_rows) {
begin_sequence += skip_rows;
end_sequence += std::min(skip_rows + read_num_rows, file_num_rows);
}
column_wrapper<int32_t, typename decltype(sequence)::value_type> output_col(begin_sequence,
end_sequence);
std::vector<std::unique_ptr<column>> output_cols;
output_cols.push_back(output_col.release());
return std::make_unique<table>(std::move(output_cols));
}
void test(int skip_rows, int file_num_rows, int read_num_rows)
{
auto filepath =
temp_env->get_temp_filepath("SkipRowTest" + std::to_string(test_calls++) + ".orc");
auto expected_result = get_expected_result(filepath, skip_rows, file_num_rows, read_num_rows);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath})
.use_index(false)
.skip_rows(skip_rows)
.num_rows(read_num_rows);
auto result = cudf::io::read_orc(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected_result->view(), result.tbl->view());
}
void test(int skip_rows, int file_num_rows)
{
auto filepath =
temp_env->get_temp_filepath("SkipRowTest" + std::to_string(test_calls++) + ".orc");
auto expected_result =
get_expected_result(filepath, skip_rows, file_num_rows, file_num_rows - skip_rows);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath})
.use_index(false)
.skip_rows(skip_rows);
auto result = cudf::io::read_orc(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected_result->view(), result.tbl->view());
}
};
} // namespace
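// Round-trip a single non-nullable numeric column through the ORC writer and reader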
TYPED_TEST(OrcWriterNumericTypeTest, SingleColumn)
{
auto sequence = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i; });
constexpr auto num_rows = 100;
column_wrapper<TypeParam, typename decltype(sequence)::value_type> col(sequence,
sequence + num_rows);
table_view expected({col});
auto filepath = temp_env->get_temp_filepath("OrcSingleColumn.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected);
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath}).use_index(false);
auto result = cudf::io::read_orc(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
}
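// Same round-trip as above, but with nulls in every other row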
TYPED_TEST(OrcWriterNumericTypeTest, SingleColumnWithNulls)
{
auto sequence = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i; });
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return (i % 2); });
constexpr auto num_rows = 100;
column_wrapper<TypeParam, typename decltype(sequence)::value_type> col(
sequence, sequence + num_rows, validity);
table_view expected({col});
auto filepath = temp_env->get_temp_filepath("OrcSingleColumnWithNulls.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected);
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath}).use_index(false);
auto result = cudf::io::read_orc(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
}
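// Write timestamp data and read it back using an explicit timestamp_type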
TYPED_TEST(OrcWriterTimestampTypeTest, Timestamps)
{
auto sequence =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return (std::rand() / 10); });
constexpr auto num_rows = 100;
column_wrapper<TypeParam, typename decltype(sequence)::value_type> col(sequence,
sequence + num_rows);
table_view expected({col});
auto filepath = temp_env->get_temp_filepath("OrcTimestamps.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected);
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath})
.use_index(false)
.timestamp_type(this->type());
auto result = cudf::io::read_orc(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
}
TYPED_TEST(OrcWriterTimestampTypeTest, TimestampsWithNulls)
{
auto sequence =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return (std::rand() / 10); });
auto validity =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return (i > 30) && (i < 60); });
constexpr auto num_rows = 100;
column_wrapper<TypeParam, typename decltype(sequence)::value_type> col(
sequence, sequence + num_rows, validity);
table_view expected({col});
auto filepath = temp_env->get_temp_filepath("OrcTimestampsWithNulls.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected);
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath})
.use_index(false)
.timestamp_type(this->type());
auto result = cudf::io::read_orc(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
}
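// Timestamps near the int64_t maximum exercise overflow handling in the type conversion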
TYPED_TEST(OrcWriterTimestampTypeTest, TimestampOverflow)
{
constexpr int64_t max = std::numeric_limits<int64_t>::max();
auto sequence = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return max - i; });
constexpr auto num_rows = 100;
column_wrapper<TypeParam, typename decltype(sequence)::value_type> col(sequence,
sequence + num_rows);
table_view expected({col});
auto filepath = temp_env->get_temp_filepath("OrcTimestampOverflow.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected);
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath})
.use_index(false)
.timestamp_type(this->type());
auto result = cudf::io::read_orc(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
}
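// Round-trip a table mixing numeric, decimal, list, and struct columns with named metadata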
TEST_F(OrcWriterTest, MultiColumn)
{
constexpr auto num_rows = 10;
auto col0_data = random_values<bool>(num_rows);
auto col1_data = random_values<int8_t>(num_rows);
auto col2_data = random_values<int16_t>(num_rows);
auto col3_data = random_values<int32_t>(num_rows);
auto col4_data = random_values<float>(num_rows);
auto col5_data = random_values<double>(num_rows);
auto col6_vals = random_values<int64_t>(num_rows);
auto col6_data = cudf::detail::make_counting_transform_iterator(0, [&](auto i) {
return numeric::decimal128{col6_vals[i], numeric::scale_type{12}};
});
auto col7_data = cudf::detail::make_counting_transform_iterator(0, [&](auto i) {
return numeric::decimal128{col6_vals[i], numeric::scale_type{-12}};
});
bool_col col0(col0_data.begin(), col0_data.end());
int8_col col1(col1_data.begin(), col1_data.end());
int16_col col2(col2_data.begin(), col2_data.end());
int32_col col3(col3_data.begin(), col3_data.end());
float32_col col4(col4_data.begin(), col4_data.end());
float64_col col5(col5_data.begin(), col5_data.end());
dec128_col col6(col6_data, col6_data + num_rows);
dec128_col col7(col7_data, col7_data + num_rows);
list_col<int64_t> col8{
{9, 8}, {7, 6, 5}, {}, {4}, {3, 2, 1, 0}, {20, 21, 22, 23, 24}, {}, {66, 666}, {}, {-1, -2}};
int32_col child_col{48, 27, 25, 31, 351, 351, 29, 15, -1, -99};
struct_col col9{child_col};
table_view expected({col0, col1, col2, col3, col4, col5, col6, col7, col8, col9});
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("bools");
expected_metadata.column_metadata[1].set_name("int8s");
expected_metadata.column_metadata[2].set_name("int16s");
expected_metadata.column_metadata[3].set_name("int32s");
expected_metadata.column_metadata[4].set_name("floats");
expected_metadata.column_metadata[5].set_name("doubles");
expected_metadata.column_metadata[6].set_name("decimal_pos_scale");
expected_metadata.column_metadata[7].set_name("decimal_neg_scale");
expected_metadata.column_metadata[8].set_name("lists");
expected_metadata.column_metadata[9].set_name("structs");
auto filepath = temp_env->get_temp_filepath("OrcMultiColumn.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.metadata(expected_metadata);
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath}).use_index(false);
auto result = cudf::io::read_orc(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
TEST_F(OrcWriterTest, MultiColumnWithNulls)
{
constexpr auto num_rows = 10;
auto col0_data = random_values<bool>(num_rows);
auto col1_data = random_values<int8_t>(num_rows);
auto col2_data = random_values<int16_t>(num_rows);
auto col3_data = random_values<int32_t>(num_rows);
auto col4_data = random_values<float>(num_rows);
auto col5_data = random_values<double>(num_rows);
auto col6_vals = random_values<int32_t>(num_rows);
auto col6_data = cudf::detail::make_counting_transform_iterator(0, [&](auto i) {
return numeric::decimal64{col6_vals[i], numeric::scale_type{2}};
});
auto col0_mask =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return (i % 2); });
auto col1_mask =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return (i < 2); });
auto col3_mask =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return (i == (num_rows - 1)); });
auto col4_mask =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return (i >= 4 && i <= 6); });
auto col5_mask =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return (i > 8); });
auto col6_mask =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return (i % 3); });
bool_col col0{col0_data.begin(), col0_data.end(), col0_mask};
int8_col col1{col1_data.begin(), col1_data.end(), col1_mask};
int16_col col2(col2_data.begin(), col2_data.end());
int32_col col3{col3_data.begin(), col3_data.end(), col3_mask};
float32_col col4{col4_data.begin(), col4_data.end(), col4_mask};
float64_col col5{col5_data.begin(), col5_data.end(), col5_mask};
dec64_col col6{col6_data, col6_data + num_rows, col6_mask};
list_col<int32_t> col7{
{{9, 8}, {7, 6, 5}, {}, {4}, {3, 2, 1, 0}, {20, 21, 22, 23, 24}, {}, {66, 666}, {}, {-1, -2}},
col0_mask};
auto ages_col = cudf::test::fixed_width_column_wrapper<int32_t>{
{48, 27, 25, 31, 351, 351, 29, 15, -1, -99}, {1, 0, 1, 1, 0, 1, 1, 1, 0, 1}};
struct_col col8{{ages_col}, {0, 1, 1, 0, 1, 1, 0, 1, 1, 0}};
table_view expected({col0, col1, col2, col3, col4, col5, col6, col7, col8});
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("bools");
expected_metadata.column_metadata[1].set_name("int8s");
expected_metadata.column_metadata[2].set_name("int16s");
expected_metadata.column_metadata[3].set_name("int32s");
expected_metadata.column_metadata[4].set_name("floats");
expected_metadata.column_metadata[5].set_name("doubles");
expected_metadata.column_metadata[6].set_name("decimal");
expected_metadata.column_metadata[7].set_name("lists");
expected_metadata.column_metadata[8].set_name("structs");
auto filepath = temp_env->get_temp_filepath("OrcMultiColumnWithNulls.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.metadata(expected_metadata);
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath}).use_index(false);
auto result = cudf::io::read_orc(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
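// Requesting zero rows should return an empty table that still has the expected column count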
TEST_F(OrcWriterTest, ReadZeroRows)
{
auto sequence = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i; });
constexpr auto num_rows = 10;
column_wrapper<int64_t, typename decltype(sequence)::value_type> col(sequence,
sequence + num_rows);
table_view expected({col});
auto filepath = temp_env->get_temp_filepath("OrcSingleColumn.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected);
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath})
.use_index(false)
.num_rows(0);
auto result = cudf::io::read_orc(in_opts);
EXPECT_EQ(0, result.tbl->num_rows());
EXPECT_EQ(1, result.tbl->num_columns());
}
TEST_F(OrcWriterTest, Strings)
{
std::vector<char const*> strings{
"Monday", "Monday", "Friday", "Monday", "Friday", "Friday", "Friday", "Funday"};
auto const num_rows = strings.size();
auto seq_col0 = random_values<int>(num_rows);
auto seq_col2 = random_values<float>(num_rows);
int32_col col0(seq_col0.begin(), seq_col0.end());
str_col col1(strings.begin(), strings.end());
float32_col col2(seq_col2.begin(), seq_col2.end());
table_view expected({col0, col1, col2});
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("col_other");
expected_metadata.column_metadata[1].set_name("col_string");
expected_metadata.column_metadata[2].set_name("col_another");
auto filepath = temp_env->get_temp_filepath("OrcStrings.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.metadata(expected_metadata);
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath}).use_index(false);
auto result = cudf::io::read_orc(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
TEST_F(OrcWriterTest, SlicedTable)
{
// This test checks writing zero-copy, offset views into existing cudf tables
std::vector<char const*> strings{
"Monday", "Monday", "Friday", "Monday", "Friday", "Friday", "Friday", "Funday"};
auto const num_rows = strings.size();
auto seq_col0 = random_values<int32_t>(num_rows);
auto seq_col2 = random_values<float>(num_rows);
auto vals_col3 = random_values<int32_t>(num_rows);
auto seq_col3 = cudf::detail::make_counting_transform_iterator(0, [&](auto i) {
return numeric::decimal64{vals_col3[i], numeric::scale_type{2}};
});
int32_col col0(seq_col0.begin(), seq_col0.end());
str_col col1(strings.begin(), strings.end());
float32_col col2(seq_col2.begin(), seq_col2.end());
dec64_col col3(seq_col3, seq_col3 + num_rows);
list_col<int64_t> col4{
{9, 8}, {7, 6, 5}, {}, {4}, {3, 2, 1, 0}, {20, 21, 22, 23, 24}, {}, {66, 666}};
int16_col ages_col{{48, 27, 25, 31, 351, 351, 29, 15}, cudf::test::iterators::null_at(5)};
struct_col col5{{ages_col}, cudf::test::iterators::null_at(4)};
table_view expected({col0, col1, col2, col3, col4, col5});
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("col_other");
expected_metadata.column_metadata[1].set_name("col_string");
expected_metadata.column_metadata[2].set_name("col_another");
expected_metadata.column_metadata[3].set_name("col_decimal");
expected_metadata.column_metadata[4].set_name("lists");
expected_metadata.column_metadata[5].set_name("structs");
auto expected_slice = cudf::slice(expected, {2, static_cast<cudf::size_type>(num_rows)});
auto filepath = temp_env->get_temp_filepath("SlicedTable.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected_slice)
.metadata(expected_metadata);
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_orc(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected_slice, result.tbl->view());
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
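// Write to and read from an in-memory buffer instead of a file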
TEST_F(OrcWriterTest, HostBuffer)
{
constexpr auto num_rows = 100 << 10;
auto const seq_col = random_values<int>(num_rows);
int32_col col(seq_col.begin(), seq_col.end());
table_view expected{{col}};
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("col_other");
std::vector<char> out_buffer;
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info(&out_buffer), expected)
.metadata(expected_metadata);
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(
cudf::io::source_info(out_buffer.data(), out_buffer.size()))
.use_index(false);
auto const result = cudf::io::read_orc(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
TEST_F(OrcWriterTest, negTimestampsNano)
{
// This is a separate test because the ORC format has a bug where a timestamp between -1 and 0
// seconds from the UNIX epoch is read back as that timestamp + 1 second. We mimic that behavior,
// so this test has to hardcode values that are below -1 second.
// Details: https://github.com/rapidsai/cudf/pull/5529#issuecomment-648768925
auto timestamps_ns =
cudf::test::fixed_width_column_wrapper<cudf::timestamp_ns, cudf::timestamp_ns::rep>{
-131968727238000000,
-1530705634500000000,
-1674638741932929000,
};
cudf::table_view expected({timestamps_ns});
auto filepath = temp_env->get_temp_filepath("OrcNegTimestamp.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected);
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath}).use_index(false);
auto result = cudf::io::read_orc(in_opts);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(
expected.column(0), result.tbl->view().column(0), cudf::test::debug_output_level::ALL_ERRORS);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
}
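// Write a sliced column view and verify the read result matches the slice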
TEST_F(OrcWriterTest, Slice)
{
int32_col col{{1, 2, 3, 4, 5}, cudf::test::iterators::null_at(3)};
std::vector<cudf::size_type> indices{2, 5};
std::vector<cudf::column_view> result = cudf::slice(col, indices);
cudf::table_view tbl{result};
auto filepath = temp_env->get_temp_filepath("Slice.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, tbl);
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath});
auto read_table = cudf::io::read_orc(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUIVALENT(read_table.tbl->view(), tbl);
}
TEST_F(OrcChunkedWriterTest, SingleTable)
{
srand(31337);
auto table1 = create_random_fixed_table<int>(5, 5, true);
auto filepath = temp_env->get_temp_filepath("ChunkedSingle.orc");
cudf::io::chunked_orc_writer_options opts =
cudf::io::chunked_orc_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::orc_chunked_writer(opts).write(*table1);
cudf::io::orc_reader_options read_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_orc(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *table1);
}
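// Two chunks written through the chunked writer read back as their concatenation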
TEST_F(OrcChunkedWriterTest, SimpleTable)
{
srand(31337);
auto table1 = create_random_fixed_table<int>(5, 5, true);
auto table2 = create_random_fixed_table<int>(5, 5, true);
auto full_table = cudf::concatenate(std::vector<table_view>({*table1, *table2}));
auto filepath = temp_env->get_temp_filepath("ChunkedSimple.orc");
cudf::io::chunked_orc_writer_options opts =
cudf::io::chunked_orc_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::orc_chunked_writer(opts).write(*table1).write(*table2);
cudf::io::orc_reader_options read_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_orc(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *full_table);
}
TEST_F(OrcChunkedWriterTest, LargeTables)
{
srand(31337);
auto table1 = create_random_fixed_table<int>(512, 4096, true);
auto table2 = create_random_fixed_table<int>(512, 8192, true);
auto full_table = cudf::concatenate(std::vector<table_view>({*table1, *table2}));
auto filepath = temp_env->get_temp_filepath("ChunkedLarge.orc");
cudf::io::chunked_orc_writer_options opts =
cudf::io::chunked_orc_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::orc_chunked_writer(opts).write(*table1).write(*table2);
cudf::io::orc_reader_options read_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_orc(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *full_table);
}
TEST_F(OrcChunkedWriterTest, ManyTables)
{
srand(31337);
std::vector<std::unique_ptr<table>> tables;
std::vector<table_view> table_views;
constexpr int num_tables = 96;
for (int idx = 0; idx < num_tables; idx++) {
auto tbl = create_random_fixed_table<int>(16, 64, true);
table_views.push_back(*tbl);
tables.push_back(std::move(tbl));
}
auto expected = cudf::concatenate(table_views);
auto filepath = temp_env->get_temp_filepath("ChunkedManyTables.orc");
cudf::io::chunked_orc_writer_options opts =
cudf::io::chunked_orc_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::orc_chunked_writer writer(opts);
std::for_each(table_views.begin(), table_views.end(), [&writer](table_view const& tbl) {
writer.write(tbl);
});
writer.close();
cudf::io::orc_reader_options read_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_orc(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *expected);
}
TEST_F(OrcChunkedWriterTest, Metadata)
{
std::vector<char const*> strings{
"Monday", "Tuesday", "THURSDAY", "Wednesday", "Friday", "Sunday", "Saturday"};
auto const num_rows = strings.size();
auto seq_col0 = random_values<int>(num_rows);
auto seq_col2 = random_values<float>(num_rows);
int32_col col0(seq_col0.begin(), seq_col0.end());
str_col col1{strings.begin(), strings.end()};
float32_col col2(seq_col2.begin(), seq_col2.end());
table_view expected({col0, col1, col2});
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("col_other");
expected_metadata.column_metadata[1].set_name("col_string");
expected_metadata.column_metadata[2].set_name("col_another");
auto filepath = temp_env->get_temp_filepath("ChunkedMetadata.orc");
cudf::io::chunked_orc_writer_options opts =
cudf::io::chunked_orc_writer_options::builder(cudf::io::sink_info{filepath})
.metadata(expected_metadata);
cudf::io::orc_chunked_writer(opts).write(expected).write(expected);
cudf::io::orc_reader_options read_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_orc(read_opts);
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
TEST_F(OrcChunkedWriterTest, Strings)
{
bool mask1[] = {true, true, false, true, true, true, true};
std::vector<char const*> h_strings1{"four", "score", "and", "seven", "years", "ago", "abcdefgh"};
str_col strings1(h_strings1.begin(), h_strings1.end(), mask1);
table_view tbl1({strings1});
bool mask2[] = {false, true, true, true, true, true, true};
std::vector<char const*> h_strings2{"ooooo", "ppppppp", "fff", "j", "cccc", "bbb", "zzzzzzzzzzz"};
str_col strings2(h_strings2.begin(), h_strings2.end(), mask2);
table_view tbl2({strings2});
auto expected = cudf::concatenate(std::vector<table_view>({tbl1, tbl2}));
auto filepath = temp_env->get_temp_filepath("ChunkedStrings.orc");
cudf::io::chunked_orc_writer_options opts =
cudf::io::chunked_orc_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::orc_chunked_writer(opts).write(tbl1).write(tbl2);
cudf::io::orc_reader_options read_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_orc(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *expected);
}
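// Writing a chunk whose column types differ from the first chunk should throw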
TEST_F(OrcChunkedWriterTest, MismatchedTypes)
{
srand(31337);
auto table1 = create_random_fixed_table<int>(4, 4, true);
auto table2 = create_random_fixed_table<float>(4, 4, true);
auto filepath = temp_env->get_temp_filepath("ChunkedMismatchedTypes.orc");
cudf::io::chunked_orc_writer_options opts =
cudf::io::chunked_orc_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::orc_chunked_writer writer(opts);
writer.write(*table1);
EXPECT_THROW(writer.write(*table2), cudf::logic_error);
}
TEST_F(OrcChunkedWriterTest, ChunkedWritingAfterClosing)
{
srand(31337);
auto table1 = create_random_fixed_table<int>(4, 4, true);
auto filepath = temp_env->get_temp_filepath("ChunkedWritingAfterClosing.orc");
cudf::io::chunked_orc_writer_options opts =
cudf::io::chunked_orc_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::orc_chunked_writer writer(opts);
writer.write(*table1);
writer.close();
EXPECT_THROW(writer.write(*table1), cudf::logic_error);
}
TEST_F(OrcChunkedWriterTest, MismatchedStructure)
{
srand(31337);
auto table1 = create_random_fixed_table<int>(4, 4, true);
auto table2 = create_random_fixed_table<int>(3, 4, true);
auto filepath = temp_env->get_temp_filepath("ChunkedMismatchedStructure.orc");
cudf::io::chunked_orc_writer_options opts =
cudf::io::chunked_orc_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::orc_chunked_writer writer(opts);
writer.write(*table1);
EXPECT_THROW(writer.write(*table2), cudf::logic_error);
}
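// Stripes can be read back in a caller-specified order, including repeats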
TEST_F(OrcChunkedWriterTest, ReadStripes)
{
srand(31337);
auto table1 = create_random_fixed_table<int>(5, 5, true);
auto table2 = create_random_fixed_table<int>(5, 5, true);
auto full_table = cudf::concatenate(std::vector<table_view>({*table2, *table1, *table2}));
auto filepath = temp_env->get_temp_filepath("ChunkedStripes.orc");
cudf::io::chunked_orc_writer_options opts =
cudf::io::chunked_orc_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::orc_chunked_writer(opts).write(*table1).write(*table2);
cudf::io::orc_reader_options read_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath}).stripes({{1, 0, 1}});
auto result = cudf::io::read_orc(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *full_table);
}
TEST_F(OrcChunkedWriterTest, ReadStripesError)
{
srand(31337);
auto table1 = create_random_fixed_table<int>(5, 5, true);
auto filepath = temp_env->get_temp_filepath("ChunkedStripesError.orc");
cudf::io::chunked_orc_writer_options opts =
cudf::io::chunked_orc_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::orc_chunked_writer(opts).write(*table1);
cudf::io::orc_reader_options read_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath}).stripes({{0, 1}});
EXPECT_THROW(cudf::io::read_orc(read_opts), cudf::logic_error);
read_opts.set_stripes({{-1}});
EXPECT_THROW(cudf::io::read_orc(read_opts), cudf::logic_error);
}
TYPED_TEST(OrcChunkedWriterNumericTypeTest, UnalignedSize)
{
// write out two 31 row tables and make sure they get
// read back with all their validity bits in the right place
using T = TypeParam;
int num_els = 31;
bool mask[] = {false, true, true, true, true, true, true, true, true, true, true,
true, true, true, true, true, true, true, true, true, true, true,
true, true, true, true, true, true, true, true, true};
T c1a[num_els];
std::fill(c1a, c1a + num_els, static_cast<T>(5));
T c1b[num_els];
std::fill(c1b, c1b + num_els, static_cast<T>(6));
column_wrapper<T> c1a_w(c1a, c1a + num_els, mask);
column_wrapper<T> c1b_w(c1b, c1b + num_els, mask);
table_view tbl1({c1a_w, c1b_w});
T c2a[num_els];
std::fill(c2a, c2a + num_els, static_cast<T>(8));
T c2b[num_els];
std::fill(c2b, c2b + num_els, static_cast<T>(9));
column_wrapper<T> c2a_w(c2a, c2a + num_els, mask);
column_wrapper<T> c2b_w(c2b, c2b + num_els, mask);
table_view tbl2({c2a_w, c2b_w});
auto expected = cudf::concatenate(std::vector<table_view>({tbl1, tbl2}));
auto filepath = temp_env->get_temp_filepath("ChunkedUnalignedSize.orc");
cudf::io::chunked_orc_writer_options opts =
cudf::io::chunked_orc_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::orc_chunked_writer(opts).write(tbl1).write(tbl2);
cudf::io::orc_reader_options read_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_orc(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *expected);
}
TYPED_TEST(OrcChunkedWriterNumericTypeTest, UnalignedSize2)
{
// write out two 33 row tables and make sure they get
// read back with all their validity bits in the right place
using T = TypeParam;
int num_els = 33;
bool mask[] = {false, true, true, true, true, true, true, true, true, true, true,
true, true, true, true, true, true, true, true, true, true, true,
true, true, true, true, true, true, true, true, true, true, true};
T c1a[num_els];
std::fill(c1a, c1a + num_els, static_cast<T>(5));
T c1b[num_els];
std::fill(c1b, c1b + num_els, static_cast<T>(6));
column_wrapper<T> c1a_w(c1a, c1a + num_els, mask);
column_wrapper<T> c1b_w(c1b, c1b + num_els, mask);
table_view tbl1({c1a_w, c1b_w});
T c2a[num_els];
std::fill(c2a, c2a + num_els, static_cast<T>(8));
T c2b[num_els];
std::fill(c2b, c2b + num_els, static_cast<T>(9));
column_wrapper<T> c2a_w(c2a, c2a + num_els, mask);
column_wrapper<T> c2b_w(c2b, c2b + num_els, mask);
table_view tbl2({c2a_w, c2b_w});
auto expected = cudf::concatenate(std::vector<table_view>({tbl1, tbl2}));
auto filepath = temp_env->get_temp_filepath("ChunkedUnalignedSize2.orc");
cudf::io::chunked_orc_writer_options opts =
cudf::io::chunked_orc_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::orc_chunked_writer(opts).write(tbl1).write(tbl2);
cudf::io::orc_reader_options read_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_orc(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *expected);
}
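// Exercise various skip_rows/num_rows combinations through the SkipRowTest helper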
TEST_F(OrcReaderTest, CombinedSkipRowTest)
{
SkipRowTest skip_row;
skip_row.test(50, 75);
skip_row.test(2, 100);
skip_row.test(2, 100, 50);
skip_row.test(2, 100, 98);
skip_row.test(2, 100, 99);
skip_row.test(2, 100, 100);
skip_row.test(2, 100, 110);
}
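// Verify the file- and stripe-level statistics parsed from a freshly written file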
TEST_F(OrcStatisticsTest, Basic)
{
auto sequence = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i; });
auto ts_sequence =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return (i - 4) * 1000002; });
auto dec_sequence =
cudf::detail::make_counting_transform_iterator(0, [&](auto i) { return i * 1001; });
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2; });
std::vector<char const*> strings{
"Monday", "Monday", "Friday", "Monday", "Friday", "Friday", "Friday", "Wednesday", "Tuesday"};
int num_rows = strings.size();
column_wrapper<int32_t, typename decltype(sequence)::value_type> col1(
sequence, sequence + num_rows, validity);
column_wrapper<float, typename decltype(sequence)::value_type> col2(
sequence, sequence + num_rows, validity);
str_col col3{strings.begin(), strings.end()};
column_wrapper<cudf::timestamp_ns, typename decltype(sequence)::value_type> col4(
ts_sequence, ts_sequence + num_rows, validity);
column_wrapper<cudf::timestamp_us, typename decltype(sequence)::value_type> col5(
ts_sequence, ts_sequence + num_rows, validity);
bool_col col6({true, true, true, true, true, false, false, false, false}, validity);
cudf::test::fixed_point_column_wrapper<int64_t> col7(
dec_sequence, dec_sequence + num_rows, numeric::scale_type{-1});
table_view expected({col1, col2, col3, col4, col5, col6, col7});
auto filepath = temp_env->get_temp_filepath("OrcStatsMerge.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected);
cudf::io::write_orc(out_opts);
auto const stats = cudf::io::read_parsed_orc_statistics(cudf::io::source_info{filepath});
auto expected_column_names = std::vector<std::string>{""};
std::generate_n(
std::back_inserter(expected_column_names),
expected.num_columns(),
[starting_index = 0]() mutable { return "_col" + std::to_string(starting_index++); });
EXPECT_EQ(stats.column_names, expected_column_names);
auto validate_statistics = [&](std::vector<cudf::io::column_statistics> const& stats) {
ASSERT_EQ(stats.size(), expected.num_columns() + 1);
auto& s0 = stats[0];
EXPECT_EQ(*s0.number_of_values, 9ul);
auto& s1 = stats[1];
EXPECT_EQ(*s1.number_of_values, 4ul);
EXPECT_TRUE(*s1.has_null);
auto& ts1 = std::get<cudf::io::integer_statistics>(s1.type_specific_stats);
EXPECT_EQ(*ts1.minimum, 1);
EXPECT_EQ(*ts1.maximum, 7);
EXPECT_EQ(*ts1.sum, 16);
auto& s2 = stats[2];
EXPECT_EQ(*s2.number_of_values, 4ul);
EXPECT_TRUE(*s2.has_null);
auto& ts2 = std::get<cudf::io::double_statistics>(s2.type_specific_stats);
EXPECT_EQ(*ts2.minimum, 1.);
EXPECT_EQ(*ts2.maximum, 7.);
EXPECT_EQ(*ts2.sum, 16.);
auto& s3 = stats[3];
EXPECT_EQ(*s3.number_of_values, 9ul);
EXPECT_FALSE(*s3.has_null);
auto& ts3 = std::get<cudf::io::string_statistics>(s3.type_specific_stats);
EXPECT_EQ(*ts3.minimum, "Friday");
EXPECT_EQ(*ts3.maximum, "Wednesday");
EXPECT_EQ(*ts3.sum, 58ul);
auto& s4 = stats[4];
EXPECT_EQ(*s4.number_of_values, 4ul);
EXPECT_TRUE(*s4.has_null);
auto& ts4 = std::get<cudf::io::timestamp_statistics>(s4.type_specific_stats);
EXPECT_EQ(*ts4.minimum, -4);
EXPECT_EQ(*ts4.maximum, 3);
EXPECT_EQ(*ts4.minimum_utc, -4);
EXPECT_EQ(*ts4.maximum_utc, 3);
// nanosecond precision can't be included until we write a writer version that includes ORC-135
// see https://github.com/rapidsai/cudf/issues/14325
// EXPECT_EQ(*ts4.minimum_nanos, 999994);
EXPECT_FALSE(ts4.minimum_nanos.has_value());
// EXPECT_EQ(*ts4.maximum_nanos, 6);
EXPECT_FALSE(ts4.maximum_nanos.has_value());
auto& s5 = stats[5];
EXPECT_EQ(*s5.number_of_values, 4ul);
EXPECT_TRUE(*s5.has_null);
auto& ts5 = std::get<cudf::io::timestamp_statistics>(s5.type_specific_stats);
EXPECT_EQ(*ts5.minimum, -3001);
EXPECT_EQ(*ts5.maximum, 3000);
EXPECT_EQ(*ts5.minimum_utc, -3001);
EXPECT_EQ(*ts5.maximum_utc, 3000);
// nanosecond precision can't be included until we write a writer version that includes ORC-135
// see https://github.com/rapidsai/cudf/issues/14325
// EXPECT_EQ(*ts5.minimum_nanos, 994000);
EXPECT_FALSE(ts5.minimum_nanos.has_value());
// EXPECT_EQ(*ts5.maximum_nanos, 6000);
EXPECT_FALSE(ts5.maximum_nanos.has_value());
auto& s6 = stats[6];
EXPECT_EQ(*s6.number_of_values, 4ul);
EXPECT_TRUE(*s6.has_null);
auto& ts6 = std::get<cudf::io::bucket_statistics>(s6.type_specific_stats);
EXPECT_EQ(ts6.count[0], 2);
auto& s7 = stats[7];
EXPECT_EQ(*s7.number_of_values, 9ul);
EXPECT_FALSE(*s7.has_null);
auto& ts7 = std::get<cudf::io::decimal_statistics>(s7.type_specific_stats);
EXPECT_EQ(*ts7.minimum, "0.0");
EXPECT_EQ(*ts7.maximum, "800.8");
EXPECT_EQ(*ts7.sum, "3603.6");
};
validate_statistics(stats.file_stats);
// There's only one stripe, so column stats are the same as stripe stats
validate_statistics(stats.stripes_stats[0]);
}
TEST_F(OrcWriterTest, SlicedValidMask)
{
std::vector<char const*> strings;
// Need more than 32 elements to reproduce the issue
for (int i = 0; i < 34; ++i)
strings.emplace_back("a long string to make sure overflow affects the output");
// One element is null only to force the output column to be nullable
str_col col{strings.begin(), strings.end(), cudf::test::iterators::null_at(32)};
// Bug tested here is easiest to reproduce when column_offset % 32 is 31
std::vector<cudf::size_type> indices{31, 34};
auto sliced_col = cudf::slice(static_cast<cudf::column_view>(col), indices);
cudf::table_view tbl{sliced_col};
cudf::io::table_input_metadata expected_metadata(tbl);
expected_metadata.column_metadata[0].set_name("col_string");
auto filepath = temp_env->get_temp_filepath("OrcStrings.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, tbl)
.metadata(expected_metadata);
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath}).use_index(false);
auto result = cudf::io::read_orc(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(tbl, result.tbl->view());
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
TEST_F(OrcReaderTest, SingleInputs)
{
srand(31533);
auto table1 = create_random_fixed_table<int>(5, 5, true);
auto filepath1 = temp_env->get_temp_filepath("SimpleTable1.orc");
cudf::io::orc_writer_options write_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath1}, table1->view());
cudf::io::write_orc(write_opts);
cudf::io::orc_reader_options read_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{{filepath1}});
auto result = cudf::io::read_orc(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *table1);
}
TEST_F(OrcReaderTest, zstdCompressionRegression)
{
if (cudf::io::nvcomp::is_decompression_disabled(cudf::io::nvcomp::compression_type::ZSTD)) {
GTEST_SKIP() << "Newer nvCOMP version is required";
}
// Test with zstd compressed orc file with high compression ratio.
constexpr uint8_t input_buffer[] = {
0x4f, 0x52, 0x43, 0x5a, 0x00, 0x00, 0x28, 0xb5, 0x2f, 0xfd, 0xa4, 0x34, 0xc7, 0x03, 0x00, 0x74,
0x00, 0x00, 0x18, 0x41, 0xff, 0xaa, 0x02, 0x00, 0xbb, 0xff, 0x45, 0xc8, 0x01, 0x25, 0x30, 0x04,
0x65, 0x00, 0x00, 0x10, 0xaa, 0x1f, 0x02, 0x00, 0x01, 0x29, 0x0b, 0xc7, 0x39, 0xb8, 0x02, 0xcb,
0xaf, 0x38, 0xc0, 0x07, 0x00, 0x00, 0x40, 0x01, 0xc0, 0x05, 0x00, 0x00, 0x46, 0x4d, 0x45, 0x00,
0x00, 0x0a, 0x06, 0x08, 0x01, 0x10, 0x01, 0x18, 0x30, 0x0a, 0x06, 0x08, 0x02, 0x10, 0x01, 0x18,
0x06, 0x0a, 0x06, 0x08, 0x03, 0x10, 0x01, 0x18, 0x05, 0x12, 0x02, 0x08, 0x00, 0x12, 0x04, 0x08,
0x03, 0x10, 0x02, 0x59, 0x00, 0x00, 0x08, 0x03, 0x10, 0x63, 0x1a, 0x0c, 0x08, 0x03, 0x10, 0x00,
0x18, 0x3b, 0x20, 0x25, 0x28, 0xa0, 0x9e, 0x75, 0x22, 0x10, 0x08, 0x0c, 0x12, 0x01, 0x01, 0x1a,
0x09, 0x63, 0x64, 0x5f, 0x67, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x22, 0x02, 0x08, 0x07, 0x30, 0xa0,
0x9e, 0x75, 0x08, 0x2f, 0x10, 0x05, 0x18, 0x80, 0x80, 0x10, 0x22, 0x02, 0x00, 0x0c, 0x28, 0x00,
0x30, 0x09, 0x82, 0xf4, 0x03, 0x03, 0x4f, 0x52, 0x43, 0x17};
auto source =
cudf::io::source_info(reinterpret_cast<char const*>(input_buffer), sizeof(input_buffer));
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(source).use_index(false);
cudf::io::table_with_metadata result;
CUDF_EXPECT_NO_THROW(result = cudf::io::read_orc(in_opts));
EXPECT_EQ(1920800, result.tbl->num_rows());
}
TEST_F(OrcReaderTest, MultipleInputs)
{
srand(31537);
auto table1 = create_random_fixed_table<int>(5, 5, true);
auto table2 = create_random_fixed_table<int>(5, 5, true);
auto full_table = cudf::concatenate(std::vector<table_view>({*table1, *table2}));
auto const filepath1 = temp_env->get_temp_filepath("SimpleTable1.orc");
{
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath1}, table1->view());
cudf::io::write_orc(out_opts);
}
auto const filepath2 = temp_env->get_temp_filepath("SimpleTable2.orc");
{
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath2}, table2->view());
cudf::io::write_orc(out_opts);
}
cudf::io::orc_reader_options read_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{{filepath1, filepath2}});
auto result = cudf::io::read_orc(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *full_table);
}
struct OrcWriterTestDecimal : public OrcWriterTest,
public ::testing::WithParamInterface<std::tuple<int, int>> {};
TEST_P(OrcWriterTestDecimal, Decimal64)
{
auto const [num_rows, scale] = GetParam();
// Using int32_t because applying the scale would overflow values that already require 64 bits
auto const vals = random_values<int32_t>(num_rows);
auto data = cudf::detail::make_counting_transform_iterator(0, [&](auto i) {
return numeric::decimal64{vals[i], numeric::scale_type{scale}};
});
auto mask = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 7 == 0; });
dec64_col col{data, data + num_rows, mask};
cudf::table_view tbl({static_cast<cudf::column_view>(col)});
auto filepath = temp_env->get_temp_filepath("Decimal64.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, tbl);
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_orc(in_opts);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(tbl.column(0), result.tbl->view().column(0));
}
INSTANTIATE_TEST_CASE_P(OrcWriterTest,
OrcWriterTestDecimal,
::testing::Combine(::testing::Values(1, 10000, 10001, 34567),
::testing::Values(-2, 0, 2)));
TEST_F(OrcWriterTest, Decimal32)
{
constexpr auto num_rows = 12000;
// Using int16_t because scale causes values to overflow if they already require 32 bits
auto const vals = random_values<int16_t>(num_rows);
auto data = cudf::detail::make_counting_transform_iterator(0, [&vals](auto i) {
return numeric::decimal32{vals[i], numeric::scale_type{2}};
});
auto mask = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 13; });
dec32_col col{data, data + num_rows, mask};
cudf::table_view expected({col});
auto filepath = temp_env->get_temp_filepath("Decimal32.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected);
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_orc(in_opts);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(col, result.tbl->view().column(0));
}
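// Column sums that overflow int64_t should be omitted from the written statistics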
TEST_F(OrcStatisticsTest, Overflow)
{
int num_rows = 10;
auto too_large_seq = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return i * (std::numeric_limits<int64_t>::max() / 20); });
auto too_small_seq = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return i * (std::numeric_limits<int64_t>::min() / 20); });
auto not_too_large_seq = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return i * (std::numeric_limits<int64_t>::max() / 200); });
auto not_too_small_seq = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return i * (std::numeric_limits<int64_t>::min() / 200); });
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2; });
column_wrapper<int64_t, typename decltype(too_large_seq)::value_type> col1(
too_large_seq, too_large_seq + num_rows, validity);
column_wrapper<int64_t, typename decltype(too_small_seq)::value_type> col2(
too_small_seq, too_small_seq + num_rows, validity);
column_wrapper<int64_t, typename decltype(not_too_large_seq)::value_type> col3(
not_too_large_seq, not_too_large_seq + num_rows, validity);
column_wrapper<int64_t, typename decltype(not_too_small_seq)::value_type> col4(
not_too_small_seq, not_too_small_seq + num_rows, validity);
table_view tbl({col1, col2, col3, col4});
auto filepath = temp_env->get_temp_filepath("OrcStatsOverflow.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, tbl);
cudf::io::write_orc(out_opts);
auto const stats = cudf::io::read_parsed_orc_statistics(cudf::io::source_info{filepath});
auto check_sum_exist = [&](int idx, bool expected) {
auto const& s = stats.file_stats[idx];
auto const& ts = std::get<cudf::io::integer_statistics>(s.type_specific_stats);
EXPECT_EQ(ts.sum.has_value(), expected);
};
check_sum_exist(1, false);
check_sum_exist(2, false);
check_sum_exist(3, true);
check_sum_exist(4, true);
}
TEST_F(OrcStatisticsTest, HasNull)
{
// This test can now be implemented with libcudf; keeping the pandas version to keep the test
// inputs diversified
// Method to create file:
// >>> import pandas as pd
// >>> df = pd.DataFrame({'a':pd.Series([1, 2, None], dtype="Int64"), 'b':[3, 4, 5]})
// >>> df.to_orc("temp.orc")
//
// Contents of file:
// >>> import pyarrow.orc as po
// >>> po.ORCFile('temp.orc').read()
// pyarrow.Table
// a: int64
// b: int64
// ----
// a: [[1,2,null]]
// b: [[3,4,5]]
auto nulls_orc = std::array<uint8_t, 308>{
0x4F, 0x52, 0x43, 0x1D, 0x00, 0x00, 0x0A, 0x0C, 0x0A, 0x04, 0x00, 0x00, 0x00, 0x00, 0x12, 0x04,
0x08, 0x03, 0x50, 0x00, 0x2C, 0x00, 0x00, 0xE3, 0x12, 0xE7, 0x62, 0x67, 0x80, 0x00, 0x21, 0x1E,
0x0E, 0x26, 0x21, 0x36, 0x0E, 0x26, 0x01, 0x16, 0x09, 0xB6, 0x00, 0x46, 0x00, 0x2C, 0x00, 0x00,
0xE3, 0x12, 0xE7, 0x62, 0x67, 0x80, 0x00, 0x21, 0x1E, 0x0E, 0x66, 0x21, 0x36, 0x0E, 0x36, 0x01,
0x2E, 0x09, 0x89, 0x00, 0x06, 0x00, 0x05, 0x00, 0x00, 0xFF, 0xE0, 0x05, 0x00, 0x00, 0xFF, 0xC0,
0x07, 0x00, 0x00, 0x46, 0x01, 0x24, 0x05, 0x00, 0x00, 0xFF, 0xE0, 0x09, 0x00, 0x00, 0x46, 0x02,
0x68, 0xA0, 0x68, 0x00, 0x00, 0xE3, 0x62, 0xE3, 0x60, 0x13, 0x60, 0x90, 0x10, 0xE4, 0x02, 0xD1,
0x8C, 0x12, 0x92, 0x60, 0x9A, 0x09, 0x4C, 0x33, 0x00, 0xC5, 0x59, 0xC1, 0x34, 0x23, 0x98, 0x66,
0x04, 0xD2, 0x6C, 0x60, 0x3E, 0x13, 0x94, 0xCF, 0x24, 0xC1, 0x2E, 0xC4, 0x02, 0x52, 0x07, 0x24,
0x99, 0x60, 0xA4, 0x14, 0x73, 0x68, 0x88, 0x33, 0x00, 0x46, 0x00, 0x00, 0xE3, 0x52, 0xE2, 0x62,
0xE1, 0x60, 0x0E, 0x60, 0xE0, 0xE2, 0xE1, 0x60, 0x12, 0x62, 0xE3, 0x60, 0x12, 0x60, 0x91, 0x60,
0x0B, 0x60, 0x04, 0xF2, 0x98, 0x81, 0x3C, 0x36, 0x01, 0x2E, 0x09, 0x89, 0x00, 0x06, 0x00, 0xB4,
0x00, 0x00, 0xE3, 0x60, 0x16, 0x98, 0xC6, 0x28, 0xC5, 0xC5, 0xC1, 0x2C, 0xE0, 0x2C, 0x21, 0xA3,
0x60, 0xAE, 0xC1, 0xAC, 0x24, 0xC4, 0xC1, 0x23, 0xC4, 0xC4, 0xC8, 0x24, 0xC5, 0x98, 0x28, 0xC5,
0x98, 0xA4, 0xC0, 0xA0, 0xC1, 0x60, 0xC0, 0xA0, 0xC4, 0xC1, 0xC1, 0x82, 0xCE, 0x32, 0x60, 0xB6,
0x62, 0xE1, 0x60, 0x0E, 0x60, 0xB0, 0xE2, 0xE1, 0x60, 0x12, 0x62, 0xE3, 0x60, 0x12, 0x60, 0x91,
0x60, 0x0B, 0x60, 0x04, 0xF2, 0x98, 0x81, 0x3C, 0x36, 0x01, 0x2E, 0x09, 0x89, 0x00, 0x06, 0x87,
0x09, 0x7E, 0x1E, 0x8C, 0x49, 0xAC, 0x86, 0x7A, 0xE6, 0x7A, 0xA6, 0x00, 0x08, 0x5D, 0x10, 0x01,
0x18, 0x80, 0x80, 0x04, 0x22, 0x02, 0x00, 0x0C, 0x28, 0x26, 0x30, 0x06, 0x82, 0xF4, 0x03, 0x03,
0x4F, 0x52, 0x43, 0x17,
};
auto const stats = cudf::io::read_parsed_orc_statistics(
cudf::io::source_info{reinterpret_cast<char const*>(nulls_orc.data()), nulls_orc.size()});
EXPECT_EQ(stats.file_stats[1].has_null, true);
EXPECT_EQ(stats.file_stats[2].has_null, false);
EXPECT_EQ(stats.stripes_stats[0][1].has_null, true);
EXPECT_EQ(stats.stripes_stats[0][2].has_null, false);
}
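// Parameterized over stripe size limits expressed in bytes and in rows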
struct OrcWriterTestStripes
: public OrcWriterTest,
public ::testing::WithParamInterface<std::tuple<size_t, cudf::size_type>> {};
TEST_P(OrcWriterTestStripes, StripeSize)
{
constexpr auto num_rows = 1000000;
auto const [size_bytes, size_rows] = GetParam();
auto const seq_col = random_values<int>(num_rows);
auto const validity =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
column_wrapper<int64_t> col{seq_col.begin(), seq_col.end(), validity};
std::vector<std::unique_ptr<column>> cols;
cols.push_back(col.release());
auto const expected = std::make_unique<table>(std::move(cols));
auto validate = [&](std::vector<char> const& orc_buffer) {
auto const expected_stripe_num =
std::max<cudf::size_type>(num_rows / size_rows, (num_rows * sizeof(int64_t)) / size_bytes);
auto const stats = cudf::io::read_parsed_orc_statistics(
cudf::io::source_info(orc_buffer.data(), orc_buffer.size()));
EXPECT_EQ(stats.stripes_stats.size(), expected_stripe_num);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(
cudf::io::source_info(orc_buffer.data(), orc_buffer.size()))
.use_index(false);
auto result = cudf::io::read_orc(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected->view(), result.tbl->view());
};
{
std::vector<char> out_buffer_chunked;
cudf::io::chunked_orc_writer_options opts =
cudf::io::chunked_orc_writer_options::builder(cudf::io::sink_info(&out_buffer_chunked))
.stripe_size_rows(size_rows)
.stripe_size_bytes(size_bytes);
cudf::io::orc_chunked_writer(opts).write(expected->view());
validate(out_buffer_chunked);
}
{
std::vector<char> out_buffer;
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info(&out_buffer), expected->view())
.stripe_size_rows(size_rows)
.stripe_size_bytes(size_bytes);
cudf::io::write_orc(out_opts);
validate(out_buffer);
}
}
INSTANTIATE_TEST_CASE_P(OrcWriterTest,
OrcWriterTestStripes,
::testing::Values(std::make_tuple(800000ul, 1000000),
std::make_tuple(2000000ul, 1000000),
std::make_tuple(4000000ul, 1000000),
std::make_tuple(8000000ul, 1000000),
std::make_tuple(8000000ul, 500000),
std::make_tuple(8000000ul, 250000),
std::make_tuple(8000000ul, 100000)));
TEST_F(OrcWriterTest, StripeSizeInvalid)
{
auto const unused_table = std::make_unique<table>();
std::vector<char> out_buffer;
EXPECT_THROW(
cudf::io::orc_writer_options::builder(cudf::io::sink_info(&out_buffer), unused_table->view())
.stripe_size_rows(511),
cudf::logic_error);
EXPECT_THROW(
cudf::io::orc_writer_options::builder(cudf::io::sink_info(&out_buffer), unused_table->view())
.stripe_size_bytes(63 << 10),
cudf::logic_error);
EXPECT_THROW(
cudf::io::orc_writer_options::builder(cudf::io::sink_info(&out_buffer), unused_table->view())
.row_index_stride(511),
cudf::logic_error);
}
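// Write a list-of-structs column annotated as a map and read it back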
TEST_F(OrcWriterTest, TestMap)
{
auto const num_rows = 1200000;
auto const lists_per_row = 4;
auto const num_child_rows = (num_rows * lists_per_row) / 2; // half due to validity
auto keys = random_values<int>(num_child_rows);
auto vals = random_values<float>(num_child_rows);
auto vals_mask = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 3; });
int32_col keys_col(keys.begin(), keys.end());
float32_col vals_col{vals.begin(), vals.end(), vals_mask};
auto s_col = struct_col({keys_col, vals_col}).release();
auto valids = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2; });
std::vector<int> row_offsets(num_rows + 1);
int offset = 0;
for (int idx = 0; idx < (num_rows) + 1; ++idx) {
row_offsets[idx] = offset;
if (valids[idx]) { offset += lists_per_row; }
}
int32_col offsets(row_offsets.begin(), row_offsets.end());
auto num_list_rows = static_cast<cudf::column_view>(offsets).size() - 1;
auto [null_mask, null_count] = cudf::test::detail::make_null_mask(valids, valids + num_list_rows);
auto list_col = cudf::make_lists_column(
num_list_rows, offsets.release(), std::move(s_col), null_count, std::move(null_mask));
table_view expected({*list_col});
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_list_column_as_map();
auto filepath = temp_env->get_temp_filepath("MapColumn.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.metadata(expected_metadata);
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath}).use_index(false);
auto result = cudf::io::read_orc(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
TEST_F(OrcReaderTest, NestedColumnSelection)
{
auto const num_rows = 1000;
auto child_col1_data = random_values<int32_t>(num_rows);
auto child_col2_data = random_values<int64_t>(num_rows);
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 3; });
int32_col child_col1{child_col1_data.begin(), child_col1_data.end(), validity};
int64_col child_col2{child_col2_data.begin(), child_col2_data.end(), validity};
struct_col s_col{child_col1, child_col2};
table_view expected({s_col});
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("struct_s");
expected_metadata.column_metadata[0].child(0).set_name("field_a");
expected_metadata.column_metadata[0].child(1).set_name("field_b");
auto filepath = temp_env->get_temp_filepath("OrcNestedSelection.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.metadata(std::move(expected_metadata));
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath})
.use_index(false)
.columns({"struct_s.field_b"});
auto result = cudf::io::read_orc(in_opts);
// Verify that only one child column is included in the output table
ASSERT_EQ(1, result.tbl->view().column(0).num_children());
// Verify that the first child column is `field_b`
int64_col expected_col{child_col2_data.begin(), child_col2_data.end(), validity};
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(expected_col, result.tbl->view().column(0).child(0));
ASSERT_EQ("field_b", result.metadata.schema_info[0].children[0].name);
}
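// decimal128_columns may include names that are not present in the file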
TEST_F(OrcReaderTest, DecimalOptions)
{
constexpr auto num_rows = 10;
auto col_vals = random_values<int64_t>(num_rows);
auto col_data = cudf::detail::make_counting_transform_iterator(0, [&](auto i) {
return numeric::decimal128{col_vals[i], numeric::scale_type{2}};
});
auto mask = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 3 == 0; });
dec128_col col{col_data, col_data + num_rows, mask};
table_view expected({col});
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("dec");
auto filepath = temp_env->get_temp_filepath("OrcDecimalOptions.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.metadata(std::move(expected_metadata));
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options valid_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath})
.decimal128_columns({"dec", "fake_name"});
// Should not throw, even with "fake name"
EXPECT_NO_THROW(cudf::io::read_orc(valid_opts));
}
TEST_F(OrcWriterTest, DecimalOptionsNested)
{
auto const num_rows = 100;
auto dec_vals = random_values<int32_t>(num_rows);
auto dec1_data = cudf::detail::make_counting_transform_iterator(0, [&](auto i) {
return numeric::decimal64{dec_vals[i], numeric::scale_type{2}};
});
auto dec2_data = cudf::detail::make_counting_transform_iterator(0, [&](auto i) {
return numeric::decimal128{dec_vals[i], numeric::scale_type{2}};
});
dec64_col dec1_col(dec1_data, dec1_data + num_rows);
dec128_col dec2_col(dec2_data, dec2_data + num_rows);
auto child_struct_col = cudf::test::structs_column_wrapper{dec1_col, dec2_col};
auto int_vals = random_values<int32_t>(num_rows);
int32_col int_col(int_vals.begin(), int_vals.end());
auto map_struct_col = struct_col({child_struct_col, int_col}).release();
std::vector<int> row_offsets(num_rows + 1);
std::iota(row_offsets.begin(), row_offsets.end(), 0);
int32_col offsets(row_offsets.begin(), row_offsets.end());
auto map_list_col = cudf::make_lists_column(
num_rows, offsets.release(), std::move(map_struct_col), 0, rmm::device_buffer{});
table_view expected({*map_list_col});
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("maps");
expected_metadata.column_metadata[0].set_list_column_as_map();
expected_metadata.column_metadata[0].child(1).child(0).child(0).set_name("dec64");
expected_metadata.column_metadata[0].child(1).child(0).child(1).set_name("dec128");
auto filepath = temp_env->get_temp_filepath("OrcMultiColumn.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.metadata(std::move(expected_metadata));
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath})
.use_index(false)
// One less level of nesting because children of map columns are the child struct's children
.decimal128_columns({"maps.0.dec64"});
auto result = cudf::io::read_orc(in_opts);
// Both columns should be read as decimal128
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(result.tbl->view().column(0).child(1).child(0).child(0),
result.tbl->view().column(0).child(1).child(0).child(1));
}
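// An empty column selection yields a table with zero columns and zero rows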
TEST_F(OrcReaderTest, EmptyColumnsParam)
{
srand(31337);
auto const expected = create_random_fixed_table<int>(2, 4, false);
std::vector<char> out_buffer;
cudf::io::orc_writer_options args =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{&out_buffer}, *expected);
cudf::io::write_orc(args);
cudf::io::orc_reader_options read_opts =
cudf::io::orc_reader_options::builder(
cudf::io::source_info{out_buffer.data(), out_buffer.size()})
.columns({});
auto const result = cudf::io::read_orc(read_opts);
EXPECT_EQ(result.tbl->num_columns(), 0);
EXPECT_EQ(result.tbl->num_rows(), 0);
}
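// read_orc_metadata exposes the row count and the schema tree of a written file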
TEST_F(OrcMetadataReaderTest, TestBasic)
{
auto const num_rows = 1'200'000;
auto ints = random_values<int>(num_rows);
auto floats = random_values<float>(num_rows);
int32_col int_col(ints.begin(), ints.end());
float32_col float_col(floats.begin(), floats.end());
table_view expected({int_col, float_col});
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("int_col");
expected_metadata.column_metadata[1].set_name("float_col");
auto filepath = temp_env->get_temp_filepath("MetadataTest.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.metadata(std::move(expected_metadata));
cudf::io::write_orc(out_opts);
auto meta = read_orc_metadata(cudf::io::source_info{filepath});
EXPECT_EQ(meta.num_rows(), num_rows);
EXPECT_EQ(meta.schema().root().name(), "");
EXPECT_EQ(meta.schema().root().type_kind(), cudf::io::orc::STRUCT);
ASSERT_EQ(meta.schema().root().num_children(), 2);
EXPECT_EQ(meta.schema().root().child(0).name(), "int_col");
EXPECT_EQ(meta.schema().root().child(1).name(), "float_col");
}
TEST_F(OrcMetadataReaderTest, TestNested)
{
auto const num_rows = 1'200'000;
auto const lists_per_row = 4;
auto const num_child_rows = num_rows * lists_per_row;
auto keys = random_values<int>(num_child_rows);
auto vals = random_values<float>(num_child_rows);
int32_col keys_col(keys.begin(), keys.end());
float32_col vals_col(vals.begin(), vals.end());
auto s_col = struct_col({keys_col, vals_col}).release();
std::vector<int> row_offsets(num_rows + 1);
for (int idx = 0; idx < num_rows + 1; ++idx) {
row_offsets[idx] = idx * lists_per_row;
}
int32_col offsets(row_offsets.begin(), row_offsets.end());
auto list_col =
cudf::make_lists_column(num_rows, offsets.release(), std::move(s_col), 0, rmm::device_buffer{});
table_view expected({*list_col, *list_col});
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("maps");
expected_metadata.column_metadata[0].set_list_column_as_map();
expected_metadata.column_metadata[1].set_name("lists");
expected_metadata.column_metadata[1].child(1).child(0).set_name("int_field");
expected_metadata.column_metadata[1].child(1).child(1).set_name("float_field");
auto filepath = temp_env->get_temp_filepath("MetadataTest.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.metadata(std::move(expected_metadata));
cudf::io::write_orc(out_opts);
auto meta = read_orc_metadata(cudf::io::source_info{filepath});
EXPECT_EQ(meta.num_rows(), num_rows);
EXPECT_EQ(meta.schema().root().name(), "");
EXPECT_EQ(meta.schema().root().type_kind(), cudf::io::orc::STRUCT);
ASSERT_EQ(meta.schema().root().num_children(), 2);
auto const& out_map_col = meta.schema().root().child(0);
EXPECT_EQ(out_map_col.name(), "maps");
EXPECT_EQ(out_map_col.type_kind(), cudf::io::orc::MAP);
ASSERT_EQ(out_map_col.num_children(), 2);
EXPECT_EQ(out_map_col.child(0).name(), ""); // keys (no name in ORC)
EXPECT_EQ(out_map_col.child(1).name(), ""); // values (no name in ORC)
auto const& out_list_col = meta.schema().root().child(1);
EXPECT_EQ(out_list_col.name(), "lists");
EXPECT_EQ(out_list_col.type_kind(), cudf::io::orc::LIST);
ASSERT_EQ(out_list_col.num_children(), 1);
auto const& out_list_struct_col = out_list_col.child(0);
EXPECT_EQ(out_list_struct_col.name(), ""); // elements (no name in ORC)
EXPECT_EQ(out_list_struct_col.type_kind(), cudf::io::orc::STRUCT);
ASSERT_EQ(out_list_struct_col.num_children(), 2);
auto const& out_int_col = out_list_struct_col.child(0);
EXPECT_EQ(out_int_col.name(), "int_field");
EXPECT_EQ(out_int_col.type_kind(), cudf::io::orc::INT);
auto const& out_float_col = out_list_struct_col.child(1);
EXPECT_EQ(out_float_col.name(), "float_field");
EXPECT_EQ(out_float_col.type_kind(), cudf::io::orc::FLOAT);
}
TEST_F(OrcReaderTest, ZstdMaxCompressionRate)
{
if (cudf::io::nvcomp::is_decompression_disabled(cudf::io::nvcomp::compression_type::ZSTD) or
cudf::io::nvcomp::is_compression_disabled(cudf::io::nvcomp::compression_type::ZSTD)) {
GTEST_SKIP() << "Newer nvCOMP version is required";
}
// Encodes as 64KB of zeros, which compresses to 18 bytes with ZSTD
std::vector<float> const h_data(8 * 1024);
float32_col col(h_data.begin(), h_data.end());
table_view expected({col});
auto filepath = temp_env->get_temp_filepath("OrcHugeCompRatio.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.compression(cudf::io::compression_type::ZSTD);
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath}).use_index(false);
auto result = cudf::io::read_orc(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
}
TEST_F(OrcWriterTest, CompStats)
{
auto table = create_random_fixed_table<int>(1, 100000, true);
auto const stats = std::make_shared<cudf::io::writer_compression_statistics>();
std::vector<char> unused_buffer;
cudf::io::orc_writer_options opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{&unused_buffer}, table->view())
.compression_statistics(stats);
cudf::io::write_orc(opts);
EXPECT_NE(stats->num_compressed_bytes(), 0);
EXPECT_EQ(stats->num_failed_bytes(), 0);
EXPECT_EQ(stats->num_skipped_bytes(), 0);
EXPECT_FALSE(std::isnan(stats->compression_ratio()));
}
TEST_F(OrcChunkedWriterTest, CompStats)
{
auto table = create_random_fixed_table<int>(1, 100000, true);
auto const stats = std::make_shared<cudf::io::writer_compression_statistics>();
std::vector<char> unused_buffer;
cudf::io::chunked_orc_writer_options opts =
cudf::io::chunked_orc_writer_options::builder(cudf::io::sink_info{&unused_buffer})
.compression_statistics(stats);
cudf::io::orc_chunked_writer(opts).write(*table);
EXPECT_NE(stats->num_compressed_bytes(), 0);
EXPECT_EQ(stats->num_failed_bytes(), 0);
EXPECT_EQ(stats->num_skipped_bytes(), 0);
EXPECT_FALSE(std::isnan(stats->compression_ratio()));
auto const single_table_comp_stats = *stats;
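  // Writing the same table a second time should double the compressed byte count
  // while leaving the compression ratio unchanged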
cudf::io::orc_chunked_writer(opts).write(*table);
EXPECT_EQ(stats->compression_ratio(), single_table_comp_stats.compression_ratio());
EXPECT_EQ(stats->num_compressed_bytes(), 2 * single_table_comp_stats.num_compressed_bytes());
EXPECT_EQ(stats->num_failed_bytes(), 0);
EXPECT_EQ(stats->num_skipped_bytes(), 0);
}
void expect_compression_stats_empty(std::shared_ptr<cudf::io::writer_compression_statistics> stats)
{
EXPECT_EQ(stats->num_compressed_bytes(), 0);
EXPECT_EQ(stats->num_failed_bytes(), 0);
EXPECT_EQ(stats->num_skipped_bytes(), 0);
EXPECT_TRUE(std::isnan(stats->compression_ratio()));
}
TEST_F(OrcWriterTest, CompStatsEmptyTable)
{
auto table_no_rows = create_random_fixed_table<int>(20, 0, false);
auto const stats = std::make_shared<cudf::io::writer_compression_statistics>();
std::vector<char> unused_buffer;
cudf::io::orc_writer_options opts = cudf::io::orc_writer_options::builder(
cudf::io::sink_info{&unused_buffer}, table_no_rows->view())
.compression_statistics(stats);
cudf::io::write_orc(opts);
expect_compression_stats_empty(stats);
}
TEST_F(OrcChunkedWriterTest, CompStatsEmptyTable)
{
auto table_no_rows = create_random_fixed_table<int>(20, 0, false);
auto const stats = std::make_shared<cudf::io::writer_compression_statistics>();
std::vector<char> unused_buffer;
cudf::io::chunked_orc_writer_options opts =
cudf::io::chunked_orc_writer_options::builder(cudf::io::sink_info{&unused_buffer})
.compression_statistics(stats);
cudf::io::orc_chunked_writer(opts).write(*table_no_rows);
expect_compression_stats_empty(stats);
}
TEST_F(OrcWriterTest, EmptyRowGroup)
{
std::vector<int> ints(10000 + 5, -1);
auto mask = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i >= 10000; });
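  // Rows 0..9999 are null, so the first row group (default row index stride of 10'000 rows)
  // is written without any valid values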
int32_col col{ints.begin(), ints.end(), mask};
table_view expected({col});
auto filepath = temp_env->get_temp_filepath("OrcEmptyRowGroup.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected);
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_orc(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
}
TEST_F(OrcWriterTest, NoNullsAsNonNullable)
{
auto valids = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
column_wrapper<int32_t> col{{1, 2, 3}, valids};
table_view expected({col});
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_nullability(false);
auto filepath = temp_env->get_temp_filepath("NonNullable.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.metadata(std::move(expected_metadata));
// Writer should be able to write a column without nulls as non-nullable
EXPECT_NO_THROW(cudf::io::write_orc(out_opts));
}
TEST_F(OrcWriterTest, SlicedStringColumn)
{
std::vector<char const*> strings{"a", "bc", "def", "longer", "strings", "at the end"};
str_col col(strings.begin(), strings.end());
table_view expected({col});
// Slice the table to include the longer strings
auto expected_slice = cudf::slice(expected, {2, 6});
auto filepath = temp_env->get_temp_filepath("SlicedTable.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected_slice);
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_orc(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected_slice, result.tbl->view());
}
TEST_F(OrcWriterTest, EmptyChildStringColumn)
{
list_col<cudf::string_view> col{{}, {}};
table_view expected({col});
auto filepath = temp_env->get_temp_filepath("OrcEmptyChildStringColumn.orc");
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{filepath}, expected);
cudf::io::write_orc(out_opts);
cudf::io::orc_reader_options in_opts =
cudf::io::orc_reader_options::builder(cudf::io::source_info{filepath}).use_index(false);
auto result = cudf::io::read_orc(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
}
template <typename T>
void check_all_null_stats(cudf::io::column_statistics const& stats)
{
EXPECT_EQ(stats.number_of_values, 0);
EXPECT_TRUE(stats.has_null);
auto const ts = std::get<T>(stats.type_specific_stats);
EXPECT_FALSE(ts.minimum.has_value());
EXPECT_FALSE(ts.maximum.has_value());
EXPECT_TRUE(ts.sum.has_value());
EXPECT_EQ(*ts.sum, 0);
}
TEST_F(OrcStatisticsTest, AllNulls)
{
float64_col double_col({0., 0., 0.}, cudf::test::iterators::all_nulls());
int32_col int_col({0, 0, 0}, cudf::test::iterators::all_nulls());
str_col string_col({"", "", ""}, cudf::test::iterators::all_nulls());
cudf::table_view expected({int_col, double_col, string_col});
std::vector<char> out_buffer;
cudf::io::orc_writer_options out_opts =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{&out_buffer}, expected);
cudf::io::write_orc(out_opts);
auto const stats = cudf::io::read_parsed_orc_statistics(
cudf::io::source_info{out_buffer.data(), out_buffer.size()});
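  // file_stats[0] holds the statistics of the root struct; the written columns start at index 1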
check_all_null_stats<cudf::io::integer_statistics>(stats.file_stats[1]);
check_all_null_stats<cudf::io::double_statistics>(stats.file_stats[2]);
check_all_null_stats<cudf::io::string_statistics>(stats.file_stats[3]);
}
TEST_F(OrcWriterTest, UnorderedDictionary)
{
std::vector<char const*> strings{
"BBBB", "BBBB", "CCCC", "BBBB", "CCCC", "EEEE", "CCCC", "AAAA", "DDDD", "EEEE"};
str_col col(strings.begin(), strings.end());
table_view expected({col});
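  // Write the same table with dictionary sorting enabled (the default) and disabled;
  // the decoded data must be identical in both cases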
std::vector<char> out_buffer_sorted;
cudf::io::orc_writer_options out_opts_sorted =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{&out_buffer_sorted}, expected);
cudf::io::write_orc(out_opts_sorted);
cudf::io::orc_reader_options in_opts_sorted = cudf::io::orc_reader_options::builder(
cudf::io::source_info{out_buffer_sorted.data(), out_buffer_sorted.size()});
auto const from_sorted = cudf::io::read_orc(in_opts_sorted).tbl;
std::vector<char> out_buffer_unsorted;
cudf::io::orc_writer_options out_opts_unsorted =
cudf::io::orc_writer_options::builder(cudf::io::sink_info{&out_buffer_unsorted}, expected)
.enable_dictionary_sort(false);
cudf::io::write_orc(out_opts_unsorted);
cudf::io::orc_reader_options in_opts_unsorted = cudf::io::orc_reader_options::builder(
cudf::io::source_info{out_buffer_unsorted.data(), out_buffer_unsorted.size()});
auto const from_unsorted = cudf::io::read_orc(in_opts_unsorted).tbl;
CUDF_TEST_EXPECT_TABLES_EQUAL(*from_sorted, *from_unsorted);
}
CUDF_TEST_PROGRAM_MAIN()
| 0 |
rapidsai_public_repos/cudf/cpp/tests
|
rapidsai_public_repos/cudf/cpp/tests/io/nested_json_test.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <io/json/nested_json.hpp>
#include <io/utilities/hostdevice_vector.hpp>
#include <cudf/io/datasource.hpp>
#include <cudf/io/json.hpp>
#include <cudf/io/parquet.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/io_metadata_utilities.hpp>
#include <cudf_test/table_utilities.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/copy.h>
#include <thrust/iterator/zip_iterator.h>
#include <string>
namespace cuio_json = cudf::io::json;
namespace {
// Forward declaration
void print_column(std::string const& input,
cuio_json::json_column const& column,
uint32_t indent = 0);
/**
* @brief Helper to generate indentation
*/
std::string pad(uint32_t indent = 0)
{
std::string pad{};
if (indent > 0) pad.insert(pad.begin(), indent, ' ');
return pad;
}
/**
* @brief Prints a string column.
*/
void print_json_string_col(std::string const& input,
cuio_json::json_column const& column,
uint32_t indent = 0)
{
for (std::size_t i = 0; i < column.string_offsets.size(); i++) {
std::cout << pad(indent) << i << ": [" << (column.validity[i] ? "1" : "0") << "] '"
<< input.substr(column.string_offsets[i], column.string_lengths[i]) << "'\n";
}
}
/**
* @brief Prints a list column.
*/
void print_json_list_col(std::string const& input,
cuio_json::json_column const& column,
uint32_t indent = 0)
{
std::cout << pad(indent) << " [LIST]\n";
std::cout << pad(indent) << " -> num. child-columns: " << column.child_columns.size() << "\n";
std::cout << pad(indent) << " -> num. rows: " << column.current_offset << "\n";
std::cout << pad(indent) << " -> num. valid: " << column.valid_count << "\n";
std::cout << pad(indent) << " offsets[]: "
<< "\n";
for (std::size_t i = 0; i < column.child_offsets.size() - 1; i++) {
std::cout << pad(indent + 2) << i << ": [" << (column.validity[i] ? "1" : "0") << "] ["
<< column.child_offsets[i] << ", " << column.child_offsets[i + 1] << ")\n";
}
if (column.child_columns.size() > 0) {
std::cout << pad(indent) << column.child_columns.begin()->first << "[]: "
<< "\n";
print_column(input, column.child_columns.begin()->second, indent + 2);
}
}
/**
* @brief Prints a struct column.
*/
void print_json_struct_col(std::string const& input,
cuio_json::json_column const& column,
uint32_t indent = 0)
{
std::cout << pad(indent) << " [STRUCT]\n";
std::cout << pad(indent) << " -> num. child-columns: " << column.child_columns.size() << "\n";
std::cout << pad(indent) << " -> num. rows: " << column.current_offset << "\n";
std::cout << pad(indent) << " -> num. valid: " << column.valid_count << "\n";
std::cout << pad(indent) << " -> validity[]: "
<< "\n";
for (decltype(column.current_offset) i = 0; i < column.current_offset; i++) {
std::cout << pad(indent + 2) << i << ": [" << (column.validity[i] ? "1" : "0") << "]\n";
}
auto it = std::begin(column.child_columns);
for (std::size_t i = 0; i < column.child_columns.size(); i++) {
std::cout << pad(indent + 2) << "child #" << i << " '" << it->first << "'[] \n";
print_column(input, it->second, indent + 2);
it++;
}
}
/**
* @brief Prints the column's data and recurses through and prints all the child columns.
*/
void print_column(std::string const& input, cuio_json::json_column const& column, uint32_t indent)
{
switch (column.type) {
case cuio_json::json_col_t::StringColumn: print_json_string_col(input, column, indent); break;
case cuio_json::json_col_t::ListColumn: print_json_list_col(input, column, indent); break;
case cuio_json::json_col_t::StructColumn: print_json_struct_col(input, column, indent); break;
case cuio_json::json_col_t::Unknown: std::cout << pad(indent) << "[UNKNOWN]\n"; break;
default: break;
}
}
} // namespace
// Base test fixture for tests
struct JsonTest : public cudf::test::BaseFixture {};
TEST_F(JsonTest, StackContext)
{
  // Types used to represent the atomic symbols within the finite-state machine
using SymbolT = char;
using StackSymbolT = char;
// Prepare cuda stream for data transfers & kernels
auto const stream = cudf::get_default_stream();
// Test input
std::string const input = R"( [{)"
R"("category": "reference",)"
R"("index:": [4,12,42],)"
R"("author": "Nigel Rees",)"
R"("title": "[Sayings of the Century]",)"
R"("price": 8.95)"
R"(}, )"
R"({)"
R"("category": "reference",)"
R"("index": [4,{},null,{"a":[{ }, {}] } ],)"
R"("author": "Nigel Rees",)"
R"("title": "{}\\\"[], <=semantic-symbols-string\\\\",)"
R"("price": 8.95)"
R"(}] )";
// Prepare input & output buffers
cudf::string_scalar const d_scalar(input, true, stream);
auto const d_input =
cudf::device_span<SymbolT const>{d_scalar.data(), static_cast<size_t>(d_scalar.size())};
cudf::detail::hostdevice_vector<StackSymbolT> stack_context(input.size(), stream);
// Run algorithm
constexpr auto stack_behavior = cuio_json::stack_behavior_t::PushPopWithoutReset;
cuio_json::detail::get_stack_context(d_input, stack_context.device_ptr(), stack_behavior, stream);
// Copy back the results
stack_context.device_to_host_async(stream);
// Make sure we copied back the stack context
stream.synchronize();
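  // Expected stack context for each input character: '_' = root level, '[' = inside a list,
  // '{' = inside a struct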
std::vector<char> const golden_stack_context{
'_', '_', '_', '[', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{',
'{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{',
'{', '[', '[', '[', '[', '[', '[', '[', '[', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{',
'{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{',
'{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{',
'{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{',
'{', '{', '{', '{', '{', '{', '{', '[', '[', '[', '[', '{', '{', '{', '{', '{', '{', '{', '{',
'{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{',
'{', '{', '{', '{', '{', '{', '{', '[', '[', '[', '{', '[', '[', '[', '[', '[', '[', '[', '{',
'{', '{', '{', '{', '[', '{', '{', '[', '[', '[', '{', '[', '{', '{', '[', '[', '{', '{', '{',
'{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{',
'{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{',
'{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{',
'{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{',
'{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '[', '_'};
ASSERT_EQ(golden_stack_context.size(), stack_context.size());
CUDF_TEST_EXPECT_VECTOR_EQUAL(golden_stack_context, stack_context, stack_context.size());
}
TEST_F(JsonTest, StackContextUtf8)
{
  // Types used to represent the atomic symbols within the finite-state machine
using SymbolT = char;
using StackSymbolT = char;
// Prepare cuda stream for data transfers & kernels
auto const stream = cudf::get_default_stream();
// Test input
std::string const input = R"([{"a":{"year":1882,"author": "Bharathi"}, {"a":"filip ʒakotɛ"}}])";
// Prepare input & output buffers
cudf::string_scalar const d_scalar(input, true, stream);
auto const d_input =
cudf::device_span<SymbolT const>{d_scalar.data(), static_cast<size_t>(d_scalar.size())};
cudf::detail::hostdevice_vector<StackSymbolT> stack_context(input.size(), stream);
// Run algorithm
constexpr auto stack_behavior = cuio_json::stack_behavior_t::PushPopWithoutReset;
cuio_json::detail::get_stack_context(d_input, stack_context.device_ptr(), stack_behavior, stream);
// Copy back the results
stack_context.device_to_host_async(stream);
// Make sure we copied back the stack context
stream.synchronize();
std::vector<char> const golden_stack_context{
'_', '[', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{',
'{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{',
'{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{',
'{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '{', '['};
ASSERT_EQ(golden_stack_context.size(), stack_context.size());
CUDF_TEST_EXPECT_VECTOR_EQUAL(golden_stack_context, stack_context, stack_context.size());
}
TEST_F(JsonTest, StackContextRecovering)
{
  // Types used to represent the atomic symbols within the finite-state machine
using SymbolT = char;
using StackSymbolT = char;
// Prepare cuda stream for data transfers & kernels
auto const stream = cudf::get_default_stream();
// JSON lines input that recovers on invalid lines
std::string const input = R"({"a":-2},
{"a":
{"a":{"a":[321
{"a":[1]}
{"b":123}
)";
// Expected stack context (including stack context of the newline characters)
std::string const golden_stack_context =
"_{{{{{{{__"
"___{{{{{"
"___{{{{{{{{{{[[[["
"___{{{{{[[{_"
"_"
"___{{{{{{{{_"
"__";
// Prepare input & output buffers
cudf::string_scalar const d_scalar(input, true, stream);
auto const d_input =
cudf::device_span<SymbolT const>{d_scalar.data(), static_cast<size_t>(d_scalar.size())};
cudf::detail::hostdevice_vector<StackSymbolT> stack_context(input.size(), stream);
// Run algorithm
constexpr auto stack_behavior = cuio_json::stack_behavior_t::ResetOnDelimiter;
cuio_json::detail::get_stack_context(d_input, stack_context.device_ptr(), stack_behavior, stream);
// Copy back the results
stack_context.device_to_host_async(stream);
// Make sure we copied back the stack context
stream.synchronize();
// Verify results
ASSERT_EQ(golden_stack_context.size(), stack_context.size());
CUDF_TEST_EXPECT_VECTOR_EQUAL(golden_stack_context, stack_context, stack_context.size());
}
TEST_F(JsonTest, StackContextRecoveringFuzz)
{
  // Types used to represent the atomic symbols within the finite-state machine
using SymbolT = char;
using StackSymbolT = char;
  // Fixed seed for reproducible fuzzing
  std::mt19937 gen(42);
std::uniform_int_distribution<int> distribution(0, 4);
constexpr std::size_t input_length = 1024 * 1024;
std::string input{};
input.reserve(input_length);
bool inside_quotes = false;
std::stack<StackSymbolT> host_stack{};
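  // Generate a random input sequence, tracking brackets on a host-side stack so that closing
  // symbols always match and state is reset on newlines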
for (std::size_t i = 0; i < input_length; ++i) {
bool is_ok = true;
char current{};
do {
int rand_char = distribution(gen);
is_ok = true;
switch (rand_char) {
case 0: current = '{'; break;
case 1: current = '['; break;
case 2: current = '}'; break;
case 3: current = '"'; break;
case 4: current = '\n'; break;
}
switch (current) {
case '"': inside_quotes = !inside_quotes; break;
case '{':
if (!inside_quotes) { host_stack.push('{'); }
break;
case '[':
if (!inside_quotes) { host_stack.push('['); }
break;
case '}':
if (!inside_quotes) {
if (host_stack.size() > 0) {
// Get the proper 'pop' stack symbol
current = (host_stack.top() == '{' ? '}' : ']');
host_stack.pop();
} else
is_ok = false;
}
break;
case '\n':
// Increase chance to have longer lines
if (distribution(gen) == 0) {
is_ok = false;
break;
} else {
host_stack = {};
inside_quotes = false;
break;
}
}
} while (!is_ok);
input += current;
}
std::string expected_stack_context{};
expected_stack_context.reserve(input_length);
inside_quotes = false;
host_stack = std::stack<StackSymbolT>{};
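  // Second pass: replay the generated input on the host to compute the expected stack context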
for (auto const current : input) {
// Write the stack context for the current input symbol
if (host_stack.empty()) {
expected_stack_context += '_';
} else {
expected_stack_context += host_stack.top();
}
switch (current) {
case '"': inside_quotes = !inside_quotes; break;
case '{':
if (!inside_quotes) { host_stack.push('{'); }
break;
case '[':
if (!inside_quotes) { host_stack.push('['); }
break;
case '}':
if (!inside_quotes && host_stack.size() > 0) { host_stack.pop(); }
break;
case ']':
if (!inside_quotes && host_stack.size() > 0) { host_stack.pop(); }
break;
case '\n':
host_stack = {};
inside_quotes = false;
break;
}
}
// Prepare cuda stream for data transfers & kernels
auto const stream = cudf::get_default_stream();
// Prepare input & output buffers
cudf::string_scalar const d_scalar(input, true, stream);
auto const d_input =
cudf::device_span<SymbolT const>{d_scalar.data(), static_cast<size_t>(d_scalar.size())};
cudf::detail::hostdevice_vector<StackSymbolT> stack_context(input.size(), stream);
// Run algorithm
constexpr auto stack_behavior = cuio_json::stack_behavior_t::ResetOnDelimiter;
cuio_json::detail::get_stack_context(d_input, stack_context.device_ptr(), stack_behavior, stream);
// Copy back the results
stack_context.device_to_host_async(stream);
// Make sure we copied back the stack context
stream.synchronize();
ASSERT_EQ(expected_stack_context.size(), stack_context.size());
CUDF_TEST_EXPECT_VECTOR_EQUAL(expected_stack_context, stack_context, stack_context.size());
}
TEST_F(JsonTest, TokenStream)
{
using cuio_json::PdaTokenT;
using cuio_json::SymbolOffsetT;
using cuio_json::SymbolT;
// Test input
std::string const input = R"( [{)"
R"("category": "reference",)"
R"("index:": [4,12,42],)"
R"("author": "Nigel Rees",)"
R"("title": "[Sayings of the Century]",)"
R"("price": 8.95)"
R"(}, )"
R"({)"
R"("category": "reference",)"
R"("index": [4,{},null,{"a":[{ }, {}] } ],)"
R"("author": "Nigel Rees",)"
R"("title": "{}[], <=semantic-symbols-string",)"
R"("price": 8.95)"
R"(}] )";
auto const stream = cudf::get_default_stream();
// Default parsing options
cudf::io::json_reader_options default_options{};
// Prepare input & output buffers
cudf::string_scalar const d_scalar(input, true, stream);
auto const d_input =
cudf::device_span<SymbolT const>{d_scalar.data(), static_cast<size_t>(d_scalar.size())};
// Parse the JSON and get the token stream
auto [d_tokens_gpu, d_token_indices_gpu] = cuio_json::detail::get_token_stream(
d_input, default_options, stream, rmm::mr::get_current_device_resource());
  // Copy the tokens and their indices back to the host
auto const tokens_gpu = cudf::detail::make_std_vector_async(d_tokens_gpu, stream);
auto const token_indices_gpu = cudf::detail::make_std_vector_async(d_token_indices_gpu, stream);
// Golden token stream sample
using token_t = cuio_json::token_t;
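  // Each entry pairs the character offset within the input with the expected token type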
std::vector<std::pair<std::size_t, cuio_json::PdaTokenT>> const golden_token_stream = {
{2, token_t::ListBegin},
{3, token_t::StructBegin},
{4, token_t::StructMemberBegin},
{4, token_t::FieldNameBegin},
{13, token_t::FieldNameEnd},
{16, token_t::StringBegin},
{26, token_t::StringEnd},
{27, token_t::StructMemberEnd},
{28, token_t::StructMemberBegin},
{28, token_t::FieldNameBegin},
{35, token_t::FieldNameEnd},
{38, token_t::ListBegin},
{39, token_t::ValueBegin},
{40, token_t::ValueEnd},
{41, token_t::ValueBegin},
{43, token_t::ValueEnd},
{44, token_t::ValueBegin},
{46, token_t::ValueEnd},
{46, token_t::ListEnd},
{47, token_t::StructMemberEnd},
{48, token_t::StructMemberBegin},
{48, token_t::FieldNameBegin},
{55, token_t::FieldNameEnd},
{58, token_t::StringBegin},
{69, token_t::StringEnd},
{70, token_t::StructMemberEnd},
{71, token_t::StructMemberBegin},
{71, token_t::FieldNameBegin},
{77, token_t::FieldNameEnd},
{80, token_t::StringBegin},
{105, token_t::StringEnd},
{106, token_t::StructMemberEnd},
{107, token_t::StructMemberBegin},
{107, token_t::FieldNameBegin},
{113, token_t::FieldNameEnd},
{116, token_t::ValueBegin},
{120, token_t::ValueEnd},
{120, token_t::StructMemberEnd},
{120, token_t::StructEnd},
{124, token_t::StructBegin},
{125, token_t::StructMemberBegin},
{125, token_t::FieldNameBegin},
{134, token_t::FieldNameEnd},
{137, token_t::StringBegin},
{147, token_t::StringEnd},
{148, token_t::StructMemberEnd},
{149, token_t::StructMemberBegin},
{149, token_t::FieldNameBegin},
{155, token_t::FieldNameEnd},
{158, token_t::ListBegin},
{159, token_t::ValueBegin},
{160, token_t::ValueEnd},
{161, token_t::StructBegin},
{162, token_t::StructEnd},
{164, token_t::ValueBegin},
{168, token_t::ValueEnd},
{169, token_t::StructBegin},
{170, token_t::StructMemberBegin},
{170, token_t::FieldNameBegin},
{172, token_t::FieldNameEnd},
{174, token_t::ListBegin},
{175, token_t::StructBegin},
{177, token_t::StructEnd},
{180, token_t::StructBegin},
{181, token_t::StructEnd},
{182, token_t::ListEnd},
{184, token_t::StructMemberEnd},
{184, token_t::StructEnd},
{186, token_t::ListEnd},
{187, token_t::StructMemberEnd},
{188, token_t::StructMemberBegin},
{188, token_t::FieldNameBegin},
{195, token_t::FieldNameEnd},
{198, token_t::StringBegin},
{209, token_t::StringEnd},
{210, token_t::StructMemberEnd},
{211, token_t::StructMemberBegin},
{211, token_t::FieldNameBegin},
{217, token_t::FieldNameEnd},
{220, token_t::StringBegin},
{252, token_t::StringEnd},
{253, token_t::StructMemberEnd},
{254, token_t::StructMemberBegin},
{254, token_t::FieldNameBegin},
{260, token_t::FieldNameEnd},
{263, token_t::ValueBegin},
{267, token_t::ValueEnd},
{267, token_t::StructMemberEnd},
{267, token_t::StructEnd},
{268, token_t::ListEnd}};
// Verify the number of tokens matches
ASSERT_EQ(golden_token_stream.size(), tokens_gpu.size());
ASSERT_EQ(golden_token_stream.size(), token_indices_gpu.size());
for (std::size_t i = 0; i < tokens_gpu.size(); i++) {
    // Ensure the indices the tokens point to match
EXPECT_EQ(golden_token_stream[i].first, token_indices_gpu[i]) << "Mismatch at #" << i;
// Ensure the token category is correct
EXPECT_EQ(golden_token_stream[i].second, tokens_gpu[i]) << "Mismatch at #" << i;
}
}
TEST_F(JsonTest, TokenStream2)
{
using cuio_json::PdaTokenT;
using cuio_json::SymbolOffsetT;
using cuio_json::SymbolT;
  // Values that end with a comma, a space, or a close-brace: ", }"
std::string const input =
R"([ {}, { "a": { "y" : 6, "z": [] }}, { "a" : { "x" : 8, "y": 9}, "b" : {"x": 10 , "z": 11)"
"\n}}]";
auto const stream = cudf::get_default_stream();
// Default parsing options
cudf::io::json_reader_options default_options{};
// Prepare input & output buffers
cudf::string_scalar const d_scalar(input, true, stream);
auto const d_input =
cudf::device_span<SymbolT const>{d_scalar.data(), static_cast<size_t>(d_scalar.size())};
// Parse the JSON and get the token stream
auto [d_tokens_gpu, d_token_indices_gpu] = cuio_json::detail::get_token_stream(
d_input, default_options, stream, rmm::mr::get_current_device_resource());
  // Copy the tokens and their indices back to the host
auto const tokens_gpu = cudf::detail::make_std_vector_async(d_tokens_gpu, stream);
auto const token_indices_gpu = cudf::detail::make_std_vector_async(d_token_indices_gpu, stream);
// Golden token stream sample
using token_t = cuio_json::token_t;
// clang-format off
std::vector<std::pair<std::size_t, cuio_json::PdaTokenT>> const golden_token_stream = {
{0, token_t::ListBegin},
{2, token_t::StructBegin}, {3, token_t::StructEnd}, //{}
{6, token_t::StructBegin},
{8, token_t::StructMemberBegin}, {8, token_t::FieldNameBegin}, {10, token_t::FieldNameEnd}, //a
{13, token_t::StructBegin},
{15, token_t::StructMemberBegin}, {15, token_t::FieldNameBegin}, {17, token_t::FieldNameEnd}, {21, token_t::ValueBegin}, {22, token_t::ValueEnd}, {22, token_t::StructMemberEnd}, //a.y
{24, token_t::StructMemberBegin}, {24, token_t::FieldNameBegin}, {26, token_t::FieldNameEnd}, {29, token_t::ListBegin}, {30, token_t::ListEnd}, {32, token_t::StructMemberEnd}, //a.z
{32, token_t::StructEnd},
{33, token_t::StructMemberEnd},
{33, token_t::StructEnd},
{36, token_t::StructBegin},
{38, token_t::StructMemberBegin}, {38, token_t::FieldNameBegin}, {40, token_t::FieldNameEnd}, //a
{44, token_t::StructBegin},
{46, token_t::StructMemberBegin}, {46, token_t::FieldNameBegin}, {48, token_t::FieldNameEnd}, {52, token_t::ValueBegin}, {53, token_t::ValueEnd}, {53, token_t::StructMemberEnd}, //a.x
{55, token_t::StructMemberBegin}, {55, token_t::FieldNameBegin}, {57, token_t::FieldNameEnd}, {60, token_t::ValueBegin}, {61, token_t::ValueEnd}, {61, token_t::StructMemberEnd}, //a.y
{61, token_t::StructEnd},
{62, token_t::StructMemberEnd},
{64, token_t::StructMemberBegin}, {64, token_t::FieldNameBegin}, {66, token_t::FieldNameEnd}, //b
{70, token_t::StructBegin},
{71, token_t::StructMemberBegin}, {71, token_t::FieldNameBegin}, {73, token_t::FieldNameEnd}, {76, token_t::ValueBegin}, {78, token_t::ValueEnd}, {79, token_t::StructMemberEnd}, //b.x
{81, token_t::StructMemberBegin}, {81, token_t::FieldNameBegin}, {83, token_t::FieldNameEnd}, {86, token_t::ValueBegin}, {88, token_t::ValueEnd}, {89, token_t::StructMemberEnd}, //b.z
{89, token_t::StructEnd},
{90, token_t::StructMemberEnd},
{90, token_t::StructEnd},
{91, token_t::ListEnd}};
// clang-format on
// Verify the number of tokens matches
ASSERT_EQ(golden_token_stream.size(), tokens_gpu.size());
ASSERT_EQ(golden_token_stream.size(), token_indices_gpu.size());
for (std::size_t i = 0; i < tokens_gpu.size(); i++) {
    // Ensure the indices the tokens point to match
EXPECT_EQ(golden_token_stream[i].first, token_indices_gpu[i]) << "Mismatch at #" << i;
// Ensure the token category is correct
EXPECT_EQ(golden_token_stream[i].second, tokens_gpu[i]) << "Mismatch at #" << i;
}
}
struct JsonParserTest : public cudf::test::BaseFixture, public testing::WithParamInterface<bool> {};
INSTANTIATE_TEST_SUITE_P(Experimental, JsonParserTest, testing::Bool());
TEST_P(JsonParserTest, ExtractColumn)
{
using cuio_json::SymbolT;
bool const is_full_gpu = GetParam();
auto json_parser = is_full_gpu ? cuio_json::detail::device_parse_nested_json
: cuio_json::detail::host_parse_nested_json;
// Prepare cuda stream for data transfers & kernels
auto const stream = cudf::get_default_stream();
auto mr = rmm::mr::get_current_device_resource();
// Default parsing options
cudf::io::json_reader_options default_options{};
std::string const input = R"( [{"a":0.0, "b":1.0}, {"a":0.1, "b":1.1}, {"a":0.2, "b":1.2}] )";
auto const d_input = cudf::detail::make_device_uvector_async(
cudf::host_span<char const>{input.c_str(), input.size()},
stream,
rmm::mr::get_current_device_resource());
// Get the JSON's tree representation
auto const cudf_table = json_parser(d_input, default_options, stream, mr);
auto const expected_col_count = 2;
EXPECT_EQ(cudf_table.tbl->num_columns(), expected_col_count);
auto expected_col1 =
cudf::test::fixed_width_column_wrapper<double>({0.0, 0.1, 0.2}, {true, true, true});
auto expected_col2 =
cudf::test::fixed_width_column_wrapper<double>({1.0, 1.1, 1.2}, {true, true, true});
cudf::column_view parsed_col1 = cudf_table.tbl->get_column(0);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected_col1, parsed_col1);
cudf::column_view parsed_col2 = cudf_table.tbl->get_column(1);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected_col2, parsed_col2);
}
TEST_F(JsonTest, RecoveringTokenStream)
{
// Test input. Inline comments used to indicate character indexes
// 012345678 <= line 0
std::string const input = R"({"a":2 {})"
// 9
"\n"
// 01234 <= line 1
R"({"a":)"
// 5
"\n"
// 67890123456789 <= line 2
R"({"a":{"a":[321)"
// 0
"\n"
// 123456789 <= line 3
R"({"a":[1]})"
// 0
"\n"
// 1 <= line 4
"\n"
// 23456789 <= line 5
R"({"b":123})";
// Golden token stream sample
using token_t = cuio_json::token_t;
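  // Each invalid line is replaced by a StructBegin/StructEnd pair at offset 0, serving as a
  // placeholder for the row that is recovered as null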
std::vector<std::pair<std::size_t, cuio_json::PdaTokenT>> const golden_token_stream = {
// Line 0 (invalid)
{0, token_t::StructBegin},
{0, token_t::StructEnd},
// Line 1 (invalid)
{0, token_t::StructBegin},
{0, token_t::StructEnd},
// Line 2 (invalid)
{0, token_t::StructBegin},
{0, token_t::StructEnd},
// Line 3 (valid)
{31, token_t::StructBegin},
{32, token_t::StructMemberBegin},
{32, token_t::FieldNameBegin},
{34, token_t::FieldNameEnd},
{36, token_t::ListBegin},
{37, token_t::ValueBegin},
{38, token_t::ValueEnd},
{38, token_t::ListEnd},
{39, token_t::StructMemberEnd},
{39, token_t::StructEnd},
// Line 4 (empty)
// Line 5 (valid)
{42, token_t::StructBegin},
{43, token_t::StructMemberBegin},
{43, token_t::FieldNameBegin},
{45, token_t::FieldNameEnd},
{47, token_t::ValueBegin},
{50, token_t::ValueEnd},
{50, token_t::StructMemberEnd},
{50, token_t::StructEnd}};
auto const stream = cudf::get_default_stream();
// Default parsing options
cudf::io::json_reader_options default_options{};
default_options.set_recovery_mode(cudf::io::json_recovery_mode_t::RECOVER_WITH_NULL);
default_options.enable_lines(true);
// Prepare input & output buffers
cudf::string_scalar const d_scalar(input, true, stream);
auto const d_input = cudf::device_span<cuio_json::SymbolT const>{
d_scalar.data(), static_cast<size_t>(d_scalar.size())};
// Parse the JSON and get the token stream
auto [d_tokens_gpu, d_token_indices_gpu] = cuio_json::detail::get_token_stream(
d_input, default_options, stream, rmm::mr::get_current_device_resource());
  // Copy the tokens and their indices back to the host
auto const tokens_gpu = cudf::detail::make_std_vector_async(d_tokens_gpu, stream);
auto const token_indices_gpu = cudf::detail::make_std_vector_async(d_token_indices_gpu, stream);
// Verify the number of tokens matches
ASSERT_EQ(golden_token_stream.size(), tokens_gpu.size());
ASSERT_EQ(golden_token_stream.size(), token_indices_gpu.size());
for (std::size_t i = 0; i < tokens_gpu.size(); i++) {
    // Ensure the indices the tokens point to match
EXPECT_EQ(golden_token_stream[i].first, token_indices_gpu[i]) << "Mismatch at #" << i;
// Ensure the token category is correct
EXPECT_EQ(golden_token_stream[i].second, tokens_gpu[i]) << "Mismatch at #" << i;
}
}
TEST_F(JsonTest, PostProcessTokenStream)
{
// Golden token stream sample
using token_t = cuio_json::token_t;
using token_index_t = cuio_json::SymbolOffsetT;
using tuple_t = thrust::tuple<token_index_t, cuio_json::PdaTokenT>;
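  // process_token_stream is expected to drop LineEnd tokens and to replace the tokens of any
  // line containing an ErrorBegin with an empty StructBegin/StructEnd placeholder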
std::vector<tuple_t> const input = {// Line 0 (invalid)
{0, token_t::LineEnd},
{0, token_t::StructBegin},
{1, token_t::StructMemberBegin},
{1, token_t::FieldNameBegin},
{3, token_t::FieldNameEnd},
{5, token_t::ValueBegin},
{7, token_t::ValueEnd},
{7, token_t::StructMemberEnd},
{7, token_t::StructEnd},
{8, token_t::ErrorBegin},
{9, token_t::LineEnd},
// Line 1
{10, token_t::StructBegin},
{11, token_t::StructMemberBegin},
{11, token_t::FieldNameBegin},
{13, token_t::FieldNameEnd},
{15, token_t::LineEnd},
// Line 2 (invalid)
{16, token_t::StructBegin},
{17, token_t::StructMemberBegin},
{17, token_t::FieldNameBegin},
{19, token_t::FieldNameEnd},
{21, token_t::StructBegin},
{22, token_t::StructMemberBegin},
{22, token_t::FieldNameBegin},
{24, token_t::FieldNameEnd},
{26, token_t::ListBegin},
{27, token_t::ValueBegin},
{29, token_t::ErrorBegin},
{30, token_t::LineEnd},
// Line 3 (invalid)
{31, token_t::StructBegin},
{32, token_t::StructMemberBegin},
{32, token_t::FieldNameBegin},
{34, token_t::FieldNameEnd},
{36, token_t::ListBegin},
{37, token_t::ValueBegin},
{38, token_t::ValueEnd},
{38, token_t::ListEnd},
{39, token_t::StructMemberEnd},
{39, token_t::StructEnd},
{40, token_t::ErrorBegin},
{40, token_t::LineEnd},
// Line 4
{41, token_t::LineEnd},
// Line 5
{42, token_t::StructBegin},
{43, token_t::StructMemberBegin},
{43, token_t::FieldNameBegin},
{45, token_t::FieldNameEnd},
{47, token_t::ValueBegin},
{50, token_t::ValueEnd},
{50, token_t::StructMemberEnd},
{50, token_t::StructEnd}};
std::vector<tuple_t> const expected_output = {// Line 0 (invalid)
{0, token_t::StructBegin},
{0, token_t::StructEnd},
// Line 1
{10, token_t::StructBegin},
{11, token_t::StructMemberBegin},
{11, token_t::FieldNameBegin},
{13, token_t::FieldNameEnd},
// Line 2 (invalid)
{0, token_t::StructBegin},
{0, token_t::StructEnd},
// Line 3 (invalid)
{0, token_t::StructBegin},
{0, token_t::StructEnd},
// Line 4 (empty)
// Line 5
{42, token_t::StructBegin},
{43, token_t::StructMemberBegin},
{43, token_t::FieldNameBegin},
{45, token_t::FieldNameEnd},
{47, token_t::ValueBegin},
{50, token_t::ValueEnd},
{50, token_t::StructMemberEnd},
{50, token_t::StructEnd}};
// Decompose tuples
auto const stream = cudf::get_default_stream();
std::vector<token_index_t> offsets(input.size());
std::vector<cuio_json::PdaTokenT> tokens(input.size());
auto token_tuples = thrust::make_zip_iterator(offsets.begin(), tokens.begin());
thrust::copy(input.cbegin(), input.cend(), token_tuples);
// Initialize device-side test data
auto const d_offsets = cudf::detail::make_device_uvector_async(
cudf::host_span<token_index_t const>{offsets.data(), offsets.size()},
stream,
rmm::mr::get_current_device_resource());
auto const d_tokens =
cudf::detail::make_device_uvector_async(tokens, stream, rmm::mr::get_current_device_resource());
// Run system-under-test
auto [d_filtered_tokens, d_filtered_indices] =
cuio_json::detail::process_token_stream(d_tokens, d_offsets, stream);
auto const filtered_tokens = cudf::detail::make_std_vector_async(d_filtered_tokens, stream);
auto const filtered_indices = cudf::detail::make_std_vector_async(d_filtered_indices, stream);
// Verify the number of tokens matches
ASSERT_EQ(filtered_tokens.size(), expected_output.size());
ASSERT_EQ(filtered_indices.size(), expected_output.size());
for (std::size_t i = 0; i < filtered_tokens.size(); i++) {
    // Ensure the indices the tokens point to match
EXPECT_EQ(thrust::get<0>(expected_output[i]), filtered_indices[i]) << "Mismatch at #" << i;
// Ensure the token category is correct
EXPECT_EQ(thrust::get<1>(expected_output[i]), filtered_tokens[i]) << "Mismatch at #" << i;
}
}
TEST_P(JsonParserTest, UTF_JSON)
{
// Prepare cuda stream for data transfers & kernels
auto const stream = cudf::get_default_stream();
auto mr = rmm::mr::get_current_device_resource();
bool const is_full_gpu = GetParam();
auto json_parser = is_full_gpu ? cuio_json::detail::device_parse_nested_json
: cuio_json::detail::host_parse_nested_json;
// Default parsing options
cudf::io::json_reader_options default_options{};
// Only ASCII string
std::string const ascii_pass = R"([
{"a":1,"b":2,"c":[3], "d": {}},
{"a":1,"b":4.0,"c":[], "d": {"year":1882,"author": "Bharathi"}},
{"a":1,"b":6.0,"c":[5, 7], "d": null},
{"a":1,"b":8.0,"c":null, "d": {}},
{"a":1,"b":null,"c":null},
{"a":1,"b":Infinity,"c":[null], "d": {"year":-600,"author": "Kaniyan"}}])";
auto const d_ascii_pass = cudf::detail::make_device_uvector_sync(
cudf::host_span<char const>{ascii_pass.c_str(), ascii_pass.size()},
stream,
rmm::mr::get_current_device_resource());
CUDF_EXPECT_NO_THROW(json_parser(d_ascii_pass, default_options, stream, mr));
// utf-8 string that fails parsing.
std::string const utf_failed = R"([
{"a":1,"b":2,"c":[3], "d": {}},
{"a":1,"b":4.0,"c":[], "d": {"year":1882,"author": "Bharathi"}},
{"a":1,"b":6.0,"c":[5, 7], "d": null},
{"a":1,"b":8.0,"c":null, "d": {}},
{"a":1,"b":null,"c":null},
{"a":1,"b":Infinity,"c":[null], "d": {"year":-600,"author": "filip ʒakotɛ"}}])";
auto const d_utf_failed = cudf::detail::make_device_uvector_sync(
cudf::host_span<char const>{utf_failed.c_str(), utf_failed.size()},
stream,
rmm::mr::get_current_device_resource());
CUDF_EXPECT_NO_THROW(json_parser(d_utf_failed, default_options, stream, mr));
// utf-8 string that passes parsing.
std::string const utf_pass = R"([
{"a":1,"b":2,"c":[3], "d": {}},
{"a":1,"b":4.0,"c":[], "d": {"year":1882,"author": "Bharathi"}},
{"a":1,"b":6.0,"c":[5, 7], "d": null},
{"a":1,"b":8.0,"c":null, "d": {}},
{"a":1,"b":null,"c":null},
{"a":1,"b":Infinity,"c":[null], "d": {"year":-600,"author": "Kaniyan"}},
{"a":1,"b":NaN,"c":[null, null], "d": {"year": 2, "author": "filip ʒakotɛ"}}])";
auto const d_utf_pass = cudf::detail::make_device_uvector_sync(
cudf::host_span<char const>{utf_pass.c_str(), utf_pass.size()},
stream,
rmm::mr::get_current_device_resource());
CUDF_EXPECT_NO_THROW(json_parser(d_utf_pass, default_options, stream, mr));
}
TEST_P(JsonParserTest, ExtractColumnWithQuotes)
{
using cuio_json::SymbolT;
bool const is_full_gpu = GetParam();
auto json_parser = is_full_gpu ? cuio_json::detail::device_parse_nested_json
: cuio_json::detail::host_parse_nested_json;
// Prepare cuda stream for data transfers & kernels
auto const stream = cudf::get_default_stream();
auto mr = rmm::mr::get_current_device_resource();
// Default parsing options
cudf::io::json_reader_options options{};
options.enable_keep_quotes(true);
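  // With keep_quotes enabled, string values retain their surrounding quotation marks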
std::string const input = R"( [{"a":"0.0", "b":1.0}, {"b":1.1}, {"b":2.1, "a":"2.0"}] )";
auto const d_input = cudf::detail::make_device_uvector_async(
cudf::host_span<char const>{input.c_str(), input.size()},
stream,
rmm::mr::get_current_device_resource());
// Get the JSON's tree representation
auto const cudf_table = json_parser(d_input, options, stream, mr);
auto constexpr expected_col_count = 2;
EXPECT_EQ(cudf_table.tbl->num_columns(), expected_col_count);
auto expected_col1 =
cudf::test::strings_column_wrapper({R"("0.0")", R"()", R"("2.0")"}, {true, false, true});
auto expected_col2 =
cudf::test::fixed_width_column_wrapper<double>({1.0, 1.1, 2.1}, {true, true, true});
cudf::column_view parsed_col1 = cudf_table.tbl->get_column(0);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected_col1, parsed_col1);
cudf::column_view parsed_col2 = cudf_table.tbl->get_column(1);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected_col2, parsed_col2);
}
TEST_P(JsonParserTest, ExpectFailMixStructAndList)
{
using cuio_json::SymbolT;
bool const is_full_gpu = GetParam();
auto json_parser = is_full_gpu ? cuio_json::detail::device_parse_nested_json
: cuio_json::detail::host_parse_nested_json;
// Prepare cuda stream for data transfers & kernels
auto const stream = cudf::get_default_stream();
auto mr = rmm::mr::get_current_device_resource();
// Default parsing options
cudf::io::json_reader_options options{};
options.enable_keep_quotes(true);
std::vector<std::string> const inputs_fail{
R"( [{"a":[123], "b":1.0}, {"b":1.1}, {"b":2.1, "a":{"0":123}}] )",
R"( [{"a":{"0":"foo"}, "b":1.0}, {"b":1.1}, {"b":2.1, "a":[123]}] )",
R"( [{"a":{"0":null}, "b":1.0}, {"b":1.1}, {"b":2.1, "a":[123]}] )"};
std::vector<std::string> const inputs_succeed{
R"( [{"a":[123, {"0": 123}], "b":1.0}, {"b":1.1}, {"b":2.1}] )",
R"( [{"a":[123, "123"], "b":1.0}, {"b":1.1}, {"b":2.1}] )"};
// libcudf does not currently support a mix of lists and structs.
for (auto const& input : inputs_fail) {
auto const d_input = cudf::detail::make_device_uvector_async(
cudf::host_span<char const>{input.c_str(), input.size()},
stream,
rmm::mr::get_current_device_resource());
EXPECT_THROW(auto const cudf_table = json_parser(d_input, options, stream, mr),
cudf::logic_error);
}
for (auto const& input : inputs_succeed) {
auto const d_input = cudf::detail::make_device_uvector_async(
cudf::host_span<char const>{input.c_str(), input.size()},
stream,
rmm::mr::get_current_device_resource());
CUDF_EXPECT_NO_THROW(auto const cudf_table = json_parser(d_input, options, stream, mr));
}
}
TEST_P(JsonParserTest, EmptyString)
{
using cuio_json::SymbolT;
bool const is_full_gpu = GetParam();
auto json_parser = is_full_gpu ? cuio_json::detail::device_parse_nested_json
: cuio_json::detail::host_parse_nested_json;
// Prepare cuda stream for data transfers & kernels
auto const stream = cudf::get_default_stream();
auto mr = rmm::mr::get_current_device_resource();
// Default parsing options
cudf::io::json_reader_options default_options{};
std::string const input = R"([])";
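  // An empty JSON list should produce a table with no columns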
auto const d_input =
cudf::detail::make_device_uvector_sync(cudf::host_span<char const>{input.c_str(), input.size()},
stream,
rmm::mr::get_current_device_resource());
// Get the JSON's tree representation
auto const cudf_table = json_parser(d_input, default_options, stream, mr);
auto const expected_col_count = 0;
EXPECT_EQ(cudf_table.tbl->num_columns(), expected_col_count);
}
CUDF_TEST_PROGRAM_MAIN()
| 0 |
rapidsai_public_repos/cudf/cpp/tests
|
rapidsai_public_repos/cudf/cpp/tests/io/parquet_test.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/io_metadata_utilities.hpp>
#include <cudf_test/iterator_utilities.hpp>
#include <cudf_test/table_utilities.hpp>
#include <cudf_test/type_lists.hpp>
#include <cudf/concatenate.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/io/data_sink.hpp>
#include <cudf/io/datasource.hpp>
#include <cudf/io/parquet.hpp>
#include <cudf/io/parquet_metadata.hpp>
#include <cudf/stream_compaction.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/transform.hpp>
#include <cudf/unary.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf/wrappers/timestamps.hpp>
#include <src/io/parquet/compact_protocol_reader.hpp>
#include <src/io/parquet/parquet.hpp>
#include <src/io/parquet/parquet_gpu.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <fstream>
#include <random>
#include <type_traits>
template <typename T, typename SourceElementT = T>
using column_wrapper =
typename std::conditional<std::is_same_v<T, cudf::string_view>,
cudf::test::strings_column_wrapper,
cudf::test::fixed_width_column_wrapper<T, SourceElementT>>::type;
using column = cudf::column;
using table = cudf::table;
using table_view = cudf::table_view;
// Global environment for temporary files
auto const temp_env = static_cast<cudf::test::TempDirTestEnvironment*>(
::testing::AddGlobalTestEnvironment(new cudf::test::TempDirTestEnvironment));
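// Builds a table of `num_columns` identical fixed-width columns generated from `elements`,
// optionally with an alternating (every other row valid) null mask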
template <typename T, typename Elements>
std::unique_ptr<cudf::table> create_fixed_table(cudf::size_type num_columns,
cudf::size_type num_rows,
bool include_validity,
Elements elements)
{
auto valids =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2 == 0; });
std::vector<cudf::test::fixed_width_column_wrapper<T>> src_cols(num_columns);
for (int idx = 0; idx < num_columns; idx++) {
if (include_validity) {
src_cols[idx] =
cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_rows, valids);
} else {
src_cols[idx] = cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_rows);
}
}
std::vector<std::unique_ptr<cudf::column>> columns(num_columns);
std::transform(src_cols.begin(),
src_cols.end(),
columns.begin(),
[](cudf::test::fixed_width_column_wrapper<T>& in) {
auto ret = in.release();
// pre-cache the null count
[[maybe_unused]] auto const nulls = ret->has_nulls();
return ret;
});
return std::make_unique<cudf::table>(std::move(columns));
}
template <typename T>
std::unique_ptr<cudf::table> create_random_fixed_table(cudf::size_type num_columns,
cudf::size_type num_rows,
bool include_validity)
{
auto rand_elements =
cudf::detail::make_counting_transform_iterator(0, [](T i) { return rand(); });
return create_fixed_table<T>(num_columns, num_rows, include_validity, rand_elements);
}
template <typename T>
std::unique_ptr<cudf::table> create_compressible_fixed_table(cudf::size_type num_columns,
cudf::size_type num_rows,
cudf::size_type period,
bool include_validity)
{
auto compressible_elements =
cudf::detail::make_counting_transform_iterator(0, [period](T i) { return i / period; });
return create_fixed_table<T>(num_columns, num_rows, include_validity, compressible_elements);
}
// this function replicates the "list_gen" function in
// python/cudf/cudf/tests/test_parquet.py
template <typename T>
std::unique_ptr<cudf::column> make_parquet_list_list_col(
int skip_rows, int num_rows, int lists_per_row, int list_size, bool include_validity)
{
auto valids =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2 == 0 ? 1 : 0; });
// root list
std::vector<int> row_offsets(num_rows + 1);
int row_offset_count = 0;
{
int offset = 0;
for (int idx = 0; idx < (num_rows) + 1; idx++) {
row_offsets[row_offset_count] = offset;
if (!include_validity || valids[idx]) { offset += lists_per_row; }
row_offset_count++;
}
}
cudf::test::fixed_width_column_wrapper<int> offsets(row_offsets.begin(),
row_offsets.begin() + row_offset_count);
// child list
std::vector<int> child_row_offsets((num_rows * lists_per_row) + 1);
int child_row_offset_count = 0;
{
int offset = 0;
for (int idx = 0; idx < (num_rows * lists_per_row); idx++) {
int row_index = idx / lists_per_row;
if (include_validity && !valids[row_index]) { continue; }
child_row_offsets[child_row_offset_count] = offset;
offset += list_size;
child_row_offset_count++;
}
child_row_offsets[child_row_offset_count++] = offset;
}
cudf::test::fixed_width_column_wrapper<int> child_offsets(
child_row_offsets.begin(), child_row_offsets.begin() + child_row_offset_count);
// child values
std::vector<T> child_values(num_rows * lists_per_row * list_size);
T first_child_value_index = skip_rows * lists_per_row * list_size;
int child_value_count = 0;
{
for (int idx = 0; idx < (num_rows * lists_per_row * list_size); idx++) {
int row_index = idx / (lists_per_row * list_size);
int val = first_child_value_index;
first_child_value_index++;
if (include_validity && !valids[row_index]) { continue; }
child_values[child_value_count] = val;
child_value_count++;
}
}
// validity by value instead of index
auto valids2 = cudf::detail::make_counting_transform_iterator(
0, [list_size](auto i) { return (i % list_size) % 2 == 0 ? 1 : 0; });
auto child_data = include_validity
? cudf::test::fixed_width_column_wrapper<T>(
child_values.begin(), child_values.begin() + child_value_count, valids2)
: cudf::test::fixed_width_column_wrapper<T>(
child_values.begin(), child_values.begin() + child_value_count);
int child_offsets_size = static_cast<cudf::column_view>(child_offsets).size() - 1;
auto child = cudf::make_lists_column(
child_offsets_size, child_offsets.release(), child_data.release(), 0, rmm::device_buffer{});
int offsets_size = static_cast<cudf::column_view>(offsets).size() - 1;
auto [null_mask, null_count] = cudf::test::detail::make_null_mask(valids, valids + offsets_size);
return include_validity
? cudf::make_lists_column(
offsets_size, offsets.release(), std::move(child), null_count, std::move(null_mask))
: cudf::make_lists_column(
offsets_size, offsets.release(), std::move(child), 0, rmm::device_buffer{});
}
// given a datasource pointing to a parquet file, read the footer
// of the file to populate the FileMetaData pointed to by file_meta_data.
// throws cudf::logic_error if the file or metadata is invalid.
void read_footer(std::unique_ptr<cudf::io::datasource> const& source,
cudf::io::parquet::detail::FileMetaData* file_meta_data)
{
constexpr auto header_len = sizeof(cudf::io::parquet::detail::file_header_s);
constexpr auto ender_len = sizeof(cudf::io::parquet::detail::file_ender_s);
auto const len = source->size();
auto const header_buffer = source->host_read(0, header_len);
auto const header =
reinterpret_cast<cudf::io::parquet::detail::file_header_s const*>(header_buffer->data());
auto const ender_buffer = source->host_read(len - ender_len, ender_len);
auto const ender =
reinterpret_cast<cudf::io::parquet::detail::file_ender_s const*>(ender_buffer->data());
// checks for valid header, footer, and file length
ASSERT_GT(len, header_len + ender_len);
ASSERT_TRUE(header->magic == cudf::io::parquet::detail::parquet_magic &&
ender->magic == cudf::io::parquet::detail::parquet_magic);
ASSERT_TRUE(ender->footer_len != 0 && ender->footer_len <= (len - header_len - ender_len));
// parquet files end with 4-byte footer_length and 4-byte magic == "PAR1"
// seek backwards from the end of the file (footer_length + 8 bytes of ender)
auto const footer_buffer =
source->host_read(len - ender->footer_len - ender_len, ender->footer_len);
cudf::io::parquet::detail::CompactProtocolReader cp(footer_buffer->data(), ender->footer_len);
// returns true on success
bool res = cp.read(file_meta_data);
ASSERT_TRUE(res);
}
// returns the number of bits used for dictionary encoding data at the given page location.
// this assumes the data is uncompressed.
// throws cudf::logic_error if the page_loc data is invalid.
int read_dict_bits(std::unique_ptr<cudf::io::datasource> const& source,
cudf::io::parquet::detail::PageLocation const& page_loc)
{
CUDF_EXPECTS(page_loc.offset > 0, "Cannot find page header");
CUDF_EXPECTS(page_loc.compressed_page_size > 0, "Invalid page header length");
cudf::io::parquet::detail::PageHeader page_hdr;
auto const page_buf = source->host_read(page_loc.offset, page_loc.compressed_page_size);
cudf::io::parquet::detail::CompactProtocolReader cp(page_buf->data(), page_buf->size());
bool res = cp.read(&page_hdr);
CUDF_EXPECTS(res, "Cannot parse page header");
// cp should be pointing at the start of page data now. the first byte
// should be the encoding bit size
return cp.getb();
}
// read column index from datasource at location indicated by chunk,
// parse and return as a ColumnIndex struct.
// throws cudf::logic_error if the chunk data is invalid.
cudf::io::parquet::detail::ColumnIndex read_column_index(
std::unique_ptr<cudf::io::datasource> const& source,
cudf::io::parquet::detail::ColumnChunk const& chunk)
{
CUDF_EXPECTS(chunk.column_index_offset > 0, "Cannot find column index");
CUDF_EXPECTS(chunk.column_index_length > 0, "Invalid column index length");
cudf::io::parquet::detail::ColumnIndex colidx;
auto const ci_buf = source->host_read(chunk.column_index_offset, chunk.column_index_length);
cudf::io::parquet::detail::CompactProtocolReader cp(ci_buf->data(), ci_buf->size());
bool res = cp.read(&colidx);
CUDF_EXPECTS(res, "Cannot parse column index");
return colidx;
}
// read offset index from datasource at location indicated by chunk,
// parse and return as an OffsetIndex struct.
// throws cudf::logic_error if the chunk data is invalid.
cudf::io::parquet::detail::OffsetIndex read_offset_index(
std::unique_ptr<cudf::io::datasource> const& source,
cudf::io::parquet::detail::ColumnChunk const& chunk)
{
CUDF_EXPECTS(chunk.offset_index_offset > 0, "Cannot find offset index");
CUDF_EXPECTS(chunk.offset_index_length > 0, "Invalid offset index length");
cudf::io::parquet::detail::OffsetIndex offidx;
auto const oi_buf = source->host_read(chunk.offset_index_offset, chunk.offset_index_length);
cudf::io::parquet::detail::CompactProtocolReader cp(oi_buf->data(), oi_buf->size());
bool res = cp.read(&offidx);
CUDF_EXPECTS(res, "Cannot parse offset index");
return offidx;
}
// Return the Statistics from the column chunk metadata
cudf::io::parquet::detail::Statistics const& get_statistics(
cudf::io::parquet::detail::ColumnChunk const& chunk)
{
return chunk.meta_data.statistics;
}
// read page header from datasource at location indicated by page_loc,
// parse and return as a PageHeader struct.
// throws cudf::logic_error if the page_loc data is invalid.
cudf::io::parquet::detail::PageHeader read_page_header(
std::unique_ptr<cudf::io::datasource> const& source,
cudf::io::parquet::detail::PageLocation const& page_loc)
{
CUDF_EXPECTS(page_loc.offset > 0, "Cannot find page header");
CUDF_EXPECTS(page_loc.compressed_page_size > 0, "Invalid page header length");
cudf::io::parquet::detail::PageHeader page_hdr;
auto const page_buf = source->host_read(page_loc.offset, page_loc.compressed_page_size);
cudf::io::parquet::detail::CompactProtocolReader cp(page_buf->data(), page_buf->size());
bool res = cp.read(&page_hdr);
CUDF_EXPECTS(res, "Cannot parse page header");
return page_hdr;
}
// Base test fixture for parquet writer tests
struct ParquetWriterTest : public cudf::test::BaseFixture {};
// Base test fixture for parquet reader tests
struct ParquetReaderTest : public cudf::test::BaseFixture {};
// Base test fixture for "stress" tests
struct ParquetWriterStressTest : public cudf::test::BaseFixture {};
// Typed test fixture for numeric type tests
template <typename T>
struct ParquetWriterNumericTypeTest : public ParquetWriterTest {
auto type() { return cudf::data_type{cudf::type_to_id<T>()}; }
};
// Typed test fixture for comparable type tests
template <typename T>
struct ParquetWriterComparableTypeTest : public ParquetWriterTest {
auto type() { return cudf::data_type{cudf::type_to_id<T>()}; }
};
// Typed test fixture for chrono type tests
template <typename T>
struct ParquetWriterChronoTypeTest : public ParquetWriterTest {
auto type() { return cudf::data_type{cudf::type_to_id<T>()}; }
};
// Typed test fixture for timestamp type tests
template <typename T>
struct ParquetWriterTimestampTypeTest : public ParquetWriterTest {
auto type() { return cudf::data_type{cudf::type_to_id<T>()}; }
};
// Typed test fixture for all types
template <typename T>
struct ParquetWriterSchemaTest : public ParquetWriterTest {
auto type() { return cudf::data_type{cudf::type_to_id<T>()}; }
};
template <typename T>
struct ParquetReaderSourceTest : public ParquetReaderTest {};
template <typename T>
struct ParquetWriterDeltaTest : public ParquetWriterTest {};
// Declare typed test cases
// TODO: Replace with `NumericTypes` when unsigned support is added. Issue #5352
using SupportedTypes = cudf::test::Types<int8_t, int16_t, int32_t, int64_t, bool, float, double>;
TYPED_TEST_SUITE(ParquetWriterNumericTypeTest, SupportedTypes);
using ComparableAndFixedTypes =
cudf::test::Concat<cudf::test::ComparableTypes, cudf::test::FixedPointTypes>;
TYPED_TEST_SUITE(ParquetWriterComparableTypeTest, ComparableAndFixedTypes);
TYPED_TEST_SUITE(ParquetWriterChronoTypeTest, cudf::test::ChronoTypes);
using SupportedTimestampTypes =
cudf::test::Types<cudf::timestamp_ms, cudf::timestamp_us, cudf::timestamp_ns>;
TYPED_TEST_SUITE(ParquetWriterTimestampTypeTest, SupportedTimestampTypes);
TYPED_TEST_SUITE(ParquetWriterSchemaTest, cudf::test::AllTypes);
using ByteLikeTypes = cudf::test::Types<int8_t, char, uint8_t, unsigned char, std::byte>;
TYPED_TEST_SUITE(ParquetReaderSourceTest, ByteLikeTypes);
// Base test fixture for chunked writer tests
struct ParquetChunkedWriterTest : public cudf::test::BaseFixture {};
// Typed test fixture for numeric type tests
template <typename T>
struct ParquetChunkedWriterNumericTypeTest : public ParquetChunkedWriterTest {
auto type() { return cudf::data_type{cudf::type_to_id<T>()}; }
};
// Declare typed test cases
TYPED_TEST_SUITE(ParquetChunkedWriterNumericTypeTest, SupportedTypes);
// Base test fixture for size-parameterized tests
class ParquetSizedTest : public ::cudf::test::BaseFixtureWithParam<int> {};
// test the allowed bit widths for dictionary encoding
INSTANTIATE_TEST_SUITE_P(ParquetDictionaryTest,
ParquetSizedTest,
testing::Range(1, 25),
testing::PrintToStringParamName());
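// note: testing::Range(1, 25) is half-open, so the suite is instantiated for bit widths
// 1 through 24.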
// Base test fixture for V2 header tests
class ParquetV2Test : public ::cudf::test::BaseFixtureWithParam<bool> {};
INSTANTIATE_TEST_SUITE_P(ParquetV2ReadWriteTest,
ParquetV2Test,
testing::Bool(),
testing::PrintToStringParamName());
namespace {
// Generates a vector of uniform random values of type T
template <typename T>
inline auto random_values(size_t size)
{
std::vector<T> values(size);
using T1 = T;
using uniform_distribution =
typename std::conditional_t<std::is_same_v<T1, bool>,
std::bernoulli_distribution,
std::conditional_t<std::is_floating_point_v<T1>,
std::uniform_real_distribution<T1>,
std::uniform_int_distribution<T1>>>;
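// the engine and distribution are static, so repeated calls for the same T continue one
// deterministic sequence seeded once per type rather than restarting from the seed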
static constexpr auto seed = 0xf00d;
static std::mt19937 engine{seed};
static uniform_distribution dist{};
std::generate_n(values.begin(), size, [&]() { return T{dist(engine)}; });
return values;
}
} // namespace
TYPED_TEST(ParquetWriterNumericTypeTest, SingleColumn)
{
auto sequence =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return TypeParam(i % 400); });
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
constexpr auto num_rows = 800;
column_wrapper<TypeParam> col(sequence, sequence + num_rows, validity);
auto expected = table_view{{col}};
auto filepath = temp_env->get_temp_filepath("SingleColumn.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected);
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
}
TYPED_TEST(ParquetWriterNumericTypeTest, SingleColumnWithNulls)
{
auto sequence =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return TypeParam(i); });
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return (i % 2); });
constexpr auto num_rows = 100;
column_wrapper<TypeParam> col(sequence, sequence + num_rows, validity);
auto expected = table_view{{col}};
auto filepath = temp_env->get_temp_filepath("SingleColumnWithNulls.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected);
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
}
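// Round-trips duration columns at day, second, milli-, micro- and nanosecond resolution.
// mask_op is a predicate on the row index that controls which rows are valid.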
template <typename mask_op_t>
void test_durations(mask_op_t mask_op)
{
std::default_random_engine generator;
std::uniform_int_distribution<int> distribution_d(0, 30);
auto sequence_d = cudf::detail::make_counting_transform_iterator(
0, [&](auto i) { return distribution_d(generator); });
std::uniform_int_distribution<int> distribution_s(0, 86400);
auto sequence_s = cudf::detail::make_counting_transform_iterator(
0, [&](auto i) { return distribution_s(generator); });
std::uniform_int_distribution<int> distribution(0, 86400 * 1000);
auto sequence = cudf::detail::make_counting_transform_iterator(
0, [&](auto i) { return distribution(generator); });
auto mask = cudf::detail::make_counting_transform_iterator(0, mask_op);
constexpr auto num_rows = 100;
// Durations longer than a day are not strictly valid, but cudf should still round-trip them
auto durations_d = cudf::test::fixed_width_column_wrapper<cudf::duration_D, int64_t>(
sequence_d, sequence_d + num_rows, mask);
auto durations_s = cudf::test::fixed_width_column_wrapper<cudf::duration_s, int64_t>(
sequence_s, sequence_s + num_rows, mask);
auto durations_ms = cudf::test::fixed_width_column_wrapper<cudf::duration_ms, int64_t>(
sequence, sequence + num_rows, mask);
auto durations_us = cudf::test::fixed_width_column_wrapper<cudf::duration_us, int64_t>(
sequence, sequence + num_rows, mask);
auto durations_ns = cudf::test::fixed_width_column_wrapper<cudf::duration_ns, int64_t>(
sequence, sequence + num_rows, mask);
auto expected = table_view{{durations_d, durations_s, durations_ms, durations_us, durations_ns}};
auto filepath = temp_env->get_temp_filepath("Durations.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected);
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(in_opts);
auto durations_d_got =
cudf::cast(result.tbl->view().column(0), cudf::data_type{cudf::type_id::DURATION_DAYS});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(durations_d, durations_d_got->view());
auto durations_s_got =
cudf::cast(result.tbl->view().column(1), cudf::data_type{cudf::type_id::DURATION_SECONDS});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(durations_s, durations_s_got->view());
CUDF_TEST_EXPECT_COLUMNS_EQUAL(durations_ms, result.tbl->view().column(2));
CUDF_TEST_EXPECT_COLUMNS_EQUAL(durations_us, result.tbl->view().column(3));
CUDF_TEST_EXPECT_COLUMNS_EQUAL(durations_ns, result.tbl->view().column(4));
}
TEST_F(ParquetWriterTest, Durations)
{
test_durations([](auto i) { return true; });
test_durations([](auto i) { return (i % 2) != 0; });
test_durations([](auto i) { return (i % 3) != 0; });
test_durations([](auto i) { return false; });
}
TYPED_TEST(ParquetWriterTimestampTypeTest, Timestamps)
{
auto sequence = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return ((std::rand() / 10000) * 1000); });
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
constexpr auto num_rows = 100;
column_wrapper<TypeParam, typename decltype(sequence)::value_type> col(
sequence, sequence + num_rows, validity);
auto expected = table_view{{col}};
auto filepath = temp_env->get_temp_filepath("Timestamps.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected);
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath})
.timestamp_type(this->type());
auto result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
}
TYPED_TEST(ParquetWriterTimestampTypeTest, TimestampsWithNulls)
{
auto sequence = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return ((std::rand() / 10000) * 1000); });
auto validity =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return (i > 30) && (i < 60); });
constexpr auto num_rows = 100;
column_wrapper<TypeParam, typename decltype(sequence)::value_type> col(
sequence, sequence + num_rows, validity);
auto expected = table_view{{col}};
auto filepath = temp_env->get_temp_filepath("TimestampsWithNulls.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected);
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath})
.timestamp_type(this->type());
auto result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
}
TYPED_TEST(ParquetWriterTimestampTypeTest, TimestampOverflow)
{
constexpr int64_t max = std::numeric_limits<int64_t>::max();
auto sequence = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return max - i; });
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
constexpr auto num_rows = 100;
column_wrapper<TypeParam, typename decltype(sequence)::value_type> col(
sequence, sequence + num_rows, validity);
table_view expected({col});
auto filepath = temp_env->get_temp_filepath("ParquetTimestampOverflow.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected);
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath})
.timestamp_type(this->type());
auto result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
}
TEST_P(ParquetV2Test, MultiColumn)
{
constexpr auto num_rows = 100000;
auto const is_v2 = GetParam();
// auto col0_data = random_values<bool>(num_rows);
auto col1_data = random_values<int8_t>(num_rows);
auto col2_data = random_values<int16_t>(num_rows);
auto col3_data = random_values<int32_t>(num_rows);
auto col4_data = random_values<float>(num_rows);
auto col5_data = random_values<double>(num_rows);
auto col6_vals = random_values<int16_t>(num_rows);
auto col7_vals = random_values<int32_t>(num_rows);
auto col8_vals = random_values<int64_t>(num_rows);
auto col6_data = cudf::detail::make_counting_transform_iterator(0, [col6_vals](auto i) {
return numeric::decimal32{col6_vals[i], numeric::scale_type{5}};
});
auto col7_data = cudf::detail::make_counting_transform_iterator(0, [col7_vals](auto i) {
return numeric::decimal64{col7_vals[i], numeric::scale_type{-5}};
});
auto col8_data = cudf::detail::make_counting_transform_iterator(0, [col8_vals](auto i) {
return numeric::decimal128{col8_vals[i], numeric::scale_type{-6}};
});
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
// column_wrapper<bool> col0{
// col0_data.begin(), col0_data.end(), validity};
column_wrapper<int8_t> col1{col1_data.begin(), col1_data.end(), validity};
column_wrapper<int16_t> col2{col2_data.begin(), col2_data.end(), validity};
column_wrapper<int32_t> col3{col3_data.begin(), col3_data.end(), validity};
column_wrapper<float> col4{col4_data.begin(), col4_data.end(), validity};
column_wrapper<double> col5{col5_data.begin(), col5_data.end(), validity};
column_wrapper<numeric::decimal32> col6{col6_data, col6_data + num_rows, validity};
column_wrapper<numeric::decimal64> col7{col7_data, col7_data + num_rows, validity};
column_wrapper<numeric::decimal128> col8{col8_data, col8_data + num_rows, validity};
auto expected = table_view{{col1, col2, col3, col4, col5, col6, col7, col8}};
cudf::io::table_input_metadata expected_metadata(expected);
// expected_metadata.column_metadata[0].set_name( "bools");
expected_metadata.column_metadata[0].set_name("int8s");
expected_metadata.column_metadata[1].set_name("int16s");
expected_metadata.column_metadata[2].set_name("int32s");
expected_metadata.column_metadata[3].set_name("floats");
expected_metadata.column_metadata[4].set_name("doubles");
expected_metadata.column_metadata[5].set_name("decimal32s").set_decimal_precision(10);
expected_metadata.column_metadata[6].set_name("decimal64s").set_decimal_precision(20);
expected_metadata.column_metadata[7].set_name("decimal128s").set_decimal_precision(40);
auto filepath = temp_env->get_temp_filepath("MultiColumn.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.write_v2_headers(is_v2)
.metadata(expected_metadata);
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
TEST_P(ParquetV2Test, MultiColumnWithNulls)
{
constexpr auto num_rows = 100;
auto const is_v2 = GetParam();
// auto col0_data = random_values<bool>(num_rows);
auto col1_data = random_values<int8_t>(num_rows);
auto col2_data = random_values<int16_t>(num_rows);
auto col3_data = random_values<int32_t>(num_rows);
auto col4_data = random_values<float>(num_rows);
auto col5_data = random_values<double>(num_rows);
auto col6_vals = random_values<int32_t>(num_rows);
auto col7_vals = random_values<int64_t>(num_rows);
auto col6_data = cudf::detail::make_counting_transform_iterator(0, [col6_vals](auto i) {
return numeric::decimal32{col6_vals[i], numeric::scale_type{-2}};
});
auto col7_data = cudf::detail::make_counting_transform_iterator(0, [col7_vals](auto i) {
return numeric::decimal64{col7_vals[i], numeric::scale_type{-8}};
});
// auto col0_mask = cudf::detail::make_counting_transform_iterator(
// 0, [](auto i) { return (i % 2); });
auto col1_mask =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return (i < 10); });
auto col2_mask = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
auto col3_mask =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return (i == (num_rows - 1)); });
auto col4_mask =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return (i >= 40 && i <= 60); });
auto col5_mask =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return (i > 80); });
auto col6_mask =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return (i % 5); });
auto col7_mask =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return (i != 55); });
// column_wrapper<bool> col0{
// col0_data.begin(), col0_data.end(), col0_mask};
column_wrapper<int8_t> col1{col1_data.begin(), col1_data.end(), col1_mask};
column_wrapper<int16_t> col2{col2_data.begin(), col2_data.end(), col2_mask};
column_wrapper<int32_t> col3{col3_data.begin(), col3_data.end(), col3_mask};
column_wrapper<float> col4{col4_data.begin(), col4_data.end(), col4_mask};
column_wrapper<double> col5{col5_data.begin(), col5_data.end(), col5_mask};
column_wrapper<numeric::decimal32> col6{col6_data, col6_data + num_rows, col6_mask};
column_wrapper<numeric::decimal64> col7{col7_data, col7_data + num_rows, col7_mask};
auto expected = table_view{{/*col0, */ col1, col2, col3, col4, col5, col6, col7}};
cudf::io::table_input_metadata expected_metadata(expected);
// expected_metadata.column_names.emplace_back("bools");
expected_metadata.column_metadata[0].set_name("int8s");
expected_metadata.column_metadata[1].set_name("int16s");
expected_metadata.column_metadata[2].set_name("int32s");
expected_metadata.column_metadata[3].set_name("floats");
expected_metadata.column_metadata[4].set_name("doubles");
expected_metadata.column_metadata[5].set_name("decimal32s").set_decimal_precision(9);
expected_metadata.column_metadata[6].set_name("decimal64s").set_decimal_precision(20);
auto filepath = temp_env->get_temp_filepath("MultiColumnWithNulls.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.write_v2_headers(is_v2)
.metadata(expected_metadata);
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
// TODO: Need to be able to return metadata in tree form from the reader so it can be compared.
// Unfortunately the closest thing to a hierarchical schema is column_name_info, which has
// no tests in either C++ or Python.
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
TEST_P(ParquetV2Test, Strings)
{
auto const is_v2 = GetParam();
std::vector<char const*> strings{
"Monday", "Wȅdnȅsday", "Friday", "Monday", "Friday", "Friday", "Friday", "Funday"};
auto const num_rows = strings.size();
auto seq_col0 = random_values<int>(num_rows);
auto seq_col2 = random_values<float>(num_rows);
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
column_wrapper<int> col0{seq_col0.begin(), seq_col0.end(), validity};
column_wrapper<cudf::string_view> col1{strings.begin(), strings.end()};
column_wrapper<float> col2{seq_col2.begin(), seq_col2.end(), validity};
auto expected = table_view{{col0, col1, col2}};
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("col_other");
expected_metadata.column_metadata[1].set_name("col_string");
expected_metadata.column_metadata[2].set_name("col_another");
auto filepath = temp_env->get_temp_filepath("Strings.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.write_v2_headers(is_v2)
.metadata(expected_metadata);
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
TEST_F(ParquetWriterTest, StringsAsBinary)
{
std::vector<char const*> unicode_strings{
"Monday", "Wȅdnȅsday", "Friday", "Monday", "Friday", "Friday", "Friday", "Funday"};
std::vector<char const*> ascii_strings{
"Monday", "Wednesday", "Friday", "Monday", "Friday", "Friday", "Friday", "Funday"};
column_wrapper<cudf::string_view> col0{ascii_strings.begin(), ascii_strings.end()};
column_wrapper<cudf::string_view> col1{unicode_strings.begin(), unicode_strings.end()};
column_wrapper<cudf::string_view> col2{ascii_strings.begin(), ascii_strings.end()};
cudf::test::lists_column_wrapper<uint8_t> col3{{'M', 'o', 'n', 'd', 'a', 'y'},
{'W', 'e', 'd', 'n', 'e', 's', 'd', 'a', 'y'},
{'F', 'r', 'i', 'd', 'a', 'y'},
{'M', 'o', 'n', 'd', 'a', 'y'},
{'F', 'r', 'i', 'd', 'a', 'y'},
{'F', 'r', 'i', 'd', 'a', 'y'},
{'F', 'r', 'i', 'd', 'a', 'y'},
{'F', 'u', 'n', 'd', 'a', 'y'}};
cudf::test::lists_column_wrapper<uint8_t> col4{
{'M', 'o', 'n', 'd', 'a', 'y'},
{'W', 200, 133, 'd', 'n', 200, 133, 's', 'd', 'a', 'y'},
{'F', 'r', 'i', 'd', 'a', 'y'},
{'M', 'o', 'n', 'd', 'a', 'y'},
{'F', 'r', 'i', 'd', 'a', 'y'},
{'F', 'r', 'i', 'd', 'a', 'y'},
{'F', 'r', 'i', 'd', 'a', 'y'},
{'F', 'u', 'n', 'd', 'a', 'y'}};
auto write_tbl = table_view{{col0, col1, col2, col3, col4}};
cudf::io::table_input_metadata expected_metadata(write_tbl);
expected_metadata.column_metadata[0].set_name("col_single").set_output_as_binary(true);
expected_metadata.column_metadata[1].set_name("col_string").set_output_as_binary(true);
expected_metadata.column_metadata[2].set_name("col_another").set_output_as_binary(true);
expected_metadata.column_metadata[3].set_name("col_binary");
expected_metadata.column_metadata[4].set_name("col_binary");
auto filepath = temp_env->get_temp_filepath("BinaryStrings.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, write_tbl)
.metadata(expected_metadata);
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath})
.set_column_schema(
{cudf::io::reader_column_schema().set_convert_binary_to_strings(false),
cudf::io::reader_column_schema().set_convert_binary_to_strings(false),
cudf::io::reader_column_schema().set_convert_binary_to_strings(false),
cudf::io::reader_column_schema().add_child(cudf::io::reader_column_schema()),
cudf::io::reader_column_schema().add_child(cudf::io::reader_column_schema())});
auto result = cudf::io::read_parquet(in_opts);
auto expected = table_view{{col3, col4, col3, col3, col4}};
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
TEST_P(ParquetV2Test, SlicedTable)
{
// This test checks writing zero-copy, offset views into existing cudf tables
std::vector<char const*> strings{
"Monday", "Wȅdnȅsday", "Friday", "Monday", "Friday", "Friday", "Friday", "Funday"};
auto const num_rows = strings.size();
auto const is_v2 = GetParam();
auto seq_col0 = random_values<int>(num_rows);
auto seq_col2 = random_values<float>(num_rows);
auto validity =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 3 != 0; });
column_wrapper<int> col0{seq_col0.begin(), seq_col0.end(), validity};
column_wrapper<cudf::string_view> col1{strings.begin(), strings.end()};
column_wrapper<float> col2{seq_col2.begin(), seq_col2.end(), validity};
using lcw = cudf::test::lists_column_wrapper<uint64_t>;
lcw col3{{9, 8}, {7, 6, 5}, {}, {4}, {3, 2, 1, 0}, {20, 21, 22, 23, 24}, {}, {66, 666}};
// [[[NULL,2,NULL,4]], [[NULL,6,NULL], [8,9]]]
// [NULL, [[13],[14,15,16]], NULL]
// [NULL, [], NULL, [[]]]
// NULL
// [[[NULL,2,NULL,4]], [[NULL,6,NULL], [8,9]]]
// [NULL, [[13],[14,15,16]], NULL]
// [[[]]]
// [NULL, [], NULL, [[]]]
auto valids = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2; });
auto valids2 = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i != 3; });
lcw col4{{
{{{{1, 2, 3, 4}, valids}}, {{{5, 6, 7}, valids}, {8, 9}}},
{{{{10, 11}, {12}}, {{13}, {14, 15, 16}}, {{17, 18}}}, valids},
{{lcw{lcw{}}, lcw{}, lcw{}, lcw{lcw{}}}, valids},
lcw{lcw{lcw{}}},
{{{{1, 2, 3, 4}, valids}}, {{{5, 6, 7}, valids}, {8, 9}}},
{{{{10, 11}, {12}}, {{13}, {14, 15, 16}}, {{17, 18}}}, valids},
lcw{lcw{lcw{}}},
{{lcw{lcw{}}, lcw{}, lcw{}, lcw{lcw{}}}, valids},
},
valids2};
// Struct column
auto ages_col = cudf::test::fixed_width_column_wrapper<int32_t>{
{48, 27, 25, 31, 351, 351, 29, 15}, {1, 1, 1, 1, 1, 0, 1, 1}};
auto col5 = cudf::test::structs_column_wrapper{{ages_col}, {1, 1, 1, 1, 0, 1, 1, 1}};
// Struct/List mixed column
// []
// [NULL, 2, NULL]
// [4, 5]
// NULL
// []
// [7, 8, 9]
// [10]
// [11, 12]
lcw land{{{}, {{1, 2, 3}, valids}, {4, 5}, {}, {}, {7, 8, 9}, {10}, {11, 12}}, valids2};
// []
// [[1, 2, 3], [], [4, 5], [], [0, 6, 0]]
// [[7, 8], []]
// [[]]
// [[]]
// [[], [], []]
// [[10]]
// [[13, 14], [15]]
lcw flats{lcw{},
{{1, 2, 3}, {}, {4, 5}, {}, {0, 6, 0}},
{{7, 8}, {}},
lcw{lcw{}},
lcw{lcw{}},
lcw{lcw{}, lcw{}, lcw{}},
{lcw{10}},
{{13, 14}, {15}}};
auto struct_1 = cudf::test::structs_column_wrapper{land, flats};
auto is_human = cudf::test::fixed_width_column_wrapper<bool>{
{true, true, false, false, true, false, true, false}};
auto col6 = cudf::test::structs_column_wrapper{{is_human, struct_1}};
auto expected = table_view({col0, col1, col2, col3, col4, col5, col6});
// auto expected_slice = expected;
auto expected_slice = cudf::slice(expected, {2, static_cast<cudf::size_type>(num_rows) - 1});
cudf::io::table_input_metadata expected_metadata(expected_slice);
expected_metadata.column_metadata[0].set_name("col_other");
expected_metadata.column_metadata[1].set_name("col_string");
expected_metadata.column_metadata[2].set_name("col_another");
expected_metadata.column_metadata[3].set_name("col_list");
expected_metadata.column_metadata[4].set_name("col_multi_level_list");
expected_metadata.column_metadata[5].set_name("col_struct");
expected_metadata.column_metadata[6].set_name("col_struct_list");
expected_metadata.column_metadata[6].child(0).set_name("human?");
expected_metadata.column_metadata[6].child(1).set_name("particulars");
expected_metadata.column_metadata[6].child(1).child(0).set_name("land");
expected_metadata.column_metadata[6].child(1).child(1).set_name("flats");
auto filepath = temp_env->get_temp_filepath("SlicedTable.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected_slice)
.write_v2_headers(is_v2)
.metadata(expected_metadata);
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUIVALENT(expected_slice, result.tbl->view());
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
TEST_P(ParquetV2Test, ListColumn)
{
auto const is_v2 = GetParam();
auto valids = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2; });
auto valids2 = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i != 3; });
using lcw = cudf::test::lists_column_wrapper<int32_t>;
// [NULL, 2, NULL]
// []
// [4, 5]
// NULL
lcw col0{{{{1, 2, 3}, valids}, {}, {4, 5}, {}}, valids2};
// [[1, 2, 3], [], [4, 5], [], [0, 6, 0]]
// [[7, 8]]
// []
// [[]]
lcw col1{{{1, 2, 3}, {}, {4, 5}, {}, {0, 6, 0}}, {{7, 8}}, lcw{}, lcw{lcw{}}};
// [[1, 2, 3], [], [4, 5], NULL, [0, 6, 0]]
// [[7, 8]]
// []
// [[]]
lcw col2{{{{1, 2, 3}, {}, {4, 5}, {}, {0, 6, 0}}, valids2}, {{7, 8}}, lcw{}, lcw{lcw{}}};
// [[1, 2, 3], [], [4, 5], NULL, [NULL, 6, NULL]]
// [[7, 8]]
// []
// [[]]
using dlcw = cudf::test::lists_column_wrapper<double>;
dlcw col3{{{{1., 2., 3.}, {}, {4., 5.}, {}, {{0., 6., 0.}, valids}}, valids2},
{{7., 8.}},
dlcw{},
dlcw{dlcw{}}};
// TODO: uint16_t lists are not read properly in parquet reader
// [[1, 2, 3], [], [4, 5], NULL, [0, 6, 0]]
// [[7, 8]]
// []
// NULL
// using ui16lcw = cudf::test::lists_column_wrapper<uint16_t>;
// cudf::test::lists_column_wrapper<uint16_t> col4{
// {{{{1, 2, 3}, {}, {4, 5}, {}, {0, 6, 0}}, valids2}, {{7, 8}}, ui16lcw{}, ui16lcw{ui16lcw{}}},
// valids2};
// [[1, 2, 3], [], [4, 5], NULL, [NULL, 6, NULL]]
// [[7, 8]]
// []
// NULL
lcw col5{
{{{{1, 2, 3}, {}, {4, 5}, {}, {{0, 6, 0}, valids}}, valids2}, {{7, 8}}, lcw{}, lcw{lcw{}}},
valids2};
using strlcw = cudf::test::lists_column_wrapper<cudf::string_view>;
cudf::test::lists_column_wrapper<cudf::string_view> col6{
{{"Monday", "Monday", "Friday"}, {}, {"Monday", "Friday"}, {}, {"Sunday", "Funday"}},
{{"bee", "sting"}},
strlcw{},
strlcw{strlcw{}}};
// [[[NULL,2,NULL,4]], [[NULL,6,NULL], [8,9]]]
// [NULL, [[13],[14,15,16]], NULL]
// [NULL, [], NULL, [[]]]
// NULL
lcw col7{{
{{{{1, 2, 3, 4}, valids}}, {{{5, 6, 7}, valids}, {8, 9}}},
{{{{10, 11}, {12}}, {{13}, {14, 15, 16}}, {{17, 18}}}, valids},
{{lcw{lcw{}}, lcw{}, lcw{}, lcw{lcw{}}}, valids},
lcw{lcw{lcw{}}},
},
valids2};
table_view expected({col0, col1, col2, col3, /* col4, */ col5, col6, col7});
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("col_list_int_0");
expected_metadata.column_metadata[1].set_name("col_list_list_int_1");
expected_metadata.column_metadata[2].set_name("col_list_list_int_nullable_2");
expected_metadata.column_metadata[3].set_name("col_list_list_nullable_double_nullable_3");
// expected_metadata.column_metadata[4].set_name("col_list_list_uint16_4");
expected_metadata.column_metadata[4].set_name("col_list_nullable_list_nullable_int_nullable_5");
expected_metadata.column_metadata[5].set_name("col_list_list_string_6");
expected_metadata.column_metadata[6].set_name("col_list_list_list_7");
auto filepath = temp_env->get_temp_filepath("ListColumn.parquet");
auto out_opts = cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.write_v2_headers(is_v2)
.metadata(expected_metadata)
.compression(cudf::io::compression_type::NONE);
cudf::io::write_parquet(out_opts);
auto in_opts = cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
TEST_F(ParquetWriterTest, MultiIndex)
{
constexpr auto num_rows = 100;
auto col0_data = random_values<int8_t>(num_rows);
auto col1_data = random_values<int16_t>(num_rows);
auto col2_data = random_values<int32_t>(num_rows);
auto col3_data = random_values<float>(num_rows);
auto col4_data = random_values<double>(num_rows);
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
column_wrapper<int8_t> col0{col0_data.begin(), col0_data.end(), validity};
column_wrapper<int16_t> col1{col1_data.begin(), col1_data.end(), validity};
column_wrapper<int32_t> col2{col2_data.begin(), col2_data.end(), validity};
column_wrapper<float> col3{col3_data.begin(), col3_data.end(), validity};
column_wrapper<double> col4{col4_data.begin(), col4_data.end(), validity};
auto expected = table_view{{col0, col1, col2, col3, col4}};
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("int8s");
expected_metadata.column_metadata[1].set_name("int16s");
expected_metadata.column_metadata[2].set_name("int32s");
expected_metadata.column_metadata[3].set_name("floats");
expected_metadata.column_metadata[4].set_name("doubles");
auto filepath = temp_env->get_temp_filepath("MultiIndex.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.metadata(expected_metadata)
.key_value_metadata(
{{{"pandas", "\"index_columns\": [\"int8s\", \"int16s\"], \"column1\": [\"int32s\"]"}}});
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath})
.use_pandas_metadata(true)
.columns({"int32s", "floats", "doubles"});
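// with use_pandas_metadata(true) the reader also returns the pandas index columns
// ("int8s" and "int16s") named in the key-value metadata, so the full table round-trips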
auto result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
TEST_F(ParquetWriterTest, BufferSource)
{
constexpr auto num_rows = 100 << 10;
auto const seq_col = random_values<int>(num_rows);
auto const validity =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
column_wrapper<int> col{seq_col.begin(), seq_col.end(), validity};
auto const expected = table_view{{col}};
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("col_other");
std::vector<char> out_buffer;
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info(&out_buffer), expected)
.metadata(expected_metadata);
cudf::io::write_parquet(out_opts);
// host buffer
{
cudf::io::parquet_reader_options in_opts = cudf::io::parquet_reader_options::builder(
cudf::io::source_info(out_buffer.data(), out_buffer.size()));
auto const result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
// device buffer
{
auto const d_input = cudf::detail::make_device_uvector_sync(
cudf::host_span<uint8_t const>{reinterpret_cast<uint8_t const*>(out_buffer.data()),
out_buffer.size()},
cudf::get_default_stream(),
rmm::mr::get_current_device_resource());
auto const d_buffer = cudf::device_span<std::byte const>(
reinterpret_cast<std::byte const*>(d_input.data()), d_input.size());
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info(d_buffer));
auto const result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
}
TEST_F(ParquetWriterTest, ManyFragments)
{
srand(31337);
auto const expected = create_random_fixed_table<int>(10, 6'000'000, false);
auto const filepath = temp_env->get_temp_filepath("ManyFragments.parquet");
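// an 8 KiB page size limit on a 6-million-row table forces the writer to split the data
// into a very large number of pages/fragments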
cudf::io::parquet_writer_options const args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, *expected)
.max_page_size_bytes(8 * 1024);
cudf::io::write_parquet(args);
cudf::io::parquet_reader_options const read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto const result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *expected);
}
TEST_F(ParquetWriterTest, NonNullable)
{
srand(31337);
auto expected = create_random_fixed_table<int>(9, 9, false);
auto filepath = temp_env->get_temp_filepath("NonNullable.parquet");
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, *expected);
cudf::io::write_parquet(args);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *expected);
}
TEST_F(ParquetWriterTest, Struct)
{
// Struct<is_human:bool, Struct<names:string, ages:int>>
auto names = {"Samuel Vimes",
"Carrot Ironfoundersson",
"Angua von Uberwald",
"Cheery Littlebottom",
"Detritus",
"Mr Slant"};
// The `names` column has all valid values.
auto names_col = cudf::test::strings_column_wrapper{names.begin(), names.end()};
auto ages_col =
cudf::test::fixed_width_column_wrapper<int32_t>{{48, 27, 25, 31, 351, 351}, {1, 1, 1, 1, 1, 0}};
auto struct_1 = cudf::test::structs_column_wrapper{{names_col, ages_col}, {1, 1, 1, 1, 0, 1}};
auto is_human_col = cudf::test::fixed_width_column_wrapper<bool>{
{true, true, false, false, false, false}, {1, 1, 0, 1, 1, 0}};
auto struct_2 =
cudf::test::structs_column_wrapper{{is_human_col, struct_1}, {0, 1, 1, 1, 1, 1}}.release();
auto expected = table_view({*struct_2});
auto filepath = temp_env->get_temp_filepath("Struct.parquet");
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected);
cudf::io::write_parquet(args);
cudf::io::parquet_reader_options read_args =
cudf::io::parquet_reader_options::builder(cudf::io::source_info(filepath));
cudf::io::read_parquet(read_args);
}
TEST_P(ParquetV2Test, StructOfList)
{
auto const is_v2 = GetParam();
// Struct<is_human:bool,
// Struct<weight:float,
// ages:int,
// land_unit:List<int>>,
// flats:List<List<int>>
// >
// >
auto weights_col = cudf::test::fixed_width_column_wrapper<float>{1.1, 2.4, 5.3, 8.0, 9.6, 6.9};
auto ages_col =
cudf::test::fixed_width_column_wrapper<int32_t>{{48, 27, 25, 31, 351, 351}, {1, 1, 1, 1, 1, 0}};
auto valids = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2; });
auto valids2 = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i != 3; });
using lcw = cudf::test::lists_column_wrapper<int32_t>;
// []
// [NULL, 2, NULL]
// [4, 5]
// NULL
// []
// [7, 8, 9]
lcw land_unit{{{}, {{1, 2, 3}, valids}, {4, 5}, {}, {}, {7, 8, 9}}, valids2};
// []
// [[1, 2, 3], [], [4, 5], [], [0, 6, 0]]
// [[7, 8], []]
// [[]]
// [[]]
// [[], [], []]
lcw flats{lcw{},
{{1, 2, 3}, {}, {4, 5}, {}, {0, 6, 0}},
{{7, 8}, {}},
lcw{lcw{}},
lcw{lcw{}},
lcw{lcw{}, lcw{}, lcw{}}};
auto struct_1 = cudf::test::structs_column_wrapper{{weights_col, ages_col, land_unit, flats},
{1, 1, 1, 1, 0, 1}};
auto is_human_col = cudf::test::fixed_width_column_wrapper<bool>{
{true, true, false, false, false, false}, {1, 1, 0, 1, 1, 0}};
auto struct_2 =
cudf::test::structs_column_wrapper{{is_human_col, struct_1}, {0, 1, 1, 1, 1, 1}}.release();
auto expected = table_view({*struct_2});
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("being");
expected_metadata.column_metadata[0].child(0).set_name("human?");
expected_metadata.column_metadata[0].child(1).set_name("particulars");
expected_metadata.column_metadata[0].child(1).child(0).set_name("weight");
expected_metadata.column_metadata[0].child(1).child(1).set_name("age");
expected_metadata.column_metadata[0].child(1).child(2).set_name("land_unit");
expected_metadata.column_metadata[0].child(1).child(3).set_name("flats");
auto filepath = temp_env->get_temp_filepath("StructOfList.parquet");
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.write_v2_headers(is_v2)
.metadata(expected_metadata);
cudf::io::write_parquet(args);
cudf::io::parquet_reader_options read_args =
cudf::io::parquet_reader_options::builder(cudf::io::source_info(filepath));
auto const result = cudf::io::read_parquet(read_args);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
TEST_P(ParquetV2Test, ListOfStruct)
{
auto const is_v2 = GetParam();
// List<Struct<is_human:bool,
// Struct<weight:float,
// ages:int,
// >
// >
// >
auto weight_col = cudf::test::fixed_width_column_wrapper<float>{1.1, 2.4, 5.3, 8.0, 9.6, 6.9};
auto ages_col =
cudf::test::fixed_width_column_wrapper<int32_t>{{48, 27, 25, 31, 351, 351}, {1, 1, 1, 1, 1, 0}};
auto struct_1 = cudf::test::structs_column_wrapper{{weight_col, ages_col}, {1, 1, 1, 1, 0, 1}};
auto is_human_col = cudf::test::fixed_width_column_wrapper<bool>{
{true, true, false, false, false, false}, {1, 1, 0, 1, 1, 0}};
auto struct_2 =
cudf::test::structs_column_wrapper{{is_human_col, struct_1}, {0, 1, 1, 1, 1, 1}}.release();
auto list_offsets_column =
cudf::test::fixed_width_column_wrapper<cudf::size_type>{0, 2, 5, 5, 6}.release();
auto num_list_rows = list_offsets_column->size() - 1;
auto list_col = cudf::make_lists_column(
num_list_rows, std::move(list_offsets_column), std::move(struct_2), 0, {});
auto expected = table_view({*list_col});
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("family");
expected_metadata.column_metadata[0].child(1).child(0).set_name("human?");
expected_metadata.column_metadata[0].child(1).child(1).set_name("particulars");
expected_metadata.column_metadata[0].child(1).child(1).child(0).set_name("weight");
expected_metadata.column_metadata[0].child(1).child(1).child(1).set_name("age");
auto filepath = temp_env->get_temp_filepath("ListOfStruct.parquet");
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.write_v2_headers(is_v2)
.metadata(expected_metadata);
cudf::io::write_parquet(args);
cudf::io::parquet_reader_options read_args =
cudf::io::parquet_reader_options::builder(cudf::io::source_info(filepath));
auto const result = cudf::io::read_parquet(read_args);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
// custom data sink that supports device writes. uses plain file io.
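// device_write_async stages the GPU buffer through pinned host memory before writing it
// to the file; the deferred future performs that work when it is waited on.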
class custom_test_data_sink : public cudf::io::data_sink {
public:
explicit custom_test_data_sink(std::string const& filepath)
{
outfile_.open(filepath, std::ios::out | std::ios::binary | std::ios::trunc);
CUDF_EXPECTS(outfile_.is_open(), "Cannot open output file");
}
virtual ~custom_test_data_sink() { flush(); }
void host_write(void const* data, size_t size) override
{
outfile_.write(static_cast<char const*>(data), size);
}
[[nodiscard]] bool supports_device_write() const override { return true; }
void device_write(void const* gpu_data, size_t size, rmm::cuda_stream_view stream) override
{
this->device_write_async(gpu_data, size, stream).get();
}
std::future<void> device_write_async(void const* gpu_data,
size_t size,
rmm::cuda_stream_view stream) override
{
return std::async(std::launch::deferred, [=] {
char* ptr = nullptr;
CUDF_CUDA_TRY(cudaMallocHost(&ptr, size));
CUDF_CUDA_TRY(cudaMemcpyAsync(ptr, gpu_data, size, cudaMemcpyDefault, stream.value()));
stream.synchronize();
outfile_.write(ptr, size);
CUDF_CUDA_TRY(cudaFreeHost(ptr));
});
}
void flush() override { outfile_.flush(); }
size_t bytes_written() override { return outfile_.tellp(); }
private:
std::ofstream outfile_;
};
TEST_F(ParquetWriterTest, CustomDataSink)
{
auto filepath = temp_env->get_temp_filepath("CustomDataSink.parquet");
custom_test_data_sink custom_sink(filepath);
srand(31337);
auto expected = create_random_fixed_table<int>(5, 10, false);
// write out using the custom sink
{
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{&custom_sink}, *expected);
cudf::io::write_parquet(args);
}
// write out using an in-memory (vector) sink
std::vector<char> buf_sink;
{
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{&buf_sink}, *expected);
cudf::io::write_parquet(args);
}
// read them back in and make sure everything matches
cudf::io::parquet_reader_options custom_args =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto custom_tbl = cudf::io::read_parquet(custom_args);
CUDF_TEST_EXPECT_TABLES_EQUAL(custom_tbl.tbl->view(), expected->view());
cudf::io::parquet_reader_options buf_args = cudf::io::parquet_reader_options::builder(
cudf::io::source_info{buf_sink.data(), buf_sink.size()});
auto buf_tbl = cudf::io::read_parquet(buf_args);
CUDF_TEST_EXPECT_TABLES_EQUAL(buf_tbl.tbl->view(), expected->view());
}
TEST_F(ParquetWriterTest, DeviceWriteLargeishFile)
{
auto filepath = temp_env->get_temp_filepath("DeviceWriteLargeishFile.parquet");
custom_test_data_sink custom_sink(filepath);
// exercises multiple rowgroups
srand(31337);
auto expected = create_random_fixed_table<int>(4, 4 * 1024 * 1024, false);
// write out using the custom sink (which uses device writes)
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{&custom_sink}, *expected);
cudf::io::write_parquet(args);
cudf::io::parquet_reader_options custom_args =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto custom_tbl = cudf::io::read_parquet(custom_args);
CUDF_TEST_EXPECT_TABLES_EQUAL(custom_tbl.tbl->view(), expected->view());
}
TEST_F(ParquetWriterTest, PartitionedWrite)
{
auto source = create_compressible_fixed_table<int>(16, 4 * 1024 * 1024, 1000, false);
auto filepath1 = temp_env->get_temp_filepath("PartitionedWrite1.parquet");
auto filepath2 = temp_env->get_temp_filepath("PartitionedWrite2.parquet");
auto partition1 = cudf::io::partition_info{10, 1024 * 1024};
auto partition2 = cudf::io::partition_info{20 * 1024 + 7, 3 * 1024 * 1024};
auto expected1 =
cudf::slice(*source, {partition1.start_row, partition1.start_row + partition1.num_rows});
auto expected2 =
cudf::slice(*source, {partition2.start_row, partition2.start_row + partition2.num_rows});
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(
cudf::io::sink_info(std::vector<std::string>{filepath1, filepath2}), *source)
.partitions({partition1, partition2})
.compression(cudf::io::compression_type::NONE);
cudf::io::write_parquet(args);
auto result1 = cudf::io::read_parquet(
cudf::io::parquet_reader_options::builder(cudf::io::source_info(filepath1)));
CUDF_TEST_EXPECT_TABLES_EQUAL(expected1, result1.tbl->view());
auto result2 = cudf::io::read_parquet(
cudf::io::parquet_reader_options::builder(cudf::io::source_info(filepath2)));
CUDF_TEST_EXPECT_TABLES_EQUAL(expected2, result2.tbl->view());
}
TEST_P(ParquetV2Test, PartitionedWriteEmptyPartitions)
{
auto const is_v2 = GetParam();
auto source = create_random_fixed_table<int>(4, 4, false);
auto filepath1 = temp_env->get_temp_filepath("PartitionedWrite1.parquet");
auto filepath2 = temp_env->get_temp_filepath("PartitionedWrite2.parquet");
auto partition1 = cudf::io::partition_info{1, 0};
auto partition2 = cudf::io::partition_info{1, 0};
auto expected1 =
cudf::slice(*source, {partition1.start_row, partition1.start_row + partition1.num_rows});
auto expected2 =
cudf::slice(*source, {partition2.start_row, partition2.start_row + partition2.num_rows});
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(
cudf::io::sink_info(std::vector<std::string>{filepath1, filepath2}), *source)
.partitions({partition1, partition2})
.write_v2_headers(is_v2)
.compression(cudf::io::compression_type::NONE);
cudf::io::write_parquet(args);
auto result1 = cudf::io::read_parquet(
cudf::io::parquet_reader_options::builder(cudf::io::source_info(filepath1)));
CUDF_TEST_EXPECT_TABLES_EQUAL(expected1, result1.tbl->view());
auto result2 = cudf::io::read_parquet(
cudf::io::parquet_reader_options::builder(cudf::io::source_info(filepath2)));
CUDF_TEST_EXPECT_TABLES_EQUAL(expected2, result2.tbl->view());
}
TEST_P(ParquetV2Test, PartitionedWriteEmptyColumns)
{
auto const is_v2 = GetParam();
auto source = create_random_fixed_table<int>(0, 4, false);
auto filepath1 = temp_env->get_temp_filepath("PartitionedWrite1.parquet");
auto filepath2 = temp_env->get_temp_filepath("PartitionedWrite2.parquet");
auto partition1 = cudf::io::partition_info{1, 0};
auto partition2 = cudf::io::partition_info{1, 0};
auto expected1 =
cudf::slice(*source, {partition1.start_row, partition1.start_row + partition1.num_rows});
auto expected2 =
cudf::slice(*source, {partition2.start_row, partition2.start_row + partition2.num_rows});
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(
cudf::io::sink_info(std::vector<std::string>{filepath1, filepath2}), *source)
.partitions({partition1, partition2})
.write_v2_headers(is_v2)
.compression(cudf::io::compression_type::NONE);
cudf::io::write_parquet(args);
auto result1 = cudf::io::read_parquet(
cudf::io::parquet_reader_options::builder(cudf::io::source_info(filepath1)));
CUDF_TEST_EXPECT_TABLES_EQUAL(expected1, result1.tbl->view());
auto result2 = cudf::io::read_parquet(
cudf::io::parquet_reader_options::builder(cudf::io::source_info(filepath2)));
CUDF_TEST_EXPECT_TABLES_EQUAL(expected2, result2.tbl->view());
}
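// writes a small random table with num_cols columns of type T to a temporary parquet
// file and returns its path; used below to build mismatched multi-source reads.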
template <typename T>
std::string create_parquet_file(int num_cols)
{
srand(31337);
auto const table = create_random_fixed_table<T>(num_cols, 10, true);
auto const filepath =
temp_env->get_temp_filepath(typeid(T).name() + std::to_string(num_cols) + ".parquet");
cudf::io::parquet_writer_options const out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, table->view());
cudf::io::write_parquet(out_opts);
return filepath;
}
TEST_F(ParquetWriterTest, MultipleMismatchedSources)
{
auto const int5file = create_parquet_file<int>(5);
{
auto const float5file = create_parquet_file<float>(5);
std::vector<std::string> files{int5file, float5file};
cudf::io::parquet_reader_options const read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{files});
EXPECT_THROW(cudf::io::read_parquet(read_opts), cudf::logic_error);
}
{
auto const int10file = create_parquet_file<int>(10);
std::vector<std::string> files{int5file, int10file};
cudf::io::parquet_reader_options const read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{files});
EXPECT_THROW(cudf::io::read_parquet(read_opts), cudf::logic_error);
}
}
TEST_F(ParquetWriterTest, Slice)
{
auto col =
cudf::test::fixed_width_column_wrapper<int>{{1, 2, 3, 4, 5}, {true, true, true, false, true}};
std::vector<cudf::size_type> indices{2, 5};
std::vector<cudf::column_view> result = cudf::slice(col, indices);
cudf::table_view tbl{result};
auto filepath = temp_env->get_temp_filepath("Slice.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, tbl);
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto read_table = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUIVALENT(read_table.tbl->view(), tbl);
}
TEST_F(ParquetChunkedWriterTest, SingleTable)
{
srand(31337);
auto table1 = create_random_fixed_table<int>(5, 5, true);
auto filepath = temp_env->get_temp_filepath("ChunkedSingle.parquet");
cudf::io::chunked_parquet_writer_options args =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::parquet_chunked_writer(args).write(*table1);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *table1);
}
TEST_F(ParquetChunkedWriterTest, SimpleTable)
{
srand(31337);
auto table1 = create_random_fixed_table<int>(5, 5, true);
auto table2 = create_random_fixed_table<int>(5, 5, true);
auto full_table = cudf::concatenate(std::vector<table_view>({*table1, *table2}));
auto filepath = temp_env->get_temp_filepath("ChunkedSimple.parquet");
cudf::io::chunked_parquet_writer_options args =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::parquet_chunked_writer(args).write(*table1).write(*table2);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *full_table);
}
TEST_F(ParquetChunkedWriterTest, LargeTables)
{
srand(31337);
auto table1 = create_random_fixed_table<int>(512, 4096, true);
auto table2 = create_random_fixed_table<int>(512, 8192, true);
auto full_table = cudf::concatenate(std::vector<table_view>({*table1, *table2}));
auto filepath = temp_env->get_temp_filepath("ChunkedLarge.parquet");
cudf::io::chunked_parquet_writer_options args =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{filepath});
auto md = cudf::io::parquet_chunked_writer(args).write(*table1).write(*table2).close();
ASSERT_EQ(md, nullptr);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *full_table);
}
TEST_F(ParquetChunkedWriterTest, ManyTables)
{
srand(31337);
std::vector<std::unique_ptr<table>> tables;
std::vector<table_view> table_views;
constexpr int num_tables = 96;
for (int idx = 0; idx < num_tables; idx++) {
auto tbl = create_random_fixed_table<int>(16, 64, true);
table_views.push_back(*tbl);
tables.push_back(std::move(tbl));
}
auto expected = cudf::concatenate(table_views);
auto filepath = temp_env->get_temp_filepath("ChunkedManyTables.parquet");
cudf::io::chunked_parquet_writer_options args =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::parquet_chunked_writer writer(args);
std::for_each(table_views.begin(), table_views.end(), [&writer](table_view const& tbl) {
writer.write(tbl);
});
auto md = writer.close({"dummy/path"});
ASSERT_NE(md, nullptr);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *expected);
}
TEST_F(ParquetChunkedWriterTest, Strings)
{
std::vector<std::unique_ptr<cudf::column>> cols;
bool mask1[] = {true, true, false, true, true, true, true};
std::vector<char const*> h_strings1{"four", "score", "and", "seven", "years", "ago", "abcdefgh"};
cudf::test::strings_column_wrapper strings1(h_strings1.begin(), h_strings1.end(), mask1);
cols.push_back(strings1.release());
cudf::table tbl1(std::move(cols));
bool mask2[] = {false, true, true, true, true, true, true};
std::vector<char const*> h_strings2{"ooooo", "ppppppp", "fff", "j", "cccc", "bbb", "zzzzzzzzzzz"};
cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), mask2);
cols.push_back(strings2.release());
cudf::table tbl2(std::move(cols));
auto expected = cudf::concatenate(std::vector<table_view>({tbl1, tbl2}));
auto filepath = temp_env->get_temp_filepath("ChunkedStrings.parquet");
cudf::io::chunked_parquet_writer_options args =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::parquet_chunked_writer(args).write(tbl1).write(tbl2);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *expected);
}
TEST_F(ParquetChunkedWriterTest, ListColumn)
{
auto valids = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2; });
auto valids2 = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i != 3; });
using lcw = cudf::test::lists_column_wrapper<int32_t>;
// COL0 (Same nullability) ====================
// [NULL, 2, NULL]
// []
// [4, 5]
// NULL
lcw col0_tbl0{{{{1, 2, 3}, valids}, {}, {4, 5}, {}}, valids2};
// [7, 8, 9]
// []
// [NULL, 11]
// NULL
lcw col0_tbl1{{{7, 8, 9}, {}, {{10, 11}, valids}, {}}, valids2};
// COL1 (Nullability different in different chunks, test of merging nullability in writer)
// [NULL, 2, NULL]
// []
// [4, 5]
// []
lcw col1_tbl0{{{1, 2, 3}, valids}, {}, {4, 5}, {}};
// [7, 8, 9]
// []
// [10, 11]
// NULL
lcw col1_tbl1{{{7, 8, 9}, {}, {10, 11}, {}}, valids2};
// COL2 (non-nested columns to test proper schema construction)
size_t num_rows_tbl0 = static_cast<cudf::column_view>(col0_tbl0).size();
size_t num_rows_tbl1 = static_cast<cudf::column_view>(col0_tbl1).size();
auto seq_col0 = random_values<int>(num_rows_tbl0);
auto seq_col1 = random_values<int>(num_rows_tbl1);
column_wrapper<int> col2_tbl0{seq_col0.begin(), seq_col0.end(), valids};
column_wrapper<int> col2_tbl1{seq_col1.begin(), seq_col1.end(), valids2};
auto tbl0 = table_view({col0_tbl0, col1_tbl0, col2_tbl0});
auto tbl1 = table_view({col0_tbl1, col1_tbl1, col2_tbl1});
auto expected = cudf::concatenate(std::vector<table_view>({tbl0, tbl1}));
auto filepath = temp_env->get_temp_filepath("ChunkedLists.parquet");
cudf::io::chunked_parquet_writer_options args =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::parquet_chunked_writer(args).write(tbl0).write(tbl1);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *expected);
}
TEST_F(ParquetChunkedWriterTest, ListOfStruct)
{
// Table 1
auto weight_1 = cudf::test::fixed_width_column_wrapper<float>{{57.5, 51.1, 15.3}};
auto ages_1 = cudf::test::fixed_width_column_wrapper<int32_t>{{30, 27, 5}};
auto struct_1_1 = cudf::test::structs_column_wrapper{weight_1, ages_1};
auto is_human_1 = cudf::test::fixed_width_column_wrapper<bool>{{true, true, false}};
auto struct_2_1 = cudf::test::structs_column_wrapper{{is_human_1, struct_1_1}};
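// Offsets {0, 2, 3, 3} produce three list rows with lengths 2, 1 and 0.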
auto list_offsets_column_1 =
cudf::test::fixed_width_column_wrapper<cudf::size_type>{0, 2, 3, 3}.release();
auto num_list_rows_1 = list_offsets_column_1->size() - 1;
auto list_col_1 = cudf::make_lists_column(
num_list_rows_1, std::move(list_offsets_column_1), struct_2_1.release(), 0, {});
auto table_1 = table_view({*list_col_1});
// Table 2
auto weight_2 = cudf::test::fixed_width_column_wrapper<float>{{1.1, -1.0, -1.0}};
auto ages_2 = cudf::test::fixed_width_column_wrapper<int32_t>{{31, 351, 351}, {1, 1, 0}};
auto struct_1_2 = cudf::test::structs_column_wrapper{{weight_2, ages_2}, {1, 0, 1}};
auto is_human_2 = cudf::test::fixed_width_column_wrapper<bool>{{false, false, false}, {1, 1, 0}};
auto struct_2_2 = cudf::test::structs_column_wrapper{{is_human_2, struct_1_2}};
auto list_offsets_column_2 =
cudf::test::fixed_width_column_wrapper<cudf::size_type>{0, 1, 2, 3}.release();
auto num_list_rows_2 = list_offsets_column_2->size() - 1;
auto list_col_2 = cudf::make_lists_column(
num_list_rows_2, std::move(list_offsets_column_2), struct_2_2.release(), 0, {});
auto table_2 = table_view({*list_col_2});
auto full_table = cudf::concatenate(std::vector<table_view>({table_1, table_2}));
cudf::io::table_input_metadata expected_metadata(table_1);
expected_metadata.column_metadata[0].set_name("family");
expected_metadata.column_metadata[0].child(1).set_nullability(false);
expected_metadata.column_metadata[0].child(1).child(0).set_name("human?");
expected_metadata.column_metadata[0].child(1).child(1).set_name("particulars");
expected_metadata.column_metadata[0].child(1).child(1).child(0).set_name("weight");
expected_metadata.column_metadata[0].child(1).child(1).child(1).set_name("age");
auto filepath = temp_env->get_temp_filepath("ChunkedListOfStruct.parquet");
cudf::io::chunked_parquet_writer_options args =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{filepath});
args.set_metadata(expected_metadata);
cudf::io::parquet_chunked_writer(args).write(table_1).write(table_2);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUIVALENT(*result.tbl, *full_table);
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
TEST_F(ParquetChunkedWriterTest, ListOfStructOfStructOfListOfList)
{
auto valids = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2; });
auto valids2 = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i != 3; });
using lcw = cudf::test::lists_column_wrapper<int32_t>;
// Table 1 ===========================
// []
// [NULL, 2, NULL]
// [4, 5]
// NULL
lcw land_1{{{}, {{1, 2, 3}, valids}, {4, 5}, {}}, valids2};
// []
// [[1, 2, 3], [], [4, 5], [], [0, 6, 0]]
// [[7, 8], []]
// [[]]
lcw flats_1{lcw{}, {{1, 2, 3}, {}, {4, 5}, {}, {0, 6, 0}}, {{7, 8}, {}}, lcw{lcw{}}};
auto weight_1 = cudf::test::fixed_width_column_wrapper<float>{{57.5, 51.1, 15.3, 1.1}};
auto ages_1 = cudf::test::fixed_width_column_wrapper<int32_t>{{30, 27, 5, 31}};
auto struct_1_1 = cudf::test::structs_column_wrapper{weight_1, ages_1, land_1, flats_1};
auto is_human_1 = cudf::test::fixed_width_column_wrapper<bool>{{true, true, false, false}};
auto struct_2_1 = cudf::test::structs_column_wrapper{{is_human_1, struct_1_1}};
auto list_offsets_column_1 =
cudf::test::fixed_width_column_wrapper<cudf::size_type>{0, 2, 3, 4}.release();
auto num_list_rows_1 = list_offsets_column_1->size() - 1;
auto list_col_1 = cudf::make_lists_column(
num_list_rows_1, std::move(list_offsets_column_1), struct_2_1.release(), 0, {});
auto table_1 = table_view({*list_col_1});
// Table 2 ===========================
// []
// [7, 8, 9]
lcw land_2{{}, {7, 8, 9}};
// [[]]
// [[], [], []]
lcw flats_2{lcw{lcw{}}, lcw{lcw{}, lcw{}, lcw{}}};
auto weight_2 = cudf::test::fixed_width_column_wrapper<float>{{-1.0, -1.0}};
auto ages_2 = cudf::test::fixed_width_column_wrapper<int32_t>{{351, 351}, {1, 0}};
auto struct_1_2 = cudf::test::structs_column_wrapper{{weight_2, ages_2, land_2, flats_2}, {0, 1}};
auto is_human_2 = cudf::test::fixed_width_column_wrapper<bool>{{false, false}, {1, 0}};
auto struct_2_2 = cudf::test::structs_column_wrapper{{is_human_2, struct_1_2}};
auto list_offsets_column_2 =
cudf::test::fixed_width_column_wrapper<cudf::size_type>{0, 1, 2}.release();
auto num_list_rows_2 = list_offsets_column_2->size() - 1;
auto list_col_2 = cudf::make_lists_column(
num_list_rows_2, std::move(list_offsets_column_2), struct_2_2.release(), 0, {});
auto table_2 = table_view({*list_col_2});
auto full_table = cudf::concatenate(std::vector<table_view>({table_1, table_2}));
cudf::io::table_input_metadata expected_metadata(table_1);
expected_metadata.column_metadata[0].set_name("family");
expected_metadata.column_metadata[0].child(1).set_nullability(false);
expected_metadata.column_metadata[0].child(1).child(0).set_name("human?");
expected_metadata.column_metadata[0].child(1).child(1).set_name("particulars");
expected_metadata.column_metadata[0].child(1).child(1).child(0).set_name("weight");
expected_metadata.column_metadata[0].child(1).child(1).child(1).set_name("age");
expected_metadata.column_metadata[0].child(1).child(1).child(2).set_name("land_unit");
expected_metadata.column_metadata[0].child(1).child(1).child(3).set_name("flats");
auto filepath = temp_env->get_temp_filepath("ListOfStructOfStructOfListOfList.parquet");
cudf::io::chunked_parquet_writer_options args =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{filepath});
args.set_metadata(expected_metadata);
cudf::io::parquet_chunked_writer(args).write(table_1).write(table_2);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUIVALENT(*result.tbl, *full_table);
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
// The input schema specifically marks struct_2 as non-nullable across chunked calls.
auto result_parent_list = result.tbl->get_column(0);
auto result_struct_2 = result_parent_list.child(cudf::lists_column_view::child_column_index);
EXPECT_EQ(result_struct_2.nullable(), false);
}
TEST_F(ParquetChunkedWriterTest, MismatchedTypes)
{
srand(31337);
auto table1 = create_random_fixed_table<int>(4, 4, true);
auto table2 = create_random_fixed_table<float>(4, 4, true);
auto filepath = temp_env->get_temp_filepath("ChunkedMismatchedTypes.parquet");
cudf::io::chunked_parquet_writer_options args =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::parquet_chunked_writer writer(args);
writer.write(*table1);
EXPECT_THROW(writer.write(*table2), cudf::logic_error);
writer.close();
}
TEST_F(ParquetChunkedWriterTest, ChunkedWriteAfterClosing)
{
srand(31337);
auto table = create_random_fixed_table<int>(4, 4, true);
auto filepath = temp_env->get_temp_filepath("ChunkedWriteAfterClosing.parquet");
cudf::io::chunked_parquet_writer_options args =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::parquet_chunked_writer writer(args);
writer.write(*table).close();
EXPECT_THROW(writer.write(*table), cudf::logic_error);
}
TEST_F(ParquetChunkedWriterTest, ReadingUnclosedFile)
{
srand(31337);
auto table = create_random_fixed_table<int>(4, 4, true);
auto filepath = temp_env->get_temp_filepath("ReadingUnclosedFile.parquet");
cudf::io::chunked_parquet_writer_options args =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::parquet_chunked_writer writer(args);
writer.write(*table);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
EXPECT_THROW(cudf::io::read_parquet(read_opts), cudf::logic_error);
}
TEST_F(ParquetChunkedWriterTest, MismatchedStructure)
{
srand(31337);
auto table1 = create_random_fixed_table<int>(4, 4, true);
auto table2 = create_random_fixed_table<float>(3, 4, true);
auto filepath = temp_env->get_temp_filepath("ChunkedMismatchedStructure.parquet");
cudf::io::chunked_parquet_writer_options args =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::parquet_chunked_writer writer(args);
writer.write(*table1);
EXPECT_THROW(writer.write(*table2), cudf::logic_error);
writer.close();
}
TEST_F(ParquetChunkedWriterTest, MismatchedStructureList)
{
auto valids = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2; });
auto valids2 = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i != 3; });
using lcw = cudf::test::lists_column_wrapper<int32_t>;
// COL0 (mismatched depth) ====================
// [NULL, 2, NULL]
// []
// [4, 5]
// NULL
lcw col00{{{{1, 2, 3}, valids}, {}, {4, 5}, {}}, valids2};
// [[1, 2, 3], [], [4, 5], [], [0, 6, 0]]
// [[7, 8]]
// []
// [[]]
lcw col01{{{1, 2, 3}, {}, {4, 5}, {}, {0, 6, 0}}, {{7, 8}}, lcw{}, lcw{lcw{}}};
// COL1 (non-nested columns to test proper schema construction)
size_t num_rows = static_cast<cudf::column_view>(col00).size();
auto seq_col0 = random_values<int>(num_rows);
auto seq_col1 = random_values<int>(num_rows);
column_wrapper<int> col10{seq_col0.begin(), seq_col0.end(), valids};
column_wrapper<int> col11{seq_col1.begin(), seq_col1.end(), valids2};
auto tbl0 = table_view({col00, col10});
auto tbl1 = table_view({col01, col11});
auto filepath = temp_env->get_temp_filepath("ChunkedLists.parquet");
cudf::io::chunked_parquet_writer_options args =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::parquet_chunked_writer writer(args);
writer.write(tbl0);
EXPECT_THROW(writer.write(tbl1), cudf::logic_error);
}
TEST_F(ParquetChunkedWriterTest, DifferentNullability)
{
srand(31337);
auto table1 = create_random_fixed_table<int>(5, 5, true);
auto table2 = create_random_fixed_table<int>(5, 5, false);
auto full_table = cudf::concatenate(std::vector<table_view>({*table1, *table2}));
auto filepath = temp_env->get_temp_filepath("ChunkedNullable.parquet");
cudf::io::chunked_parquet_writer_options args =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::parquet_chunked_writer(args).write(*table1).write(*table2);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *full_table);
}
TEST_F(ParquetChunkedWriterTest, DifferentNullabilityStruct)
{
// Struct<is_human:bool (non-nullable),
//   Struct<weight:float, age:int> (nullable)
// > (non-nullable)
// Table 1: is_human and struct_1 are non-nullable but should be nullable when read back.
auto weight_1 = cudf::test::fixed_width_column_wrapper<float>{{57.5, 51.1, 15.3}};
auto ages_1 = cudf::test::fixed_width_column_wrapper<int32_t>{{30, 27, 5}};
auto struct_1_1 = cudf::test::structs_column_wrapper{weight_1, ages_1};
auto is_human_1 = cudf::test::fixed_width_column_wrapper<bool>{{true, true, false}};
auto struct_2_1 = cudf::test::structs_column_wrapper{{is_human_1, struct_1_1}};
auto table_1 = cudf::table_view({struct_2_1});
// Table 2: struct_1 and is_human are nullable now, so if we hadn't assumed the worst case
// (nullable) when writing table_1, its pages would have been written incorrectly.
auto weight_2 = cudf::test::fixed_width_column_wrapper<float>{{1.1, -1.0, -1.0}};
auto ages_2 = cudf::test::fixed_width_column_wrapper<int32_t>{{31, 351, 351}, {1, 1, 0}};
auto struct_1_2 = cudf::test::structs_column_wrapper{{weight_2, ages_2}, {1, 0, 1}};
auto is_human_2 = cudf::test::fixed_width_column_wrapper<bool>{{false, false, false}, {1, 1, 0}};
auto struct_2_2 = cudf::test::structs_column_wrapper{{is_human_2, struct_1_2}};
auto table_2 = cudf::table_view({struct_2_2});
auto full_table = cudf::concatenate(std::vector<table_view>({table_1, table_2}));
cudf::io::table_input_metadata expected_metadata(table_1);
expected_metadata.column_metadata[0].set_name("being");
expected_metadata.column_metadata[0].child(0).set_name("human?");
expected_metadata.column_metadata[0].child(1).set_name("particulars");
expected_metadata.column_metadata[0].child(1).child(0).set_name("weight");
expected_metadata.column_metadata[0].child(1).child(1).set_name("age");
auto filepath = temp_env->get_temp_filepath("ChunkedNullableStruct.parquet");
cudf::io::chunked_parquet_writer_options args =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{filepath});
args.set_metadata(expected_metadata);
cudf::io::parquet_chunked_writer(args).write(table_1).write(table_2);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUIVALENT(*result.tbl, *full_table);
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
TEST_F(ParquetChunkedWriterTest, ForcedNullability)
{
srand(31337);
auto table1 = create_random_fixed_table<int>(5, 5, false);
auto table2 = create_random_fixed_table<int>(5, 5, false);
auto full_table = cudf::concatenate(std::vector<table_view>({*table1, *table2}));
auto filepath = temp_env->get_temp_filepath("ChunkedNoNullable.parquet");
cudf::io::table_input_metadata metadata(*table1);
// In the absence of prescribed per-column nullability in metadata, the writer assumes the worst
// and considers all columns nullable. However, cudf::concatenate will not add null masks when
// none of the input columns are nullable. To get the expected result, we tell the writer the
// nullability of all columns in advance.
for (auto& col_meta : metadata.column_metadata) {
col_meta.set_nullability(false);
}
cudf::io::chunked_parquet_writer_options args =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{filepath})
.metadata(std::move(metadata));
cudf::io::parquet_chunked_writer(args).write(*table1).write(*table2);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *full_table);
}
TEST_F(ParquetChunkedWriterTest, ForcedNullabilityList)
{
srand(31337);
auto valids = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2; });
auto valids2 = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i != 3; });
using lcw = cudf::test::lists_column_wrapper<int32_t>;
// COL0 ====================
// [1, 2, 3]
// []
// [4, 5]
// NULL
lcw col00{{{1, 2, 3}, {}, {4, 5}, {}}, valids2};
// [7]
// []
// [8, 9, 10, 11]
// NULL
lcw col01{{{7}, {}, {8, 9, 10, 11}, {}}, valids2};
// COL1 (non-nested columns to test proper schema construction)
size_t num_rows = static_cast<cudf::column_view>(col00).size();
auto seq_col0 = random_values<int>(num_rows);
auto seq_col1 = random_values<int>(num_rows);
column_wrapper<int> col10{seq_col0.begin(), seq_col0.end(), valids};
column_wrapper<int> col11{seq_col1.begin(), seq_col1.end(), valids2};
auto table1 = table_view({col00, col10});
auto table2 = table_view({col01, col11});
auto full_table = cudf::concatenate(std::vector<table_view>({table1, table2}));
cudf::io::table_input_metadata metadata(table1);
metadata.column_metadata[0].set_nullability(true); // List is nullable at first (root) level
metadata.column_metadata[0].child(1).set_nullability(
false); // non-nullable at second (leaf) level
metadata.column_metadata[1].set_nullability(true);
auto filepath = temp_env->get_temp_filepath("ChunkedListNullable.parquet");
cudf::io::chunked_parquet_writer_options args =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{filepath})
.metadata(std::move(metadata));
cudf::io::parquet_chunked_writer(args).write(table1).write(table2);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *full_table);
}
TEST_F(ParquetChunkedWriterTest, ForcedNullabilityStruct)
{
// Struct<is_human:bool (non-nullable),
//   Struct<weight:float, age:int> (nullable)
// > (non-nullable)
// Table 1: is_human and struct_2 are non-nullable and should stay that way when read back.
auto weight_1 = cudf::test::fixed_width_column_wrapper<float>{{57.5, 51.1, 15.3}};
auto ages_1 = cudf::test::fixed_width_column_wrapper<int32_t>{{30, 27, 5}};
auto struct_1_1 = cudf::test::structs_column_wrapper{weight_1, ages_1};
auto is_human_1 = cudf::test::fixed_width_column_wrapper<bool>{{true, true, false}};
auto struct_2_1 = cudf::test::structs_column_wrapper{{is_human_1, struct_1_1}};
auto table_1 = cudf::table_view({struct_2_1});
auto weight_2 = cudf::test::fixed_width_column_wrapper<float>{{1.1, -1.0, -1.0}};
auto ages_2 = cudf::test::fixed_width_column_wrapper<int32_t>{{31, 351, 351}, {1, 1, 0}};
auto struct_1_2 = cudf::test::structs_column_wrapper{{weight_2, ages_2}, {1, 0, 1}};
auto is_human_2 = cudf::test::fixed_width_column_wrapper<bool>{{false, false, false}};
auto struct_2_2 = cudf::test::structs_column_wrapper{{is_human_2, struct_1_2}};
auto table_2 = cudf::table_view({struct_2_2});
auto full_table = cudf::concatenate(std::vector<table_view>({table_1, table_2}));
cudf::io::table_input_metadata expected_metadata(table_1);
expected_metadata.column_metadata[0].set_name("being").set_nullability(false);
expected_metadata.column_metadata[0].child(0).set_name("human?").set_nullability(false);
expected_metadata.column_metadata[0].child(1).set_name("particulars");
expected_metadata.column_metadata[0].child(1).child(0).set_name("weight");
expected_metadata.column_metadata[0].child(1).child(1).set_name("age");
auto filepath = temp_env->get_temp_filepath("ChunkedNullableStruct.parquet");
cudf::io::chunked_parquet_writer_options args =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{filepath});
args.set_metadata(expected_metadata);
cudf::io::parquet_chunked_writer(args).write(table_1).write(table_2);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *full_table);
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
TEST_F(ParquetChunkedWriterTest, ReadRowGroups)
{
srand(31337);
auto table1 = create_random_fixed_table<int>(5, 5, true);
auto table2 = create_random_fixed_table<int>(5, 5, true);
auto full_table = cudf::concatenate(std::vector<table_view>({*table2, *table1, *table2}));
auto filepath = temp_env->get_temp_filepath("ChunkedRowGroups.parquet");
cudf::io::chunked_parquet_writer_options args =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{filepath});
{
cudf::io::parquet_chunked_writer(args).write(*table1).write(*table2);
}
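// Each write() above produced one row group, so row-group selection {1, 0, 1} reads
// table2, table1, table2 -- matching full_table.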
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath})
.row_groups({{1, 0, 1}});
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *full_table);
}
TEST_F(ParquetChunkedWriterTest, ReadRowGroupsError)
{
srand(31337);
auto table1 = create_random_fixed_table<int>(5, 5, true);
auto filepath = temp_env->get_temp_filepath("ChunkedRowGroupsError.parquet");
cudf::io::chunked_parquet_writer_options args =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::parquet_chunked_writer(args).write(*table1);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath}).row_groups({{0, 1}});
EXPECT_THROW(cudf::io::read_parquet(read_opts), cudf::logic_error);
read_opts.set_row_groups({{-1}});
EXPECT_THROW(cudf::io::read_parquet(read_opts), cudf::logic_error);
read_opts.set_row_groups({{0}, {0}});
EXPECT_THROW(cudf::io::read_parquet(read_opts), cudf::logic_error);
}
TEST_F(ParquetWriterTest, DecimalWrite)
{
constexpr cudf::size_type num_rows = 500;
auto seq_col0 = random_values<int32_t>(num_rows);
auto seq_col1 = random_values<int64_t>(num_rows);
auto valids =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2 == 0; });
auto col0 = cudf::test::fixed_point_column_wrapper<int32_t>{
seq_col0.begin(), seq_col0.end(), valids, numeric::scale_type{5}};
auto col1 = cudf::test::fixed_point_column_wrapper<int64_t>{
seq_col1.begin(), seq_col1.end(), valids, numeric::scale_type{-9}};
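// col0 is a decimal32 column with scale 5; col1 is a decimal64 column with scale -9.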
auto table = table_view({col0, col1});
auto filepath = temp_env->get_temp_filepath("DecimalWrite.parquet");
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, table);
cudf::io::table_input_metadata expected_metadata(table);
// verify failure if too small a precision is given
expected_metadata.column_metadata[0].set_decimal_precision(7);
expected_metadata.column_metadata[1].set_decimal_precision(1);
args.set_metadata(expected_metadata);
EXPECT_THROW(cudf::io::write_parquet(args), cudf::logic_error);
// verify success if equal precision is given
expected_metadata.column_metadata[0].set_decimal_precision(7);
expected_metadata.column_metadata[1].set_decimal_precision(9);
args.set_metadata(std::move(expected_metadata));
cudf::io::write_parquet(args);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, table);
}
TYPED_TEST(ParquetChunkedWriterNumericTypeTest, UnalignedSize)
{
// write out two 31 row tables and make sure they get
// read back with all their validity bits in the right place
using T = TypeParam;
int num_els = 31;
std::vector<std::unique_ptr<cudf::column>> cols;
bool mask[] = {false, true, true, true, true, true, true, true, true, true, true,
true, true, true, true, true, true, true, true, true, true, true,
true, true, true, true, true, true, true, true, true};
T c1a[num_els];
std::fill(c1a, c1a + num_els, static_cast<T>(5));
T c1b[num_els];
std::fill(c1b, c1b + num_els, static_cast<T>(6));
column_wrapper<T> c1a_w(c1a, c1a + num_els, mask);
column_wrapper<T> c1b_w(c1b, c1b + num_els, mask);
cols.push_back(c1a_w.release());
cols.push_back(c1b_w.release());
cudf::table tbl1(std::move(cols));
T c2a[num_els];
std::fill(c2a, c2a + num_els, static_cast<T>(8));
T c2b[num_els];
std::fill(c2b, c2b + num_els, static_cast<T>(9));
column_wrapper<T> c2a_w(c2a, c2a + num_els, mask);
column_wrapper<T> c2b_w(c2b, c2b + num_els, mask);
cols.push_back(c2a_w.release());
cols.push_back(c2b_w.release());
cudf::table tbl2(std::move(cols));
auto expected = cudf::concatenate(std::vector<table_view>({tbl1, tbl2}));
auto filepath = temp_env->get_temp_filepath("ChunkedUnalignedSize.parquet");
cudf::io::chunked_parquet_writer_options args =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::parquet_chunked_writer(args).write(tbl1).write(tbl2);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *expected);
}
TYPED_TEST(ParquetChunkedWriterNumericTypeTest, UnalignedSize2)
{
// write out two 33 row tables and make sure they get
// read back with all their validity bits in the right place
using T = TypeParam;
int num_els = 33;
std::vector<std::unique_ptr<cudf::column>> cols;
bool mask[] = {false, true, true, true, true, true, true, true, true, true, true,
true, true, true, true, true, true, true, true, true, true, true,
true, true, true, true, true, true, true, true, true, true, true};
T c1a[num_els];
std::fill(c1a, c1a + num_els, static_cast<T>(5));
T c1b[num_els];
std::fill(c1b, c1b + num_els, static_cast<T>(6));
column_wrapper<T> c1a_w(c1a, c1a + num_els, mask);
column_wrapper<T> c1b_w(c1b, c1b + num_els, mask);
cols.push_back(c1a_w.release());
cols.push_back(c1b_w.release());
cudf::table tbl1(std::move(cols));
T c2a[num_els];
std::fill(c2a, c2a + num_els, static_cast<T>(8));
T c2b[num_els];
std::fill(c2b, c2b + num_els, static_cast<T>(9));
column_wrapper<T> c2a_w(c2a, c2a + num_els, mask);
column_wrapper<T> c2b_w(c2b, c2b + num_els, mask);
cols.push_back(c2a_w.release());
cols.push_back(c2b_w.release());
cudf::table tbl2(std::move(cols));
auto expected = cudf::concatenate(std::vector<table_view>({tbl1, tbl2}));
auto filepath = temp_env->get_temp_filepath("ChunkedUnalignedSize2.parquet");
cudf::io::chunked_parquet_writer_options args =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{filepath});
cudf::io::parquet_chunked_writer(args).write(tbl1).write(tbl2);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *expected);
}
// custom mem mapped data sink that optionally supports device writes
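// Device writes (when enabled) are staged through pinned host memory: device_write_async copies
// the GPU buffer into a cudaMallocHost allocation, synchronizes the stream, and forwards the
// bytes to the wrapped host data_sink.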
template <bool supports_device_writes>
class custom_test_memmap_sink : public cudf::io::data_sink {
public:
explicit custom_test_memmap_sink(std::vector<char>* mm_writer_buf)
{
mm_writer = cudf::io::data_sink::create(mm_writer_buf);
}
virtual ~custom_test_memmap_sink() { mm_writer->flush(); }
void host_write(void const* data, size_t size) override { mm_writer->host_write(data, size); }
[[nodiscard]] bool supports_device_write() const override { return supports_device_writes; }
void device_write(void const* gpu_data, size_t size, rmm::cuda_stream_view stream) override
{
this->device_write_async(gpu_data, size, stream).get();
}
std::future<void> device_write_async(void const* gpu_data,
size_t size,
rmm::cuda_stream_view stream) override
{
return std::async(std::launch::deferred, [=] {
char* ptr = nullptr;
CUDF_CUDA_TRY(cudaMallocHost(&ptr, size));
CUDF_CUDA_TRY(cudaMemcpyAsync(ptr, gpu_data, size, cudaMemcpyDefault, stream.value()));
stream.synchronize();
mm_writer->host_write(ptr, size);
CUDF_CUDA_TRY(cudaFreeHost(ptr));
});
}
void flush() override { mm_writer->flush(); }
size_t bytes_written() override { return mm_writer->bytes_written(); }
private:
std::unique_ptr<data_sink> mm_writer;
};
TEST_F(ParquetWriterStressTest, LargeTableWeakCompression)
{
std::vector<char> mm_buf;
mm_buf.reserve(4 * 1024 * 1024 * 16);
custom_test_memmap_sink<false> custom_sink(&mm_buf);
// exercises multiple rowgroups
srand(31337);
auto expected = create_random_fixed_table<int>(16, 4 * 1024 * 1024, false);
// write out using the custom sink (which uses device writes)
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{&custom_sink}, *expected);
cudf::io::write_parquet(args);
cudf::io::parquet_reader_options custom_args =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{mm_buf.data(), mm_buf.size()});
auto custom_tbl = cudf::io::read_parquet(custom_args);
CUDF_TEST_EXPECT_TABLES_EQUAL(custom_tbl.tbl->view(), expected->view());
}
TEST_F(ParquetWriterStressTest, LargeTableGoodCompression)
{
std::vector<char> mm_buf;
mm_buf.reserve(4 * 1024 * 1024 * 16);
custom_test_memmap_sink<false> custom_sink(&mm_buf);
// exercises multiple rowgroups
srand(31337);
auto expected = create_compressible_fixed_table<int>(16, 4 * 1024 * 1024, 128 * 1024, false);
// write out using the custom sink (which uses device writes)
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{&custom_sink}, *expected);
cudf::io::write_parquet(args);
cudf::io::parquet_reader_options custom_args =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{mm_buf.data(), mm_buf.size()});
auto custom_tbl = cudf::io::read_parquet(custom_args);
CUDF_TEST_EXPECT_TABLES_EQUAL(custom_tbl.tbl->view(), expected->view());
}
TEST_F(ParquetWriterStressTest, LargeTableWithValids)
{
std::vector<char> mm_buf;
mm_buf.reserve(4 * 1024 * 1024 * 16);
custom_test_memmap_sink<false> custom_sink(&mm_buf);
// exercises multiple rowgroups
srand(31337);
auto expected = create_compressible_fixed_table<int>(16, 4 * 1024 * 1024, 6, true);
// write out using the custom sink (which uses device writes)
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{&custom_sink}, *expected);
cudf::io::write_parquet(args);
cudf::io::parquet_reader_options custom_args =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{mm_buf.data(), mm_buf.size()});
auto custom_tbl = cudf::io::read_parquet(custom_args);
CUDF_TEST_EXPECT_TABLES_EQUAL(custom_tbl.tbl->view(), expected->view());
}
TEST_F(ParquetWriterStressTest, DeviceWriteLargeTableWeakCompression)
{
std::vector<char> mm_buf;
mm_buf.reserve(4 * 1024 * 1024 * 16);
custom_test_memmap_sink<true> custom_sink(&mm_buf);
// exercises multiple rowgroups
srand(31337);
auto expected = create_random_fixed_table<int>(16, 4 * 1024 * 1024, false);
// write out using the custom sink (which uses device writes)
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{&custom_sink}, *expected);
cudf::io::write_parquet(args);
cudf::io::parquet_reader_options custom_args =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{mm_buf.data(), mm_buf.size()});
auto custom_tbl = cudf::io::read_parquet(custom_args);
CUDF_TEST_EXPECT_TABLES_EQUAL(custom_tbl.tbl->view(), expected->view());
}
TEST_F(ParquetWriterStressTest, DeviceWriteLargeTableGoodCompression)
{
std::vector<char> mm_buf;
mm_buf.reserve(4 * 1024 * 1024 * 16);
custom_test_memmap_sink<true> custom_sink(&mm_buf);
// exercises multiple rowgroups
srand(31337);
auto expected = create_compressible_fixed_table<int>(16, 4 * 1024 * 1024, 128 * 1024, false);
// write out using the custom sink (which uses device writes)
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{&custom_sink}, *expected);
cudf::io::write_parquet(args);
cudf::io::parquet_reader_options custom_args =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{mm_buf.data(), mm_buf.size()});
auto custom_tbl = cudf::io::read_parquet(custom_args);
CUDF_TEST_EXPECT_TABLES_EQUAL(custom_tbl.tbl->view(), expected->view());
}
TEST_F(ParquetWriterStressTest, DeviceWriteLargeTableWithValids)
{
std::vector<char> mm_buf;
mm_buf.reserve(4 * 1024 * 1024 * 16);
custom_test_memmap_sink<true> custom_sink(&mm_buf);
// exercises multiple rowgroups
srand(31337);
auto expected = create_compressible_fixed_table<int>(16, 4 * 1024 * 1024, 6, true);
// write out using the custom sink (which uses device writes)
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{&custom_sink}, *expected);
cudf::io::write_parquet(args);
cudf::io::parquet_reader_options custom_args =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{mm_buf.data(), mm_buf.size()});
auto custom_tbl = cudf::io::read_parquet(custom_args);
CUDF_TEST_EXPECT_TABLES_EQUAL(custom_tbl.tbl->view(), expected->view());
}
TEST_F(ParquetReaderTest, UserBounds)
{
// trying to read more rows than there are should result in
// receiving the properly capped # of rows
{
srand(31337);
auto expected = create_random_fixed_table<int>(4, 4, false);
auto filepath = temp_env->get_temp_filepath("TooManyRows.parquet");
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, *expected);
cudf::io::write_parquet(args);
// attempt to read more rows than there actually are
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath}).num_rows(16);
auto result = cudf::io::read_parquet(read_opts);
// we should only get back 4 rows
EXPECT_EQ(result.tbl->view().column(0).size(), 4);
}
// trying to read past the end of the # of actual rows should result
// in empty columns.
{
srand(31337);
auto expected = create_random_fixed_table<int>(4, 4, false);
auto filepath = temp_env->get_temp_filepath("PastBounds.parquet");
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, *expected);
cudf::io::write_parquet(args);
// attempt to read rows past the end of the actual data
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath}).skip_rows(4);
auto result = cudf::io::read_parquet(read_opts);
// we should get empty columns back
EXPECT_EQ(result.tbl->view().num_columns(), 4);
EXPECT_EQ(result.tbl->view().column(0).size(), 0);
}
// trying to read 0 rows should result in empty columns
{
srand(31337);
auto expected = create_random_fixed_table<int>(4, 4, false);
auto filepath = temp_env->get_temp_filepath("ZeroRows.parquet");
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, *expected);
cudf::io::write_parquet(args);
// attempt to read 0 rows
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath}).num_rows(0);
auto result = cudf::io::read_parquet(read_opts);
EXPECT_EQ(result.tbl->view().num_columns(), 4);
EXPECT_EQ(result.tbl->view().column(0).size(), 0);
}
// trying to read 0 rows past the end of the # of actual rows should result
// in empty columns.
{
srand(31337);
auto expected = create_random_fixed_table<int>(4, 4, false);
auto filepath = temp_env->get_temp_filepath("ZeroRowsPastBounds.parquet");
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, *expected);
cudf::io::write_parquet(args);
// attempt to read 0 rows past the end of the actual data
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath})
.skip_rows(4)
.num_rows(0);
auto result = cudf::io::read_parquet(read_opts);
// we should get empty columns back
EXPECT_EQ(result.tbl->view().num_columns(), 4);
EXPECT_EQ(result.tbl->view().column(0).size(), 0);
}
}
TEST_F(ParquetReaderTest, UserBoundsWithNulls)
{
// clang-format off
cudf::test::fixed_width_column_wrapper<float> col{{1,1,1,1,1,1,1,1, 2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3, 4,4,4,4,4,4,4,4, 5,5,5,5,5,5,5,5, 6,6,6,6,6,6,6,6, 7,7,7,7,7,7,7,7, 8,8,8,8,8,8,8,8}
,{1,1,1,0,0,0,1,1, 1,1,1,1,1,1,1,1, 0,0,0,0,0,0,0,0, 1,1,1,1,1,1,0,0, 1,0,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,0}};
// clang-format on
cudf::table_view tbl({col});
auto filepath = temp_env->get_temp_filepath("UserBoundsWithNulls.parquet");
cudf::io::parquet_writer_options out_args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, tbl);
cudf::io::write_parquet(out_args);
// skip_rows / num_rows
// clang-format off
std::vector<std::pair<int, int>> params{ {-1, -1}, {1, 3}, {3, -1},
{31, -1}, {32, -1}, {33, -1},
{31, 5}, {32, 5}, {33, 5},
{-1, 7}, {-1, 31}, {-1, 32}, {-1, 33},
{62, -1}, {63, -1},
{62, 2}, {63, 1}};
// clang-format on
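// A value of -1 leaves the corresponding reader option unset (read from the start / to the end);
// the expected slice below uses the same convention.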
for (auto p : params) {
cudf::io::parquet_reader_options read_args =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
if (p.first >= 0) { read_args.set_skip_rows(p.first); }
if (p.second >= 0) { read_args.set_num_rows(p.second); }
auto result = cudf::io::read_parquet(read_args);
p.first = p.first < 0 ? 0 : p.first;
p.second = p.second < 0 ? static_cast<cudf::column_view>(col).size() - p.first : p.second;
std::vector<cudf::size_type> slice_indices{p.first, p.first + p.second};
auto expected = cudf::slice(col, slice_indices);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0), expected[0]);
}
}
TEST_F(ParquetReaderTest, UserBoundsWithNullsMixedTypes)
{
constexpr int num_rows = 32 * 1024;
std::mt19937 gen(6542);
std::bernoulli_distribution bn(0.7f);
auto valids =
cudf::detail::make_counting_transform_iterator(0, [&](int index) { return bn(gen); });
auto values = thrust::make_counting_iterator(0);
// int64
cudf::test::fixed_width_column_wrapper<int64_t> c0(values, values + num_rows, valids);
// list<float>
constexpr int floats_per_row = 4;
auto c1_offset_iter = cudf::detail::make_counting_transform_iterator(
0, [floats_per_row](cudf::size_type idx) { return idx * floats_per_row; });
cudf::test::fixed_width_column_wrapper<cudf::size_type> c1_offsets(c1_offset_iter,
c1_offset_iter + num_rows + 1);
cudf::test::fixed_width_column_wrapper<float> c1_floats(
values, values + (num_rows * floats_per_row), valids);
auto [null_mask, null_count] = cudf::test::detail::make_null_mask(valids, valids + num_rows);
auto _c1 = cudf::make_lists_column(
num_rows, c1_offsets.release(), c1_floats.release(), null_count, std::move(null_mask));
auto c1 = cudf::purge_nonempty_nulls(*_c1);
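// purge_nonempty_nulls drops child entries under null rows so the expected column matches the
// sanitized nulls produced by the reader.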
// list<list<int>>
auto c2 = make_parquet_list_list_col<int>(0, num_rows, 5, 8, true);
// struct<list<string>, int, float>
std::vector<std::string> strings{
"abc", "x", "bananas", "gpu", "minty", "backspace", "", "cayenne", "turbine", "soft"};
std::uniform_int_distribution<int> uni(0, strings.size() - 1);
auto string_iter = cudf::detail::make_counting_transform_iterator(
0, [&](cudf::size_type idx) { return strings[uni(gen)]; });
constexpr int string_per_row = 3;
constexpr int num_string_rows = num_rows * string_per_row;
cudf::test::strings_column_wrapper string_col{string_iter, string_iter + num_string_rows};
auto offset_iter = cudf::detail::make_counting_transform_iterator(
0, [string_per_row](cudf::size_type idx) { return idx * string_per_row; });
cudf::test::fixed_width_column_wrapper<cudf::size_type> offsets(offset_iter,
offset_iter + num_rows + 1);
auto _c3_valids =
cudf::detail::make_counting_transform_iterator(0, [&](int index) { return index % 200; });
std::vector<bool> c3_valids(num_rows);
std::copy(_c3_valids, _c3_valids + num_rows, c3_valids.begin());
std::tie(null_mask, null_count) = cudf::test::detail::make_null_mask(valids, valids + num_rows);
auto _c3_list = cudf::make_lists_column(
num_rows, offsets.release(), string_col.release(), null_count, std::move(null_mask));
auto c3_list = cudf::purge_nonempty_nulls(*_c3_list);
cudf::test::fixed_width_column_wrapper<int> c3_ints(values, values + num_rows, valids);
cudf::test::fixed_width_column_wrapper<float> c3_floats(values, values + num_rows, valids);
std::vector<std::unique_ptr<cudf::column>> c3_children;
c3_children.push_back(std::move(c3_list));
c3_children.push_back(c3_ints.release());
c3_children.push_back(c3_floats.release());
cudf::test::structs_column_wrapper _c3(std::move(c3_children), c3_valids);
auto c3 = cudf::purge_nonempty_nulls(_c3);
// write it out
cudf::table_view tbl({c0, *c1, *c2, *c3});
auto filepath = temp_env->get_temp_filepath("UserBoundsWithNullsMixedTypes.parquet");
cudf::io::parquet_writer_options out_args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, tbl);
cudf::io::write_parquet(out_args);
// read it back
std::vector<std::pair<int, int>> params{
{-1, -1}, {0, num_rows}, {1, num_rows - 1}, {num_rows - 1, 1}, {517, 22000}};
for (auto p : params) {
cudf::io::parquet_reader_options read_args =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
if (p.first >= 0) { read_args.set_skip_rows(p.first); }
if (p.second >= 0) { read_args.set_num_rows(p.second); }
auto result = cudf::io::read_parquet(read_args);
p.first = p.first < 0 ? 0 : p.first;
p.second = p.second < 0 ? num_rows - p.first : p.second;
std::vector<cudf::size_type> slice_indices{p.first, p.first + p.second};
auto expected = cudf::slice(tbl, slice_indices);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, expected[0]);
}
}
TEST_F(ParquetReaderTest, UserBoundsWithNullsLarge)
{
constexpr int num_rows = 30 * 1000000;
std::mt19937 gen(6747);
std::bernoulli_distribution bn(0.7f);
auto valids =
cudf::detail::make_counting_transform_iterator(0, [&](int index) { return bn(gen); });
auto values = thrust::make_counting_iterator(0);
cudf::test::fixed_width_column_wrapper<int> col(values, values + num_rows, valids);
// this file will have row groups of 1,000,000 each
cudf::table_view tbl({col});
auto filepath = temp_env->get_temp_filepath("UserBoundsWithNullsLarge.parquet");
cudf::io::parquet_writer_options out_args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, tbl);
cudf::io::write_parquet(out_args);
// skip_rows / num_rows
// clang-format off
std::vector<std::pair<int, int>> params{ {-1, -1}, {31, -1}, {32, -1}, {33, -1}, {1613470, -1}, {1999999, -1},
{31, 1}, {32, 1}, {33, 1},
// deliberately span some row group boundaries
{999000, 1001}, {999000, 2000}, {2999999, 2}, {13999997, -1},
{16785678, 3}, {22996176, 31},
{24001231, 17}, {29000001, 989999}, {29999999, 1} };
// clang-format on
for (auto p : params) {
cudf::io::parquet_reader_options read_args =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
if (p.first >= 0) { read_args.set_skip_rows(p.first); }
if (p.second >= 0) { read_args.set_num_rows(p.second); }
auto result = cudf::io::read_parquet(read_args);
p.first = p.first < 0 ? 0 : p.first;
p.second = p.second < 0 ? static_cast<cudf::column_view>(col).size() - p.first : p.second;
std::vector<cudf::size_type> slice_indices{p.first, p.first + p.second};
auto expected = cudf::slice(col, slice_indices);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0), expected[0]);
}
}
TEST_F(ParquetReaderTest, ListUserBoundsWithNullsLarge)
{
constexpr int num_rows = 5 * 1000000;
auto colp = make_parquet_list_list_col<int>(0, num_rows, 5, 8, true);
cudf::column_view col = *colp;
// this file will have row groups of 1,000,000 each
cudf::table_view tbl({col});
auto filepath = temp_env->get_temp_filepath("ListUserBoundsWithNullsLarge.parquet");
cudf::io::parquet_writer_options out_args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, tbl);
cudf::io::write_parquet(out_args);
// skip_rows / num_rows
// clang-format off
std::vector<std::pair<int, int>> params{ {-1, -1}, {31, -1}, {32, -1}, {33, -1}, {161470, -1}, {4499997, -1},
{31, 1}, {32, 1}, {33, 1},
// deliberately span some row group boundaries
{999000, 1001}, {999000, 2000}, {2999999, 2},
{1678567, 3}, {4299676, 31},
{4001231, 17}, {1900000, 989999}, {4999999, 1} };
// clang-format on
for (auto p : params) {
cudf::io::parquet_reader_options read_args =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
if (p.first >= 0) { read_args.set_skip_rows(p.first); }
if (p.second >= 0) { read_args.set_num_rows(p.second); }
auto result = cudf::io::read_parquet(read_args);
p.first = p.first < 0 ? 0 : p.first;
p.second = p.second < 0 ? static_cast<cudf::column_view>(col).size() - p.first : p.second;
std::vector<cudf::size_type> slice_indices{p.first, p.first + p.second};
auto expected = cudf::slice(col, slice_indices);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0), expected[0]);
}
}
TEST_F(ParquetReaderTest, ReorderedColumns)
{
{
auto a = cudf::test::strings_column_wrapper{{"a", "", "c"}, {true, false, true}};
auto b = cudf::test::fixed_width_column_wrapper<int>{1, 2, 3};
cudf::table_view tbl{{a, b}};
auto filepath = temp_env->get_temp_filepath("ReorderedColumns.parquet");
cudf::io::table_input_metadata md(tbl);
md.column_metadata[0].set_name("a");
md.column_metadata[1].set_name("b");
cudf::io::parquet_writer_options opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, tbl).metadata(md);
cudf::io::write_parquet(opts);
// read them out of order
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath})
.columns({"b", "a"});
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(0), b);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(1), a);
}
{
auto a = cudf::test::fixed_width_column_wrapper<int>{1, 2, 3};
auto b = cudf::test::strings_column_wrapper{{"a", "", "c"}, {true, false, true}};
cudf::table_view tbl{{a, b}};
auto filepath = temp_env->get_temp_filepath("ReorderedColumns2.parquet");
cudf::io::table_input_metadata md(tbl);
md.column_metadata[0].set_name("a");
md.column_metadata[1].set_name("b");
cudf::io::parquet_writer_options opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, tbl).metadata(md);
cudf::io::write_parquet(opts);
// read them out of order
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath})
.columns({"b", "a"});
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(0), b);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(1), a);
}
auto a = cudf::test::fixed_width_column_wrapper<int>{1, 2, 3, 10, 20, 30};
auto b = cudf::test::strings_column_wrapper{{"a", "", "c", "cats", "dogs", "owls"},
{true, false, true, true, false, true}};
auto c = cudf::test::fixed_width_column_wrapper<int>{{15, 16, 17, 25, 26, 32},
{false, true, true, true, true, false}};
auto d = cudf::test::strings_column_wrapper{"ducks", "sheep", "cows", "fish", "birds", "ants"};
cudf::table_view tbl{{a, b, c, d}};
auto filepath = temp_env->get_temp_filepath("ReorderedColumns3.parquet");
cudf::io::table_input_metadata md(tbl);
md.column_metadata[0].set_name("a");
md.column_metadata[1].set_name("b");
md.column_metadata[2].set_name("c");
md.column_metadata[3].set_name("d");
cudf::io::parquet_writer_options opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, tbl)
.metadata(std::move(md));
cudf::io::write_parquet(opts);
{
// read them out of order
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath})
.columns({"d", "a", "b", "c"});
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(0), d);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(1), a);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(2), b);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(3), c);
}
{
// read them out of order
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath})
.columns({"c", "d", "a", "b"});
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(0), c);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(1), d);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(2), a);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(3), b);
}
{
// read them out of order
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath})
.columns({"d", "c", "b", "a"});
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(0), d);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(1), c);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(2), b);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(3), a);
}
}
TEST_F(ParquetReaderTest, SelectNestedColumn)
{
// Struct<is_human:bool,
//   Struct<weight:float, age:int>
// >
auto weights_col = cudf::test::fixed_width_column_wrapper<float>{1.1, 2.4, 5.3, 8.0, 9.6, 6.9};
auto ages_col =
cudf::test::fixed_width_column_wrapper<int32_t>{{48, 27, 25, 31, 351, 351}, {1, 1, 1, 1, 1, 0}};
auto struct_1 = cudf::test::structs_column_wrapper{{weights_col, ages_col}, {1, 1, 1, 1, 0, 1}};
auto is_human_col = cudf::test::fixed_width_column_wrapper<bool>{
{true, true, false, false, false, false}, {1, 1, 0, 1, 1, 0}};
auto struct_2 =
cudf::test::structs_column_wrapper{{is_human_col, struct_1}, {0, 1, 1, 1, 1, 1}}.release();
auto input = table_view({*struct_2});
cudf::io::table_input_metadata input_metadata(input);
input_metadata.column_metadata[0].set_name("being");
input_metadata.column_metadata[0].child(0).set_name("human?");
input_metadata.column_metadata[0].child(1).set_name("particulars");
input_metadata.column_metadata[0].child(1).child(0).set_name("weight");
input_metadata.column_metadata[0].child(1).child(1).set_name("age");
auto filepath = temp_env->get_temp_filepath("SelectNestedColumn.parquet");
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, input)
.metadata(std::move(input_metadata));
cudf::io::write_parquet(args);
{ // Test selecting a single leaf from the table
cudf::io::parquet_reader_options read_args =
cudf::io::parquet_reader_options::builder(cudf::io::source_info(filepath))
.columns({"being.particulars.age"});
auto const result = cudf::io::read_parquet(read_args);
auto expect_ages_col = cudf::test::fixed_width_column_wrapper<int32_t>{
{48, 27, 25, 31, 351, 351}, {1, 1, 1, 1, 1, 0}};
auto expect_s_1 = cudf::test::structs_column_wrapper{{expect_ages_col}, {1, 1, 1, 1, 0, 1}};
auto expect_s_2 =
cudf::test::structs_column_wrapper{{expect_s_1}, {0, 1, 1, 1, 1, 1}}.release();
auto expected = table_view({*expect_s_2});
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("being");
expected_metadata.column_metadata[0].child(0).set_name("particulars");
expected_metadata.column_metadata[0].child(0).child(0).set_name("age");
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
{ // Test selecting a non-leaf and expecting all hierarchy from that node onwards
cudf::io::parquet_reader_options read_args =
cudf::io::parquet_reader_options::builder(cudf::io::source_info(filepath))
.columns({"being.particulars"});
auto const result = cudf::io::read_parquet(read_args);
auto expected_weights_col =
cudf::test::fixed_width_column_wrapper<float>{1.1, 2.4, 5.3, 8.0, 9.6, 6.9};
auto expected_ages_col = cudf::test::fixed_width_column_wrapper<int32_t>{
{48, 27, 25, 31, 351, 351}, {1, 1, 1, 1, 1, 0}};
auto expected_s_1 = cudf::test::structs_column_wrapper{
{expected_weights_col, expected_ages_col}, {1, 1, 1, 1, 0, 1}};
auto expect_s_2 =
cudf::test::structs_column_wrapper{{expected_s_1}, {0, 1, 1, 1, 1, 1}}.release();
auto expected = table_view({*expect_s_2});
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("being");
expected_metadata.column_metadata[0].child(0).set_name("particulars");
expected_metadata.column_metadata[0].child(0).child(0).set_name("weight");
expected_metadata.column_metadata[0].child(0).child(1).set_name("age");
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
{ // Test selecting struct children out of order
cudf::io::parquet_reader_options read_args =
cudf::io::parquet_reader_options::builder(cudf::io::source_info(filepath))
.columns({"being.particulars.age", "being.particulars.weight", "being.human?"});
auto const result = cudf::io::read_parquet(read_args);
auto expected_weights_col =
cudf::test::fixed_width_column_wrapper<float>{1.1, 2.4, 5.3, 8.0, 9.6, 6.9};
auto expected_ages_col = cudf::test::fixed_width_column_wrapper<int32_t>{
{48, 27, 25, 31, 351, 351}, {1, 1, 1, 1, 1, 0}};
auto expected_is_human_col = cudf::test::fixed_width_column_wrapper<bool>{
{true, true, false, false, false, false}, {1, 1, 0, 1, 1, 0}};
auto expect_s_1 = cudf::test::structs_column_wrapper{{expected_ages_col, expected_weights_col},
{1, 1, 1, 1, 0, 1}};
auto expect_s_2 =
cudf::test::structs_column_wrapper{{expect_s_1, expected_is_human_col}, {0, 1, 1, 1, 1, 1}}
.release();
auto expected = table_view({*expect_s_2});
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("being");
expected_metadata.column_metadata[0].child(0).set_name("particulars");
expected_metadata.column_metadata[0].child(0).child(0).set_name("age");
expected_metadata.column_metadata[0].child(0).child(1).set_name("weight");
expected_metadata.column_metadata[0].child(1).set_name("human?");
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
cudf::test::expect_metadata_equal(expected_metadata, result.metadata);
}
}
TEST_F(ParquetReaderTest, DecimalRead)
{
{
/* We could add a dataset to include this file, but we don't want tests in cudf to depend on
external data files. This test is temporary until python gains the ability to write decimal,
so we embed a parquet file directly in the code here to prevent issues with finding the file. */
unsigned char const decimals_parquet[] = {
0x50, 0x41, 0x52, 0x31, 0x15, 0x00, 0x15, 0xb0, 0x03, 0x15, 0xb8, 0x03, 0x2c, 0x15, 0x6a,
0x15, 0x00, 0x15, 0x06, 0x15, 0x08, 0x1c, 0x36, 0x02, 0x28, 0x04, 0x7f, 0x96, 0x98, 0x00,
0x18, 0x04, 0x81, 0x69, 0x67, 0xff, 0x00, 0x00, 0x00, 0xd8, 0x01, 0xf0, 0xd7, 0x04, 0x00,
0x00, 0x00, 0x64, 0x01, 0x03, 0x06, 0x68, 0x12, 0xdc, 0xff, 0xbd, 0x18, 0xfd, 0xff, 0x64,
0x13, 0x80, 0x00, 0xb3, 0x5d, 0x62, 0x00, 0x90, 0x35, 0xa9, 0xff, 0xa2, 0xde, 0xe3, 0xff,
0xe9, 0xbf, 0x96, 0xff, 0x1f, 0x8a, 0x98, 0xff, 0xb1, 0x50, 0x34, 0x00, 0x88, 0x24, 0x59,
0x00, 0x2a, 0x33, 0xbe, 0xff, 0xd5, 0x16, 0xbc, 0xff, 0x13, 0x50, 0x8d, 0xff, 0xcb, 0x63,
0x2d, 0x00, 0x80, 0x8f, 0xbe, 0xff, 0x82, 0x40, 0x10, 0x00, 0x84, 0x68, 0x70, 0xff, 0x9b,
0x69, 0x78, 0x00, 0x14, 0x6c, 0x10, 0x00, 0x50, 0xd9, 0xe1, 0xff, 0xaa, 0xcd, 0x6a, 0x00,
0xcf, 0xb1, 0x28, 0x00, 0x77, 0x57, 0x8d, 0x00, 0xee, 0x05, 0x79, 0x00, 0xf0, 0x15, 0xeb,
0xff, 0x02, 0xe2, 0x06, 0x00, 0x87, 0x43, 0x86, 0x00, 0xf8, 0x2d, 0x2e, 0x00, 0xee, 0x2e,
0x98, 0xff, 0x39, 0xcb, 0x4d, 0x00, 0x1e, 0x6b, 0xea, 0xff, 0x80, 0x8e, 0x6c, 0xff, 0x97,
0x25, 0x26, 0x00, 0x4d, 0x0d, 0x0a, 0x00, 0xca, 0x64, 0x7f, 0x00, 0xf4, 0xbe, 0xa1, 0xff,
0xe2, 0x12, 0x6c, 0xff, 0xbd, 0x77, 0xae, 0xff, 0xf9, 0x4b, 0x36, 0x00, 0xb0, 0xe3, 0x79,
0xff, 0xa2, 0x2a, 0x29, 0x00, 0xcd, 0x06, 0xbc, 0xff, 0x2d, 0xa3, 0x7e, 0x00, 0xa9, 0x08,
0xa1, 0xff, 0xbf, 0x81, 0xd0, 0xff, 0x4f, 0x03, 0x73, 0x00, 0xb0, 0x99, 0x0c, 0x00, 0xbd,
0x6f, 0xf8, 0xff, 0x6b, 0x02, 0x05, 0x00, 0xc1, 0xe1, 0xba, 0xff, 0x81, 0x69, 0x67, 0xff,
0x7f, 0x96, 0x98, 0x00, 0x15, 0x00, 0x15, 0xd0, 0x06, 0x15, 0xda, 0x06, 0x2c, 0x15, 0x6a,
0x15, 0x00, 0x15, 0x06, 0x15, 0x08, 0x1c, 0x36, 0x02, 0x28, 0x08, 0xff, 0x3f, 0x7a, 0x10,
0xf3, 0x5a, 0x00, 0x00, 0x18, 0x08, 0x01, 0xc0, 0x85, 0xef, 0x0c, 0xa5, 0xff, 0xff, 0x00,
0x00, 0x00, 0xa8, 0x03, 0xf4, 0xa7, 0x01, 0x04, 0x00, 0x00, 0x00, 0x64, 0x01, 0x03, 0x06,
0x55, 0x6f, 0xc5, 0xe4, 0x9f, 0x1a, 0x00, 0x00, 0x47, 0x89, 0x0a, 0xe8, 0x58, 0xf0, 0xff,
0xff, 0x63, 0xee, 0x21, 0xdd, 0xdd, 0xca, 0xff, 0xff, 0xbe, 0x6f, 0x3b, 0xaa, 0xe9, 0x3d,
0x00, 0x00, 0xd6, 0x91, 0x2a, 0xb7, 0x08, 0x02, 0x00, 0x00, 0x75, 0x45, 0x2c, 0xd7, 0x76,
0x0c, 0x00, 0x00, 0x54, 0x49, 0x92, 0x44, 0x9c, 0xbf, 0xff, 0xff, 0x41, 0xa9, 0x6d, 0xec,
0x7a, 0xd0, 0xff, 0xff, 0x27, 0xa0, 0x23, 0x41, 0x44, 0xc1, 0xff, 0xff, 0x18, 0xd4, 0xe1,
0x30, 0xd3, 0xe0, 0xff, 0xff, 0x59, 0xac, 0x14, 0xf4, 0xec, 0x58, 0x00, 0x00, 0x2c, 0x17,
0x29, 0x57, 0x44, 0x13, 0x00, 0x00, 0xa2, 0x0d, 0x4a, 0xcc, 0x63, 0xff, 0xff, 0xff, 0x81,
0x33, 0xbc, 0xda, 0xd5, 0xda, 0xff, 0xff, 0x4c, 0x05, 0xf4, 0x78, 0x19, 0xea, 0xff, 0xff,
0x06, 0x71, 0x25, 0xde, 0x5a, 0xaf, 0xff, 0xff, 0x95, 0x32, 0x5f, 0x76, 0x98, 0xb3, 0xff,
0xff, 0xf1, 0x34, 0x3c, 0xbf, 0xa8, 0xbe, 0xff, 0xff, 0x27, 0x73, 0x40, 0x0c, 0x7d, 0xcd,
0xff, 0xff, 0x68, 0xa9, 0xc2, 0xe9, 0x2c, 0x03, 0x00, 0x00, 0x3f, 0x79, 0xd9, 0x04, 0x8c,
0xe5, 0xff, 0xff, 0x91, 0xb4, 0x9b, 0xe3, 0x8f, 0x21, 0x00, 0x00, 0xb8, 0x20, 0xc8, 0xc2,
0x4d, 0xa6, 0xff, 0xff, 0x47, 0xfa, 0xde, 0x36, 0x4a, 0xf3, 0xff, 0xff, 0x72, 0x80, 0x94,
0x59, 0xdd, 0x4e, 0x00, 0x00, 0x29, 0xe4, 0xd6, 0x43, 0xb0, 0xf0, 0xff, 0xff, 0x68, 0x36,
0xbc, 0x2d, 0xd1, 0xa9, 0xff, 0xff, 0xbc, 0xe4, 0xbe, 0xd7, 0xed, 0x1b, 0x00, 0x00, 0x02,
0x8b, 0xcb, 0xd7, 0xed, 0x47, 0x00, 0x00, 0x3c, 0x06, 0xe4, 0xda, 0xc7, 0x47, 0x00, 0x00,
0xf3, 0x39, 0x55, 0x28, 0x97, 0xba, 0xff, 0xff, 0x07, 0x79, 0x38, 0x4e, 0xe0, 0x21, 0x00,
0x00, 0xde, 0xed, 0x1c, 0x23, 0x09, 0x49, 0x00, 0x00, 0x49, 0x46, 0x49, 0x5d, 0x8f, 0x34,
0x00, 0x00, 0x38, 0x18, 0x50, 0xf6, 0xa1, 0x11, 0x00, 0x00, 0xdf, 0xb8, 0x19, 0x14, 0xd1,
0xe1, 0xff, 0xff, 0x2c, 0x56, 0x72, 0x93, 0x64, 0x3f, 0x00, 0x00, 0x1c, 0xe0, 0xbe, 0x87,
0x7d, 0xf9, 0xff, 0xff, 0x73, 0x0e, 0x3c, 0x01, 0x91, 0xf9, 0xff, 0xff, 0xb2, 0x37, 0x85,
0x81, 0x5f, 0x54, 0x00, 0x00, 0x58, 0x44, 0xb0, 0x1a, 0xac, 0xbb, 0xff, 0xff, 0x36, 0xbf,
0xbe, 0x5e, 0x22, 0xff, 0xff, 0xff, 0x06, 0x20, 0xa0, 0x23, 0x0d, 0x3b, 0x00, 0x00, 0x19,
0xc6, 0x49, 0x0a, 0x00, 0xcf, 0xff, 0xff, 0x4f, 0xcd, 0xc6, 0x95, 0x4b, 0xf1, 0xff, 0xff,
0xa3, 0x59, 0xaf, 0x65, 0xec, 0xe9, 0xff, 0xff, 0x58, 0xef, 0x05, 0x50, 0x63, 0xe4, 0xff,
0xff, 0xc7, 0x6a, 0x9e, 0xf1, 0x69, 0x20, 0x00, 0x00, 0xd1, 0xb3, 0xc9, 0x14, 0xb2, 0x29,
0x00, 0x00, 0x1d, 0x48, 0x16, 0x70, 0xf0, 0x40, 0x00, 0x00, 0x01, 0xc0, 0x85, 0xef, 0x0c,
0xa5, 0xff, 0xff, 0xff, 0x3f, 0x7a, 0x10, 0xf3, 0x5a, 0x00, 0x00, 0x15, 0x00, 0x15, 0x90,
0x0d, 0x15, 0x9a, 0x0d, 0x2c, 0x15, 0x6a, 0x15, 0x00, 0x15, 0x06, 0x15, 0x08, 0x1c, 0x36,
0x02, 0x28, 0x10, 0x4b, 0x3b, 0x4c, 0xa8, 0x5a, 0x86, 0xc4, 0x7a, 0x09, 0x8a, 0x22, 0x3f,
0xff, 0xff, 0xff, 0xff, 0x18, 0x10, 0xb4, 0xc4, 0xb3, 0x57, 0xa5, 0x79, 0x3b, 0x85, 0xf6,
0x75, 0xdd, 0xc0, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xc8, 0x06, 0xf4, 0x47, 0x03,
0x04, 0x00, 0x00, 0x00, 0x64, 0x01, 0x03, 0x06, 0x05, 0x49, 0xf7, 0xfc, 0x89, 0x3d, 0x3e,
0x20, 0x07, 0x72, 0x3e, 0xa1, 0x66, 0x81, 0x67, 0x80, 0x23, 0x78, 0x06, 0x68, 0x0e, 0x78,
0xf5, 0x08, 0xed, 0x20, 0xcd, 0x0e, 0x7f, 0x9c, 0x70, 0xa0, 0xb9, 0x16, 0x44, 0xb2, 0x41,
0x62, 0xba, 0x82, 0xad, 0xe1, 0x12, 0x9b, 0xa6, 0x53, 0x8d, 0x20, 0x27, 0xd5, 0x84, 0x63,
0xb8, 0x07, 0x4b, 0x5b, 0xa4, 0x1c, 0xa4, 0x1c, 0x17, 0xbf, 0x4b, 0x00, 0x24, 0x04, 0x56,
0xa8, 0x52, 0xaf, 0x33, 0xf7, 0xad, 0x7c, 0xc8, 0x83, 0x25, 0x13, 0xaf, 0x80, 0x25, 0x6f,
0xbd, 0xd1, 0x15, 0x69, 0x64, 0x20, 0x7b, 0xd7, 0x33, 0xba, 0x66, 0x29, 0x8a, 0x00, 0xda,
0x42, 0x07, 0x2c, 0x6c, 0x39, 0x76, 0x9f, 0xdc, 0x17, 0xad, 0xb6, 0x58, 0xdf, 0x5f, 0x00,
0x18, 0x3a, 0xae, 0x1c, 0xd6, 0x5f, 0x9d, 0x78, 0x8d, 0x73, 0xdd, 0x3e, 0xd6, 0x18, 0x33,
0x40, 0xe4, 0x36, 0xde, 0xb0, 0xb7, 0x33, 0x2a, 0x6b, 0x08, 0x03, 0x6c, 0x6d, 0x8f, 0x13,
0x93, 0xd0, 0xd7, 0x87, 0x62, 0x63, 0x53, 0xfb, 0xd8, 0xbb, 0xc9, 0x54, 0x90, 0xd6, 0xa9,
0x8f, 0xc8, 0x60, 0xbd, 0xec, 0x75, 0x23, 0x9a, 0x21, 0xec, 0xe4, 0x86, 0x43, 0xd7, 0xc1,
0x88, 0xdc, 0x82, 0x00, 0x32, 0x79, 0xc9, 0x2b, 0x70, 0x85, 0xb7, 0x25, 0xa1, 0xcc, 0x7d,
0x0b, 0x29, 0x03, 0xea, 0x80, 0xff, 0x9b, 0xf3, 0x24, 0x7f, 0xd1, 0xff, 0xf0, 0x22, 0x65,
0x85, 0x99, 0x17, 0x63, 0xc2, 0xc0, 0xb7, 0x62, 0x05, 0xda, 0x7a, 0xa0, 0xc3, 0x2a, 0x6f,
0x1f, 0xee, 0x1f, 0x31, 0xa8, 0x42, 0x80, 0xe4, 0xb7, 0x6c, 0xf6, 0xac, 0x47, 0xb0, 0x17,
0x69, 0xcb, 0xff, 0x66, 0x8a, 0xd6, 0x25, 0x00, 0xf3, 0xcf, 0x0a, 0xaf, 0xf8, 0x92, 0x8a,
0xa0, 0xdf, 0x71, 0x13, 0x8d, 0x9d, 0xff, 0x7e, 0xe0, 0x0a, 0x52, 0xf1, 0x97, 0x01, 0xa9,
0x73, 0x27, 0xfd, 0x63, 0x58, 0x00, 0x32, 0xa6, 0xf6, 0x78, 0xb8, 0xe4, 0xfd, 0x20, 0x7c,
0x90, 0xee, 0xad, 0x8c, 0xc9, 0x71, 0x35, 0x66, 0x71, 0x3c, 0xe0, 0xe4, 0x0b, 0xbb, 0xa0,
0x50, 0xe9, 0xf2, 0x81, 0x1d, 0x3a, 0x95, 0x94, 0x00, 0xd5, 0x49, 0x00, 0x07, 0xdf, 0x21,
0x53, 0x36, 0x8d, 0x9e, 0xd9, 0xa5, 0x52, 0x4d, 0x0d, 0x29, 0x74, 0xf0, 0x40, 0xbd, 0xda,
0x63, 0x4e, 0xdd, 0x91, 0x8e, 0xa6, 0xa7, 0xf6, 0x78, 0x58, 0x3b, 0x0a, 0x5c, 0x60, 0x3c,
0x15, 0x34, 0xf8, 0x2c, 0x21, 0xe3, 0x56, 0x1b, 0x9e, 0xd9, 0x56, 0xd3, 0x13, 0x2e, 0x80,
0x2c, 0x36, 0xda, 0x1d, 0xc8, 0xfb, 0x52, 0xee, 0x17, 0xb3, 0x2b, 0xf3, 0xd2, 0xeb, 0x29,
0xa0, 0x37, 0xa0, 0x12, 0xce, 0x1c, 0x50, 0x6a, 0xf4, 0x11, 0xcd, 0x96, 0x88, 0x3f, 0x43,
0x78, 0xc0, 0x2c, 0x53, 0x6c, 0xa6, 0xdf, 0xb9, 0x9e, 0x93, 0xd4, 0x1e, 0xa9, 0x7f, 0x67,
0xa6, 0xc1, 0x80, 0x46, 0x0f, 0x63, 0x7d, 0x15, 0xf2, 0x4c, 0xc5, 0xda, 0x11, 0x9a, 0x20,
0x67, 0x27, 0xe8, 0x00, 0xec, 0x03, 0x1d, 0x15, 0xa7, 0x92, 0xb3, 0x1f, 0xda, 0x20, 0x92,
0xd8, 0x00, 0xfb, 0x06, 0x80, 0xeb, 0x4b, 0x0c, 0xc1, 0x1f, 0x49, 0x40, 0x06, 0x8d, 0x8a,
0xf8, 0x34, 0xb1, 0x0c, 0x1d, 0x20, 0xd0, 0x47, 0xe5, 0xb1, 0x7e, 0xf7, 0xe4, 0xb4, 0x7e,
0x9c, 0x84, 0x18, 0x61, 0x32, 0x4f, 0xc0, 0xc2, 0xb2, 0xcc, 0x63, 0xf6, 0xe1, 0x16, 0xd6,
0xd9, 0x4b, 0x74, 0x13, 0x01, 0xa1, 0xe2, 0x00, 0xb7, 0x9e, 0xc1, 0x3a, 0xc5, 0xaf, 0xe8,
0x54, 0x07, 0x2a, 0x20, 0xfd, 0x2c, 0x6f, 0xb9, 0x80, 0x18, 0x92, 0x87, 0xa0, 0x81, 0x24,
0x60, 0x47, 0x17, 0x4f, 0xbc, 0xbe, 0xf5, 0x03, 0x69, 0x80, 0xe3, 0x10, 0x54, 0xd6, 0x68,
0x7d, 0x75, 0xd3, 0x0a, 0x45, 0x38, 0x9e, 0xa9, 0xfd, 0x05, 0x40, 0xd2, 0x1e, 0x6f, 0x5c,
0x30, 0x10, 0xfe, 0x9b, 0x9f, 0x6d, 0xc0, 0x9d, 0x6c, 0x17, 0x7d, 0x00, 0x09, 0xb6, 0x8a,
0x31, 0x8e, 0x1b, 0x6b, 0x84, 0x1e, 0x79, 0xce, 0x10, 0x55, 0x59, 0x6a, 0x40, 0x16, 0xdc,
0x9a, 0xcf, 0x4d, 0xb0, 0x8f, 0xac, 0xe3, 0x8d, 0xee, 0xd2, 0xef, 0x01, 0x8c, 0xe0, 0x2b,
0x24, 0xe5, 0xb4, 0xe1, 0x86, 0x72, 0x00, 0x30, 0x07, 0xce, 0x02, 0x23, 0x41, 0x33, 0x40,
0xf0, 0x9b, 0xc2, 0x2d, 0x30, 0xec, 0x3b, 0x17, 0xb2, 0x8f, 0x64, 0x7d, 0xcd, 0x70, 0x9e,
0x80, 0x22, 0xb5, 0xdf, 0x6d, 0x2a, 0x43, 0xd4, 0x2b, 0x5a, 0xf6, 0x96, 0xa6, 0xea, 0x91,
0x62, 0x80, 0x39, 0xf2, 0x5a, 0x8e, 0xc0, 0xb9, 0x29, 0x99, 0x17, 0xe7, 0x35, 0x2c, 0xf6,
0x4d, 0x18, 0x00, 0x48, 0x10, 0x85, 0xb4, 0x3f, 0x89, 0x60, 0x49, 0x6e, 0xf0, 0xcd, 0x9d,
0x92, 0xeb, 0x96, 0x80, 0xcf, 0xf9, 0xf1, 0x46, 0x1d, 0xc0, 0x49, 0xb3, 0x36, 0x2e, 0x24,
0xc8, 0xdb, 0x41, 0x72, 0x20, 0xf5, 0xde, 0x5c, 0xf9, 0x4a, 0x6e, 0xa0, 0x0b, 0x13, 0xfc,
0x2d, 0x17, 0x07, 0x16, 0x5e, 0x00, 0x3c, 0x54, 0x41, 0x0e, 0xa2, 0x0d, 0xf3, 0x48, 0x12,
0x2e, 0x7c, 0xab, 0x3c, 0x59, 0x1c, 0x40, 0xca, 0xb0, 0x71, 0xc7, 0x29, 0xf0, 0xbb, 0x9f,
0xf4, 0x3f, 0x25, 0x49, 0xad, 0xc2, 0x8f, 0x80, 0x04, 0x38, 0x6d, 0x35, 0x02, 0xca, 0xe6,
0x02, 0x83, 0x89, 0x4e, 0x74, 0xdb, 0x08, 0x5a, 0x80, 0x13, 0x99, 0xd4, 0x26, 0xc1, 0x27,
0xce, 0xb0, 0x98, 0x99, 0xca, 0xf6, 0x3e, 0x50, 0x49, 0xd0, 0xbf, 0xcb, 0x6f, 0xbe, 0x5b,
0x92, 0x63, 0xde, 0x94, 0xd3, 0x8f, 0x07, 0x06, 0x0f, 0x2b, 0x80, 0x36, 0xf1, 0x77, 0xf6,
0x29, 0x33, 0x13, 0xa9, 0x4a, 0x55, 0x3d, 0x6c, 0xca, 0xdb, 0x4e, 0x40, 0xc4, 0x95, 0x54,
0xf4, 0xe2, 0x8c, 0x1b, 0xa0, 0xfe, 0x30, 0x50, 0x9d, 0x62, 0xbc, 0x5c, 0x00, 0xb4, 0xc4,
0xb3, 0x57, 0xa5, 0x79, 0x3b, 0x85, 0xf6, 0x75, 0xdd, 0xc0, 0x00, 0x00, 0x00, 0x01, 0x4b,
0x3b, 0x4c, 0xa8, 0x5a, 0x86, 0xc4, 0x7a, 0x09, 0x8a, 0x22, 0x3f, 0xff, 0xff, 0xff, 0xff,
0x15, 0x02, 0x19, 0x4c, 0x48, 0x0c, 0x73, 0x70, 0x61, 0x72, 0x6b, 0x5f, 0x73, 0x63, 0x68,
0x65, 0x6d, 0x61, 0x15, 0x06, 0x00, 0x15, 0x02, 0x25, 0x02, 0x18, 0x06, 0x64, 0x65, 0x63,
0x37, 0x70, 0x34, 0x25, 0x0a, 0x15, 0x08, 0x15, 0x0e, 0x00, 0x15, 0x04, 0x25, 0x02, 0x18,
0x07, 0x64, 0x65, 0x63, 0x31, 0x34, 0x70, 0x35, 0x25, 0x0a, 0x15, 0x0a, 0x15, 0x1c, 0x00,
0x15, 0x0e, 0x15, 0x20, 0x15, 0x02, 0x18, 0x08, 0x64, 0x65, 0x63, 0x33, 0x38, 0x70, 0x31,
0x38, 0x25, 0x0a, 0x15, 0x24, 0x15, 0x4c, 0x00, 0x16, 0x6a, 0x19, 0x1c, 0x19, 0x3c, 0x26,
0x08, 0x1c, 0x15, 0x02, 0x19, 0x35, 0x06, 0x08, 0x00, 0x19, 0x18, 0x06, 0x64, 0x65, 0x63,
0x37, 0x70, 0x34, 0x15, 0x02, 0x16, 0x6a, 0x16, 0xf6, 0x03, 0x16, 0xfe, 0x03, 0x26, 0x08,
0x3c, 0x36, 0x02, 0x28, 0x04, 0x7f, 0x96, 0x98, 0x00, 0x18, 0x04, 0x81, 0x69, 0x67, 0xff,
0x00, 0x19, 0x1c, 0x15, 0x00, 0x15, 0x00, 0x15, 0x02, 0x00, 0x00, 0x00, 0x26, 0x86, 0x04,
0x1c, 0x15, 0x04, 0x19, 0x35, 0x06, 0x08, 0x00, 0x19, 0x18, 0x07, 0x64, 0x65, 0x63, 0x31,
0x34, 0x70, 0x35, 0x15, 0x02, 0x16, 0x6a, 0x16, 0xa6, 0x07, 0x16, 0xb0, 0x07, 0x26, 0x86,
0x04, 0x3c, 0x36, 0x02, 0x28, 0x08, 0xff, 0x3f, 0x7a, 0x10, 0xf3, 0x5a, 0x00, 0x00, 0x18,
0x08, 0x01, 0xc0, 0x85, 0xef, 0x0c, 0xa5, 0xff, 0xff, 0x00, 0x19, 0x1c, 0x15, 0x00, 0x15,
0x00, 0x15, 0x02, 0x00, 0x00, 0x00, 0x26, 0xb6, 0x0b, 0x1c, 0x15, 0x0e, 0x19, 0x35, 0x06,
0x08, 0x00, 0x19, 0x18, 0x08, 0x64, 0x65, 0x63, 0x33, 0x38, 0x70, 0x31, 0x38, 0x15, 0x02,
0x16, 0x6a, 0x16, 0x86, 0x0e, 0x16, 0x90, 0x0e, 0x26, 0xb6, 0x0b, 0x3c, 0x36, 0x02, 0x28,
0x10, 0x4b, 0x3b, 0x4c, 0xa8, 0x5a, 0x86, 0xc4, 0x7a, 0x09, 0x8a, 0x22, 0x3f, 0xff, 0xff,
0xff, 0xff, 0x18, 0x10, 0xb4, 0xc4, 0xb3, 0x57, 0xa5, 0x79, 0x3b, 0x85, 0xf6, 0x75, 0xdd,
0xc0, 0x00, 0x00, 0x00, 0x01, 0x00, 0x19, 0x1c, 0x15, 0x00, 0x15, 0x00, 0x15, 0x02, 0x00,
0x00, 0x00, 0x16, 0xa2, 0x19, 0x16, 0x6a, 0x00, 0x19, 0x2c, 0x18, 0x18, 0x6f, 0x72, 0x67,
0x2e, 0x61, 0x70, 0x61, 0x63, 0x68, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x72, 0x6b, 0x2e, 0x76,
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x33, 0x2e, 0x30, 0x2e, 0x31, 0x00, 0x18,
0x29, 0x6f, 0x72, 0x67, 0x2e, 0x61, 0x70, 0x61, 0x63, 0x68, 0x65, 0x2e, 0x73, 0x70, 0x61,
0x72, 0x6b, 0x2e, 0x73, 0x71, 0x6c, 0x2e, 0x70, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x2e,
0x72, 0x6f, 0x77, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0xf4, 0x01,
0x7b, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3a, 0x22, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74,
0x22, 0x2c, 0x22, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x22, 0x3a, 0x5b, 0x7b, 0x22, 0x6e,
0x61, 0x6d, 0x65, 0x22, 0x3a, 0x22, 0x64, 0x65, 0x63, 0x37, 0x70, 0x34, 0x22, 0x2c, 0x22,
0x74, 0x79, 0x70, 0x65, 0x22, 0x3a, 0x22, 0x64, 0x65, 0x63, 0x69, 0x6d, 0x61, 0x6c, 0x28,
0x37, 0x2c, 0x34, 0x29, 0x22, 0x2c, 0x22, 0x6e, 0x75, 0x6c, 0x6c, 0x61, 0x62, 0x6c, 0x65,
0x22, 0x3a, 0x74, 0x72, 0x75, 0x65, 0x2c, 0x22, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
0x61, 0x22, 0x3a, 0x7b, 0x7d, 0x7d, 0x2c, 0x7b, 0x22, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x3a,
0x22, 0x64, 0x65, 0x63, 0x31, 0x34, 0x70, 0x35, 0x22, 0x2c, 0x22, 0x74, 0x79, 0x70, 0x65,
0x22, 0x3a, 0x22, 0x64, 0x65, 0x63, 0x69, 0x6d, 0x61, 0x6c, 0x28, 0x31, 0x34, 0x2c, 0x35,
0x29, 0x22, 0x2c, 0x22, 0x6e, 0x75, 0x6c, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x3a, 0x74,
0x72, 0x75, 0x65, 0x2c, 0x22, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x3a,
0x7b, 0x7d, 0x7d, 0x2c, 0x7b, 0x22, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x3a, 0x22, 0x64, 0x65,
0x63, 0x33, 0x38, 0x70, 0x31, 0x38, 0x22, 0x2c, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3a,
0x22, 0x64, 0x65, 0x63, 0x69, 0x6d, 0x61, 0x6c, 0x28, 0x33, 0x38, 0x2c, 0x31, 0x38, 0x29,
0x22, 0x2c, 0x22, 0x6e, 0x75, 0x6c, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x3a, 0x74, 0x72,
0x75, 0x65, 0x2c, 0x22, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x3a, 0x7b,
0x7d, 0x7d, 0x5d, 0x7d, 0x00, 0x18, 0x4a, 0x70, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x2d,
0x6d, 0x72, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x31, 0x2e, 0x31, 0x30,
0x2e, 0x31, 0x20, 0x28, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x20, 0x61, 0x38, 0x39, 0x64, 0x66,
0x38, 0x66, 0x39, 0x39, 0x33, 0x32, 0x62, 0x36, 0x65, 0x66, 0x36, 0x36, 0x33, 0x33, 0x64,
0x30, 0x36, 0x30, 0x36, 0x39, 0x65, 0x35, 0x30, 0x63, 0x39, 0x62, 0x37, 0x39, 0x37, 0x30,
0x62, 0x65, 0x62, 0x64, 0x31, 0x29, 0x19, 0x3c, 0x1c, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x1c,
0x00, 0x00, 0x00, 0xd3, 0x02, 0x00, 0x00, 0x50, 0x41, 0x52, 0x31};
unsigned int decimals_parquet_len = 2366;
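// The embedded footer (the JSON visible near the end of the byte dump above) describes three
// decimal columns written by Spark 3.0.1: dec7p4 = decimal(7,4), dec14p5 = decimal(14,5), and
// dec38p18 = decimal(38,18). Only the first two are value-checked below; dec38p18 is covered by
// the column-count check and by the column-selection read at the end of this block.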
cudf::io::parquet_reader_options read_opts = cudf::io::parquet_reader_options::builder(
cudf::io::source_info{reinterpret_cast<char const*>(decimals_parquet), decimals_parquet_len});
auto result = cudf::io::read_parquet(read_opts);
auto validity =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i != 50; });
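// the i != 50 predicate marks row 50 as null in both expected columns; the 0 values at index 50
// in the arrays below are merely placeholders for that null entry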
EXPECT_EQ(result.tbl->view().num_columns(), 3);
int32_t col0_data[] = {
-2354584, -190275, 8393572, 6446515, -5687920, -1843550, -6897687, -6780385, 3428529,
5842056, -4312278, -4450603, -7516141, 2974667, -4288640, 1065090, -9410428, 7891355,
1076244, -1975984, 6999466, 2666959, 9262967, 7931374, -1370640, 451074, 8799111,
3026424, -6803730, 5098297, -1414370, -9662848, 2499991, 658765, 8348874, -6177036,
-9694494, -5343299, 3558393, -8789072, 2697890, -4454707, 8299309, -6223703, -3112513,
7537487, 825776, -495683, 328299, -4529727, 0, -9999999, 9999999};
EXPECT_EQ(static_cast<std::size_t>(result.tbl->view().column(0).size()),
sizeof(col0_data) / sizeof(col0_data[0]));
cudf::test::fixed_point_column_wrapper<int32_t> col0(
std::begin(col0_data), std::end(col0_data), validity, numeric::scale_type{-4});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(0), col0);
int64_t col1_data[] = {29274040266581, -17210335917753, -58420730139037,
68073792696254, 2236456014294, 13704555677045,
-70797090469548, -52248605513407, -68976081919961,
-34277313883112, 97774730521689, 21184241014572,
-670882460254, -40862944054399, -24079852370612,
-88670167797498, -84007574359403, -71843004533519,
-55538016554201, 3491435293032, -29085437167297,
36901882672273, -98622066122568, -13974902998457,
86712597643378, -16835133643735, -94759096142232,
30708340810940, 79086853262082, 78923696440892,
-76316597208589, 37247268714759, 80303592631774,
57790350050889, 19387319851064, -33186875066145,
69701203023404, -7157433049060, -7073790423437,
92769171617714, -75127120182184, -951893180618,
64927618310150, -53875897154023, -16168039035569,
-24273449166429, -30359781249192, 35639397345991,
45844829680593, 71401416837149, 0,
-99999999999999, 99999999999999};
EXPECT_EQ(static_cast<std::size_t>(result.tbl->view().column(1).size()),
sizeof(col1_data) / sizeof(col1_data[0]));
cudf::test::fixed_point_column_wrapper<int64_t> col1(
std::begin(col1_data), std::end(col1_data), validity, numeric::scale_type{-5});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(1), col1);
cudf::io::parquet_reader_options read_strict_opts = read_opts;
read_strict_opts.set_columns({"dec7p4", "dec14p5"});
EXPECT_NO_THROW(cudf::io::read_parquet(read_strict_opts));
}
{
// dec7p3: Decimal(precision=7, scale=3) backed by FIXED_LENGTH_BYTE_ARRAY(length = 4)
// dec12p11: Decimal(precision=12, scale=11) backed by FIXED_LENGTH_BYTE_ARRAY(length = 6)
// dec20p1: Decimal(precision=20, scale=1) backed by FIXED_LENGTH_BYTE_ARRAY(length = 9)
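// The byte lengths follow from the precision: a decimal with precision p needs the smallest n
// bytes such that 10^p <= 2^(8n - 1), giving 4 bytes for p = 7, 6 bytes for p = 12, and
// 9 bytes for p = 20.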
unsigned char const fixed_len_bytes_decimal_parquet[] = {
0x50, 0x41, 0x52, 0x31, 0x15, 0x00, 0x15, 0xA8, 0x01, 0x15, 0xAE, 0x01, 0x2C, 0x15, 0x28,
0x15, 0x00, 0x15, 0x06, 0x15, 0x08, 0x1C, 0x36, 0x02, 0x28, 0x04, 0x00, 0x97, 0x45, 0x72,
0x18, 0x04, 0x00, 0x01, 0x81, 0x3B, 0x00, 0x00, 0x00, 0x54, 0xF0, 0x53, 0x04, 0x00, 0x00,
0x00, 0x26, 0x01, 0x03, 0x00, 0x00, 0x61, 0x10, 0xCF, 0x00, 0x0A, 0xA9, 0x08, 0x00, 0x77,
0x58, 0x6F, 0x00, 0x6B, 0xEE, 0xA4, 0x00, 0x92, 0xF8, 0x94, 0x00, 0x2E, 0x18, 0xD4, 0x00,
0x4F, 0x45, 0x33, 0x00, 0x97, 0x45, 0x72, 0x00, 0x0D, 0xC2, 0x75, 0x00, 0x76, 0xAA, 0xAA,
0x00, 0x30, 0x9F, 0x86, 0x00, 0x4B, 0x9D, 0xB1, 0x00, 0x4E, 0x4B, 0x3B, 0x00, 0x01, 0x81,
0x3B, 0x00, 0x22, 0xD4, 0x53, 0x00, 0x72, 0xC4, 0xAF, 0x00, 0x43, 0x9B, 0x72, 0x00, 0x1D,
0x91, 0xC3, 0x00, 0x45, 0x27, 0x48, 0x15, 0x00, 0x15, 0xF4, 0x01, 0x15, 0xFA, 0x01, 0x2C,
0x15, 0x28, 0x15, 0x00, 0x15, 0x06, 0x15, 0x08, 0x1C, 0x36, 0x02, 0x28, 0x06, 0x00, 0xD5,
0xD7, 0x31, 0x99, 0xA6, 0x18, 0x06, 0xFF, 0x17, 0x2B, 0x5A, 0xF0, 0x01, 0x00, 0x00, 0x00,
0x7A, 0xF0, 0x79, 0x04, 0x00, 0x00, 0x00, 0x24, 0x01, 0x03, 0x02, 0x00, 0x54, 0x23, 0xCF,
0x13, 0x0A, 0x00, 0x07, 0x22, 0xB1, 0x21, 0x7E, 0x00, 0x64, 0x19, 0xD6, 0xD2, 0xA5, 0x00,
0x61, 0x7F, 0xF6, 0xB9, 0xB0, 0x00, 0xD0, 0x7F, 0x9C, 0xA9, 0xE9, 0x00, 0x65, 0x58, 0xF0,
0xAD, 0xFB, 0x00, 0xBC, 0x61, 0xE2, 0x03, 0xDA, 0xFF, 0x17, 0x2B, 0x5A, 0xF0, 0x01, 0x00,
0x63, 0x4B, 0x4C, 0xFE, 0x45, 0x00, 0x7A, 0xA0, 0xD8, 0xD1, 0xC0, 0x00, 0xC0, 0x63, 0xF7,
0x9D, 0x0A, 0x00, 0x88, 0x22, 0x0F, 0x1B, 0x25, 0x00, 0x1A, 0x80, 0x56, 0x34, 0xC7, 0x00,
0x5F, 0x48, 0x61, 0x09, 0x7C, 0x00, 0x61, 0xEF, 0x92, 0x42, 0x2F, 0x00, 0xD5, 0xD7, 0x31,
0x99, 0xA6, 0xFF, 0x17, 0x2B, 0x5A, 0xF0, 0x01, 0x00, 0x71, 0xDD, 0xE2, 0x22, 0x7B, 0x00,
0x54, 0xBF, 0xAE, 0xE9, 0x3C, 0x15, 0x00, 0x15, 0xD4, 0x02, 0x15, 0xDC, 0x02, 0x2C, 0x15,
0x28, 0x15, 0x00, 0x15, 0x06, 0x15, 0x08, 0x1C, 0x36, 0x04, 0x28, 0x09, 0x00, 0x7D, 0xFE,
0x02, 0xDA, 0xB2, 0x62, 0xA3, 0xFB, 0x18, 0x09, 0x00, 0x03, 0x9C, 0xCD, 0x5A, 0xAC, 0xBB,
0xF1, 0xE3, 0x00, 0x00, 0x00, 0xAA, 0x01, 0xF0, 0xA9, 0x04, 0x00, 0x00, 0x00, 0x07, 0xBF,
0xBF, 0x0F, 0x00, 0x7D, 0xFE, 0x02, 0xDA, 0xB2, 0x62, 0xA3, 0xFB, 0x00, 0x7D, 0x9A, 0xCB,
0xDA, 0x4B, 0x10, 0x8B, 0xAC, 0x00, 0x20, 0xBA, 0x97, 0x87, 0x2E, 0x3B, 0x4E, 0x04, 0x00,
0x15, 0xBB, 0xC2, 0xDF, 0x2D, 0x25, 0x08, 0xB6, 0x00, 0x5C, 0x67, 0x0E, 0x36, 0x30, 0xF1,
0xAC, 0xA4, 0x00, 0x44, 0xF1, 0x8E, 0xFB, 0x17, 0x5E, 0xE1, 0x96, 0x00, 0x64, 0x69, 0xF9,
0x66, 0x3F, 0x11, 0xED, 0xB9, 0x00, 0x45, 0xB5, 0xDA, 0x14, 0x9C, 0xA3, 0xFA, 0x64, 0x00,
0x26, 0x5F, 0xDE, 0xD7, 0x67, 0x95, 0xEF, 0xB1, 0x00, 0x35, 0xDB, 0x9B, 0x88, 0x46, 0xD0,
0xA1, 0x0E, 0x00, 0x45, 0xA9, 0x92, 0x8E, 0x89, 0xD1, 0xAC, 0x4C, 0x00, 0x4C, 0xF1, 0xCB,
0x27, 0x82, 0x3A, 0x7D, 0xB7, 0x00, 0x64, 0xD3, 0xD2, 0x2F, 0x9C, 0x83, 0x16, 0x75, 0x00,
0x15, 0xDF, 0xC2, 0xA9, 0x63, 0xB8, 0x33, 0x65, 0x00, 0x27, 0x40, 0x28, 0x97, 0x05, 0x8E,
0xE3, 0x46, 0x00, 0x03, 0x9C, 0xCD, 0x5A, 0xAC, 0xBB, 0xF1, 0xE3, 0x00, 0x22, 0x23, 0xF5,
0xE8, 0x9D, 0x55, 0xD4, 0x9C, 0x00, 0x25, 0xB9, 0xD8, 0x87, 0x2D, 0xF1, 0xF2, 0x17, 0x15,
0x02, 0x19, 0x4C, 0x48, 0x0C, 0x73, 0x70, 0x61, 0x72, 0x6B, 0x5F, 0x73, 0x63, 0x68, 0x65,
0x6D, 0x61, 0x15, 0x06, 0x00, 0x15, 0x0E, 0x15, 0x08, 0x15, 0x02, 0x18, 0x06, 0x64, 0x65,
0x63, 0x37, 0x70, 0x33, 0x25, 0x0A, 0x15, 0x06, 0x15, 0x0E, 0x00, 0x15, 0x0E, 0x15, 0x0C,
0x15, 0x02, 0x18, 0x08, 0x64, 0x65, 0x63, 0x31, 0x32, 0x70, 0x31, 0x31, 0x25, 0x0A, 0x15,
0x16, 0x15, 0x18, 0x00, 0x15, 0x0E, 0x15, 0x12, 0x15, 0x02, 0x18, 0x07, 0x64, 0x65, 0x63,
0x32, 0x30, 0x70, 0x31, 0x25, 0x0A, 0x15, 0x02, 0x15, 0x28, 0x00, 0x16, 0x28, 0x19, 0x1C,
0x19, 0x3C, 0x26, 0x08, 0x1C, 0x15, 0x0E, 0x19, 0x35, 0x06, 0x08, 0x00, 0x19, 0x18, 0x06,
0x64, 0x65, 0x63, 0x37, 0x70, 0x33, 0x15, 0x02, 0x16, 0x28, 0x16, 0xEE, 0x01, 0x16, 0xF4,
0x01, 0x26, 0x08, 0x3C, 0x36, 0x02, 0x28, 0x04, 0x00, 0x97, 0x45, 0x72, 0x18, 0x04, 0x00,
0x01, 0x81, 0x3B, 0x00, 0x19, 0x1C, 0x15, 0x00, 0x15, 0x00, 0x15, 0x02, 0x00, 0x00, 0x00,
0x26, 0xFC, 0x01, 0x1C, 0x15, 0x0E, 0x19, 0x35, 0x06, 0x08, 0x00, 0x19, 0x18, 0x08, 0x64,
0x65, 0x63, 0x31, 0x32, 0x70, 0x31, 0x31, 0x15, 0x02, 0x16, 0x28, 0x16, 0xC2, 0x02, 0x16,
0xC8, 0x02, 0x26, 0xFC, 0x01, 0x3C, 0x36, 0x02, 0x28, 0x06, 0x00, 0xD5, 0xD7, 0x31, 0x99,
0xA6, 0x18, 0x06, 0xFF, 0x17, 0x2B, 0x5A, 0xF0, 0x01, 0x00, 0x19, 0x1C, 0x15, 0x00, 0x15,
0x00, 0x15, 0x02, 0x00, 0x00, 0x00, 0x26, 0xC4, 0x04, 0x1C, 0x15, 0x0E, 0x19, 0x35, 0x06,
0x08, 0x00, 0x19, 0x18, 0x07, 0x64, 0x65, 0x63, 0x32, 0x30, 0x70, 0x31, 0x15, 0x02, 0x16,
0x28, 0x16, 0xAE, 0x03, 0x16, 0xB6, 0x03, 0x26, 0xC4, 0x04, 0x3C, 0x36, 0x04, 0x28, 0x09,
0x00, 0x7D, 0xFE, 0x02, 0xDA, 0xB2, 0x62, 0xA3, 0xFB, 0x18, 0x09, 0x00, 0x03, 0x9C, 0xCD,
0x5A, 0xAC, 0xBB, 0xF1, 0xE3, 0x00, 0x19, 0x1C, 0x15, 0x00, 0x15, 0x00, 0x15, 0x02, 0x00,
0x00, 0x00, 0x16, 0xDE, 0x07, 0x16, 0x28, 0x00, 0x19, 0x2C, 0x18, 0x18, 0x6F, 0x72, 0x67,
0x2E, 0x61, 0x70, 0x61, 0x63, 0x68, 0x65, 0x2E, 0x73, 0x70, 0x61, 0x72, 0x6B, 0x2E, 0x76,
0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x18, 0x05, 0x33, 0x2E, 0x30, 0x2E, 0x31, 0x00, 0x18,
0x29, 0x6F, 0x72, 0x67, 0x2E, 0x61, 0x70, 0x61, 0x63, 0x68, 0x65, 0x2E, 0x73, 0x70, 0x61,
0x72, 0x6B, 0x2E, 0x73, 0x71, 0x6C, 0x2E, 0x70, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x2E,
0x72, 0x6F, 0x77, 0x2E, 0x6D, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0xF4, 0x01,
0x7B, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x22, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74,
0x22, 0x2C, 0x22, 0x66, 0x69, 0x65, 0x6C, 0x64, 0x73, 0x22, 0x3A, 0x5B, 0x7B, 0x22, 0x6E,
0x61, 0x6D, 0x65, 0x22, 0x3A, 0x22, 0x64, 0x65, 0x63, 0x37, 0x70, 0x33, 0x22, 0x2C, 0x22,
0x74, 0x79, 0x70, 0x65, 0x22, 0x3A, 0x22, 0x64, 0x65, 0x63, 0x69, 0x6D, 0x61, 0x6C, 0x28,
0x37, 0x2C, 0x33, 0x29, 0x22, 0x2C, 0x22, 0x6E, 0x75, 0x6C, 0x6C, 0x61, 0x62, 0x6C, 0x65,
0x22, 0x3A, 0x74, 0x72, 0x75, 0x65, 0x2C, 0x22, 0x6D, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
0x61, 0x22, 0x3A, 0x7B, 0x7D, 0x7D, 0x2C, 0x7B, 0x22, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A,
0x22, 0x64, 0x65, 0x63, 0x31, 0x32, 0x70, 0x31, 0x31, 0x22, 0x2C, 0x22, 0x74, 0x79, 0x70,
0x65, 0x22, 0x3A, 0x22, 0x64, 0x65, 0x63, 0x69, 0x6D, 0x61, 0x6C, 0x28, 0x31, 0x32, 0x2C,
0x31, 0x31, 0x29, 0x22, 0x2C, 0x22, 0x6E, 0x75, 0x6C, 0x6C, 0x61, 0x62, 0x6C, 0x65, 0x22,
0x3A, 0x74, 0x72, 0x75, 0x65, 0x2C, 0x22, 0x6D, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
0x22, 0x3A, 0x7B, 0x7D, 0x7D, 0x2C, 0x7B, 0x22, 0x6E, 0x61, 0x6D, 0x65, 0x22, 0x3A, 0x22,
0x64, 0x65, 0x63, 0x32, 0x30, 0x70, 0x31, 0x22, 0x2C, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22,
0x3A, 0x22, 0x64, 0x65, 0x63, 0x69, 0x6D, 0x61, 0x6C, 0x28, 0x32, 0x30, 0x2C, 0x31, 0x29,
0x22, 0x2C, 0x22, 0x6E, 0x75, 0x6C, 0x6C, 0x61, 0x62, 0x6C, 0x65, 0x22, 0x3A, 0x74, 0x72,
0x75, 0x65, 0x2C, 0x22, 0x6D, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x3A, 0x7B,
0x7D, 0x7D, 0x5D, 0x7D, 0x00, 0x18, 0x4A, 0x70, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x2D,
0x6D, 0x72, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x20, 0x31, 0x2E, 0x31, 0x30,
0x2E, 0x31, 0x20, 0x28, 0x62, 0x75, 0x69, 0x6C, 0x64, 0x20, 0x61, 0x38, 0x39, 0x64, 0x66,
0x38, 0x66, 0x39, 0x39, 0x33, 0x32, 0x62, 0x36, 0x65, 0x66, 0x36, 0x36, 0x33, 0x33, 0x64,
0x30, 0x36, 0x30, 0x36, 0x39, 0x65, 0x35, 0x30, 0x63, 0x39, 0x62, 0x37, 0x39, 0x37, 0x30,
0x62, 0x65, 0x62, 0x64, 0x31, 0x29, 0x19, 0x3C, 0x1C, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x1C,
0x00, 0x00, 0x00, 0xC5, 0x02, 0x00, 0x00, 0x50, 0x41, 0x52, 0x31,
};
unsigned int parquet_len = 1226;
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{
reinterpret_cast<char const*>(fixed_len_bytes_decimal_parquet), parquet_len});
auto result = cudf::io::read_parquet(read_opts);
EXPECT_EQ(result.tbl->view().num_columns(), 3);
auto validity_c0 = cudf::test::iterators::nulls_at({19});
int32_t col0_data[] = {6361295, 698632, 7821423, 7073444, 9631892, 3021012, 5195059,
9913714, 901749, 7776938, 3186566, 4955569, 5131067, 98619,
2282579, 7521455, 4430706, 1937859, 4532040, 0};
EXPECT_EQ(static_cast<std::size_t>(result.tbl->view().column(0).size()),
sizeof(col0_data) / sizeof(col0_data[0]));
cudf::test::fixed_point_column_wrapper<int32_t> col0(
std::begin(col0_data), std::end(col0_data), validity_c0, numeric::scale_type{-3});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(0), col0);
auto validity_c1 = cudf::test::iterators::nulls_at({18});
int64_t col1_data[] = {361378026250,
30646804862,
429930238629,
418758703536,
895494171113,
435283865083,
809096053722,
-999999999999,
426465099333,
526684574144,
826310892810,
584686967589,
113822282951,
409236212092,
420631167535,
918438386086,
-999999999999,
489053889147,
0,
363993164092};
EXPECT_EQ(static_cast<std::size_t>(result.tbl->view().column(1).size()),
sizeof(col1_data) / sizeof(col1_data[0]));
cudf::test::fixed_point_column_wrapper<int64_t> col1(
std::begin(col1_data), std::end(col1_data), validity_c1, numeric::scale_type{-11});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(1), col1);
auto validity_c2 = cudf::test::iterators::nulls_at({6, 14});
__int128_t col2_data[] = {9078697037144433659,
9050770539577117612,
2358363961733893636,
1566059559232276662,
6658306200002735268,
4967909073046397334,
0,
7235588493887532473,
5023160741463849572,
2765173712965988273,
3880866513515749646,
5019704400576359500,
5544435986818825655,
7265381725809874549,
0,
1576192427381240677,
2828305195087094598,
260308667809395171,
2460080200895288476,
2718441925197820439};
EXPECT_EQ(static_cast<std::size_t>(result.tbl->view().column(2).size()),
sizeof(col2_data) / sizeof(col2_data[0]));
cudf::test::fixed_point_column_wrapper<__int128_t> col2(
std::begin(col2_data), std::end(col2_data), validity_c2, numeric::scale_type{-1});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(2), col2);
}
}
TEST_F(ParquetReaderTest, EmptyOutput)
{
cudf::test::fixed_width_column_wrapper<int> c0;
cudf::test::strings_column_wrapper c1;
cudf::test::fixed_point_column_wrapper<int> c2({}, numeric::scale_type{2});
cudf::test::lists_column_wrapper<float> _c3{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}};
auto c3 = cudf::empty_like(_c3);
cudf::test::fixed_width_column_wrapper<int> sc0;
cudf::test::strings_column_wrapper sc1;
cudf::test::lists_column_wrapper<int> _sc2{{1, 2}};
std::vector<std::unique_ptr<cudf::column>> struct_children;
struct_children.push_back(sc0.release());
struct_children.push_back(sc1.release());
struct_children.push_back(cudf::empty_like(_sc2));
cudf::test::structs_column_wrapper c4(std::move(struct_children));
table_view expected({c0, c1, c2, *c3, c4});
// set precision on the decimal column
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[2].set_decimal_precision(1);
auto filepath = temp_env->get_temp_filepath("EmptyOutput.parquet");
cudf::io::parquet_writer_options out_args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected);
out_args.set_metadata(std::move(expected_metadata));
cudf::io::write_parquet(out_args);
cudf::io::parquet_reader_options read_args =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(read_args);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
}
TEST_F(ParquetWriterTest, RowGroupSizeInvalid)
{
auto const unused_table = std::make_unique<table>();
std::vector<char> out_buffer;
EXPECT_THROW(cudf::io::parquet_writer_options::builder(cudf::io::sink_info(&out_buffer),
unused_table->view())
.row_group_size_rows(0),
cudf::logic_error);
EXPECT_THROW(cudf::io::parquet_writer_options::builder(cudf::io::sink_info(&out_buffer),
unused_table->view())
.max_page_size_rows(0),
cudf::logic_error);
EXPECT_THROW(cudf::io::parquet_writer_options::builder(cudf::io::sink_info(&out_buffer),
unused_table->view())
.row_group_size_bytes(3 << 8),
cudf::logic_error);
EXPECT_THROW(cudf::io::parquet_writer_options::builder(cudf::io::sink_info(&out_buffer),
unused_table->view())
.max_page_size_bytes(3 << 8),
cudf::logic_error);
EXPECT_THROW(cudf::io::parquet_writer_options::builder(cudf::io::sink_info(&out_buffer),
unused_table->view())
.max_page_size_bytes(0xFFFF'FFFFUL),
cudf::logic_error);
EXPECT_THROW(cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info(&out_buffer))
.row_group_size_rows(0),
cudf::logic_error);
EXPECT_THROW(cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info(&out_buffer))
.max_page_size_rows(0),
cudf::logic_error);
EXPECT_THROW(cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info(&out_buffer))
.row_group_size_bytes(3 << 8),
cudf::logic_error);
EXPECT_THROW(cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info(&out_buffer))
.max_page_size_bytes(3 << 8),
cudf::logic_error);
EXPECT_THROW(cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info(&out_buffer))
.max_page_size_bytes(0xFFFF'FFFFUL),
cudf::logic_error);
}
TEST_F(ParquetWriterTest, RowGroupPageSizeMatch)
{
auto const unused_table = std::make_unique<table>();
std::vector<char> out_buffer;
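// requesting page size limits larger than the row group limits should end up clamped when the
// options are built, so the getters below are expected to report matching values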
auto options = cudf::io::parquet_writer_options::builder(cudf::io::sink_info(&out_buffer),
unused_table->view())
.row_group_size_bytes(128 * 1024)
.max_page_size_bytes(512 * 1024)
.row_group_size_rows(10000)
.max_page_size_rows(20000)
.build();
EXPECT_EQ(options.get_row_group_size_bytes(), options.get_max_page_size_bytes());
EXPECT_EQ(options.get_row_group_size_rows(), options.get_max_page_size_rows());
}
TEST_F(ParquetChunkedWriterTest, RowGroupPageSizeMatch)
{
std::vector<char> out_buffer;
auto options = cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info(&out_buffer))
.row_group_size_bytes(128 * 1024)
.max_page_size_bytes(512 * 1024)
.row_group_size_rows(10000)
.max_page_size_rows(20000)
.build();
EXPECT_EQ(options.get_row_group_size_bytes(), options.get_max_page_size_bytes());
EXPECT_EQ(options.get_row_group_size_rows(), options.get_max_page_size_rows());
}
TEST_F(ParquetWriterTest, EmptyList)
{
auto L1 = cudf::make_lists_column(0,
cudf::make_empty_column(cudf::data_type(cudf::type_id::INT32)),
cudf::make_empty_column(cudf::data_type{cudf::type_id::INT64}),
0,
{});
auto L0 = cudf::make_lists_column(
3, cudf::test::fixed_width_column_wrapper<int32_t>{0, 0, 0, 0}.release(), std::move(L1), 0, {});
auto filepath = temp_env->get_temp_filepath("EmptyList.parquet");
cudf::io::write_parquet(cudf::io::parquet_writer_options_builder(cudf::io::sink_info(filepath),
cudf::table_view({*L0})));
auto result = cudf::io::read_parquet(
cudf::io::parquet_reader_options_builder(cudf::io::source_info(filepath)));
using lcw = cudf::test::lists_column_wrapper<int64_t>;
auto expected = lcw{lcw{}, lcw{}, lcw{}};
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(0), expected);
}
TEST_F(ParquetWriterTest, DeepEmptyList)
{
// Make a list column LLLi such that only the outer L level has rows and the inner LLi levels
// are all empty. This tests whether we can handle multiple nullptr offsets.
auto L2 = cudf::make_lists_column(0,
cudf::make_empty_column(cudf::data_type(cudf::type_id::INT32)),
cudf::make_empty_column(cudf::data_type{cudf::type_id::INT64}),
0,
{});
auto L1 = cudf::make_lists_column(
0, cudf::make_empty_column(cudf::data_type(cudf::type_id::INT32)), std::move(L2), 0, {});
auto L0 = cudf::make_lists_column(
3, cudf::test::fixed_width_column_wrapper<int32_t>{0, 0, 0, 0}.release(), std::move(L1), 0, {});
auto filepath = temp_env->get_temp_filepath("DeepEmptyList.parquet");
cudf::io::write_parquet(cudf::io::parquet_writer_options_builder(cudf::io::sink_info(filepath),
cudf::table_view({*L0})));
auto result = cudf::io::read_parquet(
cudf::io::parquet_reader_options_builder(cudf::io::source_info(filepath)));
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(0), *L0);
}
TEST_F(ParquetWriterTest, EmptyListWithStruct)
{
auto L2 = cudf::make_lists_column(0,
cudf::make_empty_column(cudf::data_type(cudf::type_id::INT32)),
cudf::make_empty_column(cudf::data_type{cudf::type_id::INT64}),
0,
{});
auto children = std::vector<std::unique_ptr<cudf::column>>{};
children.push_back(std::move(L2));
auto S2 = cudf::make_structs_column(0, std::move(children), 0, {});
auto L1 = cudf::make_lists_column(
0, cudf::make_empty_column(cudf::data_type(cudf::type_id::INT32)), std::move(S2), 0, {});
auto L0 = cudf::make_lists_column(
3, cudf::test::fixed_width_column_wrapper<int32_t>{0, 0, 0, 0}.release(), std::move(L1), 0, {});
auto filepath = temp_env->get_temp_filepath("EmptyListWithStruct.parquet");
cudf::io::write_parquet(cudf::io::parquet_writer_options_builder(cudf::io::sink_info(filepath),
cudf::table_view({*L0})));
auto result = cudf::io::read_parquet(
cudf::io::parquet_reader_options_builder(cudf::io::source_info(filepath)));
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->view().column(0), *L0);
}
TEST_F(ParquetWriterTest, CheckPageRows)
{
auto sequence = thrust::make_counting_iterator(0);
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
constexpr auto page_rows = 5000;
constexpr auto num_rows = 2 * page_rows;
column_wrapper<int> col(sequence, sequence + num_rows, validity);
auto expected = table_view{{col}};
auto const filepath = temp_env->get_temp_filepath("CheckPageRows.parquet");
const cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.max_page_size_rows(page_rows);
cudf::io::write_parquet(out_opts);
// check first page header and make sure it has only page_rows values
auto const source = cudf::io::datasource::create(filepath);
cudf::io::parquet::detail::FileMetaData fmd;
read_footer(source, &fmd);
ASSERT_GT(fmd.row_groups.size(), 0);
ASSERT_EQ(fmd.row_groups[0].columns.size(), 1);
auto const& first_chunk = fmd.row_groups[0].columns[0].meta_data;
ASSERT_GT(first_chunk.data_page_offset, 0);
// read the first data page header. sizeof(PageHeader) is not exact, but the thrift-encoded
// version should be smaller than the size of the struct.
auto const ph = read_page_header(
source, {first_chunk.data_page_offset, sizeof(cudf::io::parquet::detail::PageHeader), 0});
EXPECT_EQ(ph.data_page_header.num_values, page_rows);
}
TEST_F(ParquetWriterTest, CheckPageRowsAdjusted)
{
// enough for a few pages with the default 20'000 rows/page
constexpr auto rows_per_page = 20'000;
constexpr auto num_rows = 3 * rows_per_page;
const std::string s1(32, 'a');
auto col0_elements =
cudf::detail::make_counting_transform_iterator(0, [&](auto i) { return s1; });
auto col0 = cudf::test::strings_column_wrapper(col0_elements, col0_elements + num_rows);
auto const expected = table_view{{col0}};
auto const filepath = temp_env->get_temp_filepath("CheckPageRowsAdjusted.parquet");
const cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.max_page_size_rows(rows_per_page);
cudf::io::write_parquet(out_opts);
// check the first page header and make sure it has at most rows_per_page values
auto const source = cudf::io::datasource::create(filepath);
cudf::io::parquet::detail::FileMetaData fmd;
read_footer(source, &fmd);
ASSERT_GT(fmd.row_groups.size(), 0);
ASSERT_EQ(fmd.row_groups[0].columns.size(), 1);
auto const& first_chunk = fmd.row_groups[0].columns[0].meta_data;
ASSERT_GT(first_chunk.data_page_offset, 0);
// read the first data page header. sizeof(PageHeader) is not exact, but the thrift-encoded
// version should be smaller than the size of the struct.
auto const ph = read_page_header(
source, {first_chunk.data_page_offset, sizeof(cudf::io::parquet::detail::PageHeader), 0});
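// the writer may cut a page before rows_per_page is reached (for example once the page byte
// limit is hit by these 32-character strings), so only an upper bound is asserted here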
EXPECT_LE(ph.data_page_header.num_values, rows_per_page);
}
TEST_F(ParquetWriterTest, CheckPageRowsTooSmall)
{
constexpr auto rows_per_page = 1'000;
constexpr auto fragment_size = 5'000;
constexpr auto num_rows = 3 * rows_per_page;
const std::string s1(32, 'a');
auto col0_elements =
cudf::detail::make_counting_transform_iterator(0, [&](auto i) { return s1; });
auto col0 = cudf::test::strings_column_wrapper(col0_elements, col0_elements + num_rows);
auto const expected = table_view{{col0}};
auto const filepath = temp_env->get_temp_filepath("CheckPageRowsTooSmall.parquet");
const cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.max_page_fragment_size(fragment_size)
.max_page_size_rows(rows_per_page);
cudf::io::write_parquet(out_opts);
// check that file is written correctly when rows/page < fragment size
auto const source = cudf::io::datasource::create(filepath);
cudf::io::parquet::detail::FileMetaData fmd;
read_footer(source, &fmd);
ASSERT_TRUE(fmd.row_groups.size() > 0);
ASSERT_TRUE(fmd.row_groups[0].columns.size() == 1);
auto const& first_chunk = fmd.row_groups[0].columns[0].meta_data;
ASSERT_TRUE(first_chunk.data_page_offset > 0);
// read the first data page header. sizeof(PageHeader) is not exact, but the thrift-encoded
// version should be smaller than the size of the struct.
auto const ph = read_page_header(
source, {first_chunk.data_page_offset, sizeof(cudf::io::parquet::detail::PageHeader), 0});
// there should be only one page since the fragment size is larger than rows_per_page
EXPECT_EQ(ph.data_page_header.num_values, num_rows);
}
TEST_F(ParquetWriterTest, Decimal128Stats)
{
// check that decimal128 min and max statistics are written in network byte order
// val1 (defined below) is negative, so its big-endian bytes are the expected min
std::vector<uint8_t> expected_min{
0xa1, 0xb2, 0xc3, 0xd4, 0xe5, 0xf6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
std::vector<uint8_t> expected_max{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xa1, 0xb2, 0xc3, 0xd4, 0xe5, 0xf6};
__int128_t val0 = 0xa1b2'c3d4'e5f6ULL;
__int128_t val1 = val0 << 80;
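// shifting val0 into the top 6 bytes sets bit 127 (0xa1 has its high bit set), so val1 is
// negative and becomes the column minimum, while val0 stays positive and is the maximum; the
// expected stats above are simply these two values in big-endian (network) byte order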
column_wrapper<numeric::decimal128> col0{{numeric::decimal128(val0, numeric::scale_type{0}),
numeric::decimal128(val1, numeric::scale_type{0})}};
auto expected = table_view{{col0}};
auto const filepath = temp_env->get_temp_filepath("Decimal128Stats.parquet");
const cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected);
cudf::io::write_parquet(out_opts);
auto const source = cudf::io::datasource::create(filepath);
cudf::io::parquet::detail::FileMetaData fmd;
read_footer(source, &fmd);
auto const stats = get_statistics(fmd.row_groups[0].columns[0]);
EXPECT_EQ(expected_min, stats.min_value);
EXPECT_EQ(expected_max, stats.max_value);
}
// =============================================================================
// ---- test data for stats sort order tests
// need at least 3 pages, and the minimum page size is 5000 rows, so we need at least 15000
// values. use 20000 to be safe (4 pages of 5000 rows each).
static constexpr int num_ordered_rows = 20000;
static constexpr int page_size_for_ordered_tests = 5000;
namespace {
namespace testdata {
// ----- most numerics. scale by 100 so all values fit in a single byte: with num_ordered_rows =
// 20000 and a signed start of -num_ordered_rows / 2, i / 100 spans [-100, 100), which fits even
// int8_t
template <typename T>
std::enable_if_t<std::is_arithmetic_v<T> && !std::is_same_v<T, bool>,
cudf::test::fixed_width_column_wrapper<T>>
ascending()
{
int start = std::is_signed_v<T> ? -num_ordered_rows / 2 : 0;
auto elements =
cudf::detail::make_counting_transform_iterator(start, [](auto i) { return i / 100; });
return cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_ordered_rows);
}
template <typename T>
std::enable_if_t<std::is_arithmetic_v<T> && !std::is_same_v<T, bool>,
cudf::test::fixed_width_column_wrapper<T>>
descending()
{
if (std::is_signed_v<T>) {
auto elements = cudf::detail::make_counting_transform_iterator(-num_ordered_rows / 2,
[](auto i) { return -i / 100; });
return cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_ordered_rows);
} else {
auto elements = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return (num_ordered_rows - i) / 100; });
return cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_ordered_rows);
}
}
template <typename T>
std::enable_if_t<std::is_arithmetic_v<T> && !std::is_same_v<T, bool>,
cudf::test::fixed_width_column_wrapper<T>>
unordered()
{
if (std::is_signed_v<T>) {
auto elements = cudf::detail::make_counting_transform_iterator(
-num_ordered_rows / 2, [](auto i) { return (i % 2 ? i : -i) / 100; });
return cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_ordered_rows);
} else {
auto elements = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return (i % 2 ? i : num_ordered_rows - i) / 100; });
return cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_ordered_rows);
}
}
// ----- bool
template <typename T>
std::enable_if_t<std::is_same_v<T, bool>, cudf::test::fixed_width_column_wrapper<bool>> ascending()
{
auto elements = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return i >= num_ordered_rows / 2; });
return cudf::test::fixed_width_column_wrapper<bool>(elements, elements + num_ordered_rows);
}
template <typename T>
std::enable_if_t<std::is_same_v<T, bool>, cudf::test::fixed_width_column_wrapper<bool>> descending()
{
auto elements = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return i < num_ordered_rows / 2; });
return cudf::test::fixed_width_column_wrapper<bool>(elements, elements + num_ordered_rows);
}
template <typename T>
std::enable_if_t<std::is_same_v<T, bool>, cudf::test::fixed_width_column_wrapper<bool>> unordered()
{
auto elements = cudf::detail::make_counting_transform_iterator(0, [](auto i) {
switch (i / page_size_for_ordered_tests) {
case 0: return true;
case 1: return false;
case 2: return true;
default: return false;
}
});
return cudf::test::fixed_width_column_wrapper<bool>(elements, elements + num_ordered_rows);
}
// ----- fixed point types
template <typename T>
std::enable_if_t<cudf::is_fixed_point<T>(), cudf::test::fixed_width_column_wrapper<T>> ascending()
{
auto elements = cudf::detail::make_counting_transform_iterator(
-num_ordered_rows / 2, [](auto i) { return T(i, numeric::scale_type{0}); });
return cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_ordered_rows);
}
template <typename T>
std::enable_if_t<cudf::is_fixed_point<T>(), cudf::test::fixed_width_column_wrapper<T>> descending()
{
auto elements = cudf::detail::make_counting_transform_iterator(
-num_ordered_rows / 2, [](auto i) { return T(-i, numeric::scale_type{0}); });
return cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_ordered_rows);
}
template <typename T>
std::enable_if_t<cudf::is_fixed_point<T>(), cudf::test::fixed_width_column_wrapper<T>> unordered()
{
auto elements = cudf::detail::make_counting_transform_iterator(
-num_ordered_rows / 2, [](auto i) { return T(i % 2 ? i : -i, numeric::scale_type{0}); });
return cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_ordered_rows);
}
// ----- chrono types
// ----- timestamp
template <typename T>
std::enable_if_t<cudf::is_timestamp<T>(), cudf::test::fixed_width_column_wrapper<T>> ascending()
{
auto elements = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return T(typename T::duration(i)); });
return cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_ordered_rows);
}
template <typename T>
std::enable_if_t<cudf::is_timestamp<T>(), cudf::test::fixed_width_column_wrapper<T>> descending()
{
auto elements = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return T(typename T::duration(num_ordered_rows - i)); });
return cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_ordered_rows);
}
template <typename T>
std::enable_if_t<cudf::is_timestamp<T>(), cudf::test::fixed_width_column_wrapper<T>> unordered()
{
auto elements = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return T(typename T::duration(i % 2 ? i : num_ordered_rows - i)); });
return cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_ordered_rows);
}
// ----- duration
template <typename T>
std::enable_if_t<cudf::is_duration<T>(), cudf::test::fixed_width_column_wrapper<T>> ascending()
{
auto elements = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return T(i); });
return cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_ordered_rows);
}
template <typename T>
std::enable_if_t<cudf::is_duration<T>(), cudf::test::fixed_width_column_wrapper<T>> descending()
{
auto elements = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return T(num_ordered_rows - i); });
return cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_ordered_rows);
}
template <typename T>
std::enable_if_t<cudf::is_duration<T>(), cudf::test::fixed_width_column_wrapper<T>> unordered()
{
auto elements = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return T(i % 2 ? i : num_ordered_rows - i); });
return cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_ordered_rows);
}
// ----- string_view
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::string_view>, cudf::test::strings_column_wrapper>
ascending()
{
char buf[10];
auto elements = cudf::detail::make_counting_transform_iterator(0, [&buf](auto i) {
sprintf(buf, "%09d", i);
return std::string(buf);
});
return cudf::test::strings_column_wrapper(elements, elements + num_ordered_rows);
}
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::string_view>, cudf::test::strings_column_wrapper>
descending()
{
char buf[10];
auto elements = cudf::detail::make_counting_transform_iterator(0, [&buf](auto i) {
sprintf(buf, "%09d", num_ordered_rows - i);
return std::string(buf);
});
return cudf::test::strings_column_wrapper(elements, elements + num_ordered_rows);
}
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::string_view>, cudf::test::strings_column_wrapper>
unordered()
{
char buf[10];
auto elements = cudf::detail::make_counting_transform_iterator(0, [&buf](auto i) {
sprintf(buf, "%09d", (i % 2 == 0) ? i : (num_ordered_rows - i));
return std::string(buf);
});
return cudf::test::strings_column_wrapper(elements, elements + num_ordered_rows);
}
} // namespace testdata
} // anonymous namespace
TYPED_TEST(ParquetWriterComparableTypeTest, ThreeColumnSorted)
{
using T = TypeParam;
auto col0 = testdata::ascending<T>();
auto col1 = testdata::descending<T>();
auto col2 = testdata::unordered<T>();
auto const expected = table_view{{col0, col1, col2}};
auto const filepath = temp_env->get_temp_filepath("ThreeColumnSorted.parquet");
const cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.max_page_size_rows(page_size_for_ordered_tests)
.stats_level(cudf::io::statistics_freq::STATISTICS_COLUMN);
cudf::io::write_parquet(out_opts);
auto const source = cudf::io::datasource::create(filepath);
cudf::io::parquet::detail::FileMetaData fmd;
read_footer(source, &fmd);
ASSERT_GT(fmd.row_groups.size(), 0);
auto const& columns = fmd.row_groups[0].columns;
ASSERT_EQ(columns.size(), static_cast<size_t>(expected.num_columns()));
// now check that the boundary order for column 0 is ascending,
// column 1 is descending, and column 2 is unordered
cudf::io::parquet::detail::BoundaryOrder expected_orders[] = {
cudf::io::parquet::detail::BoundaryOrder::ASCENDING,
cudf::io::parquet::detail::BoundaryOrder::DESCENDING,
cudf::io::parquet::detail::BoundaryOrder::UNORDERED};
for (std::size_t i = 0; i < columns.size(); i++) {
auto const ci = read_column_index(source, columns[i]);
EXPECT_EQ(ci.boundary_order, expected_orders[i]);
}
}
// utility functions for column index tests
// compare two values. return -1 if v1 < v2,
// 0 if v1 == v2, and 1 if v1 > v2.
template <typename T>
int32_t compare(T& v1, T& v2)
{
return (v1 > v2) - (v1 < v2);
}
// compare two binary statistics blobs based on their physical
// and converted types. returns -1 if v1 < v2, 0 if v1 == v2, and
// 1 if v1 > v2.
int32_t compare_binary(std::vector<uint8_t> const& v1,
std::vector<uint8_t> const& v2,
cudf::io::parquet::detail::Type ptype,
thrust::optional<cudf::io::parquet::detail::ConvertedType> const& ctype)
{
auto ctype_val = ctype.value_or(cudf::io::parquet::detail::UNKNOWN);
switch (ptype) {
case cudf::io::parquet::detail::INT32:
switch (ctype_val) {
case cudf::io::parquet::detail::UINT_8:
case cudf::io::parquet::detail::UINT_16:
case cudf::io::parquet::detail::UINT_32:
return compare(*(reinterpret_cast<uint32_t const*>(v1.data())),
*(reinterpret_cast<uint32_t const*>(v2.data())));
default:
return compare(*(reinterpret_cast<int32_t const*>(v1.data())),
*(reinterpret_cast<int32_t const*>(v2.data())));
}
case cudf::io::parquet::detail::INT64:
if (ctype_val == cudf::io::parquet::detail::UINT_64) {
return compare(*(reinterpret_cast<uint64_t const*>(v1.data())),
*(reinterpret_cast<uint64_t const*>(v2.data())));
}
return compare(*(reinterpret_cast<int64_t const*>(v1.data())),
*(reinterpret_cast<int64_t const*>(v2.data())));
case cudf::io::parquet::detail::FLOAT:
return compare(*(reinterpret_cast<float const*>(v1.data())),
*(reinterpret_cast<float const*>(v2.data())));
case cudf::io::parquet::detail::DOUBLE:
return compare(*(reinterpret_cast<double const*>(v1.data())),
*(reinterpret_cast<double const*>(v2.data())));
case cudf::io::parquet::detail::BYTE_ARRAY: {
int32_t v1sz = v1.size();
int32_t v2sz = v2.size();
int32_t ret = memcmp(v1.data(), v2.data(), std::min(v1sz, v2sz));
if (ret != 0 or v1sz == v2sz) { return ret; }
return v1sz - v2sz;
}
default: CUDF_FAIL("Invalid type in compare_binary");
}
return 0;
}
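// For illustration (hypothetical values): two little-endian INT32 statistics blobs encoding
// 1 and 2 would compare as
//   std::vector<uint8_t> one{0x01, 0x00, 0x00, 0x00};
//   std::vector<uint8_t> two{0x02, 0x00, 0x00, 0x00};
//   compare_binary(one, two, cudf::io::parquet::detail::INT32, thrust::nullopt);  // returns -1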
TEST_P(ParquetV2Test, LargeColumnIndex)
{
// create a file large enough to be written in 2 batches (currently 1GB per batch)
// pick a fragment size that num_rows is divisible by, so we get equal-sized row groups
const std::string s1(1000, 'a');
const std::string s2(1000, 'b');
constexpr auto num_rows = 512 * 1024;
constexpr auto frag_size = num_rows / 128;
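// 2 * 512Ki rows of 1000-byte strings is roughly 1GB per column; with two columns the table is
// about 2GB, so the writer needs two ~1GB batches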
auto const is_v2 = GetParam();
auto col0_elements = cudf::detail::make_counting_transform_iterator(
0, [&](auto i) { return (i < num_rows) ? s1 : s2; });
auto col0 = cudf::test::strings_column_wrapper(col0_elements, col0_elements + 2 * num_rows);
auto const expected = table_view{{col0, col0}};
auto const filepath = temp_env->get_temp_filepath("LargeColumnIndex.parquet");
const cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.stats_level(cudf::io::statistics_freq::STATISTICS_COLUMN)
.compression(cudf::io::compression_type::NONE)
.dictionary_policy(cudf::io::dictionary_policy::NEVER)
.write_v2_headers(is_v2)
.max_page_fragment_size(frag_size)
.row_group_size_bytes(1024 * 1024 * 1024)
.row_group_size_rows(num_rows);
cudf::io::write_parquet(out_opts);
auto const source = cudf::io::datasource::create(filepath);
cudf::io::parquet::detail::FileMetaData fmd;
read_footer(source, &fmd);
for (auto const& rg : fmd.row_groups) {
for (size_t c = 0; c < rg.columns.size(); c++) {
auto const& chunk = rg.columns[c];
auto const ci = read_column_index(source, chunk);
auto const stats = get_statistics(chunk);
// check trunc(page.min) <= stats.min && trunc(page.max) >= stats.max
auto const ptype = fmd.schema[c + 1].type;
auto const ctype = fmd.schema[c + 1].converted_type;
ASSERT_TRUE(stats.min_value.has_value());
ASSERT_TRUE(stats.max_value.has_value());
EXPECT_TRUE(compare_binary(ci.min_values[0], stats.min_value.value(), ptype, ctype) <= 0);
EXPECT_TRUE(compare_binary(ci.max_values[0], stats.max_value.value(), ptype, ctype) >= 0);
}
}
}
TEST_P(ParquetV2Test, CheckColumnOffsetIndex)
{
constexpr auto num_rows = 100000;
auto const is_v2 = GetParam();
auto const expected_hdr_type = is_v2 ? cudf::io::parquet::detail::PageType::DATA_PAGE_V2
: cudf::io::parquet::detail::PageType::DATA_PAGE;
// fixed length strings
auto str1_elements = cudf::detail::make_counting_transform_iterator(0, [](auto i) {
char buf[30];
sprintf(buf, "%012d", i);
return std::string(buf);
});
auto col0 = cudf::test::strings_column_wrapper(str1_elements, str1_elements + num_rows);
auto col1_data = random_values<int8_t>(num_rows);
auto col2_data = random_values<int16_t>(num_rows);
auto col3_data = random_values<int32_t>(num_rows);
auto col4_data = random_values<uint64_t>(num_rows);
auto col5_data = random_values<float>(num_rows);
auto col6_data = random_values<double>(num_rows);
auto col1 = cudf::test::fixed_width_column_wrapper<int8_t>(col1_data.begin(), col1_data.end());
auto col2 = cudf::test::fixed_width_column_wrapper<int16_t>(col2_data.begin(), col2_data.end());
auto col3 = cudf::test::fixed_width_column_wrapper<int32_t>(col3_data.begin(), col3_data.end());
auto col4 = cudf::test::fixed_width_column_wrapper<uint64_t>(col4_data.begin(), col4_data.end());
auto col5 = cudf::test::fixed_width_column_wrapper<float>(col5_data.begin(), col5_data.end());
auto col6 = cudf::test::fixed_width_column_wrapper<double>(col6_data.begin(), col6_data.end());
// mixed length strings
auto str2_elements = cudf::detail::make_counting_transform_iterator(0, [](auto i) {
char buf[30];
sprintf(buf, "%d", i);
return std::string(buf);
});
auto col7 = cudf::test::strings_column_wrapper(str2_elements, str2_elements + num_rows);
auto const expected = table_view{{col0, col1, col2, col3, col4, col5, col6, col7}};
auto const filepath = temp_env->get_temp_filepath("CheckColumnOffsetIndex.parquet");
const cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.stats_level(cudf::io::statistics_freq::STATISTICS_COLUMN)
.write_v2_headers(is_v2)
.max_page_size_rows(20000);
cudf::io::write_parquet(out_opts);
auto const source = cudf::io::datasource::create(filepath);
cudf::io::parquet::detail::FileMetaData fmd;
read_footer(source, &fmd);
for (size_t r = 0; r < fmd.row_groups.size(); r++) {
auto const& rg = fmd.row_groups[r];
for (size_t c = 0; c < rg.columns.size(); c++) {
auto const& chunk = rg.columns[c];
// loop over offsets, read each page header, make sure it's a data page and that
// the first row index is correct
auto const oi = read_offset_index(source, chunk);
int64_t num_vals = 0;
for (size_t o = 0; o < oi.page_locations.size(); o++) {
auto const& page_loc = oi.page_locations[o];
auto const ph = read_page_header(source, page_loc);
EXPECT_EQ(ph.type, expected_hdr_type);
EXPECT_EQ(page_loc.first_row_index, num_vals);
num_vals += is_v2 ? ph.data_page_header_v2.num_rows : ph.data_page_header.num_values;
}
// loop over page stats from the column index. check that stats.min <= page.min
// and stats.max >= page.max for each page.
auto const ci = read_column_index(source, chunk);
auto const stats = get_statistics(chunk);
ASSERT_TRUE(stats.min_value.has_value());
ASSERT_TRUE(stats.max_value.has_value());
// schema indexing starts at 1 (schema[0] is the root element)
auto const ptype = fmd.schema[c + 1].type;
auto const ctype = fmd.schema[c + 1].converted_type;
for (size_t p = 0; p < ci.min_values.size(); p++) {
// null_pages should always be false
EXPECT_FALSE(ci.null_pages[p]);
// null_counts should always be 0
EXPECT_EQ(ci.null_counts[p], 0);
EXPECT_TRUE(compare_binary(stats.min_value.value(), ci.min_values[p], ptype, ctype) <= 0);
}
for (size_t p = 0; p < ci.max_values.size(); p++)
EXPECT_TRUE(compare_binary(stats.max_value.value(), ci.max_values[p], ptype, ctype) >= 0);
}
}
}
TEST_P(ParquetV2Test, CheckColumnOffsetIndexNulls)
{
constexpr auto num_rows = 100000;
auto const is_v2 = GetParam();
auto const expected_hdr_type = is_v2 ? cudf::io::parquet::detail::PageType::DATA_PAGE_V2
: cudf::io::parquet::detail::PageType::DATA_PAGE;
// fixed length strings
auto str1_elements = cudf::detail::make_counting_transform_iterator(0, [](auto i) {
char buf[30];
sprintf(buf, "%012d", i);
return std::string(buf);
});
auto col0 = cudf::test::strings_column_wrapper(str1_elements, str1_elements + num_rows);
auto col1_data = random_values<int8_t>(num_rows);
auto col2_data = random_values<int16_t>(num_rows);
auto col3_data = random_values<int32_t>(num_rows);
auto col4_data = random_values<uint64_t>(num_rows);
auto col5_data = random_values<float>(num_rows);
auto col6_data = random_values<double>(num_rows);
auto valids =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2 == 0; });
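// even-numbered rows are valid and odd-numbered rows are null, so every column built with this
// iterator ends up with num_rows / 2 nulls (verified against the chunk statistics below)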
// add null values for all but first column
auto col1 =
cudf::test::fixed_width_column_wrapper<int8_t>(col1_data.begin(), col1_data.end(), valids);
auto col2 =
cudf::test::fixed_width_column_wrapper<int16_t>(col2_data.begin(), col2_data.end(), valids);
auto col3 =
cudf::test::fixed_width_column_wrapper<int32_t>(col3_data.begin(), col3_data.end(), valids);
auto col4 =
cudf::test::fixed_width_column_wrapper<uint64_t>(col4_data.begin(), col4_data.end(), valids);
auto col5 =
cudf::test::fixed_width_column_wrapper<float>(col5_data.begin(), col5_data.end(), valids);
auto col6 =
cudf::test::fixed_width_column_wrapper<double>(col6_data.begin(), col6_data.end(), valids);
// mixed length strings
auto str2_elements = cudf::detail::make_counting_transform_iterator(0, [](auto i) {
char buf[30];
sprintf(buf, "%d", i);
return std::string(buf);
});
auto col7 = cudf::test::strings_column_wrapper(str2_elements, str2_elements + num_rows, valids);
auto expected = table_view{{col0, col1, col2, col3, col4, col5, col6, col7}};
auto const filepath = temp_env->get_temp_filepath("CheckColumnOffsetIndexNulls.parquet");
const cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.stats_level(cudf::io::statistics_freq::STATISTICS_COLUMN)
.write_v2_headers(is_v2)
.max_page_size_rows(20000);
cudf::io::write_parquet(out_opts);
auto const source = cudf::io::datasource::create(filepath);
cudf::io::parquet::detail::FileMetaData fmd;
read_footer(source, &fmd);
for (size_t r = 0; r < fmd.row_groups.size(); r++) {
auto const& rg = fmd.row_groups[r];
for (size_t c = 0; c < rg.columns.size(); c++) {
auto const& chunk = rg.columns[c];
// loop over offsets, read each page header, make sure it's a data page and that
// the first row index is correct
auto const oi = read_offset_index(source, chunk);
int64_t num_vals = 0;
for (size_t o = 0; o < oi.page_locations.size(); o++) {
auto const& page_loc = oi.page_locations[o];
auto const ph = read_page_header(source, page_loc);
EXPECT_EQ(ph.type, expected_hdr_type);
EXPECT_EQ(page_loc.first_row_index, num_vals);
num_vals += is_v2 ? ph.data_page_header_v2.num_rows : ph.data_page_header.num_values;
}
// loop over page stats from the column index. check that stats.min <= page.min
// and stats.max >= page.max for each page.
auto const ci = read_column_index(source, chunk);
auto const stats = get_statistics(chunk);
// should be half nulls, except no nulls in column 0
ASSERT_TRUE(stats.min_value.has_value());
ASSERT_TRUE(stats.max_value.has_value());
ASSERT_TRUE(stats.null_count.has_value());
EXPECT_EQ(stats.null_count.value(), c == 0 ? 0 : num_rows / 2);
// schema indexing starts at 1
auto const ptype = fmd.schema[c + 1].type;
auto const ctype = fmd.schema[c + 1].converted_type;
for (size_t p = 0; p < ci.min_values.size(); p++) {
EXPECT_FALSE(ci.null_pages[p]);
if (c > 0) { // first column has no nulls
EXPECT_GT(ci.null_counts[p], 0);
} else {
EXPECT_EQ(ci.null_counts[p], 0);
}
EXPECT_TRUE(compare_binary(stats.min_value.value(), ci.min_values[p], ptype, ctype) <= 0);
}
for (size_t p = 0; p < ci.max_values.size(); p++) {
EXPECT_TRUE(compare_binary(stats.max_value.value(), ci.max_values[p], ptype, ctype) >= 0);
}
}
}
}
TEST_P(ParquetV2Test, CheckColumnOffsetIndexNullColumn)
{
constexpr auto num_rows = 100000;
auto const is_v2 = GetParam();
auto const expected_hdr_type = is_v2 ? cudf::io::parquet::detail::PageType::DATA_PAGE_V2
: cudf::io::parquet::detail::PageType::DATA_PAGE;
// fixed length strings
auto str1_elements = cudf::detail::make_counting_transform_iterator(0, [](auto i) {
char buf[30];
sprintf(buf, "%012d", i);
return std::string(buf);
});
auto col0 = cudf::test::strings_column_wrapper(str1_elements, str1_elements + num_rows);
auto col1_data = random_values<int32_t>(num_rows);
auto col2_data = random_values<int32_t>(num_rows);
// col1 is all nulls
auto valids = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return false; });
auto col1 =
cudf::test::fixed_width_column_wrapper<int32_t>(col1_data.begin(), col1_data.end(), valids);
auto col2 = cudf::test::fixed_width_column_wrapper<int32_t>(col2_data.begin(), col2_data.end());
// mixed length strings
auto str2_elements = cudf::detail::make_counting_transform_iterator(0, [](auto i) {
char buf[30];
sprintf(buf, "%d", i);
return std::string(buf);
});
auto col3 = cudf::test::strings_column_wrapper(str2_elements, str2_elements + num_rows);
auto expected = table_view{{col0, col1, col2, col3}};
auto const filepath = temp_env->get_temp_filepath("CheckColumnOffsetIndexNullColumn.parquet");
const cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.stats_level(cudf::io::statistics_freq::STATISTICS_COLUMN)
.write_v2_headers(is_v2)
.max_page_size_rows(20000);
cudf::io::write_parquet(out_opts);
auto const source = cudf::io::datasource::create(filepath);
cudf::io::parquet::detail::FileMetaData fmd;
read_footer(source, &fmd);
for (size_t r = 0; r < fmd.row_groups.size(); r++) {
auto const& rg = fmd.row_groups[r];
for (size_t c = 0; c < rg.columns.size(); c++) {
auto const& chunk = rg.columns[c];
// loop over offsets, read each page header, make sure it's a data page and that
// the first row index is correct
auto const oi = read_offset_index(source, chunk);
int64_t num_vals = 0;
for (size_t o = 0; o < oi.page_locations.size(); o++) {
auto const& page_loc = oi.page_locations[o];
auto const ph = read_page_header(source, page_loc);
EXPECT_EQ(ph.type, expected_hdr_type);
EXPECT_EQ(page_loc.first_row_index, num_vals);
num_vals += is_v2 ? ph.data_page_header_v2.num_rows : ph.data_page_header.num_values;
}
// loop over page stats from the column index. check that stats.min <= page.min
// and stats.max >= page.max for each non-empty page.
auto const ci = read_column_index(source, chunk);
auto const stats = get_statistics(chunk);
// there should be no nulls except column 1 which is all nulls
if (c != 1) {
ASSERT_TRUE(stats.min_value.has_value());
ASSERT_TRUE(stats.max_value.has_value());
}
ASSERT_TRUE(stats.null_count.has_value());
EXPECT_EQ(stats.null_count.value(), c == 1 ? num_rows : 0);
// schema indexing starts at 1
auto const ptype = fmd.schema[c + 1].type;
auto const ctype = fmd.schema[c + 1].converted_type;
for (size_t p = 0; p < ci.min_values.size(); p++) {
// check that null_pages is true for column 1
if (c == 1) {
EXPECT_TRUE(ci.null_pages[p]);
EXPECT_GT(ci.null_counts[p], 0);
}
if (not ci.null_pages[p]) {
EXPECT_EQ(ci.null_counts[p], 0);
EXPECT_TRUE(compare_binary(stats.min_value.value(), ci.min_values[p], ptype, ctype) <= 0);
}
}
for (size_t p = 0; p < ci.max_values.size(); p++) {
if (not ci.null_pages[p]) {
EXPECT_TRUE(compare_binary(stats.max_value.value(), ci.max_values[p], ptype, ctype) >= 0);
}
}
}
}
}
TEST_P(ParquetV2Test, CheckColumnOffsetIndexStruct)
{
auto const is_v2 = GetParam();
auto const expected_hdr_type = is_v2 ? cudf::io::parquet::detail::PageType::DATA_PAGE_V2
: cudf::io::parquet::detail::PageType::DATA_PAGE;
auto c0 = testdata::ascending<uint32_t>();
auto sc0 = testdata::ascending<cudf::string_view>();
auto sc1 = testdata::descending<int32_t>();
auto sc2 = testdata::unordered<int64_t>();
std::vector<std::unique_ptr<cudf::column>> struct_children;
struct_children.push_back(sc0.release());
struct_children.push_back(sc1.release());
struct_children.push_back(sc2.release());
cudf::test::structs_column_wrapper c1(std::move(struct_children));
auto listgen = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return i % 2 == 0 ? i / 2 : num_ordered_rows - (i / 2); });
auto list =
cudf::test::fixed_width_column_wrapper<int32_t>(listgen, listgen + 2 * num_ordered_rows);
auto offgen = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i * 2; });
auto offsets =
cudf::test::fixed_width_column_wrapper<int32_t>(offgen, offgen + num_ordered_rows + 1);
auto c2 = cudf::make_lists_column(num_ordered_rows, offsets.release(), list.release(), 0, {});
table_view expected({c0, c1, *c2});
auto const filepath = temp_env->get_temp_filepath("CheckColumnOffsetIndexStruct.parquet");
const cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.stats_level(cudf::io::statistics_freq::STATISTICS_COLUMN)
.write_v2_headers(is_v2)
.max_page_size_rows(page_size_for_ordered_tests);
cudf::io::write_parquet(out_opts);
auto const source = cudf::io::datasource::create(filepath);
cudf::io::parquet::detail::FileMetaData fmd;
read_footer(source, &fmd);
// hard coded schema indices.
// TODO find a way to do this without magic
size_t const colidxs[] = {1, 3, 4, 5, 8};
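// presumably: root=0, c0=1, struct c1=2 with leaf children at 3-5, list c2=6 with the
// repeated 'list' group at 7 and its 'element' leaf at 8; only leaf nodes have column chunks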
for (size_t r = 0; r < fmd.row_groups.size(); r++) {
auto const& rg = fmd.row_groups[r];
for (size_t c = 0; c < rg.columns.size(); c++) {
size_t colidx = colidxs[c];
auto const& chunk = rg.columns[c];
// loop over offsets, read each page header, make sure it's a data page and that
// the first row index is correct
auto const oi = read_offset_index(source, chunk);
int64_t num_vals = 0;
for (size_t o = 0; o < oi.page_locations.size(); o++) {
auto const& page_loc = oi.page_locations[o];
auto const ph = read_page_header(source, page_loc);
EXPECT_EQ(ph.type, expected_hdr_type);
EXPECT_EQ(page_loc.first_row_index, num_vals);
// last column has 2 values per row
num_vals += is_v2 ? ph.data_page_header_v2.num_rows
: ph.data_page_header.num_values / (c == rg.columns.size() - 1 ? 2 : 1);
}
// loop over page stats from the column index. check that stats.min <= page.min
// and stats.max >= page.max for each page.
auto const ci = read_column_index(source, chunk);
auto const stats = get_statistics(chunk);
ASSERT_TRUE(stats.min_value.has_value());
ASSERT_TRUE(stats.max_value.has_value());
auto const ptype = fmd.schema[colidx].type;
auto const ctype = fmd.schema[colidx].converted_type;
for (size_t p = 0; p < ci.min_values.size(); p++) {
EXPECT_TRUE(compare_binary(stats.min_value.value(), ci.min_values[p], ptype, ctype) <= 0);
}
for (size_t p = 0; p < ci.max_values.size(); p++) {
EXPECT_TRUE(compare_binary(stats.max_value.value(), ci.max_values[p], ptype, ctype) >= 0);
}
}
}
}
TEST_P(ParquetV2Test, CheckColumnOffsetIndexStructNulls)
{
auto const is_v2 = GetParam();
auto const expected_hdr_type = is_v2 ? cudf::io::parquet::detail::PageType::DATA_PAGE_V2
: cudf::io::parquet::detail::PageType::DATA_PAGE;
auto validity2 =
cudf::detail::make_counting_transform_iterator(0, [](cudf::size_type i) { return i % 2; });
auto validity3 = cudf::detail::make_counting_transform_iterator(
0, [](cudf::size_type i) { return (i % 3) != 0; });
auto validity4 = cudf::detail::make_counting_transform_iterator(
0, [](cudf::size_type i) { return (i % 4) != 0; });
auto validity5 = cudf::detail::make_counting_transform_iterator(
0, [](cudf::size_type i) { return (i % 5) != 0; });
auto c0 = testdata::ascending<uint32_t>();
auto col1_data = random_values<int32_t>(num_ordered_rows);
auto col2_data = random_values<int32_t>(num_ordered_rows);
auto col3_data = random_values<int32_t>(num_ordered_rows);
// col1 is null in every other row, col2 in every 3rd, col3 in every 4th (and the struct itself in every 5th)
auto col1 =
cudf::test::fixed_width_column_wrapper<int32_t>(col1_data.begin(), col1_data.end(), validity2);
auto col2 =
cudf::test::fixed_width_column_wrapper<int32_t>(col2_data.begin(), col2_data.end(), validity3);
auto col3 =
cudf::test::fixed_width_column_wrapper<int32_t>(col3_data.begin(), col3_data.end(), validity4);
std::vector<std::unique_ptr<cudf::column>> struct_children;
struct_children.push_back(col1.release());
struct_children.push_back(col2.release());
struct_children.push_back(col3.release());
auto struct_validity = std::vector<bool>(validity5, validity5 + num_ordered_rows);
cudf::test::structs_column_wrapper c1(std::move(struct_children), struct_validity);
table_view expected({c0, c1});
auto const filepath = temp_env->get_temp_filepath("CheckColumnOffsetIndexStructNulls.parquet");
const cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.stats_level(cudf::io::statistics_freq::STATISTICS_COLUMN)
.write_v2_headers(is_v2)
.max_page_size_rows(page_size_for_ordered_tests);
cudf::io::write_parquet(out_opts);
auto const source = cudf::io::datasource::create(filepath);
cudf::io::parquet::detail::FileMetaData fmd;
read_footer(source, &fmd);
for (size_t r = 0; r < fmd.row_groups.size(); r++) {
auto const& rg = fmd.row_groups[r];
for (size_t c = 0; c < rg.columns.size(); c++) {
auto const& chunk = rg.columns[c];
// loop over offsets, read each page header, make sure it's a data page and that
// the first row index is correct
auto const oi = read_offset_index(source, chunk);
auto const ci = read_column_index(source, chunk);
int64_t num_vals = 0;
for (size_t o = 0; o < oi.page_locations.size(); o++) {
auto const& page_loc = oi.page_locations[o];
auto const ph = read_page_header(source, page_loc);
EXPECT_EQ(ph.type, expected_hdr_type);
EXPECT_EQ(page_loc.first_row_index, num_vals);
num_vals += is_v2 ? ph.data_page_header_v2.num_rows : ph.data_page_header.num_values;
// check that null counts match
if (is_v2) { EXPECT_EQ(ci.null_counts[o], ph.data_page_header_v2.num_nulls); }
}
}
}
}
TEST_P(ParquetV2Test, CheckColumnIndexListWithNulls)
{
auto const is_v2 = GetParam();
auto const expected_hdr_type = is_v2 ? cudf::io::parquet::detail::PageType::DATA_PAGE_V2
: cudf::io::parquet::detail::PageType::DATA_PAGE;
using cudf::test::iterators::null_at;
using cudf::test::iterators::nulls_at;
using lcw = cudf::test::lists_column_wrapper<int32_t>;
// 4 nulls
// [NULL, 2, NULL]
// []
// [4, 5]
// NULL
lcw col0{{{{1, 2, 3}, nulls_at({0, 2})}, {}, {4, 5}, {}}, null_at(3)};
// 4 nulls
// [[1, 2, 3], [], [4, 5], [], [0, 6, 0]]
// [[7, 8]]
// []
// [[]]
lcw col1{{{1, 2, 3}, {}, {4, 5}, {}, {0, 6, 0}}, {{7, 8}}, lcw{}, lcw{lcw{}}};
// 4 nulls
// [[1, 2, 3], [], [4, 5], NULL, [0, 6, 0]]
// [[7, 8]]
// []
// [[]]
lcw col2{{{{1, 2, 3}, {}, {4, 5}, {}, {0, 6, 0}}, null_at(3)}, {{7, 8}}, lcw{}, lcw{lcw{}}};
// 6 nulls
// [[1, 2, 3], [], [4, 5], NULL, [NULL, 6, NULL]]
// [[7, 8]]
// []
// [[]]
using dlcw = cudf::test::lists_column_wrapper<double>;
dlcw col3{{{{1., 2., 3.}, {}, {4., 5.}, {}, {{0., 6., 0.}, nulls_at({0, 2})}}, null_at(3)},
{{7., 8.}},
dlcw{},
dlcw{dlcw{}}};
// 4 nulls
// [[1, 2, 3], [], [4, 5], NULL, [0, 6, 0]]
// [[7, 8]]
// []
// NULL
using ui16lcw = cudf::test::lists_column_wrapper<uint16_t>;
cudf::test::lists_column_wrapper<uint16_t> col4{
{{{{1, 2, 3}, {}, {4, 5}, {}, {0, 6, 0}}, null_at(3)}, {{7, 8}}, ui16lcw{}, ui16lcw{ui16lcw{}}},
null_at(3)};
// 6 nulls
// [[1, 2, 3], [], [4, 5], NULL, [NULL, 6, NULL]]
// [[7, 8]]
// []
// NULL
lcw col5{{{{{1, 2, 3}, {}, {4, 5}, {}, {{0, 6, 0}, nulls_at({0, 2})}}, null_at(3)},
{{7, 8}},
lcw{},
lcw{lcw{}}},
null_at(3)};
// 4 nulls
using strlcw = cudf::test::lists_column_wrapper<cudf::string_view>;
cudf::test::lists_column_wrapper<cudf::string_view> col6{
{{"Monday", "Monday", "Friday"}, {}, {"Monday", "Friday"}, {}, {"Sunday", "Funday"}},
{{"bee", "sting"}},
strlcw{},
strlcw{strlcw{}}};
// 11 nulls
// [[[NULL,2,NULL,4]], [[NULL,6,NULL], [8,9]]]
// [NULL, [[13],[14,15,16]], NULL]
// [NULL, [], NULL, [[]]]
// NULL
lcw col7{{
{{{{1, 2, 3, 4}, nulls_at({0, 2})}}, {{{5, 6, 7}, nulls_at({0, 2})}, {8, 9}}},
{{{{10, 11}, {12}}, {{13}, {14, 15, 16}}, {{17, 18}}}, nulls_at({0, 2})},
{{lcw{lcw{}}, lcw{}, lcw{}, lcw{lcw{}}}, nulls_at({0, 2})},
lcw{lcw{lcw{}}},
},
null_at(3)};
table_view expected({col0, col1, col2, col3, col4, col5, col6, col7});
int64_t const expected_null_counts[] = {4, 4, 4, 6, 4, 6, 4, 11};
auto const filepath = temp_env->get_temp_filepath("ColumnIndexListWithNulls.parquet");
auto out_opts = cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.stats_level(cudf::io::statistics_freq::STATISTICS_COLUMN)
.write_v2_headers(is_v2)
.compression(cudf::io::compression_type::NONE);
cudf::io::write_parquet(out_opts);
auto const source = cudf::io::datasource::create(filepath);
cudf::io::parquet::detail::FileMetaData fmd;
read_footer(source, &fmd);
for (size_t r = 0; r < fmd.row_groups.size(); r++) {
auto const& rg = fmd.row_groups[r];
for (size_t c = 0; c < rg.columns.size(); c++) {
auto const& chunk = rg.columns[c];
// loop over offsets, read each page header, make sure it's a data page and that
// the first row index is correct
auto const oi = read_offset_index(source, chunk);
for (size_t o = 0; o < oi.page_locations.size(); o++) {
auto const& page_loc = oi.page_locations[o];
auto const ph = read_page_header(source, page_loc);
EXPECT_EQ(ph.type, expected_hdr_type);
// check null counts in V2 header
if (is_v2) { EXPECT_EQ(ph.data_page_header_v2.num_nulls, expected_null_counts[c]); }
}
// check null counts in column chunk stats and page indexes
auto const ci = read_column_index(source, chunk);
auto const stats = get_statistics(chunk);
EXPECT_EQ(stats.null_count, expected_null_counts[c]);
// should only be one page
EXPECT_FALSE(ci.null_pages[0]);
EXPECT_EQ(ci.null_counts[0], expected_null_counts[c]);
}
}
}
TEST_F(ParquetWriterTest, CheckColumnIndexTruncation)
{
char const* coldata[] = {
// in-range 7 bit. should truncate to "yyyyyyyz"
"yyyyyyyyy",
// max 7 bit. should truncate to "\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x80", since it's
// considered binary, not UTF-8. If UTF-8 it should not truncate.
"\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f",
// max binary. this should not truncate
"\xff\xff\xff\xff\xff\xff\xff\xff\xff",
// in-range 2-byte UTF8 (U+00E9). should truncate to "éééê"
"ééééé",
// max 2-byte UTF8 (U+07FF). should not truncate
"߿߿߿߿߿",
// in-range 3-byte UTF8 (U+0800). should truncate to "ࠀࠁ"
"ࠀࠀࠀ",
// max 3-byte UTF8 (U+FFFF). should not truncate
"\xef\xbf\xbf\xef\xbf\xbf\xef\xbf\xbf",
// in-range 4-byte UTF8 (U+10000). should truncate to "𐀀𐀁"
"𐀀𐀀𐀀",
// max unicode (U+10FFFF). should truncate to \xf4\x8f\xbf\xbf\xf4\x90\x80\x80,
// which is no longer valid unicode, but is still ok UTF-8???
"\xf4\x8f\xbf\xbf\xf4\x8f\xbf\xbf\xf4\x8f\xbf\xbf",
// max 4-byte UTF8 (U+1FFFFF). should not truncate
"\xf7\xbf\xbf\xbf\xf7\xbf\xbf\xbf\xf7\xbf\xbf\xbf"};
// NOTE: the UTF-8 min is initialized to 0xf7bfbfbf, so binary values larger
// than that will not become the minimum value (when written as UTF-8).
char const* truncated_min[] = {"yyyyyyyy",
"\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f",
"\xf7\xbf\xbf\xbf",
"éééé",
"߿߿߿߿",
"ࠀࠀ",
"\xef\xbf\xbf\xef\xbf\xbf",
"𐀀𐀀",
"\xf4\x8f\xbf\xbf\xf4\x8f\xbf\xbf",
"\xf7\xbf\xbf\xbf"};
char const* truncated_max[] = {"yyyyyyyz",
"\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x80",
"\xff\xff\xff\xff\xff\xff\xff\xff\xff",
"éééê",
"߿߿߿߿߿",
"ࠀࠁ",
"\xef\xbf\xbf\xef\xbf\xbf\xef\xbf\xbf",
"𐀀𐀁",
"\xf4\x8f\xbf\xbf\xf4\x90\x80\x80",
"\xf7\xbf\xbf\xbf\xf7\xbf\xbf\xbf\xf7\xbf\xbf\xbf"};
auto cols = [&]() {
using string_wrapper = column_wrapper<cudf::string_view>;
std::vector<std::unique_ptr<column>> cols;
for (auto const str : coldata) {
cols.push_back(string_wrapper{str}.release());
}
return cols;
}();
auto expected = std::make_unique<table>(std::move(cols));
auto const filepath = temp_env->get_temp_filepath("CheckColumnIndexTruncation.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected->view())
.stats_level(cudf::io::statistics_freq::STATISTICS_COLUMN)
.column_index_truncate_length(8);
cudf::io::write_parquet(out_opts);
auto const source = cudf::io::datasource::create(filepath);
cudf::io::parquet::detail::FileMetaData fmd;
read_footer(source, &fmd);
for (size_t r = 0; r < fmd.row_groups.size(); r++) {
auto const& rg = fmd.row_groups[r];
for (size_t c = 0; c < rg.columns.size(); c++) {
auto const& chunk = rg.columns[c];
auto const ci = read_column_index(source, chunk);
auto const stats = get_statistics(chunk);
ASSERT_TRUE(stats.min_value.has_value());
ASSERT_TRUE(stats.max_value.has_value());
// check trunc(page.min) <= stats.min && trunc(page.max) >= stats.max
auto const ptype = fmd.schema[c + 1].type;
auto const ctype = fmd.schema[c + 1].converted_type;
EXPECT_TRUE(compare_binary(ci.min_values[0], stats.min_value.value(), ptype, ctype) <= 0);
EXPECT_TRUE(compare_binary(ci.max_values[0], stats.max_value.value(), ptype, ctype) >= 0);
// check that truncated values == expected
EXPECT_EQ(memcmp(ci.min_values[0].data(), truncated_min[c], ci.min_values[0].size()), 0);
EXPECT_EQ(memcmp(ci.max_values[0].data(), truncated_max[c], ci.max_values[0].size()), 0);
}
}
}
TEST_F(ParquetWriterTest, BinaryColumnIndexTruncation)
{
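// expected results of truncating the binary statistics to 8 bytes: a min can simply be cut
// to its 8-byte prefix, but a max must be rounded up, so a trailing run of 0xff forces the
// preceding byte to be incremented (col1 collapses to a single 0xff) and an all-0xff value
// cannot be truncated at all (col2 keeps all 9 bytes)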
std::vector<uint8_t> truncated_min[] = {{0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe},
{0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
std::vector<uint8_t> truncated_max[] = {{0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff},
{0xff},
{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
cudf::test::lists_column_wrapper<uint8_t> col0{
{0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe}};
cudf::test::lists_column_wrapper<uint8_t> col1{
{0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
cudf::test::lists_column_wrapper<uint8_t> col2{
{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
auto expected = table_view{{col0, col1, col2}};
cudf::io::table_input_metadata output_metadata(expected);
output_metadata.column_metadata[0].set_name("col_binary0").set_output_as_binary(true);
output_metadata.column_metadata[1].set_name("col_binary1").set_output_as_binary(true);
output_metadata.column_metadata[2].set_name("col_binary2").set_output_as_binary(true);
auto const filepath = temp_env->get_temp_filepath("BinaryColumnIndexTruncation.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.metadata(std::move(output_metadata))
.stats_level(cudf::io::statistics_freq::STATISTICS_COLUMN)
.column_index_truncate_length(8);
cudf::io::write_parquet(out_opts);
auto const source = cudf::io::datasource::create(filepath);
cudf::io::parquet::detail::FileMetaData fmd;
read_footer(source, &fmd);
for (size_t r = 0; r < fmd.row_groups.size(); r++) {
auto const& rg = fmd.row_groups[r];
for (size_t c = 0; c < rg.columns.size(); c++) {
auto const& chunk = rg.columns[c];
auto const ci = read_column_index(source, chunk);
auto const stats = get_statistics(chunk);
// check trunc(page.min) <= stats.min && trunc(page.max) >= stats.max
auto const ptype = fmd.schema[c + 1].type;
auto const ctype = fmd.schema[c + 1].converted_type;
ASSERT_TRUE(stats.min_value.has_value());
ASSERT_TRUE(stats.max_value.has_value());
EXPECT_TRUE(compare_binary(ci.min_values[0], stats.min_value.value(), ptype, ctype) <= 0);
EXPECT_TRUE(compare_binary(ci.max_values[0], stats.max_value.value(), ptype, ctype) >= 0);
// check that truncated values == expected
EXPECT_EQ(ci.min_values[0], truncated_min[c]);
EXPECT_EQ(ci.max_values[0], truncated_max[c]);
}
}
}
TEST_F(ParquetReaderTest, EmptyColumnsParam)
{
srand(31337);
auto const expected = create_random_fixed_table<int>(2, 4, false);
std::vector<char> out_buffer;
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{&out_buffer}, *expected);
cudf::io::write_parquet(args);
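// requesting an empty column list should produce a table with no columns and no rows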
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(
cudf::io::source_info{out_buffer.data(), out_buffer.size()})
.columns({});
auto const result = cudf::io::read_parquet(read_opts);
EXPECT_EQ(result.tbl->num_columns(), 0);
EXPECT_EQ(result.tbl->num_rows(), 0);
}
TEST_F(ParquetReaderTest, BinaryAsStrings)
{
std::vector<char const*> strings{
"Monday", "Wednesday", "Friday", "Monday", "Friday", "Friday", "Friday", "Funday"};
auto const num_rows = strings.size();
auto seq_col0 = random_values<int>(num_rows);
auto seq_col2 = random_values<float>(num_rows);
auto seq_col3 = random_values<uint8_t>(num_rows);
auto validity = cudf::test::iterators::no_nulls();
column_wrapper<int> int_col{seq_col0.begin(), seq_col0.end(), validity};
column_wrapper<cudf::string_view> string_col{strings.begin(), strings.end()};
column_wrapper<float> float_col{seq_col2.begin(), seq_col2.end(), validity};
cudf::test::lists_column_wrapper<uint8_t> list_int_col{
{'M', 'o', 'n', 'd', 'a', 'y'},
{'W', 'e', 'd', 'n', 'e', 's', 'd', 'a', 'y'},
{'F', 'r', 'i', 'd', 'a', 'y'},
{'M', 'o', 'n', 'd', 'a', 'y'},
{'F', 'r', 'i', 'd', 'a', 'y'},
{'F', 'r', 'i', 'd', 'a', 'y'},
{'F', 'r', 'i', 'd', 'a', 'y'},
{'F', 'u', 'n', 'd', 'a', 'y'}};
auto output = table_view{{int_col, string_col, float_col, string_col, list_int_col}};
cudf::io::table_input_metadata output_metadata(output);
output_metadata.column_metadata[0].set_name("col_other");
output_metadata.column_metadata[1].set_name("col_string");
output_metadata.column_metadata[2].set_name("col_float");
output_metadata.column_metadata[3].set_name("col_string2").set_output_as_binary(true);
output_metadata.column_metadata[4].set_name("col_binary").set_output_as_binary(true);
auto filepath = temp_env->get_temp_filepath("BinaryReadStrings.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, output)
.metadata(std::move(output_metadata));
cudf::io::write_parquet(out_opts);
auto expected_string = table_view{{int_col, string_col, float_col, string_col, string_col}};
auto expected_mixed = table_view{{int_col, string_col, float_col, list_int_col, list_int_col}};
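// three reads follow: with an explicitly empty column schema and with default options the
// binary columns are converted back to strings; with convert_binary_to_strings(false) they
// are returned as lists of bytes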
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath})
.set_column_schema({{}, {}, {}, {}, {}});
auto result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected_string, result.tbl->view());
cudf::io::parquet_reader_options default_in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
result = cudf::io::read_parquet(default_in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected_string, result.tbl->view());
std::vector<cudf::io::reader_column_schema> md{
{},
{},
{},
cudf::io::reader_column_schema().set_convert_binary_to_strings(false),
cudf::io::reader_column_schema().set_convert_binary_to_strings(false)};
cudf::io::parquet_reader_options mixed_in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath})
.set_column_schema(md);
result = cudf::io::read_parquet(mixed_in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected_mixed, result.tbl->view());
}
TEST_F(ParquetReaderTest, NestedByteArray)
{
constexpr auto num_rows = 8;
auto seq_col0 = random_values<int>(num_rows);
auto seq_col2 = random_values<float>(num_rows);
auto seq_col3 = random_values<uint8_t>(num_rows);
auto const validity = cudf::test::iterators::no_nulls();
column_wrapper<int> int_col{seq_col0.begin(), seq_col0.end(), validity};
column_wrapper<float> float_col{seq_col2.begin(), seq_col2.end(), validity};
cudf::test::lists_column_wrapper<uint8_t> list_list_int_col{
{{'M', 'o', 'n', 'd', 'a', 'y'},
{'W', 'e', 'd', 'n', 'e', 's', 'd', 'a', 'y'},
{'F', 'r', 'i', 'd', 'a', 'y'}},
{{'M', 'o', 'n', 'd', 'a', 'y'}, {'F', 'r', 'i', 'd', 'a', 'y'}},
{{'M', 'o', 'n', 'd', 'a', 'y'},
{'W', 'e', 'd', 'n', 'e', 's', 'd', 'a', 'y'},
{'F', 'r', 'i', 'd', 'a', 'y'}},
{{'F', 'r', 'i', 'd', 'a', 'y'},
{'F', 'r', 'i', 'd', 'a', 'y'},
{'F', 'u', 'n', 'd', 'a', 'y'}},
{{'M', 'o', 'n', 'd', 'a', 'y'},
{'W', 'e', 'd', 'n', 'e', 's', 'd', 'a', 'y'},
{'F', 'r', 'i', 'd', 'a', 'y'}},
{{'F', 'r', 'i', 'd', 'a', 'y'},
{'F', 'r', 'i', 'd', 'a', 'y'},
{'F', 'u', 'n', 'd', 'a', 'y'}},
{{'M', 'o', 'n', 'd', 'a', 'y'},
{'W', 'e', 'd', 'n', 'e', 's', 'd', 'a', 'y'},
{'F', 'r', 'i', 'd', 'a', 'y'}},
{{'M', 'o', 'n', 'd', 'a', 'y'}, {'F', 'r', 'i', 'd', 'a', 'y'}}};
auto const expected = table_view{{int_col, float_col, list_list_int_col}};
cudf::io::table_input_metadata output_metadata(expected);
output_metadata.column_metadata[0].set_name("col_other");
output_metadata.column_metadata[1].set_name("col_float");
output_metadata.column_metadata[2].set_name("col_binary").child(1).set_output_as_binary(true);
auto filepath = temp_env->get_temp_filepath("NestedByteArray.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.metadata(std::move(output_metadata));
cudf::io::write_parquet(out_opts);
auto source = cudf::io::datasource::create(filepath);
cudf::io::parquet::detail::FileMetaData fmd;
read_footer(source, &fmd);
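// the leaf of the nested list (schema node 5) should have been written as BYTE_ARRAY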
EXPECT_EQ(fmd.schema[5].type, cudf::io::parquet::detail::Type::BYTE_ARRAY);
std::vector<cudf::io::reader_column_schema> md{
{},
{},
cudf::io::reader_column_schema().add_child(
cudf::io::reader_column_schema().set_convert_binary_to_strings(false))};
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath})
.set_column_schema(md);
auto result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
}
TEST_F(ParquetWriterTest, ByteArrayStats)
{
// check that byte array min and max statistics are written as expected. If a byte array is
// written as a string, max utf8 is 0xf7bfbfbf and so the minimum value will be set to that value
// instead of a potential minimum higher than that.
std::vector<uint8_t> expected_col0_min{0xf0};
std::vector<uint8_t> expected_col0_max{0xf0, 0xf5, 0xf5};
std::vector<uint8_t> expected_col1_min{0xfe, 0xfe, 0xfe};
std::vector<uint8_t> expected_col1_max{0xfe, 0xfe, 0xfe};
cudf::test::lists_column_wrapper<uint8_t> list_int_col0{
{0xf0}, {0xf0, 0xf5, 0xf3}, {0xf0, 0xf5, 0xf5}};
cudf::test::lists_column_wrapper<uint8_t> list_int_col1{
{0xfe, 0xfe, 0xfe}, {0xfe, 0xfe, 0xfe}, {0xfe, 0xfe, 0xfe}};
auto expected = table_view{{list_int_col0, list_int_col1}};
cudf::io::table_input_metadata output_metadata(expected);
output_metadata.column_metadata[0].set_name("col_binary0").set_output_as_binary(true);
output_metadata.column_metadata[1].set_name("col_binary1").set_output_as_binary(true);
auto filepath = temp_env->get_temp_filepath("ByteArrayStats.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.metadata(std::move(output_metadata));
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath})
.set_column_schema({{}, {}});
auto result = cudf::io::read_parquet(in_opts);
auto source = cudf::io::datasource::create(filepath);
cudf::io::parquet::detail::FileMetaData fmd;
read_footer(source, &fmd);
EXPECT_EQ(fmd.schema[1].type, cudf::io::parquet::detail::Type::BYTE_ARRAY);
EXPECT_EQ(fmd.schema[2].type, cudf::io::parquet::detail::Type::BYTE_ARRAY);
auto const stats0 = get_statistics(fmd.row_groups[0].columns[0]);
auto const stats1 = get_statistics(fmd.row_groups[0].columns[1]);
EXPECT_EQ(expected_col0_min, stats0.min_value);
EXPECT_EQ(expected_col0_max, stats0.max_value);
EXPECT_EQ(expected_col1_min, stats1.min_value);
EXPECT_EQ(expected_col1_max, stats1.max_value);
}
TEST_F(ParquetReaderTest, StructByteArray)
{
constexpr auto num_rows = 100;
auto seq_col0 = random_values<uint8_t>(num_rows);
auto const validity = cudf::test::iterators::no_nulls();
column_wrapper<uint8_t> int_col{seq_col0.begin(), seq_col0.end(), validity};
cudf::test::lists_column_wrapper<uint8_t> list_of_int{{seq_col0.begin(), seq_col0.begin() + 50},
{seq_col0.begin() + 50, seq_col0.end()}};
auto struct_col = cudf::test::structs_column_wrapper{{list_of_int}, validity};
auto const expected = table_view{{struct_col}};
EXPECT_EQ(1, expected.num_columns());
cudf::io::table_input_metadata output_metadata(expected);
output_metadata.column_metadata[0]
.set_name("struct_binary")
.child(0)
.set_name("a")
.set_output_as_binary(true);
auto filepath = temp_env->get_temp_filepath("StructByteArray.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.metadata(std::move(output_metadata));
cudf::io::write_parquet(out_opts);
std::vector<cudf::io::reader_column_schema> md{cudf::io::reader_column_schema().add_child(
cudf::io::reader_column_schema().set_convert_binary_to_strings(false))};
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath})
.set_column_schema(md);
auto result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
}
TEST_F(ParquetReaderTest, NestingOptimizationTest)
{
// test nesting levels > cudf::io::parquet::detail::max_cacheable_nesting_decode_info deep.
constexpr cudf::size_type num_nesting_levels = 16;
static_assert(num_nesting_levels > cudf::io::parquet::detail::max_cacheable_nesting_decode_info);
constexpr cudf::size_type rows_per_level = 2;
constexpr cudf::size_type num_values = (1 << num_nesting_levels) * rows_per_level;
auto value_iter = thrust::make_counting_iterator(0);
auto validity =
cudf::detail::make_counting_transform_iterator(0, [](cudf::size_type i) { return i % 2; });
cudf::test::fixed_width_column_wrapper<int> values(value_iter, value_iter + num_values, validity);
// ~256k values with num_nesting_levels = 16
int total_values_produced = num_values;
auto prev_col = values.release();
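// wrap the values column in a lists column num_nesting_levels times; each level groups
// rows_per_level (2) children per row, so the row count halves at every step up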
for (int idx = 0; idx < num_nesting_levels; idx++) {
auto const depth = num_nesting_levels - idx;
auto const num_rows = (1 << (num_nesting_levels - idx));
auto offsets_iter = cudf::detail::make_counting_transform_iterator(
0, [depth, rows_per_level](cudf::size_type i) { return i * rows_per_level; });
total_values_produced += (num_rows + 1);
cudf::test::fixed_width_column_wrapper<cudf::size_type> offsets(offsets_iter,
offsets_iter + num_rows + 1);
auto c = cudf::make_lists_column(num_rows, offsets.release(), std::move(prev_col), 0, {});
prev_col = std::move(c);
}
auto const& expect = prev_col;
auto filepath = temp_env->get_temp_filepath("NestingDecodeCache.parquet");
cudf::io::parquet_writer_options opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, table_view{{*expect}});
cudf::io::write_parquet(opts);
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(*expect, result.tbl->get_column(0));
}
TEST_F(ParquetWriterTest, SingleValueDictionaryTest)
{
constexpr unsigned int expected_bits = 1;
constexpr unsigned int nrows = 1'000'000U;
auto elements = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return "a unique string value suffixed with 1"; });
auto const col0 = cudf::test::strings_column_wrapper(elements, elements + nrows);
auto const expected = table_view{{col0}};
auto const filepath = temp_env->get_temp_filepath("SingleValueDictionaryTest.parquet");
// set row group size so that there will be only one row group
// no compression so we can easily read page data
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.compression(cudf::io::compression_type::NONE)
.stats_level(cudf::io::statistics_freq::STATISTICS_COLUMN)
.row_group_size_rows(nrows);
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options default_in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto const result = cudf::io::read_parquet(default_in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
// make sure dictionary was used
auto const source = cudf::io::datasource::create(filepath);
cudf::io::parquet::detail::FileMetaData fmd;
read_footer(source, &fmd);
auto used_dict = [&fmd]() {
for (auto enc : fmd.row_groups[0].columns[0].meta_data.encodings) {
if (enc == cudf::io::parquet::detail::Encoding::PLAIN_DICTIONARY or
enc == cudf::io::parquet::detail::Encoding::RLE_DICTIONARY) {
return true;
}
}
return false;
};
EXPECT_TRUE(used_dict());
// and check that the correct number of bits was used
auto const oi = read_offset_index(source, fmd.row_groups[0].columns[0]);
auto const nbits = read_dict_bits(source, oi.page_locations[0]);
EXPECT_EQ(nbits, expected_bits);
}
TEST_F(ParquetWriterTest, DictionaryNeverTest)
{
constexpr unsigned int nrows = 1'000U;
// only one value, so would normally use dictionary
auto elements = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return "a unique string value suffixed with 1"; });
auto const col0 = cudf::test::strings_column_wrapper(elements, elements + nrows);
auto const expected = table_view{{col0}};
auto const filepath = temp_env->get_temp_filepath("DictionaryNeverTest.parquet");
// no compression so we can easily read page data
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.compression(cudf::io::compression_type::NONE)
.dictionary_policy(cudf::io::dictionary_policy::NEVER);
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options default_in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto const result = cudf::io::read_parquet(default_in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
// make sure dictionary was not used
auto const source = cudf::io::datasource::create(filepath);
cudf::io::parquet::detail::FileMetaData fmd;
read_footer(source, &fmd);
auto used_dict = [&fmd]() {
for (auto enc : fmd.row_groups[0].columns[0].meta_data.encodings) {
if (enc == cudf::io::parquet::detail::Encoding::PLAIN_DICTIONARY or
enc == cudf::io::parquet::detail::Encoding::RLE_DICTIONARY) {
return true;
}
}
return false;
};
EXPECT_FALSE(used_dict());
}
TEST_F(ParquetWriterTest, DictionaryAdaptiveTest)
{
constexpr unsigned int nrows = 65'536U;
// cardinality is chosen to result in a dictionary > 1MB in size
constexpr unsigned int cardinality = 32'768U;
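// (32'768 distinct strings of roughly 40 bytes each is about 1.3 MB of dictionary data)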
// single value will have a small dictionary
auto elements0 = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return "a unique string value suffixed with 1"; });
auto const col0 = cudf::test::strings_column_wrapper(elements0, elements0 + nrows);
// high cardinality will have a large dictionary
auto elements1 = cudf::detail::make_counting_transform_iterator(0, [cardinality](auto i) {
return "a unique string value suffixed with " + std::to_string(i % cardinality);
});
auto const col1 = cudf::test::strings_column_wrapper(elements1, elements1 + nrows);
auto const expected = table_view{{col0, col1}};
auto const filepath = temp_env->get_temp_filepath("DictionaryAdaptiveTest.parquet");
// ZSTD compression; with ADAPTIVE policy the oversized dictionary for col1 should be skipped
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.compression(cudf::io::compression_type::ZSTD)
.dictionary_policy(cudf::io::dictionary_policy::ADAPTIVE);
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options default_in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto const result = cudf::io::read_parquet(default_in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
// make sure dictionary was used as expected. col0 should use one,
// col1 should not.
auto const source = cudf::io::datasource::create(filepath);
cudf::io::parquet::detail::FileMetaData fmd;
read_footer(source, &fmd);
auto used_dict = [&fmd](int col) {
for (auto enc : fmd.row_groups[0].columns[col].meta_data.encodings) {
if (enc == cudf::io::parquet::detail::Encoding::PLAIN_DICTIONARY or
enc == cudf::io::parquet::detail::Encoding::RLE_DICTIONARY) {
return true;
}
}
return false;
};
EXPECT_TRUE(used_dict(0));
EXPECT_FALSE(used_dict(1));
}
TEST_F(ParquetWriterTest, DictionaryAlwaysTest)
{
constexpr unsigned int nrows = 65'536U;
// cardinality is chosen to result in a dictionary > 1MB in size
constexpr unsigned int cardinality = 32'768U;
// single value will have a small dictionary
auto elements0 = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return "a unique string value suffixed with 1"; });
auto const col0 = cudf::test::strings_column_wrapper(elements0, elements0 + nrows);
// high cardinality will have a large dictionary
auto elements1 = cudf::detail::make_counting_transform_iterator(0, [cardinality](auto i) {
return "a unique string value suffixed with " + std::to_string(i % cardinality);
});
auto const col1 = cudf::test::strings_column_wrapper(elements1, elements1 + nrows);
auto const expected = table_view{{col0, col1}};
auto const filepath = temp_env->get_temp_filepath("DictionaryAlwaysTest.parquet");
// ZSTD compression; with ALWAYS policy the dictionary is used even when it exceeds the size limit
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.compression(cudf::io::compression_type::ZSTD)
.dictionary_policy(cudf::io::dictionary_policy::ALWAYS);
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options default_in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto const result = cudf::io::read_parquet(default_in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
// make sure dictionary was used for both columns
auto const source = cudf::io::datasource::create(filepath);
cudf::io::parquet::detail::FileMetaData fmd;
read_footer(source, &fmd);
auto used_dict = [&fmd](int col) {
for (auto enc : fmd.row_groups[0].columns[col].meta_data.encodings) {
if (enc == cudf::io::parquet::detail::Encoding::PLAIN_DICTIONARY or
enc == cudf::io::parquet::detail::Encoding::RLE_DICTIONARY) {
return true;
}
}
return false;
};
EXPECT_TRUE(used_dict(0));
EXPECT_TRUE(used_dict(1));
}
TEST_F(ParquetWriterTest, DictionaryPageSizeEst)
{
// one page
constexpr unsigned int nrows = 20'000U;
// this test is creating a pattern of repeating then non-repeating values to trigger
// a "worst-case" for page size estimation in the presence of a dictionary. have confirmed
// that this fails for values over 16 in the final term of `max_RLE_page_size()`.
// The output of the iterator will be 'CCCCCRRRRRCCCCCRRRRR...` where 'C' is a changing
// value, and 'R' repeats. The encoder will turn this into a literal run of 8 values
// (`CCCCCRRR`) followed by a repeated run of 2 (`RR`). This pattern then repeats, getting
// as close as possible to a condition of repeated 8 value literal runs.
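// e.g. for i = 0..9 this produces: u0 u1 u2 u3 u4 R R R R R, where each u<i> is a unique
// string and R is the repeated "non-unique string"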
auto elements0 = cudf::detail::make_counting_transform_iterator(0, [](auto i) {
if ((i / 5) % 2 == 1) {
return std::string("non-unique string");
} else {
return "a unique string value suffixed with " + std::to_string(i);
}
});
auto const col0 = cudf::test::strings_column_wrapper(elements0, elements0 + nrows);
auto const expected = table_view{{col0}};
auto const filepath = temp_env->get_temp_filepath("DictionaryPageSizeEst.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.compression(cudf::io::compression_type::ZSTD)
.dictionary_policy(cudf::io::dictionary_policy::ALWAYS);
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options default_in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto const result = cudf::io::read_parquet(default_in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
}
TEST_P(ParquetSizedTest, DictionaryTest)
{
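// cardinality is just over 2^(GetParam() - 1), so dictionary indices should need exactly
// GetParam() bits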
unsigned int const cardinality = (1 << (GetParam() - 1)) + 1;
unsigned int const nrows = std::max(cardinality * 3 / 2, 3'000'000U);
auto elements = cudf::detail::make_counting_transform_iterator(0, [cardinality](auto i) {
return "a unique string value suffixed with " + std::to_string(i % cardinality);
});
auto const col0 = cudf::test::strings_column_wrapper(elements, elements + nrows);
auto const expected = table_view{{col0}};
auto const filepath = temp_env->get_temp_filepath("DictionaryTest.parquet");
// set row group size so that there will be only one row group
// no compression so we can easily read page data
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.compression(cudf::io::compression_type::NONE)
.stats_level(cudf::io::statistics_freq::STATISTICS_COLUMN)
.dictionary_policy(cudf::io::dictionary_policy::ALWAYS)
.row_group_size_rows(nrows)
.row_group_size_bytes(512 * 1024 * 1024);
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options default_in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto const result = cudf::io::read_parquet(default_in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
// make sure dictionary was used
auto const source = cudf::io::datasource::create(filepath);
cudf::io::parquet::detail::FileMetaData fmd;
read_footer(source, &fmd);
auto used_dict = [&fmd]() {
for (auto enc : fmd.row_groups[0].columns[0].meta_data.encodings) {
if (enc == cudf::io::parquet::detail::Encoding::PLAIN_DICTIONARY or
enc == cudf::io::parquet::detail::Encoding::RLE_DICTIONARY) {
return true;
}
}
return false;
};
EXPECT_TRUE(used_dict());
// and check that the correct number of bits was used
auto const oi = read_offset_index(source, fmd.row_groups[0].columns[0]);
auto const nbits = read_dict_bits(source, oi.page_locations[0]);
EXPECT_EQ(nbits, GetParam());
}
TYPED_TEST(ParquetReaderSourceTest, BufferSourceTypes)
{
using T = TypeParam;
srand(31337);
auto table = create_random_fixed_table<int>(5, 5, true);
std::vector<char> out_buffer;
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info(&out_buffer), *table);
cudf::io::write_parquet(out_opts);
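// read the buffer back first through a mutable host_span, then through a const host_span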
{
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info(
cudf::host_span<T>(reinterpret_cast<T*>(out_buffer.data()), out_buffer.size())));
auto const result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*table, result.tbl->view());
}
{
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info(cudf::host_span<T const>(
reinterpret_cast<T const*>(out_buffer.data()), out_buffer.size())));
auto const result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*table, result.tbl->view());
}
}
TYPED_TEST(ParquetReaderSourceTest, BufferSourceArrayTypes)
{
using T = TypeParam;
srand(31337);
auto table = create_random_fixed_table<int>(5, 5, true);
std::vector<char> out_buffer;
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info(&out_buffer), *table);
cudf::io::write_parquet(out_opts);
auto full_table = cudf::concatenate(std::vector<table_view>({*table, *table}));
{
auto spans = std::vector<cudf::host_span<T>>{
cudf::host_span<T>(reinterpret_cast<T*>(out_buffer.data()), out_buffer.size()),
cudf::host_span<T>(reinterpret_cast<T*>(out_buffer.data()), out_buffer.size())};
cudf::io::parquet_reader_options in_opts = cudf::io::parquet_reader_options::builder(
cudf::io::source_info(cudf::host_span<cudf::host_span<T>>(spans.data(), spans.size())));
auto const result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*full_table, result.tbl->view());
}
{
auto spans = std::vector<cudf::host_span<T const>>{
cudf::host_span<T const>(reinterpret_cast<T const*>(out_buffer.data()), out_buffer.size()),
cudf::host_span<T const>(reinterpret_cast<T const*>(out_buffer.data()), out_buffer.size())};
cudf::io::parquet_reader_options in_opts = cudf::io::parquet_reader_options::builder(
cudf::io::source_info(cudf::host_span<cudf::host_span<T const>>(spans.data(), spans.size())));
auto const result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*full_table, result.tbl->view());
}
}
TEST_F(ParquetWriterTest, UserNullability)
{
auto weight_col = cudf::test::fixed_width_column_wrapper<float>{{57.5, 51.1, 15.3}};
auto ages_col = cudf::test::fixed_width_column_wrapper<int32_t>{{30, 27, 5}};
auto struct_col = cudf::test::structs_column_wrapper{weight_col, ages_col};
auto expected = table_view({struct_col});
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_nullability(false);
expected_metadata.column_metadata[0].child(0).set_nullability(true);
auto filepath = temp_env->get_temp_filepath("SingleWriteNullable.parquet");
cudf::io::parquet_writer_options write_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.metadata(std::move(expected_metadata));
cudf::io::write_parquet(write_opts);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(read_opts);
EXPECT_FALSE(result.tbl->view().column(0).nullable());
EXPECT_TRUE(result.tbl->view().column(0).child(0).nullable());
EXPECT_FALSE(result.tbl->view().column(0).child(1).nullable());
}
TEST_F(ParquetWriterTest, UserNullabilityInvalid)
{
auto valids =
cudf::detail::make_counting_transform_iterator(0, [&](int index) { return index % 2; });
auto col = cudf::test::fixed_width_column_wrapper<double>{{57.5, 51.1, 15.3}, valids};
auto expected = table_view({col});
auto filepath = temp_env->get_temp_filepath("SingleWriteNullableInvalid.parquet");
cudf::io::parquet_writer_options write_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected);
// Should work without the nullability option
EXPECT_NO_THROW(cudf::io::write_parquet(write_opts));
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_nullability(false);
write_opts.set_metadata(std::move(expected_metadata));
// Can't write a column with nulls as not nullable
EXPECT_THROW(cudf::io::write_parquet(write_opts), cudf::logic_error);
}
TEST_F(ParquetReaderTest, SingleLevelLists)
{
unsigned char list_bytes[] = {
0x50, 0x41, 0x52, 0x31, 0x15, 0x00, 0x15, 0x28, 0x15, 0x28, 0x15, 0xa7, 0xce, 0x91, 0x8c, 0x06,
0x1c, 0x15, 0x04, 0x15, 0x00, 0x15, 0x06, 0x15, 0x06, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03,
0x02, 0x02, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x15,
0x02, 0x19, 0x3c, 0x48, 0x0c, 0x73, 0x70, 0x61, 0x72, 0x6b, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d,
0x61, 0x15, 0x02, 0x00, 0x35, 0x00, 0x18, 0x01, 0x66, 0x15, 0x02, 0x15, 0x06, 0x4c, 0x3c, 0x00,
0x00, 0x00, 0x15, 0x02, 0x25, 0x04, 0x18, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x00, 0x16, 0x02,
0x19, 0x1c, 0x19, 0x1c, 0x26, 0x08, 0x1c, 0x15, 0x02, 0x19, 0x25, 0x00, 0x06, 0x19, 0x28, 0x01,
0x66, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x15, 0x00, 0x16, 0x04, 0x16, 0x56, 0x16, 0x56, 0x26,
0x08, 0x3c, 0x18, 0x04, 0x01, 0x00, 0x00, 0x00, 0x18, 0x04, 0x00, 0x00, 0x00, 0x00, 0x16, 0x00,
0x28, 0x04, 0x01, 0x00, 0x00, 0x00, 0x18, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x1c, 0x15,
0x00, 0x15, 0x00, 0x15, 0x02, 0x00, 0x00, 0x00, 0x16, 0x56, 0x16, 0x02, 0x26, 0x08, 0x16, 0x56,
0x14, 0x00, 0x00, 0x28, 0x13, 0x52, 0x41, 0x50, 0x49, 0x44, 0x53, 0x20, 0x53, 0x70, 0x61, 0x72,
0x6b, 0x20, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x19, 0x1c, 0x1c, 0x00, 0x00, 0x00, 0x9f, 0x00,
0x00, 0x00, 0x50, 0x41, 0x52, 0x31};
// list_bytes above is a complete Parquet file that uses single-level list encoding; read it directly from memory
cudf::io::parquet_reader_options read_opts = cudf::io::parquet_reader_options::builder(
cudf::io::source_info{reinterpret_cast<char const*>(list_bytes), sizeof(list_bytes)});
auto table = cudf::io::read_parquet(read_opts);
auto const c0 = table.tbl->get_column(0);
EXPECT_TRUE(c0.type().id() == cudf::type_id::LIST);
auto const lc = cudf::lists_column_view(c0);
auto const child = lc.child();
EXPECT_TRUE(child.type().id() == cudf::type_id::INT32);
}
TEST_F(ParquetReaderTest, ChunkedSingleLevelLists)
{
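// the same inline Parquet file as in SingleLevelLists above (a complete file, with PAR1 magic at both ends)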
unsigned char list_bytes[] = {
0x50, 0x41, 0x52, 0x31, 0x15, 0x00, 0x15, 0x28, 0x15, 0x28, 0x15, 0xa7, 0xce, 0x91, 0x8c, 0x06,
0x1c, 0x15, 0x04, 0x15, 0x00, 0x15, 0x06, 0x15, 0x06, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03,
0x02, 0x02, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x15,
0x02, 0x19, 0x3c, 0x48, 0x0c, 0x73, 0x70, 0x61, 0x72, 0x6b, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d,
0x61, 0x15, 0x02, 0x00, 0x35, 0x00, 0x18, 0x01, 0x66, 0x15, 0x02, 0x15, 0x06, 0x4c, 0x3c, 0x00,
0x00, 0x00, 0x15, 0x02, 0x25, 0x04, 0x18, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x00, 0x16, 0x02,
0x19, 0x1c, 0x19, 0x1c, 0x26, 0x08, 0x1c, 0x15, 0x02, 0x19, 0x25, 0x00, 0x06, 0x19, 0x28, 0x01,
0x66, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x15, 0x00, 0x16, 0x04, 0x16, 0x56, 0x16, 0x56, 0x26,
0x08, 0x3c, 0x18, 0x04, 0x01, 0x00, 0x00, 0x00, 0x18, 0x04, 0x00, 0x00, 0x00, 0x00, 0x16, 0x00,
0x28, 0x04, 0x01, 0x00, 0x00, 0x00, 0x18, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x1c, 0x15,
0x00, 0x15, 0x00, 0x15, 0x02, 0x00, 0x00, 0x00, 0x16, 0x56, 0x16, 0x02, 0x26, 0x08, 0x16, 0x56,
0x14, 0x00, 0x00, 0x28, 0x13, 0x52, 0x41, 0x50, 0x49, 0x44, 0x53, 0x20, 0x53, 0x70, 0x61, 0x72,
0x6b, 0x20, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x19, 0x1c, 0x1c, 0x00, 0x00, 0x00, 0x9f, 0x00,
0x00, 0x00, 0x50, 0x41, 0x52, 0x31};
auto reader = cudf::io::chunked_parquet_reader(
1L << 31,
cudf::io::parquet_reader_options::builder(
cudf::io::source_info{reinterpret_cast<char const*>(list_bytes), sizeof(list_bytes)}));
// guard against the reader never reporting completion: bail out after 10 chunks
int iterations = 0;
while (reader.has_next() && iterations < 10) {
auto chunk = reader.read_chunk();
++iterations;
}
EXPECT_TRUE(iterations < 10);
}
TEST_F(ParquetWriterTest, CompStats)
{
auto table = create_random_fixed_table<int>(1, 100000, true);
auto const stats = std::make_shared<cudf::io::writer_compression_statistics>();
std::vector<char> unused_buffer;
cudf::io::parquet_writer_options opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{&unused_buffer}, table->view())
.compression_statistics(stats);
cudf::io::write_parquet(opts);
EXPECT_NE(stats->num_compressed_bytes(), 0);
EXPECT_EQ(stats->num_failed_bytes(), 0);
EXPECT_EQ(stats->num_skipped_bytes(), 0);
EXPECT_FALSE(std::isnan(stats->compression_ratio()));
}
TEST_F(ParquetChunkedWriterTest, CompStats)
{
auto table = create_random_fixed_table<int>(1, 100000, true);
auto const stats = std::make_shared<cudf::io::writer_compression_statistics>();
std::vector<char> unused_buffer;
cudf::io::chunked_parquet_writer_options opts =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{&unused_buffer})
.compression_statistics(stats);
cudf::io::parquet_chunked_writer(opts).write(*table);
EXPECT_NE(stats->num_compressed_bytes(), 0);
EXPECT_EQ(stats->num_failed_bytes(), 0);
EXPECT_EQ(stats->num_skipped_bytes(), 0);
EXPECT_FALSE(std::isnan(stats->compression_ratio()));
auto const single_table_comp_stats = *stats;
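// writing the same table again should double the compressed byte count while leaving the
// compression ratio unchanged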
cudf::io::parquet_chunked_writer(opts).write(*table);
EXPECT_EQ(stats->compression_ratio(), single_table_comp_stats.compression_ratio());
EXPECT_EQ(stats->num_compressed_bytes(), 2 * single_table_comp_stats.num_compressed_bytes());
EXPECT_EQ(stats->num_failed_bytes(), 0);
EXPECT_EQ(stats->num_skipped_bytes(), 0);
}
void expect_compression_stats_empty(std::shared_ptr<cudf::io::writer_compression_statistics> stats)
{
EXPECT_EQ(stats->num_compressed_bytes(), 0);
EXPECT_EQ(stats->num_failed_bytes(), 0);
EXPECT_EQ(stats->num_skipped_bytes(), 0);
EXPECT_TRUE(std::isnan(stats->compression_ratio()));
}
TEST_F(ParquetWriterTest, CompStatsEmptyTable)
{
auto table_no_rows = create_random_fixed_table<int>(20, 0, false);
auto const stats = std::make_shared<cudf::io::writer_compression_statistics>();
std::vector<char> unused_buffer;
cudf::io::parquet_writer_options opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{&unused_buffer},
table_no_rows->view())
.compression_statistics(stats);
cudf::io::write_parquet(opts);
expect_compression_stats_empty(stats);
}
TEST_F(ParquetChunkedWriterTest, CompStatsEmptyTable)
{
auto table_no_rows = create_random_fixed_table<int>(20, 0, false);
auto const stats = std::make_shared<cudf::io::writer_compression_statistics>();
std::vector<char> unused_buffer;
cudf::io::chunked_parquet_writer_options opts =
cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{&unused_buffer})
.compression_statistics(stats);
cudf::io::parquet_chunked_writer(opts).write(*table_no_rows);
expect_compression_stats_empty(stats);
}
TEST_F(ParquetReaderTest, ReorderedReadMultipleFiles)
{
constexpr auto num_rows = 50'000;
constexpr auto cardinality = 20'000;
// table 1
auto str1 = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return "cat " + std::to_string(i % cardinality); });
auto cols1 = cudf::test::strings_column_wrapper(str1, str1 + num_rows);
auto int1 =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % cardinality; });
auto coli1 = cudf::test::fixed_width_column_wrapper<int>(int1, int1 + num_rows);
auto const expected1 = table_view{{cols1, coli1}};
auto const swapped1 = table_view{{coli1, cols1}};
auto const filepath1 = temp_env->get_temp_filepath("LargeReorderedRead1.parquet");
auto out_opts1 =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath1}, expected1)
.compression(cudf::io::compression_type::NONE);
cudf::io::write_parquet(out_opts1);
// table 2
auto str2 = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return "dog " + std::to_string(i % cardinality); });
auto cols2 = cudf::test::strings_column_wrapper(str2, str2 + num_rows);
auto int2 = cudf::detail::make_counting_transform_iterator(
0, [](auto i) { return (i % cardinality) + cardinality; });
auto coli2 = cudf::test::fixed_width_column_wrapper<int>(int2, int2 + num_rows);
auto const expected2 = table_view{{cols2, coli2}};
auto const swapped2 = table_view{{coli2, cols2}};
auto const filepath2 = temp_env->get_temp_filepath("LargeReorderedRead2.parquet");
auto out_opts2 =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath2}, expected2)
.compression(cudf::io::compression_type::NONE);
cudf::io::write_parquet(out_opts2);
// read in both files swapping the columns
auto read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{{filepath1, filepath2}})
.columns({"_col1", "_col0"});
auto result = cudf::io::read_parquet(read_opts);
auto sliced = cudf::slice(result.tbl->view(), {0, num_rows, num_rows, 2 * num_rows});
CUDF_TEST_EXPECT_TABLES_EQUAL(sliced[0], swapped1);
CUDF_TEST_EXPECT_TABLES_EQUAL(sliced[1], swapped2);
}
// Test fixture for metadata tests
struct ParquetMetadataReaderTest : public cudf::test::BaseFixture {
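// render the schema as an indented tree, one space of indentation per nesting level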
std::string print(cudf::io::parquet_column_schema schema, int depth = 0)
{
std::string child_str;
for (auto const& child : schema.children()) {
child_str += print(child, depth + 1);
}
return std::string(depth, ' ') + schema.name() + "\n" + child_str;
}
};
TEST_F(ParquetMetadataReaderTest, TestBasic)
{
auto const num_rows = 1200;
auto ints = random_values<int>(num_rows);
auto floats = random_values<float>(num_rows);
column_wrapper<int> int_col(ints.begin(), ints.end());
column_wrapper<float> float_col(floats.begin(), floats.end());
table_view expected({int_col, float_col});
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("int_col");
expected_metadata.column_metadata[1].set_name("float_col");
auto filepath = temp_env->get_temp_filepath("MetadataTest.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.metadata(std::move(expected_metadata));
cudf::io::write_parquet(out_opts);
auto meta = read_parquet_metadata(cudf::io::source_info{filepath});
EXPECT_EQ(meta.num_rows(), num_rows);
std::string expected_schema = R"(schema
int_col
float_col
)";
EXPECT_EQ(expected_schema, print(meta.schema().root()));
EXPECT_EQ(meta.schema().root().name(), "schema");
EXPECT_EQ(meta.schema().root().type_kind(), cudf::io::parquet::TypeKind::UNDEFINED_TYPE);
ASSERT_EQ(meta.schema().root().num_children(), 2);
EXPECT_EQ(meta.schema().root().child(0).name(), "int_col");
EXPECT_EQ(meta.schema().root().child(1).name(), "float_col");
}
TEST_F(ParquetMetadataReaderTest, TestNested)
{
auto const num_rows = 1200;
auto const lists_per_row = 4;
auto const num_child_rows = num_rows * lists_per_row;
auto keys = random_values<int>(num_child_rows);
auto vals = random_values<float>(num_child_rows);
column_wrapper<int> keys_col(keys.begin(), keys.end());
column_wrapper<float> vals_col(vals.begin(), vals.end());
auto s_col = cudf::test::structs_column_wrapper({keys_col, vals_col}).release();
std::vector<int> row_offsets(num_rows + 1);
for (int idx = 0; idx < num_rows + 1; ++idx) {
row_offsets[idx] = idx * lists_per_row;
}
column_wrapper<int> offsets(row_offsets.begin(), row_offsets.end());
auto list_col =
cudf::make_lists_column(num_rows, offsets.release(), std::move(s_col), 0, rmm::device_buffer{});
table_view expected({*list_col, *list_col});
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("maps");
expected_metadata.column_metadata[0].set_list_column_as_map();
expected_metadata.column_metadata[1].set_name("lists");
expected_metadata.column_metadata[1].child(1).child(0).set_name("int_field");
expected_metadata.column_metadata[1].child(1).child(1).set_name("float_field");
  auto filepath = temp_env->get_temp_filepath("MetadataTest.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.metadata(std::move(expected_metadata));
cudf::io::write_parquet(out_opts);
auto meta = read_parquet_metadata(cudf::io::source_info{filepath});
EXPECT_EQ(meta.num_rows(), num_rows);
std::string expected_schema = R"(schema
maps
key_value
key
value
lists
list
element
int_field
float_field
)";
EXPECT_EQ(expected_schema, print(meta.schema().root()));
EXPECT_EQ(meta.schema().root().name(), "schema");
EXPECT_EQ(meta.schema().root().type_kind(),
cudf::io::parquet::TypeKind::UNDEFINED_TYPE); // struct
ASSERT_EQ(meta.schema().root().num_children(), 2);
auto const& out_map_col = meta.schema().root().child(0);
EXPECT_EQ(out_map_col.name(), "maps");
EXPECT_EQ(out_map_col.type_kind(), cudf::io::parquet::TypeKind::UNDEFINED_TYPE); // map
ASSERT_EQ(out_map_col.num_children(), 1);
EXPECT_EQ(out_map_col.child(0).name(), "key_value"); // key_value (named in parquet writer)
ASSERT_EQ(out_map_col.child(0).num_children(), 2);
EXPECT_EQ(out_map_col.child(0).child(0).name(), "key"); // key (named in parquet writer)
EXPECT_EQ(out_map_col.child(0).child(1).name(), "value"); // value (named in parquet writer)
EXPECT_EQ(out_map_col.child(0).child(0).type_kind(), cudf::io::parquet::TypeKind::INT32); // int
EXPECT_EQ(out_map_col.child(0).child(1).type_kind(),
cudf::io::parquet::TypeKind::FLOAT); // float
auto const& out_list_col = meta.schema().root().child(1);
EXPECT_EQ(out_list_col.name(), "lists");
EXPECT_EQ(out_list_col.type_kind(), cudf::io::parquet::TypeKind::UNDEFINED_TYPE); // list
// TODO repetition type?
ASSERT_EQ(out_list_col.num_children(), 1);
EXPECT_EQ(out_list_col.child(0).name(), "list"); // list (named in parquet writer)
ASSERT_EQ(out_list_col.child(0).num_children(), 1);
auto const& out_list_struct_col = out_list_col.child(0).child(0);
  EXPECT_EQ(out_list_struct_col.name(), "element");  // element (named in parquet writer)
EXPECT_EQ(out_list_struct_col.type_kind(),
cudf::io::parquet::TypeKind::UNDEFINED_TYPE); // struct
ASSERT_EQ(out_list_struct_col.num_children(), 2);
auto const& out_int_col = out_list_struct_col.child(0);
EXPECT_EQ(out_int_col.name(), "int_field");
EXPECT_EQ(out_int_col.type_kind(), cudf::io::parquet::TypeKind::INT32);
auto const& out_float_col = out_list_struct_col.child(1);
EXPECT_EQ(out_float_col.name(), "float_field");
EXPECT_EQ(out_float_col.type_kind(), cudf::io::parquet::TypeKind::FLOAT);
}
TEST_F(ParquetWriterTest, NoNullsAsNonNullable)
{
auto valids = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
column_wrapper<int32_t> col{{1, 2, 3}, valids};
table_view expected({col});
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_nullability(false);
auto filepath = temp_env->get_temp_filepath("NonNullable.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.metadata(std::move(expected_metadata));
// Writer should be able to write a column without nulls as non-nullable
EXPECT_NO_THROW(cudf::io::write_parquet(out_opts));
}
TEST_F(ParquetReaderTest, FilterSimple)
{
srand(31337);
auto written_table = create_random_fixed_table<int>(9, 9, false);
auto filepath = temp_env->get_temp_filepath("FilterSimple.parquet");
cudf::io::parquet_writer_options args =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, *written_table);
cudf::io::write_parquet(args);
// Filtering AST - table[0] < RAND_MAX/2
auto literal_value = cudf::numeric_scalar<decltype(RAND_MAX)>(RAND_MAX / 2);
auto literal = cudf::ast::literal(literal_value);
auto col_ref_0 = cudf::ast::column_reference(0);
auto filter_expression = cudf::ast::operation(cudf::ast::ast_operator::LESS, col_ref_0, literal);
auto predicate = cudf::compute_column(*written_table, filter_expression);
EXPECT_EQ(predicate->view().type().id(), cudf::type_id::BOOL8)
<< "Predicate filter should return a boolean";
auto expected = cudf::apply_boolean_mask(*written_table, *predicate);
// To make sure AST filters out some elements
EXPECT_LT(expected->num_rows(), written_table->num_rows());
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath})
.filter(filter_expression);
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *expected);
}
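// Helper: writes a three-column table (ascending uint32, descending int64, unordered double)
// with 8000-row row groups and row-group statistics, returning the table and its file path
// for the predicate pushdown tests below.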
auto create_parquet_with_stats(std::string const& filename)
{
auto col0 = testdata::ascending<uint32_t>();
auto col1 = testdata::descending<int64_t>();
auto col2 = testdata::unordered<double>();
auto const expected = table_view{{col0, col1, col2}};
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("col_uint32");
expected_metadata.column_metadata[1].set_name("col_int64");
expected_metadata.column_metadata[2].set_name("col_double");
auto const filepath = temp_env->get_temp_filepath(filename);
const cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.metadata(std::move(expected_metadata))
.row_group_size_rows(8000)
.stats_level(cudf::io::statistics_freq::STATISTICS_ROWGROUP);
cudf::io::write_parquet(out_opts);
std::vector<std::unique_ptr<column>> columns;
columns.push_back(col0.release());
columns.push_back(col1.release());
columns.push_back(col2.release());
return std::pair{cudf::table{std::move(columns)}, filepath};
}
TEST_F(ParquetReaderTest, FilterIdentity)
{
auto [src, filepath] = create_parquet_with_stats("FilterIdentity.parquet");
// Filtering AST - identity function, always true.
auto literal_value = cudf::numeric_scalar<bool>(true);
auto literal = cudf::ast::literal(literal_value);
auto filter_expression = cudf::ast::operation(cudf::ast::ast_operator::IDENTITY, literal);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath})
.filter(filter_expression);
auto result = cudf::io::read_parquet(read_opts);
cudf::io::parquet_reader_options read_opts2 =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result2 = cudf::io::read_parquet(read_opts2);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *result2.tbl);
}
TEST_F(ParquetReaderTest, FilterReferenceExpression)
{
auto [src, filepath] = create_parquet_with_stats("FilterReferenceExpression.parquet");
// Filtering AST - table[0] < 150
auto literal_value = cudf::numeric_scalar<uint32_t>(150);
auto literal = cudf::ast::literal(literal_value);
auto col_ref_0 = cudf::ast::column_reference(0);
auto filter_expression = cudf::ast::operation(cudf::ast::ast_operator::LESS, col_ref_0, literal);
// Expected result
auto predicate = cudf::compute_column(src, filter_expression);
auto expected = cudf::apply_boolean_mask(src, *predicate);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath})
.filter(filter_expression);
auto result = cudf::io::read_parquet(read_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *expected);
}
TEST_F(ParquetReaderTest, FilterNamedExpression)
{
auto [src, filepath] = create_parquet_with_stats("NamedExpression.parquet");
// Filtering AST - table["col_uint32"] < 150
auto literal_value = cudf::numeric_scalar<uint32_t>(150);
auto literal = cudf::ast::literal(literal_value);
auto col_name_0 = cudf::ast::column_name_reference("col_uint32");
auto parquet_filter = cudf::ast::operation(cudf::ast::ast_operator::LESS, col_name_0, literal);
auto col_ref_0 = cudf::ast::column_reference(0);
auto table_filter = cudf::ast::operation(cudf::ast::ast_operator::LESS, col_ref_0, literal);
// Expected result
auto predicate = cudf::compute_column(src, table_filter);
auto expected = cudf::apply_boolean_mask(src, *predicate);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath})
.filter(parquet_filter);
auto result = cudf::io::read_parquet(read_opts);
// tests
CUDF_TEST_EXPECT_TABLES_EQUAL(*result.tbl, *expected);
}
// Typed predicate pushdown tests covering numeric, chrono, and string types.
template <typename T>
struct ParquetReaderPredicatePushdownTest : public ParquetReaderTest {};
// These chrono types are not supported because the parquet writer does not have a type to
// represent them.
using UnsupportedChronoTypes =
cudf::test::Types<cudf::timestamp_s, cudf::duration_D, cudf::duration_s>;
// Fixed-point types are also unsupported because the AST does not support them yet.
using SupportedTestTypes = cudf::test::RemoveIf<cudf::test::ContainedIn<UnsupportedChronoTypes>,
cudf::test::ComparableTypes>;
TYPED_TEST_SUITE(ParquetReaderPredicatePushdownTest, SupportedTestTypes);
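// Typed helper: like create_parquet_with_stats, but all three columns use the same type T and
// the file is written with 8000-row row groups at the default statistics level.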
template <typename T>
auto create_parquet_typed_with_stats(std::string const& filename)
{
auto col0 = testdata::ascending<T>();
auto col1 = testdata::descending<T>();
auto col2 = testdata::unordered<T>();
auto const written_table = table_view{{col0, col1, col2}};
auto const filepath = temp_env->get_temp_filepath("FilterTyped.parquet");
{
cudf::io::table_input_metadata expected_metadata(written_table);
expected_metadata.column_metadata[0].set_name("col0");
expected_metadata.column_metadata[1].set_name("col1");
expected_metadata.column_metadata[2].set_name("col2");
const cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, written_table)
.metadata(std::move(expected_metadata))
.row_group_size_rows(8000);
cudf::io::write_parquet(out_opts);
}
std::vector<std::unique_ptr<column>> columns;
columns.push_back(col0.release());
columns.push_back(col1.release());
columns.push_back(col2.release());
return std::pair{cudf::table{std::move(columns)}, filepath};
}
TYPED_TEST(ParquetReaderPredicatePushdownTest, FilterTyped)
{
using T = TypeParam;
auto const [src, filepath] = create_parquet_typed_with_stats<T>("FilterTyped.parquet");
auto const written_table = src.view();
// Filtering AST
auto literal_value = []() {
if constexpr (cudf::is_timestamp<T>()) {
// table[0] < 10000 timestamp days/seconds/milliseconds/microseconds/nanoseconds
return cudf::timestamp_scalar<T>(T(typename T::duration(10000))); // i (0-20,000)
} else if constexpr (cudf::is_duration<T>()) {
// table[0] < 10000 day/seconds/milliseconds/microseconds/nanoseconds
return cudf::duration_scalar<T>(T(10000)); // i (0-20,000)
} else if constexpr (std::is_same_v<T, cudf::string_view>) {
// table[0] < "000010000"
return cudf::string_scalar("000010000"); // i (0-20,000)
} else {
// table[0] < 0 or 100u
return cudf::numeric_scalar<T>((100 - 100 * std::is_signed_v<T>)); // i/100 (-100-100/ 0-200)
}
}();
auto literal = cudf::ast::literal(literal_value);
auto col_name_0 = cudf::ast::column_name_reference("col0");
auto filter_expression = cudf::ast::operation(cudf::ast::ast_operator::LESS, col_name_0, literal);
auto col_ref_0 = cudf::ast::column_reference(0);
auto ref_filter = cudf::ast::operation(cudf::ast::ast_operator::LESS, col_ref_0, literal);
// Expected result
auto predicate = cudf::compute_column(written_table, ref_filter);
EXPECT_EQ(predicate->view().type().id(), cudf::type_id::BOOL8)
<< "Predicate filter should return a boolean";
auto expected = cudf::apply_boolean_mask(written_table, *predicate);
// Reading with Predicate Pushdown
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath})
.filter(filter_expression);
auto result = cudf::io::read_parquet(read_opts);
auto result_table = result.tbl->view();
// tests
EXPECT_EQ(int(written_table.column(0).type().id()), int(result_table.column(0).type().id()))
<< "col0 type mismatch";
// To make sure AST filters out some elements
EXPECT_LT(expected->num_rows(), written_table.num_rows());
EXPECT_EQ(result_table.num_rows(), expected->num_rows());
EXPECT_EQ(result_table.num_columns(), expected->num_columns());
CUDF_TEST_EXPECT_TABLES_EQUAL(expected->view(), result_table);
}
TEST_F(ParquetReaderTest, FilterMultiple1)
{
using T = cudf::string_view;
auto const [src, filepath] = create_parquet_typed_with_stats<T>("FilterMultiple1.parquet");
auto const written_table = src.view();
// Filtering AST - 10000 < table[0] < 12000
std::string const low = "000010000";
std::string const high = "000012000";
auto lov = cudf::string_scalar(low, true);
auto hiv = cudf::string_scalar(high, true);
auto filter_col = cudf::ast::column_reference(0);
auto lo_lit = cudf::ast::literal(lov);
auto hi_lit = cudf::ast::literal(hiv);
auto expr_1 = cudf::ast::operation(cudf::ast::ast_operator::GREATER_EQUAL, filter_col, lo_lit);
auto expr_2 = cudf::ast::operation(cudf::ast::ast_operator::LESS, filter_col, hi_lit);
auto expr_3 = cudf::ast::operation(cudf::ast::ast_operator::LOGICAL_AND, expr_1, expr_2);
// Expected result
auto predicate = cudf::compute_column(written_table, expr_3);
auto expected = cudf::apply_boolean_mask(written_table, *predicate);
auto si = cudf::io::source_info(filepath);
auto builder = cudf::io::parquet_reader_options::builder(si).filter(expr_3);
auto table_with_metadata = cudf::io::read_parquet(builder);
auto result = table_with_metadata.tbl->view();
// tests
CUDF_TEST_EXPECT_TABLES_EQUAL(expected->view(), result);
}
TEST_F(ParquetReaderTest, FilterMultiple2)
{
// multiple conditions on same column.
using T = cudf::string_view;
auto const [src, filepath] = create_parquet_typed_with_stats<T>("FilterMultiple2.parquet");
auto const written_table = src.view();
// 0-8000, 8001-16000, 16001-20000
// Filtering AST
// (table[0] >= "000010000" AND table[0] < "000012000") OR
// (table[0] >= "000017000" AND table[0] < "000019000")
std::string const low1 = "000010000";
std::string const high1 = "000012000";
auto lov = cudf::string_scalar(low1, true);
auto hiv = cudf::string_scalar(high1, true);
auto filter_col = cudf::ast::column_reference(0);
auto lo_lit = cudf::ast::literal(lov);
auto hi_lit = cudf::ast::literal(hiv);
auto expr_1 = cudf::ast::operation(cudf::ast::ast_operator::GREATER_EQUAL, filter_col, lo_lit);
auto expr_2 = cudf::ast::operation(cudf::ast::ast_operator::LESS, filter_col, hi_lit);
auto expr_3 = cudf::ast::operation(cudf::ast::ast_operator::LOGICAL_AND, expr_1, expr_2);
std::string const low2 = "000017000";
std::string const high2 = "000019000";
auto lov2 = cudf::string_scalar(low2, true);
auto hiv2 = cudf::string_scalar(high2, true);
auto lo_lit2 = cudf::ast::literal(lov2);
auto hi_lit2 = cudf::ast::literal(hiv2);
auto expr_4 = cudf::ast::operation(cudf::ast::ast_operator::GREATER_EQUAL, filter_col, lo_lit2);
auto expr_5 = cudf::ast::operation(cudf::ast::ast_operator::LESS, filter_col, hi_lit2);
auto expr_6 = cudf::ast::operation(cudf::ast::ast_operator::LOGICAL_AND, expr_4, expr_5);
auto expr_7 = cudf::ast::operation(cudf::ast::ast_operator::LOGICAL_OR, expr_3, expr_6);
// Expected result
auto predicate = cudf::compute_column(written_table, expr_7);
auto expected = cudf::apply_boolean_mask(written_table, *predicate);
auto si = cudf::io::source_info(filepath);
auto builder = cudf::io::parquet_reader_options::builder(si).filter(expr_7);
auto table_with_metadata = cudf::io::read_parquet(builder);
auto result = table_with_metadata.tbl->view();
// tests
CUDF_TEST_EXPECT_TABLES_EQUAL(expected->view(), result);
}
TEST_F(ParquetReaderTest, FilterMultiple3)
{
// multiple conditions with reference to multiple columns.
// index and name references mixed.
using T = uint32_t;
auto const [src, filepath] = create_parquet_typed_with_stats<T>("FilterMultiple3.parquet");
auto const written_table = src.view();
// Filtering AST - (table[0] >= 70 AND table[0] < 90) OR (table[1] >= 100 AND table[1] < 120)
// row groups min, max:
// table[0] 0-80, 81-160, 161-200.
// table[1] 200-121, 120-41, 40-0.
auto filter_col1 = cudf::ast::column_reference(0);
auto filter_col2 = cudf::ast::column_name_reference("col1");
T constexpr low1 = 70;
T constexpr high1 = 90;
T constexpr low2 = 100;
T constexpr high2 = 120;
auto lov = cudf::numeric_scalar(low1, true);
auto hiv = cudf::numeric_scalar(high1, true);
auto lo_lit1 = cudf::ast::literal(lov);
auto hi_lit1 = cudf::ast::literal(hiv);
auto expr_1 = cudf::ast::operation(cudf::ast::ast_operator::GREATER_EQUAL, filter_col1, lo_lit1);
auto expr_2 = cudf::ast::operation(cudf::ast::ast_operator::LESS, filter_col1, hi_lit1);
auto expr_3 = cudf::ast::operation(cudf::ast::ast_operator::LOGICAL_AND, expr_1, expr_2);
auto lov2 = cudf::numeric_scalar(low2, true);
auto hiv2 = cudf::numeric_scalar(high2, true);
auto lo_lit2 = cudf::ast::literal(lov2);
auto hi_lit2 = cudf::ast::literal(hiv2);
auto expr_4 = cudf::ast::operation(cudf::ast::ast_operator::GREATER_EQUAL, filter_col2, lo_lit2);
auto expr_5 = cudf::ast::operation(cudf::ast::ast_operator::LESS, filter_col2, hi_lit2);
auto expr_6 = cudf::ast::operation(cudf::ast::ast_operator::LOGICAL_AND, expr_4, expr_5);
// expression to test
auto expr_7 = cudf::ast::operation(cudf::ast::ast_operator::LOGICAL_OR, expr_3, expr_6);
// Expected result
auto filter_col2_ref = cudf::ast::column_reference(1);
auto expr_4_ref =
cudf::ast::operation(cudf::ast::ast_operator::GREATER_EQUAL, filter_col2_ref, lo_lit2);
auto expr_5_ref = cudf::ast::operation(cudf::ast::ast_operator::LESS, filter_col2_ref, hi_lit2);
auto expr_6_ref =
cudf::ast::operation(cudf::ast::ast_operator::LOGICAL_AND, expr_4_ref, expr_5_ref);
auto expr_7_ref = cudf::ast::operation(cudf::ast::ast_operator::LOGICAL_OR, expr_3, expr_6_ref);
auto predicate = cudf::compute_column(written_table, expr_7_ref);
auto expected = cudf::apply_boolean_mask(written_table, *predicate);
auto si = cudf::io::source_info(filepath);
auto builder = cudf::io::parquet_reader_options::builder(si).filter(expr_7);
auto table_with_metadata = cudf::io::read_parquet(builder);
auto result = table_with_metadata.tbl->view();
// tests
CUDF_TEST_EXPECT_TABLES_EQUAL(expected->view(), result);
}
TEST_F(ParquetReaderTest, FilterSupported)
{
using T = uint32_t;
auto const [src, filepath] = create_parquet_typed_with_stats<T>("FilterSupported.parquet");
auto const written_table = src.view();
// Filtering AST - ((table[0] > 70 AND table[0] <= 90) OR (table[1] >= 100 AND table[1] < 120))
// AND (table[1] != 110)
// row groups min, max:
// table[0] 0-80, 81-160, 161-200.
// table[1] 200-121, 120-41, 40-0.
auto filter_col1 = cudf::ast::column_reference(0);
auto filter_col2 = cudf::ast::column_reference(1);
T constexpr low1 = 70;
T constexpr high1 = 90;
T constexpr low2 = 100;
T constexpr high2 = 120;
T constexpr skip_value = 110;
auto lov = cudf::numeric_scalar(low1, true);
auto hiv = cudf::numeric_scalar(high1, true);
auto lo_lit1 = cudf::ast::literal(lov);
auto hi_lit1 = cudf::ast::literal(hiv);
auto expr_1 = cudf::ast::operation(cudf::ast::ast_operator::GREATER, filter_col1, lo_lit1);
auto expr_2 = cudf::ast::operation(cudf::ast::ast_operator::LESS_EQUAL, filter_col1, hi_lit1);
auto expr_3 = cudf::ast::operation(cudf::ast::ast_operator::LOGICAL_AND, expr_1, expr_2);
auto lov2 = cudf::numeric_scalar(low2, true);
auto hiv2 = cudf::numeric_scalar(high2, true);
auto lo_lit2 = cudf::ast::literal(lov2);
auto hi_lit2 = cudf::ast::literal(hiv2);
auto expr_4 = cudf::ast::operation(cudf::ast::ast_operator::GREATER_EQUAL, filter_col2, lo_lit2);
auto expr_5 = cudf::ast::operation(cudf::ast::ast_operator::LESS, filter_col2, hi_lit2);
auto expr_6 = cudf::ast::operation(cudf::ast::ast_operator::LOGICAL_AND, expr_4, expr_5);
auto expr_7 = cudf::ast::operation(cudf::ast::ast_operator::LOGICAL_OR, expr_3, expr_6);
auto skip_ov = cudf::numeric_scalar(skip_value, true);
auto skip_lit = cudf::ast::literal(skip_ov);
auto expr_8 = cudf::ast::operation(cudf::ast::ast_operator::NOT_EQUAL, filter_col2, skip_lit);
auto expr_9 = cudf::ast::operation(cudf::ast::ast_operator::LOGICAL_AND, expr_7, expr_8);
// Expected result
auto predicate = cudf::compute_column(written_table, expr_9);
auto expected = cudf::apply_boolean_mask(written_table, *predicate);
auto si = cudf::io::source_info(filepath);
auto builder = cudf::io::parquet_reader_options::builder(si).filter(expr_9);
auto table_with_metadata = cudf::io::read_parquet(builder);
auto result = table_with_metadata.tbl->view();
// tests
CUDF_TEST_EXPECT_TABLES_EQUAL(expected->view(), result);
}
TEST_F(ParquetReaderTest, FilterSupported2)
{
using T = uint32_t;
constexpr auto num_rows = 4000;
auto elements0 =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i / 2000; });
auto elements1 =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i / 1000; });
auto elements2 =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i / 500; });
auto col0 = cudf::test::fixed_width_column_wrapper<T>(elements0, elements0 + num_rows);
auto col1 = cudf::test::fixed_width_column_wrapper<T>(elements1, elements1 + num_rows);
auto col2 = cudf::test::fixed_width_column_wrapper<T>(elements2, elements2 + num_rows);
auto const written_table = table_view{{col0, col1, col2}};
auto const filepath = temp_env->get_temp_filepath("FilterSupported2.parquet");
{
const cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, written_table)
.row_group_size_rows(1000);
cudf::io::write_parquet(out_opts);
}
auto si = cudf::io::source_info(filepath);
auto filter_col0 = cudf::ast::column_reference(0);
auto filter_col1 = cudf::ast::column_reference(1);
auto filter_col2 = cudf::ast::column_reference(2);
auto s_value = cudf::numeric_scalar<T>(1, true);
auto lit_value = cudf::ast::literal(s_value);
auto test_expr = [&](auto& expr) {
// Expected result
auto predicate = cudf::compute_column(written_table, expr);
auto expected = cudf::apply_boolean_mask(written_table, *predicate);
// tests
auto builder = cudf::io::parquet_reader_options::builder(si).filter(expr);
auto table_with_metadata = cudf::io::read_parquet(builder);
auto result = table_with_metadata.tbl->view();
CUDF_TEST_EXPECT_TABLES_EQUAL(expected->view(), result);
};
// row groups min, max:
// table[0] 0-0, 0-0, 1-1, 1-1
// table[1] 0-0, 1-1, 2-2, 3-3
// table[2] 0-1, 2-3, 4-5, 6-7
// Filtering AST - table[i] == 1
{
auto expr0 = cudf::ast::operation(cudf::ast::ast_operator::EQUAL, filter_col0, lit_value);
test_expr(expr0);
auto expr1 = cudf::ast::operation(cudf::ast::ast_operator::EQUAL, filter_col1, lit_value);
test_expr(expr1);
auto expr2 = cudf::ast::operation(cudf::ast::ast_operator::EQUAL, filter_col2, lit_value);
test_expr(expr2);
}
// Filtering AST - table[i] != 1
{
auto expr0 = cudf::ast::operation(cudf::ast::ast_operator::NOT_EQUAL, filter_col0, lit_value);
test_expr(expr0);
auto expr1 = cudf::ast::operation(cudf::ast::ast_operator::NOT_EQUAL, filter_col1, lit_value);
test_expr(expr1);
auto expr2 = cudf::ast::operation(cudf::ast::ast_operator::NOT_EQUAL, filter_col2, lit_value);
test_expr(expr2);
}
}
// Error types - type mismatch, invalid column name, invalid literal type, invalid operator,
// non-bool filter output type.
TEST_F(ParquetReaderTest, FilterErrors)
{
using T = uint32_t;
auto const [src, filepath] = create_parquet_typed_with_stats<T>("FilterErrors.parquet");
auto const written_table = src.view();
auto si = cudf::io::source_info(filepath);
// Filtering AST - invalid column index
{
auto filter_col1 = cudf::ast::column_reference(3);
T constexpr low = 100;
auto lov = cudf::numeric_scalar(low, true);
auto low_lot = cudf::ast::literal(lov);
auto expr = cudf::ast::operation(cudf::ast::ast_operator::LESS, filter_col1, low_lot);
auto builder = cudf::io::parquet_reader_options::builder(si).filter(expr);
EXPECT_THROW(cudf::io::read_parquet(builder), cudf::logic_error);
}
// Filtering AST - invalid column name
{
auto filter_col1 = cudf::ast::column_name_reference("col3");
T constexpr low = 100;
auto lov = cudf::numeric_scalar(low, true);
auto low_lot = cudf::ast::literal(lov);
auto expr = cudf::ast::operation(cudf::ast::ast_operator::LESS, filter_col1, low_lot);
auto builder = cudf::io::parquet_reader_options::builder(si).filter(expr);
EXPECT_THROW(cudf::io::read_parquet(builder), cudf::logic_error);
}
// Filtering AST - incompatible literal type
{
auto filter_col1 = cudf::ast::column_name_reference("col0");
auto filter_col2 = cudf::ast::column_reference(1);
int64_t constexpr low = 100;
auto lov = cudf::numeric_scalar(low, true);
auto low_lot = cudf::ast::literal(lov);
auto expr1 = cudf::ast::operation(cudf::ast::ast_operator::LESS, filter_col1, low_lot);
auto expr2 = cudf::ast::operation(cudf::ast::ast_operator::LESS, filter_col2, low_lot);
auto builder1 = cudf::io::parquet_reader_options::builder(si).filter(expr1);
EXPECT_THROW(cudf::io::read_parquet(builder1), cudf::logic_error);
auto builder2 = cudf::io::parquet_reader_options::builder(si).filter(expr2);
EXPECT_THROW(cudf::io::read_parquet(builder2), cudf::logic_error);
}
  // Filtering AST - "table[0] + 110" is an invalid filter expression
{
auto filter_col1 = cudf::ast::column_reference(0);
T constexpr add_value = 110;
auto add_v = cudf::numeric_scalar(add_value, true);
auto add_lit = cudf::ast::literal(add_v);
auto expr_8 = cudf::ast::operation(cudf::ast::ast_operator::ADD, filter_col1, add_lit);
auto si = cudf::io::source_info(filepath);
auto builder = cudf::io::parquet_reader_options::builder(si).filter(expr_8);
EXPECT_THROW(cudf::io::read_parquet(builder), cudf::logic_error);
    // The reference path also throws, showing that the filter expression itself is invalid
    // rather than this being a limitation of parquet predicate pushdown.
auto predicate = cudf::compute_column(written_table, expr_8);
EXPECT_THROW(cudf::apply_boolean_mask(written_table, *predicate), cudf::logic_error);
}
// Filtering AST - INT64(table[0] < 100) non-bool expression
{
auto filter_col1 = cudf::ast::column_reference(0);
T constexpr low = 100;
auto lov = cudf::numeric_scalar(low, true);
auto low_lot = cudf::ast::literal(lov);
auto bool_expr = cudf::ast::operation(cudf::ast::ast_operator::LESS, filter_col1, low_lot);
auto cast = cudf::ast::operation(cudf::ast::ast_operator::CAST_TO_INT64, bool_expr);
auto builder = cudf::io::parquet_reader_options::builder(si).filter(cast);
EXPECT_THROW(cudf::io::read_parquet(builder), cudf::logic_error);
EXPECT_NO_THROW(cudf::compute_column(written_table, cast));
auto predicate = cudf::compute_column(written_table, cast);
EXPECT_NE(predicate->view().type().id(), cudf::type_id::BOOL8);
}
}
// Filter without stats information in file.
TEST_F(ParquetReaderTest, FilterNoStats)
{
using T = uint32_t;
constexpr auto num_rows = 16000;
auto elements =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i / 1000; });
auto col0 = cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_rows);
auto const written_table = table_view{{col0}};
auto const filepath = temp_env->get_temp_filepath("FilterNoStats.parquet");
{
const cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, written_table)
.row_group_size_rows(8000)
.stats_level(cudf::io::statistics_freq::STATISTICS_NONE);
cudf::io::write_parquet(out_opts);
}
auto si = cudf::io::source_info(filepath);
auto filter_col0 = cudf::ast::column_reference(0);
auto s_value = cudf::numeric_scalar<T>(1, true);
auto lit_value = cudf::ast::literal(s_value);
  // 16000 rows (values i / 1000, i.e. 0-15) in two 8000-row row groups, written with
  // STATISTICS_NONE, so no row-group statistics are available for pruning.
// Filtering AST - table[0] > 1
auto expr = cudf::ast::operation(cudf::ast::ast_operator::GREATER, filter_col0, lit_value);
// Expected result
auto predicate = cudf::compute_column(written_table, expr);
auto expected = cudf::apply_boolean_mask(written_table, *predicate);
// tests
auto builder = cudf::io::parquet_reader_options::builder(si).filter(expr);
auto table_with_metadata = cudf::io::read_parquet(builder);
auto result = table_with_metadata.tbl->view();
CUDF_TEST_EXPECT_TABLES_EQUAL(expected->view(), result);
}
// Filter for float column with NaN values
TEST_F(ParquetReaderTest, FilterFloatNAN)
{
constexpr auto num_rows = 24000;
auto elements = cudf::detail::make_counting_transform_iterator(
0, [num_rows](auto i) { return i > num_rows / 2 ? NAN : i; });
auto col0 = cudf::test::fixed_width_column_wrapper<float>(elements, elements + num_rows);
auto col1 = cudf::test::fixed_width_column_wrapper<double>(elements, elements + num_rows);
auto const written_table = table_view{{col0, col1}};
auto const filepath = temp_env->get_temp_filepath("FilterFloatNAN.parquet");
{
const cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, written_table)
.row_group_size_rows(8000);
cudf::io::write_parquet(out_opts);
}
auto si = cudf::io::source_info(filepath);
auto filter_col0 = cudf::ast::column_reference(0);
auto filter_col1 = cudf::ast::column_reference(1);
auto s0_value = cudf::numeric_scalar<float>(NAN, true);
auto lit0_value = cudf::ast::literal(s0_value);
auto s1_value = cudf::numeric_scalar<double>(NAN, true);
auto lit1_value = cudf::ast::literal(s1_value);
  // 24000 rows in three 8000-row row groups; rows past the midpoint hold NaN in both columns.
  // Filtering AST - table[0] == NAN, table[1] != NAN
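  // Note: the expected tables are built with the same AST via compute_column/apply_boolean_mask,
  // so this only checks that the reader's filter agrees with libcudf's in-memory evaluation of
  // NaN comparisons, not any particular NaN ordering convention.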
auto expr_eq = cudf::ast::operation(cudf::ast::ast_operator::EQUAL, filter_col0, lit0_value);
auto expr_neq = cudf::ast::operation(cudf::ast::ast_operator::NOT_EQUAL, filter_col1, lit1_value);
// Expected result
auto predicate0 = cudf::compute_column(written_table, expr_eq);
auto expected0 = cudf::apply_boolean_mask(written_table, *predicate0);
auto predicate1 = cudf::compute_column(written_table, expr_neq);
auto expected1 = cudf::apply_boolean_mask(written_table, *predicate1);
// tests
auto builder0 = cudf::io::parquet_reader_options::builder(si).filter(expr_eq);
auto table_with_metadata0 = cudf::io::read_parquet(builder0);
auto result0 = table_with_metadata0.tbl->view();
auto builder1 = cudf::io::parquet_reader_options::builder(si).filter(expr_neq);
auto table_with_metadata1 = cudf::io::read_parquet(builder1);
auto result1 = table_with_metadata1.tbl->view();
CUDF_TEST_EXPECT_TABLES_EQUAL(expected0->view(), result0);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected1->view(), result1);
}
TEST_F(ParquetWriterTest, TimestampMicrosINT96NoOverflow)
{
using namespace cuda::std::chrono;
using namespace cudf::io;
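  // Timestamps this far from the epoch (years 3023 and 723) would overflow a signed 64-bit
  // nanosecond count; INT96 stores the Julian day and nanoseconds-within-day separately, so
  // the round trip below is expected to preserve the microsecond values exactly.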
column_wrapper<cudf::timestamp_us> big_ts_col{
sys_days{year{3023} / month{7} / day{14}} + 7h + 38min + 45s + 418688us,
sys_days{year{723} / month{3} / day{21}} + 14h + 20min + 13s + microseconds{781ms}};
table_view expected({big_ts_col});
auto filepath = temp_env->get_temp_filepath("BigINT96Timestamp.parquet");
auto const out_opts =
parquet_writer_options::builder(sink_info{filepath}, expected).int96_timestamps(true).build();
write_parquet(out_opts);
auto const in_opts = parquet_reader_options::builder(source_info(filepath))
.timestamp_type(cudf::data_type(cudf::type_id::TIMESTAMP_MICROSECONDS))
.build();
auto const result = read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
}
TEST_F(ParquetWriterTest, PreserveNullability)
{
constexpr auto num_rows = 100;
auto const col0_data = random_values<int32_t>(num_rows);
auto const col1_data = random_values<int32_t>(num_rows);
auto const col0_validity = cudf::test::iterators::no_nulls();
auto const col1_validity =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 2 == 0; });
column_wrapper<int32_t> col0{col0_data.begin(), col0_data.end(), col0_validity};
column_wrapper<int32_t> col1{col1_data.begin(), col1_data.end(), col1_validity};
auto const col2 = make_parquet_list_list_col<int>(0, num_rows, 5, 8, true);
auto const expected = table_view{{col0, col1, *col2}};
cudf::io::table_input_metadata expected_metadata(expected);
expected_metadata.column_metadata[0].set_name("mandatory");
expected_metadata.column_metadata[0].set_nullability(false);
expected_metadata.column_metadata[1].set_name("optional");
expected_metadata.column_metadata[1].set_nullability(true);
expected_metadata.column_metadata[2].set_name("lists");
expected_metadata.column_metadata[2].set_nullability(true);
  // The offsets child is a cudf implementation detail that is not part of the parquet schema,
  // so its nullability is left unset.
expected_metadata.column_metadata[2].child(0).set_name("offsets");
expected_metadata.column_metadata[2].child(1).set_name("element");
expected_metadata.column_metadata[2].child(1).set_nullability(false);
expected_metadata.column_metadata[2].child(1).child(0).set_name("offsets");
expected_metadata.column_metadata[2].child(1).child(1).set_name("element");
expected_metadata.column_metadata[2].child(1).child(1).set_nullability(true);
auto const filepath = temp_env->get_temp_filepath("PreserveNullability.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.metadata(expected_metadata);
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options const in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto const result = cudf::io::read_parquet(in_opts);
auto const read_metadata = cudf::io::table_input_metadata{result.metadata};
// test that expected_metadata matches read_metadata
std::function<void(cudf::io::column_in_metadata, cudf::io::column_in_metadata)>
compare_names_and_nullability = [&](auto lhs, auto rhs) {
EXPECT_EQ(lhs.get_name(), rhs.get_name());
ASSERT_EQ(lhs.is_nullability_defined(), rhs.is_nullability_defined());
if (lhs.is_nullability_defined()) { EXPECT_EQ(lhs.nullable(), rhs.nullable()); }
ASSERT_EQ(lhs.num_children(), rhs.num_children());
for (int i = 0; i < lhs.num_children(); ++i) {
compare_names_and_nullability(lhs.child(i), rhs.child(i));
}
};
ASSERT_EQ(expected_metadata.column_metadata.size(), read_metadata.column_metadata.size());
for (size_t i = 0; i < expected_metadata.column_metadata.size(); ++i) {
compare_names_and_nullability(expected_metadata.column_metadata[i],
read_metadata.column_metadata[i]);
}
}
TEST_P(ParquetV2Test, CheckEncodings)
{
using cudf::io::parquet::detail::Encoding;
constexpr auto num_rows = 100'000;
auto const is_v2 = GetParam();
auto const validity = cudf::test::iterators::no_nulls();
  // data should be PLAIN for v1, RLE for v2
auto col0_data =
cudf::detail::make_counting_transform_iterator(0, [](auto i) -> bool { return i % 2 == 0; });
// data should be PLAIN for v1, DELTA_BINARY_PACKED for v2
auto col1_data = random_values<int32_t>(num_rows);
// data should be PLAIN_DICTIONARY for v1, PLAIN and RLE_DICTIONARY for v2
auto col2_data = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return 1; });
cudf::test::fixed_width_column_wrapper<bool> col0{col0_data, col0_data + num_rows, validity};
column_wrapper<int32_t> col1{col1_data.begin(), col1_data.end(), validity};
column_wrapper<int32_t> col2{col2_data, col2_data + num_rows, validity};
auto expected = table_view{{col0, col1, col2}};
auto const filename = is_v2 ? "CheckEncodingsV2.parquet" : "CheckEncodingsV1.parquet";
auto filepath = temp_env->get_temp_filepath(filename);
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.max_page_size_rows(num_rows)
.write_v2_headers(is_v2);
cudf::io::write_parquet(out_opts);
// make sure the expected encodings are present
auto contains = [](auto const& vec, auto const& enc) {
return std::find(vec.begin(), vec.end(), enc) != vec.end();
};
auto const source = cudf::io::datasource::create(filepath);
cudf::io::parquet::detail::FileMetaData fmd;
read_footer(source, &fmd);
auto const& chunk0_enc = fmd.row_groups[0].columns[0].meta_data.encodings;
auto const& chunk1_enc = fmd.row_groups[0].columns[1].meta_data.encodings;
auto const& chunk2_enc = fmd.row_groups[0].columns[2].meta_data.encodings;
if (is_v2) {
// col0 should have RLE for rep/def and data
EXPECT_TRUE(chunk0_enc.size() == 1);
EXPECT_TRUE(contains(chunk0_enc, Encoding::RLE));
// col1 should have RLE for rep/def and DELTA_BINARY_PACKED for data
EXPECT_TRUE(chunk1_enc.size() == 2);
EXPECT_TRUE(contains(chunk1_enc, Encoding::RLE));
EXPECT_TRUE(contains(chunk1_enc, Encoding::DELTA_BINARY_PACKED));
// col2 should have RLE for rep/def, PLAIN for dict, and RLE_DICTIONARY for data
EXPECT_TRUE(chunk2_enc.size() == 3);
EXPECT_TRUE(contains(chunk2_enc, Encoding::RLE));
EXPECT_TRUE(contains(chunk2_enc, Encoding::PLAIN));
EXPECT_TRUE(contains(chunk2_enc, Encoding::RLE_DICTIONARY));
} else {
// col0 should have RLE for rep/def and PLAIN for data
EXPECT_TRUE(chunk0_enc.size() == 2);
EXPECT_TRUE(contains(chunk0_enc, Encoding::RLE));
EXPECT_TRUE(contains(chunk0_enc, Encoding::PLAIN));
// col1 should have RLE for rep/def and PLAIN for data
EXPECT_TRUE(chunk1_enc.size() == 2);
EXPECT_TRUE(contains(chunk1_enc, Encoding::RLE));
EXPECT_TRUE(contains(chunk1_enc, Encoding::PLAIN));
// col2 should have RLE for rep/def and PLAIN_DICTIONARY for data and dict
EXPECT_TRUE(chunk2_enc.size() == 2);
EXPECT_TRUE(contains(chunk2_enc, Encoding::RLE));
EXPECT_TRUE(contains(chunk2_enc, Encoding::PLAIN_DICTIONARY));
}
}
// duration_D, duration_s, and timestamp_s are removed because they do not appear to be supported
// properly; see the definition of UnsupportedChronoTypes above.
using DeltaDecimalTypes = cudf::test::Types<numeric::decimal32, numeric::decimal64>;
using DeltaBinaryTypes =
cudf::test::Concat<cudf::test::IntegralTypesNotBool, cudf::test::ChronoTypes, DeltaDecimalTypes>;
using SupportedDeltaTestTypes =
cudf::test::RemoveIf<cudf::test::ContainedIn<UnsupportedChronoTypes>, DeltaBinaryTypes>;
TYPED_TEST_SUITE(ParquetWriterDeltaTest, SupportedDeltaTestTypes);
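// Round-trip test: with V2 page headers and dictionary encoding disabled, the writer is expected
// to fall back to DELTA_BINARY_PACKED for these integer-backed types (see CheckEncodings above).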
TYPED_TEST(ParquetWriterDeltaTest, SupportedDeltaTestTypes)
{
using T = TypeParam;
auto col0 = testdata::ascending<T>();
auto col1 = testdata::unordered<T>();
auto const expected = table_view{{col0, col1}};
auto const filepath = temp_env->get_temp_filepath("DeltaBinaryPacked.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected)
.write_v2_headers(true)
.dictionary_policy(cudf::io::dictionary_policy::NEVER);
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, result.tbl->view());
}
TYPED_TEST(ParquetWriterDeltaTest, SupportedDeltaTestTypesSliced)
{
using T = TypeParam;
constexpr int num_rows = 4'000;
auto col0 = testdata::ascending<T>();
auto col1 = testdata::unordered<T>();
auto const expected = table_view{{col0, col1}};
auto expected_slice = cudf::slice(expected, {num_rows, 2 * num_rows});
ASSERT_EQ(expected_slice[0].num_rows(), num_rows);
auto const filepath = temp_env->get_temp_filepath("DeltaBinaryPackedSliced.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected_slice)
.write_v2_headers(true)
.dictionary_policy(cudf::io::dictionary_policy::NEVER);
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected_slice, result.tbl->view());
}
TYPED_TEST(ParquetWriterDeltaTest, SupportedDeltaListSliced)
{
using T = TypeParam;
constexpr int num_slice = 4'000;
constexpr int num_rows = 32 * 1024;
std::mt19937 gen(6542);
std::bernoulli_distribution bn(0.7f);
auto valids =
cudf::detail::make_counting_transform_iterator(0, [&](int index) { return bn(gen); });
auto values = thrust::make_counting_iterator(0);
// list<T>
constexpr int vals_per_row = 4;
auto c1_offset_iter = cudf::detail::make_counting_transform_iterator(
0, [vals_per_row](cudf::size_type idx) { return idx * vals_per_row; });
cudf::test::fixed_width_column_wrapper<cudf::size_type> c1_offsets(c1_offset_iter,
c1_offset_iter + num_rows + 1);
cudf::test::fixed_width_column_wrapper<T> c1_vals(
values, values + (num_rows * vals_per_row), valids);
auto [null_mask, null_count] = cudf::test::detail::make_null_mask(valids, valids + num_rows);
auto _c1 = cudf::make_lists_column(
num_rows, c1_offsets.release(), c1_vals.release(), null_count, std::move(null_mask));
auto c1 = cudf::purge_nonempty_nulls(*_c1);
auto const expected = table_view{{*c1}};
auto expected_slice = cudf::slice(expected, {num_slice, 2 * num_slice});
ASSERT_EQ(expected_slice[0].num_rows(), num_slice);
auto const filepath = temp_env->get_temp_filepath("DeltaBinaryPackedListSliced.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, expected_slice)
.write_v2_headers(true)
.dictionary_policy(cudf::io::dictionary_policy::NEVER);
cudf::io::write_parquet(out_opts);
cudf::io::parquet_reader_options in_opts =
cudf::io::parquet_reader_options::builder(cudf::io::source_info{filepath});
auto result = cudf::io::read_parquet(in_opts);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected_slice, result.tbl->view());
}
TEST_F(ParquetWriterTest, EmptyMinStringStatistics)
{
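  // The column's minimum value is the empty string; verify the writer still records both min
  // and max statistics for the column chunk instead of dropping them.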
char const* const min_val = "";
char const* const max_val = "zzz";
std::vector<char const*> strings{min_val, max_val, "pining", "for", "the", "fjords"};
column_wrapper<cudf::string_view> string_col{strings.begin(), strings.end()};
auto const output = table_view{{string_col}};
auto const filepath = temp_env->get_temp_filepath("EmptyMinStringStatistics.parquet");
cudf::io::parquet_writer_options out_opts =
cudf::io::parquet_writer_options::builder(cudf::io::sink_info{filepath}, output);
cudf::io::write_parquet(out_opts);
auto const source = cudf::io::datasource::create(filepath);
cudf::io::parquet::detail::FileMetaData fmd;
read_footer(source, &fmd);
ASSERT_TRUE(fmd.row_groups.size() > 0);
ASSERT_TRUE(fmd.row_groups[0].columns.size() > 0);
auto const& chunk = fmd.row_groups[0].columns[0];
auto const stats = get_statistics(chunk);
ASSERT_TRUE(stats.min_value.has_value());
ASSERT_TRUE(stats.max_value.has_value());
auto const min_value = std::string{reinterpret_cast<char const*>(stats.min_value.value().data()),
stats.min_value.value().size()};
auto const max_value = std::string{reinterpret_cast<char const*>(stats.max_value.value().data()),
stats.max_value.value().size()};
EXPECT_EQ(min_value, std::string(min_val));
EXPECT_EQ(max_value, std::string(max_val));
}
TEST_F(ParquetReaderTest, RepeatedNoAnnotations)
{
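  // A complete parquet file (written by parquet-rs 0.3.0, per its footer) whose repeated groups
  // carry no LIST/MAP annotations; the reader must still reconstruct the nested
  // list<struct<number, kind>> column.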
constexpr unsigned char repeated_bytes[] = {
0x50, 0x41, 0x52, 0x31, 0x15, 0x04, 0x15, 0x30, 0x15, 0x30, 0x4c, 0x15, 0x0c, 0x15, 0x00, 0x12,
0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x15, 0x00, 0x15, 0x0a, 0x15, 0x0a,
0x2c, 0x15, 0x0c, 0x15, 0x10, 0x15, 0x06, 0x15, 0x06, 0x00, 0x00, 0x03, 0x03, 0x88, 0xc6, 0x02,
0x26, 0x80, 0x01, 0x1c, 0x15, 0x02, 0x19, 0x25, 0x00, 0x10, 0x19, 0x18, 0x02, 0x69, 0x64, 0x15,
0x00, 0x16, 0x0c, 0x16, 0x78, 0x16, 0x78, 0x26, 0x54, 0x26, 0x08, 0x00, 0x00, 0x15, 0x04, 0x15,
0x40, 0x15, 0x40, 0x4c, 0x15, 0x08, 0x15, 0x00, 0x12, 0x00, 0x00, 0xe3, 0x0c, 0x23, 0x4b, 0x01,
0x00, 0x00, 0x00, 0xc7, 0x35, 0x3a, 0x42, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x6b, 0x74, 0x84, 0x00,
0x00, 0x00, 0x00, 0x55, 0xa1, 0xae, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x15, 0x00, 0x15, 0x22, 0x15,
0x22, 0x2c, 0x15, 0x10, 0x15, 0x10, 0x15, 0x06, 0x15, 0x06, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
0x03, 0xc0, 0x03, 0x00, 0x00, 0x00, 0x03, 0x90, 0xaa, 0x02, 0x03, 0x94, 0x03, 0x26, 0xda, 0x02,
0x1c, 0x15, 0x04, 0x19, 0x25, 0x00, 0x10, 0x19, 0x38, 0x0c, 0x70, 0x68, 0x6f, 0x6e, 0x65, 0x4e,
0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x05, 0x70, 0x68, 0x6f, 0x6e, 0x65, 0x06, 0x6e, 0x75, 0x6d,
0x62, 0x65, 0x72, 0x15, 0x00, 0x16, 0x10, 0x16, 0xa0, 0x01, 0x16, 0xa0, 0x01, 0x26, 0x96, 0x02,
0x26, 0xba, 0x01, 0x00, 0x00, 0x15, 0x04, 0x15, 0x24, 0x15, 0x24, 0x4c, 0x15, 0x04, 0x15, 0x00,
0x12, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x68, 0x6f, 0x6d, 0x65, 0x06, 0x00, 0x00, 0x00, 0x6d,
0x6f, 0x62, 0x69, 0x6c, 0x65, 0x15, 0x00, 0x15, 0x20, 0x15, 0x20, 0x2c, 0x15, 0x10, 0x15, 0x10,
0x15, 0x06, 0x15, 0x06, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, 0xc0, 0x03, 0x00, 0x00, 0x00,
0x03, 0x90, 0xef, 0x01, 0x03, 0x04, 0x26, 0xcc, 0x04, 0x1c, 0x15, 0x0c, 0x19, 0x25, 0x00, 0x10,
0x19, 0x38, 0x0c, 0x70, 0x68, 0x6f, 0x6e, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x05,
0x70, 0x68, 0x6f, 0x6e, 0x65, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x15, 0x00, 0x16, 0x10, 0x16, 0x82,
0x01, 0x16, 0x82, 0x01, 0x26, 0x8a, 0x04, 0x26, 0xca, 0x03, 0x00, 0x00, 0x15, 0x02, 0x19, 0x6c,
0x48, 0x04, 0x75, 0x73, 0x65, 0x72, 0x15, 0x04, 0x00, 0x15, 0x02, 0x25, 0x00, 0x18, 0x02, 0x69,
0x64, 0x00, 0x35, 0x02, 0x18, 0x0c, 0x70, 0x68, 0x6f, 0x6e, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65,
0x72, 0x73, 0x15, 0x02, 0x00, 0x35, 0x04, 0x18, 0x05, 0x70, 0x68, 0x6f, 0x6e, 0x65, 0x15, 0x04,
0x00, 0x15, 0x04, 0x25, 0x00, 0x18, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x00, 0x15, 0x0c,
0x25, 0x02, 0x18, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x25, 0x00, 0x00, 0x16, 0x00, 0x19, 0x1c, 0x19,
0x3c, 0x26, 0x80, 0x01, 0x1c, 0x15, 0x02, 0x19, 0x25, 0x00, 0x10, 0x19, 0x18, 0x02, 0x69, 0x64,
0x15, 0x00, 0x16, 0x0c, 0x16, 0x78, 0x16, 0x78, 0x26, 0x54, 0x26, 0x08, 0x00, 0x00, 0x26, 0xda,
0x02, 0x1c, 0x15, 0x04, 0x19, 0x25, 0x00, 0x10, 0x19, 0x38, 0x0c, 0x70, 0x68, 0x6f, 0x6e, 0x65,
0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x05, 0x70, 0x68, 0x6f, 0x6e, 0x65, 0x06, 0x6e, 0x75,
0x6d, 0x62, 0x65, 0x72, 0x15, 0x00, 0x16, 0x10, 0x16, 0xa0, 0x01, 0x16, 0xa0, 0x01, 0x26, 0x96,
0x02, 0x26, 0xba, 0x01, 0x00, 0x00, 0x26, 0xcc, 0x04, 0x1c, 0x15, 0x0c, 0x19, 0x25, 0x00, 0x10,
0x19, 0x38, 0x0c, 0x70, 0x68, 0x6f, 0x6e, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x05,
0x70, 0x68, 0x6f, 0x6e, 0x65, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x15, 0x00, 0x16, 0x10, 0x16, 0x82,
0x01, 0x16, 0x82, 0x01, 0x26, 0x8a, 0x04, 0x26, 0xca, 0x03, 0x00, 0x00, 0x16, 0x9a, 0x03, 0x16,
0x0c, 0x00, 0x28, 0x49, 0x70, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x2d, 0x72, 0x73, 0x20, 0x76,
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x30, 0x2e, 0x33, 0x2e, 0x30, 0x20, 0x28, 0x62, 0x75,
0x69, 0x6c, 0x64, 0x20, 0x62, 0x34, 0x35, 0x63, 0x65, 0x37, 0x63, 0x62, 0x61, 0x32, 0x31, 0x39,
0x39, 0x66, 0x32, 0x32, 0x64, 0x39, 0x33, 0x32, 0x36, 0x39, 0x63, 0x31, 0x35, 0x30, 0x64, 0x38,
0x61, 0x38, 0x33, 0x39, 0x31, 0x36, 0x63, 0x36, 0x39, 0x62, 0x35, 0x65, 0x29, 0x00, 0x32, 0x01,
0x00, 0x00, 0x50, 0x41, 0x52, 0x31};
auto read_opts = cudf::io::parquet_reader_options::builder(
cudf::io::source_info{reinterpret_cast<char const*>(repeated_bytes), sizeof(repeated_bytes)});
auto result = cudf::io::read_parquet(read_opts);
EXPECT_EQ(result.tbl->view().column(0).size(), 6);
EXPECT_EQ(result.tbl->view().num_columns(), 2);
column_wrapper<int32_t> col0{1, 2, 3, 4, 5, 6};
column_wrapper<int64_t> child0{{5555555555l, 1111111111l, 1111111111l, 2222222222l, 3333333333l}};
cudf::test::strings_column_wrapper child1{{"-", "home", "home", "-", "mobile"}, {0, 1, 1, 0, 1}};
auto struct_col = cudf::test::structs_column_wrapper{{child0, child1}};
auto list_offsets_column =
cudf::test::fixed_width_column_wrapper<cudf::size_type>{0, 0, 0, 0, 1, 2, 5}.release();
auto num_list_rows = list_offsets_column->size() - 1;
auto mask = cudf::create_null_mask(6, cudf::mask_state::ALL_VALID);
cudf::set_null_mask(static_cast<cudf::bitmask_type*>(mask.data()), 0, 2, false);
auto list_col = cudf::make_lists_column(
num_list_rows, std::move(list_offsets_column), struct_col.release(), 2, std::move(mask));
std::vector<std::unique_ptr<cudf::column>> struct_children;
struct_children.push_back(std::move(list_col));
auto outer_struct =
cudf::test::structs_column_wrapper{{std::move(struct_children)}, {0, 0, 1, 1, 1, 1}};
table_view expected{{col0, outer_struct}};
CUDF_TEST_EXPECT_TABLES_EQUAL(result.tbl->view(), expected);
}
CUDF_TEST_PROGRAM_MAIN()
rapidsai_public_repos/cudf/cpp/tests
rapidsai_public_repos/cudf/cpp/tests/io/json_test.cpp
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/default_stream.hpp>
#include <cudf_test/iterator_utilities.hpp>
#include <cudf_test/table_utilities.hpp>
#include <cudf_test/type_lists.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/io/arrow_io_source.hpp>
#include <cudf/io/json.hpp>
#include <cudf/strings/convert/convert_fixed_point.hpp>
#include <cudf/strings/repeat_strings.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <limits>
#include <thrust/iterator/constant_iterator.h>
#include <arrow/io/api.h>
#include <fstream>
#include <type_traits>
#define wrapper cudf::test::fixed_width_column_wrapper
using float_wrapper = wrapper<float>;
using float64_wrapper = wrapper<double>;
using int_wrapper = wrapper<int>;
using int8_wrapper = wrapper<int8_t>;
using int16_wrapper = wrapper<int16_t>;
using int64_wrapper = wrapper<int64_t>;
using timestamp_ms_wrapper = wrapper<cudf::timestamp_ms, cudf::timestamp_ms::rep>;
using bool_wrapper = wrapper<bool>;
using cudf::data_type;
using cudf::type_id;
using cudf::type_to_id;
template <typename T>
auto dtype()
{
return data_type{type_to_id<T>()};
}
template <typename T, typename SourceElementT = T>
using column_wrapper =
typename std::conditional<std::is_same_v<T, cudf::string_view>,
cudf::test::strings_column_wrapper,
cudf::test::fixed_width_column_wrapper<T, SourceElementT>>::type;
cudf::test::TempDirTestEnvironment* const temp_env =
static_cast<cudf::test::TempDirTestEnvironment*>(
::testing::AddGlobalTestEnvironment(new cudf::test::TempDirTestEnvironment));
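// Pads the string form of each value with `zero_count` leading zeros (after any '-' sign),
// optionally adding a '+' prefix, so tests can exercise zero- and sign-prefixed numeric input.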
template <typename T>
std::vector<std::string> prepend_zeros(std::vector<T> const& input,
int zero_count = 0,
bool add_positive_sign = false)
{
std::vector<std::string> output(input.size());
std::transform(input.begin(), input.end(), output.begin(), [=](T const& num) {
auto str = std::to_string(num);
bool is_negative = (str[0] == '-');
if (is_negative) {
str.insert(1, zero_count, '0');
return str;
} else if (add_positive_sign) {
return "+" + std::string(zero_count, '0') + str;
} else {
str.insert(0, zero_count, '0');
return str;
}
});
return output;
}
template <>
std::vector<std::string> prepend_zeros<std::string>(std::vector<std::string> const& input,
int zero_count,
bool add_positive_sign)
{
std::vector<std::string> output(input.size());
std::transform(input.begin(), input.end(), output.begin(), [=](std::string const& num) {
auto str = num;
bool is_negative = (str[0] == '-');
if (is_negative) {
str.insert(1, zero_count, '0');
return str;
} else if (add_positive_sign) {
return "+" + std::string(zero_count, '0') + str;
} else {
str.insert(0, zero_count, '0');
return str;
}
});
return output;
}
// Generates a vector of uniform random values of type T
template <typename T>
inline auto random_values(size_t size)
{
std::vector<T> values(size);
using T1 = T;
using uniform_distribution =
typename std::conditional_t<std::is_same_v<T1, bool>,
std::bernoulli_distribution,
std::conditional_t<std::is_floating_point_v<T1>,
std::uniform_real_distribution<T1>,
std::uniform_int_distribution<T1>>>;
static constexpr auto seed = 0xf00d;
static std::mt19937 engine{seed};
static uniform_distribution dist{};
std::generate_n(values.begin(), size, [&]() { return T{dist(engine)}; });
return values;
}
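// GMock pointwise matcher: passes when each element is within `tolerance` of its reference value.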
MATCHER_P(FloatNearPointwise, tolerance, "Out-of-range")
{
return (std::get<0>(arg) > std::get<1>(arg) - tolerance &&
std::get<0>(arg) < std::get<1>(arg) + tolerance);
}
// temporary method to verify the float columns until
// CUDF_TEST_EXPECT_COLUMNS_EQUAL supports floating point
template <typename T, typename valid_t>
void check_float_column(cudf::column_view const& col,
std::vector<T> const& data,
valid_t const& validity)
{
CUDF_TEST_EXPECT_COLUMN_PROPERTIES_EQUAL(col, (wrapper<T>{data.begin(), data.end(), validity}));
EXPECT_EQ(col.null_count(), 0);
EXPECT_THAT(cudf::test::to_host<T>(col).first,
::testing::Pointwise(FloatNearPointwise(1e-6), data));
}
/**
* @brief Base test fixture for JSON reader tests
*/
struct JsonReaderTest : public cudf::test::BaseFixture {};
/**
* @brief Enum class to be used to specify the test case of parametrized tests
*/
enum class json_test_t {
// Run test with the existing JSON lines reader using row-orient input data
legacy_lines_row_orient,
// Run test with the existing JSON lines reader using record-orient input data
legacy_lines_record_orient,
// Run test with the nested JSON lines reader using record-orient input data
json_experimental_record_orient,
// Run test with the nested JSON lines reader using row-orient input data
json_experimental_row_orient
};
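// Helpers to classify a test case by reader (legacy vs. nested) and input orientation.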
constexpr bool is_legacy_test(json_test_t test_opt)
{
return test_opt == json_test_t::legacy_lines_row_orient or
test_opt == json_test_t::legacy_lines_record_orient;
}
constexpr bool is_row_orient_test(json_test_t test_opt)
{
return test_opt == json_test_t::legacy_lines_row_orient or
test_opt == json_test_t::json_experimental_row_orient;
}
/**
* @brief Test fixture for parametrized JSON reader tests
*/
struct JsonReaderParamTest : public cudf::test::BaseFixture,
public testing::WithParamInterface<json_test_t> {};
/**
* @brief Test fixture for parametrized JSON reader tests, testing record orient-only for legacy
* JSON lines reader and the nested reader
*/
struct JsonReaderDualTest : public cudf::test::BaseFixture,
public testing::WithParamInterface<json_test_t> {};
/**
 * @brief Test fixture for parametrized JSON reader tests that exercise only the new nested JSON
 * reader
*/
struct JsonReaderNoLegacy : public cudf::test::BaseFixture,
public testing::WithParamInterface<json_test_t> {};
/**
* @brief Generates a JSON lines string that uses the record orient
*
* @param records An array of a map of key-value pairs
* @param record_delimiter The delimiter to be used to delimit a record
* @param prefix The prefix prepended to the whole string
* @param suffix The suffix to be appended after the whole string
* @return The JSON lines string that uses the record orient
*/
std::string to_records_orient(std::vector<std::map<std::string, std::string>> const& records,
std::string record_delimiter,
std::string prefix = "",
std::string suffix = "")
{
std::string result = prefix;
for (auto record_it = std::cbegin(records); record_it != std::cend(records); record_it++) {
result += "{";
for (auto kv_pair_it = std::cbegin(*record_it); kv_pair_it != std::cend(*record_it);
kv_pair_it++) {
auto const& [key, value] = *kv_pair_it;
result += "\"" + key + "\":" + value;
result += (kv_pair_it != std::prev(std::end(*record_it))) ? ", " : "";
}
result += "}";
if (record_it != std::prev(std::end(records))) { result += record_delimiter; }
}
return (result + suffix);
}
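// Example (illustrative): values are emitted verbatim, so string values must already carry their
// quotes:
//   to_records_orient({{{"a", "1"}, {"b", R"("x")"}}, {{"a", "2"}}}, "\n")
// produces
//   {"a":1, "b":"x"}
//   {"a":2}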
template <typename DecimalType>
struct JsonFixedPointReaderTest : public JsonReaderTest {};
template <typename DecimalType>
struct JsonValidFixedPointReaderTest : public JsonFixedPointReaderTest<DecimalType> {
void run_test(std::vector<std::string> const& reference_strings,
numeric::scale_type scale,
bool use_legacy_parser)
{
cudf::test::strings_column_wrapper const strings(reference_strings.begin(),
reference_strings.end());
auto const expected = cudf::strings::to_fixed_point(
cudf::strings_column_view(strings), data_type{type_to_id<DecimalType>(), scale});
auto const buffer =
std::accumulate(reference_strings.begin(),
reference_strings.end(),
std::string{},
[](std::string const& acc, std::string const& rhs) {
return acc + (acc.empty() ? "" : "\n") + "{\"col0\":" + rhs + "}";
});
cudf::io::json_reader_options const in_opts =
cudf::io::json_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.dtypes({data_type{type_to_id<DecimalType>(), scale}})
.lines(true)
.legacy(use_legacy_parser);
auto const result = cudf::io::read_json(in_opts);
auto const result_view = result.tbl->view();
ASSERT_EQ(result_view.num_columns(), 1);
EXPECT_EQ(result.metadata.schema_info[0].name, "col0");
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(*expected, result_view.column(0));
}
void run_tests(std::vector<std::string> const& reference_strings, numeric::scale_type scale)
{
// Test both parsers
run_test(reference_strings, scale, false);
run_test(reference_strings, scale, true);
}
};
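// Usage sketch (illustrative): run_tests({"1.23", "4.56"}, numeric::scale_type{-2}) builds the
// JSON lines buffer
//   {"col0":1.23}
//   {"col0":4.56}
// and checks that both the legacy and the nested parser produce the column returned by
// cudf::strings::to_fixed_point on the same strings.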
TYPED_TEST_SUITE(JsonFixedPointReaderTest, cudf::test::FixedPointTypes);
TYPED_TEST_SUITE(JsonValidFixedPointReaderTest, cudf::test::FixedPointTypes);
// Parametrize qualifying JSON tests for executing both nested reader and legacy JSON lines reader
INSTANTIATE_TEST_CASE_P(JsonReaderParamTest,
JsonReaderParamTest,
::testing::Values(json_test_t::legacy_lines_row_orient,
json_test_t::legacy_lines_record_orient,
json_test_t::json_experimental_record_orient,
json_test_t::json_experimental_row_orient));
// Parametrize qualifying JSON tests for executing both readers with record-orient input only
INSTANTIATE_TEST_CASE_P(JsonReaderDualTest,
JsonReaderDualTest,
::testing::Values(json_test_t::legacy_lines_record_orient,
json_test_t::json_experimental_record_orient));
// Parametrize qualifying JSON tests for executing nested reader only
INSTANTIATE_TEST_CASE_P(JsonReaderNoLegacy,
JsonReaderNoLegacy,
::testing::Values(json_test_t::json_experimental_row_orient,
json_test_t::json_experimental_record_orient));
TEST_P(JsonReaderParamTest, BasicJsonLines)
{
auto const test_opt = GetParam();
std::string row_orient = "[1, 1.1]\n[2, 2.2]\n[3, 3.3]\n";
std::string record_orient = to_records_orient(
{{{"0", "1"}, {"1", "1.1"}}, {{"0", "2"}, {"1", "2.2"}}, {{"0", "3"}, {"1", "3.3"}}}, "\n");
std::string data = is_row_orient_test(test_opt) ? row_orient : record_orient;
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{data.data(), data.size()})
.dtypes(std::vector<data_type>{dtype<int32_t>(), dtype<double>()})
.lines(true)
.legacy(is_legacy_test(test_opt));
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
EXPECT_EQ(result.tbl->num_columns(), 2);
EXPECT_EQ(result.tbl->num_rows(), 3);
EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::INT32);
EXPECT_EQ(result.tbl->get_column(1).type().id(), cudf::type_id::FLOAT64);
EXPECT_EQ(result.metadata.schema_info[0].name, "0");
EXPECT_EQ(result.metadata.schema_info[1].name, "1");
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0), int_wrapper{{1, 2, 3}, validity});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(1),
float64_wrapper{{1.1, 2.2, 3.3}, validity});
}
TEST_P(JsonReaderParamTest, FloatingPoint)
{
auto const test_opt = GetParam();
std::string row_orient =
"[5.6]\n[0.5679e2]\n[1.2e10]\n[0.07e1]\n[3000e-3]\n[12.34e0]\n[3.1e-001]\n[-73."
"98007199999998]\n";
std::string record_orient = to_records_orient({{{"0", "5.6"}},
{{"0", "0.5679e2"}},
{{"0", "1.2e10"}},
{{"0", "0.07e1"}},
{{"0", "3000e-3"}},
{{"0", "12.34e0"}},
{{"0", "3.1e-001"}},
{{"0", "-73.98007199999998"}}},
"\n");
std::string data = is_row_orient_test(test_opt) ? row_orient : record_orient;
auto filepath = temp_env->get_temp_dir() + "FloatingPoint.json";
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << data;
}
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{filepath})
.dtypes({dtype<float>()})
.lines(true)
.legacy(is_legacy_test(test_opt));
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
EXPECT_EQ(result.tbl->num_columns(), 1);
EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::FLOAT32);
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
CUDF_TEST_EXPECT_COLUMNS_EQUAL(
result.tbl->get_column(0),
float_wrapper{{5.6, 56.79, 12000000000., 0.7, 3.000, 12.34, 0.31, -73.98007199999998},
validity});
auto const bitmask = cudf::test::bitmask_to_host(result.tbl->get_column(0));
ASSERT_EQ((1u << result.tbl->get_column(0).size()) - 1, bitmask[0]);
}
TEST_P(JsonReaderParamTest, JsonLinesStrings)
{
auto const test_opt = GetParam();
std::string row_orient = "[1, 1.1, \"aa \"]\n[2, 2.2, \" bbb\"]";
std::string record_orient = to_records_orient({{{"0", "1"}, {"1", "1.1"}, {"2", R"("aa ")"}},
{{"0", "2"}, {"1", "2.2"}, {"2", R"(" bbb")"}}},
"\n");
std::string data = is_row_orient_test(test_opt) ? row_orient : record_orient;
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{data.data(), data.size()})
.dtypes({{"2", dtype<cudf::string_view>()}, {"0", dtype<int32_t>()}, {"1", dtype<double>()}})
.lines(true)
.legacy(is_legacy_test(test_opt));
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
EXPECT_EQ(result.tbl->num_columns(), 3);
EXPECT_EQ(result.tbl->num_rows(), 2);
EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::INT32);
EXPECT_EQ(result.tbl->get_column(1).type().id(), cudf::type_id::FLOAT64);
EXPECT_EQ(result.tbl->get_column(2).type().id(), cudf::type_id::STRING);
EXPECT_EQ(result.metadata.schema_info[0].name, "0");
EXPECT_EQ(result.metadata.schema_info[1].name, "1");
EXPECT_EQ(result.metadata.schema_info[2].name, "2");
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0), int_wrapper{{1, 2}, validity});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(1), float64_wrapper{{1.1, 2.2}, validity});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(2),
cudf::test::strings_column_wrapper({"aa ", " bbb"}));
}
TEST_P(JsonReaderParamTest, MultiColumn)
{
auto const test_opt = GetParam();
bool const row_orient = is_row_orient_test(test_opt);
constexpr auto num_rows = 10;
auto int8_values = random_values<int8_t>(num_rows);
auto int16_values = random_values<int16_t>(num_rows);
auto int32_values = random_values<int32_t>(num_rows);
auto int64_values = random_values<int64_t>(num_rows);
auto float32_values = random_values<float>(num_rows);
auto float64_values = random_values<double>(num_rows);
auto filepath = temp_env->get_temp_dir() + "MultiColumn.json";
{
std::ostringstream line;
if (row_orient) {
for (int i = 0; i < num_rows; ++i) {
line << "[" << std::to_string(int8_values[i]) << "," << int16_values[i] << ","
<< int32_values[i] << "," << int64_values[i] << "," << float32_values[i] << ","
<< float64_values[i] << "]\n";
}
} else {
std::vector<std::map<std::string, std::string>> records;
for (int i = 0; i < num_rows; ++i) {
records.push_back({
{"0", std::to_string(int8_values[i])}, //
{"1", std::to_string(int16_values[i])}, //
{"2", std::to_string(int32_values[i])}, //
{"3", std::to_string(int64_values[i])}, //
{"4", std::to_string(float32_values[i])}, //
{"5", std::to_string(float64_values[i])}, //
});
}
line << to_records_orient(records, "\n");
}
std::ofstream outfile(filepath, std::ofstream::out);
outfile << line.str();
}
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{filepath})
.dtypes({dtype<int8_t>(),
dtype<int16_t>(),
dtype<int32_t>(),
dtype<int64_t>(),
dtype<float>(),
dtype<double>()})
.lines(true)
.legacy(is_legacy_test(test_opt));
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
auto const view = result.tbl->view();
EXPECT_EQ(view.num_columns(), 6);
EXPECT_EQ(view.column(0).type().id(), cudf::type_id::INT8);
EXPECT_EQ(view.column(1).type().id(), cudf::type_id::INT16);
EXPECT_EQ(view.column(2).type().id(), cudf::type_id::INT32);
EXPECT_EQ(view.column(3).type().id(), cudf::type_id::INT64);
EXPECT_EQ(view.column(4).type().id(), cudf::type_id::FLOAT32);
EXPECT_EQ(view.column(5).type().id(), cudf::type_id::FLOAT64);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(view.column(0),
int8_wrapper{int8_values.begin(), int8_values.end(), validity});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(view.column(1),
int16_wrapper{int16_values.begin(), int16_values.end(), validity});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(view.column(2),
int_wrapper{int32_values.begin(), int32_values.end(), validity});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(view.column(3),
int64_wrapper{int64_values.begin(), int64_values.end(), validity});
check_float_column(view.column(4), float32_values, validity);
check_float_column(view.column(5), float64_values, validity);
}
TEST_P(JsonReaderParamTest, Booleans)
{
auto const test_opt = GetParam();
std::string row_orient = "[true]\n[true]\n[false]\n[false]\n[true]";
std::string record_orient = to_records_orient(
{
{{"0", "true"}},
{{"0", "true"}},
{{"0", "false"}},
{{"0", "false"}},
{{"0", "true"}},
},
"\n");
std::string data = is_row_orient_test(test_opt) ? row_orient : record_orient;
auto filepath = temp_env->get_temp_dir() + "Booleans.json";
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << data;
}
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{filepath})
.dtypes({dtype<bool>()})
.lines(true)
.legacy(is_legacy_test(test_opt));
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
// Booleans are the same (integer) data type, but valued at 0 or 1
auto const view = result.tbl->view();
EXPECT_EQ(result.tbl->num_columns(), 1);
EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::BOOL8);
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0),
bool_wrapper{{true, true, false, false, true}, validity});
}
TEST_P(JsonReaderParamTest, Dates)
{
auto const test_opt = GetParam();
std::string row_orient =
"[05/03/2001]\n[31/10/2010]\n[20/10/1994]\n[18/10/1990]\n[1/1/1970]\n"
"[18/04/1995]\n[14/07/1994]\n[\"07/06/2006 11:20:30.400\"]\n"
"[\"16/09/2005T1:2:30.400PM\"]\n[2/2/1970]\n[null]";
std::string record_orient = to_records_orient({{{"0", R"("05/03/2001")"}},
{{"0", R"("31/10/2010")"}},
{{"0", R"("20/10/1994")"}},
{{"0", R"("18/10/1990")"}},
{{"0", R"("1/1/1970")"}},
{{"0", R"("18/04/1995")"}},
{{"0", R"("14/07/1994")"}},
{{"0", R"("07/06/2006 11:20:30.400")"}},
{{"0", R"("16/09/2005T1:2:30.400PM")"}},
{{"0", R"("2/2/1970")"}},
{{"0", R"(null)"}}},
"\n");
std::string data = is_row_orient_test(test_opt) ? row_orient : record_orient;
auto filepath = temp_env->get_temp_dir() + "Dates.json";
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << data;
}
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{filepath})
.dtypes({data_type{type_id::TIMESTAMP_MILLISECONDS}})
.lines(true)
.dayfirst(true)
.legacy(is_legacy_test(test_opt));
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
auto const view = result.tbl->view();
EXPECT_EQ(result.tbl->num_columns(), 1);
EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::TIMESTAMP_MILLISECONDS);
auto validity = cudf::test::iterators::nulls_at({10});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0),
timestamp_ms_wrapper{{983750400000,
1288483200000,
782611200000,
656208000000,
0L,
798163200000,
774144000000,
1149679230400,
1126875750400,
2764800000,
0L},
validity});
}
TEST_P(JsonReaderParamTest, Durations)
{
auto const test_opt = GetParam();
std::string row_orient =
"[-2]\n[-1]\n[0]\n"
"[\"1 days\"]\n[\"0 days 23:01:00\"]\n[\"0 days 00:00:00.000000123\"]\n"
"[\"0:0:0.000123\"]\n[\"0:0:0.000123000\"]\n[\"00:00:00.100000001\"]\n"
"[-2147483648]\n[2147483647]\n[null]";
std::string record_orient = to_records_orient({{{"0", "-2"}},
{{"0", "-1"}},
{{"0", "0"}},
{{"0", R"("1 days")"}},
{{"0", R"("0 days 23:01:00")"}},
{{"0", R"("0 days 00:00:00.000000123")"}},
{{"0", R"("0:0:0.000123")"}},
{{"0", R"("0:0:0.000123000")"}},
{{"0", R"("00:00:00.100000001")"}},
{{"0", R"(-2147483648)"}},
{{"0", R"(2147483647)"}},
{{"0", R"(null)"}}},
"\n");
std::string data = is_row_orient_test(test_opt) ? row_orient : record_orient;
auto filepath = temp_env->get_temp_dir() + "Durations.json";
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << data;
}
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{filepath})
.dtypes({data_type{type_id::DURATION_NANOSECONDS}})
.lines(true)
.legacy(is_legacy_test(test_opt));
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
auto const view = result.tbl->view();
EXPECT_EQ(result.tbl->num_columns(), 1);
EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::DURATION_NANOSECONDS);
auto validity = cudf::test::iterators::nulls_at({11});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(
result.tbl->get_column(0),
wrapper<cudf::duration_ns, cudf::duration_ns::rep>{{-2L,
-1L,
0L,
1L * 60 * 60 * 24 * 1000000000L,
(23 * 60 + 1) * 60 * 1000000000L,
123L,
123000L,
123000L,
100000001L,
-2147483648L,
2147483647L,
0L},
validity});
}
TEST_P(JsonReaderParamTest, JsonLinesDtypeInference)
{
auto const test_opt = GetParam();
std::string row_orient = "[100, 1.1, \"aa \"]\n[200, 2.2, \" bbb\"]";
std::string record_orient = to_records_orient({{{"0", "100"}, {"1", "1.1"}, {"2", R"("aa ")"}},
{{"0", "200"}, {"1", "2.2"}, {"2", R"(" bbb")"}}},
"\n");
std::string data = is_row_orient_test(test_opt) ? row_orient : record_orient;
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{data.data(), data.size()})
.lines(true)
.legacy(is_legacy_test(test_opt));
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
EXPECT_EQ(result.tbl->num_columns(), 3);
EXPECT_EQ(result.tbl->num_rows(), 2);
EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::INT64);
EXPECT_EQ(result.tbl->get_column(1).type().id(), cudf::type_id::FLOAT64);
EXPECT_EQ(result.tbl->get_column(2).type().id(), cudf::type_id::STRING);
EXPECT_EQ(result.metadata.schema_info[0].name, "0");
EXPECT_EQ(result.metadata.schema_info[1].name, "1");
EXPECT_EQ(result.metadata.schema_info[2].name, "2");
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0), int64_wrapper{{100, 200}, validity});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(1), float64_wrapper{{1.1, 2.2}, validity});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(2),
cudf::test::strings_column_wrapper({"aa ", " bbb"}));
}
TEST_P(JsonReaderParamTest, JsonLinesFileInput)
{
auto const test_opt = GetParam();
std::string row_orient = "[11, 1.1]\n[22, 2.2]";
std::string record_orient =
to_records_orient({{{"0", "11"}, {"1", "1.1"}}, {{"0", "22"}, {"1", "2.2"}}}, "\n");
std::string data = is_row_orient_test(test_opt) ? row_orient : record_orient;
const std::string fname = temp_env->get_temp_dir() + "JsonLinesFileTest.json";
std::ofstream outfile(fname, std::ofstream::out);
outfile << data;
outfile.close();
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{fname})
.lines(true)
.legacy(is_legacy_test(test_opt));
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
EXPECT_EQ(result.tbl->num_columns(), 2);
EXPECT_EQ(result.tbl->num_rows(), 2);
EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::INT64);
EXPECT_EQ(result.tbl->get_column(1).type().id(), cudf::type_id::FLOAT64);
EXPECT_EQ(result.metadata.schema_info[0].name, "0");
EXPECT_EQ(result.metadata.schema_info[1].name, "1");
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0), int64_wrapper{{11, 22}, validity});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(1), float64_wrapper{{1.1, 2.2}, validity});
}
TEST_F(JsonReaderTest, JsonLinesByteRange)
{
const std::string fname = temp_env->get_temp_dir() + "JsonLinesByteRangeTest.json";
std::ofstream outfile(fname, std::ofstream::out);
outfile << "[1000]\n[2000]\n[3000]\n[4000]\n[5000]\n[6000]\n[7000]\n[8000]\n[9000]\n";
outfile.close();
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{fname})
.lines(true)
.legacy(true) // Support in new reader coming in https://github.com/rapidsai/cudf/pull/12498
.byte_range_offset(11)
.byte_range_size(20);
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
EXPECT_EQ(result.tbl->num_columns(), 1);
EXPECT_EQ(result.tbl->num_rows(), 3);
EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::INT64);
EXPECT_EQ(result.metadata.schema_info[0].name, "0");
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0),
int64_wrapper{{3000, 4000, 5000}, validity});
}
TEST_P(JsonReaderDualTest, JsonLinesObjects)
{
auto const test_opt = GetParam();
const std::string fname = temp_env->get_temp_dir() + "JsonLinesObjectsTest.json";
std::ofstream outfile(fname, std::ofstream::out);
outfile << " {\"co\\\"l1\" : 1, \"col2\" : 2.0} \n";
outfile.close();
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{fname})
.lines(true)
.legacy(is_legacy_test(test_opt));
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
EXPECT_EQ(result.tbl->num_columns(), 2);
EXPECT_EQ(result.tbl->num_rows(), 1);
EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::INT64);
EXPECT_EQ(result.metadata.schema_info[0].name, is_legacy_test(test_opt) ? "co\\\"l1" : "co\"l1");
EXPECT_EQ(result.tbl->get_column(1).type().id(), cudf::type_id::FLOAT64);
EXPECT_EQ(result.metadata.schema_info[1].name, "col2");
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0), int64_wrapper{{1}, validity});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(1), float64_wrapper{{2.0}, validity});
}
TEST_P(JsonReaderDualTest, JsonLinesObjectsStrings)
{
auto const test_opt = GetParam();
auto test_json_objects = [test_opt](std::string const& data) {
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{data.data(), data.size()})
.lines(true)
.legacy(is_legacy_test(test_opt));
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
EXPECT_EQ(result.tbl->num_columns(), 3);
EXPECT_EQ(result.tbl->num_rows(), 2);
EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::INT64);
EXPECT_EQ(result.tbl->get_column(1).type().id(), cudf::type_id::FLOAT64);
EXPECT_EQ(result.tbl->get_column(2).type().id(), cudf::type_id::STRING);
EXPECT_EQ(result.metadata.schema_info[0].name, "col1");
EXPECT_EQ(result.metadata.schema_info[1].name, "col2");
EXPECT_EQ(result.metadata.schema_info[2].name, "col3");
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0), int64_wrapper{{100, 200}, validity});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(1),
float64_wrapper{{1.1, 2.2}, validity});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(2),
cudf::test::strings_column_wrapper({"aaa", "bbb"}));
};
// simple case
test_json_objects(
"{\"col1\":100, \"col2\":1.1, \"col3\":\"aaa\"}\n"
"{\"col1\":200, \"col2\":2.2, \"col3\":\"bbb\"}\n");
// out of order fields
test_json_objects(
"{\"col1\":100, \"col2\":1.1, \"col3\":\"aaa\"}\n"
"{\"col3\":\"bbb\", \"col1\":200, \"col2\":2.2}\n");
}
TEST_P(JsonReaderDualTest, JsonLinesObjectsMissingData)
{
auto const test_opt = GetParam();
// Note: columns will be ordered based on which fields appear first
std::string const data =
"{ \"col2\":1.1, \"col3\":\"aaa\"}\n"
"{\"col1\":200, \"col3\":\"bbb\"}\n";
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{data.data(), data.size()})
.lines(true)
.legacy(is_legacy_test(test_opt));
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
EXPECT_EQ(result.tbl->num_columns(), 3);
EXPECT_EQ(result.tbl->num_rows(), 2);
EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::FLOAT64);
EXPECT_EQ(result.tbl->get_column(1).type().id(), cudf::type_id::STRING);
EXPECT_EQ(result.tbl->get_column(2).type().id(), cudf::type_id::INT64);
EXPECT_EQ(result.metadata.schema_info[0].name, "col2");
EXPECT_EQ(result.metadata.schema_info[1].name, "col3");
EXPECT_EQ(result.metadata.schema_info[2].name, "col1");
auto col1_validity =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i != 0; });
auto col2_validity =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i == 0; });
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(2), int64_wrapper{{0, 200}, col1_validity});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0),
float64_wrapper{{1.1, 0.}, col2_validity});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(1),
cudf::test::strings_column_wrapper({"aaa", "bbb"}));
}
TEST_P(JsonReaderDualTest, JsonLinesObjectsOutOfOrder)
{
auto const test_opt = GetParam();
std::string const data =
"{\"col1\":100, \"col2\":1.1, \"col3\":\"aaa\"}\n"
"{\"col3\":\"bbb\", \"col1\":200, \"col2\":2.2}\n";
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{data.data(), data.size()})
.lines(true)
.legacy(is_legacy_test(test_opt));
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
EXPECT_EQ(result.tbl->num_columns(), 3);
EXPECT_EQ(result.tbl->num_rows(), 2);
EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::INT64);
EXPECT_EQ(result.tbl->get_column(1).type().id(), cudf::type_id::FLOAT64);
EXPECT_EQ(result.metadata.schema_info[0].name, "col1");
EXPECT_EQ(result.metadata.schema_info[1].name, "col2");
EXPECT_EQ(result.metadata.schema_info[2].name, "col3");
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0), int64_wrapper{{100, 200}, validity});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(1), float64_wrapper{{1.1, 2.2}, validity});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(2),
cudf::test::strings_column_wrapper({"aaa", "bbb"}));
}
TEST_F(JsonReaderTest, EmptyFile)
{
auto filepath = temp_env->get_temp_dir() + "EmptyFile.json";
{
std::ofstream outfile{filepath, std::ofstream::out};
outfile << "";
}
// New reader only - legacy reader is strict about having non-empty input
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{filepath}).lines(true);
auto result = cudf::io::read_json(in_options);
auto const view = result.tbl->view();
EXPECT_EQ(0, view.num_columns());
}
TEST_F(JsonReaderTest, NoDataFile)
{
auto filepath = temp_env->get_temp_dir() + "NoDataFile.json";
{
std::ofstream outfile{filepath, std::ofstream::out};
outfile << "{}\n";
}
// New reader only - legacy reader is strict about having non-empty input
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{filepath}).lines(true);
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
auto const view = result.tbl->view();
EXPECT_EQ(0, view.num_columns());
}
// empty input in values orient
TEST_F(JsonReaderTest, NoDataFileValues)
{
auto filepath = temp_env->get_temp_dir() + "NoDataFileValues.csv";
{
std::ofstream outfile{filepath, std::ofstream::out};
outfile << "[]\n";
}
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{filepath}).lines(true);
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
auto const view = result.tbl->view();
EXPECT_EQ(0, view.num_columns());
}
TEST_F(JsonReaderTest, ArrowFileSource)
{
const std::string fname = temp_env->get_temp_dir() + "ArrowFileSource.csv";
std::ofstream outfile(fname, std::ofstream::out);
outfile << "[9]\n[8]\n[7]\n[6]\n[5]\n[4]\n[3]\n[2]\n";
outfile.close();
std::shared_ptr<arrow::io::ReadableFile> infile;
ASSERT_TRUE(arrow::io::ReadableFile::Open(fname).Value(&infile).ok());
auto arrow_source = cudf::io::arrow_io_source{infile};
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{&arrow_source})
.dtypes({dtype<int8_t>()})
.lines(true)
.legacy(true); // Support in new reader coming in https://github.com/rapidsai/cudf/pull/12498
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
EXPECT_EQ(result.tbl->num_columns(), 1);
EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::INT8);
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0),
int8_wrapper{{9, 8, 7, 6, 5, 4, 3, 2}, validity});
}
TEST_P(JsonReaderParamTest, InvalidFloatingPoint)
{
auto const test_opt = GetParam();
std::string row_orient = "[1.2e1+]\n[3.4e2-]\n[5.6e3e]\n[7.8e3A]\n[9.0Be1]\n[1C.2]";
std::string record_orient = to_records_orient({{{"0", "1.2e1+"}},
{{"0", "3.4e2-"}},
{{"0", "5.6e3e"}},
{{"0", "7.8e3A"}},
{{"0", "9.0Be1"}},
{{"0", "1C.2"}}},
"\n");
std::string data = is_row_orient_test(test_opt) ? row_orient : record_orient;
auto const filepath = temp_env->get_temp_dir() + "InvalidFloatingPoint.json";
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << data;
}
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{filepath})
.dtypes({dtype<float>()})
.lines(true)
.legacy(is_legacy_test(test_opt));
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
EXPECT_EQ(result.tbl->num_columns(), 1);
EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::FLOAT32);
  // All rows fail to parse as float, so the column is entirely null.
ASSERT_EQ(6u, result.tbl->view().column(0).null_count());
}
TEST_P(JsonReaderParamTest, StringInference)
{
auto const test_opt = GetParam();
std::string row_orient = "[\"-1\"]";
std::string record_orient = to_records_orient({{{"0", R"("-1")"}}}, "\n");
std::string data = is_row_orient_test(test_opt) ? row_orient : record_orient;
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{data.c_str(), data.size()})
.lines(true)
.legacy(is_legacy_test(test_opt));
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
EXPECT_EQ(result.tbl->num_columns(), 1);
EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::STRING);
}
TEST_P(JsonReaderParamTest, ParseInRangeIntegers)
{
auto const test_opt = GetParam();
bool const row_orient = is_row_orient_test(test_opt);
constexpr auto num_rows = 4;
std::vector<int64_t> small_int = {0, -10, 20, -30};
std::vector<int64_t> less_equal_int64_max = {std::numeric_limits<int64_t>::max() - 3,
std::numeric_limits<int64_t>::max() - 2,
std::numeric_limits<int64_t>::max() - 1,
std::numeric_limits<int64_t>::max()};
std::vector<int64_t> greater_equal_int64_min = {std::numeric_limits<int64_t>::min() + 3,
std::numeric_limits<int64_t>::min() + 2,
std::numeric_limits<int64_t>::min() + 1,
std::numeric_limits<int64_t>::min()};
std::vector<uint64_t> greater_int64_max = {uint64_t{std::numeric_limits<int64_t>::max()} - 1,
uint64_t{std::numeric_limits<int64_t>::max()},
uint64_t{std::numeric_limits<int64_t>::max()} + 1,
uint64_t{std::numeric_limits<int64_t>::max()} + 2};
std::vector<uint64_t> less_equal_uint64_max = {std::numeric_limits<uint64_t>::max() - 3,
std::numeric_limits<uint64_t>::max() - 2,
std::numeric_limits<uint64_t>::max() - 1,
std::numeric_limits<uint64_t>::max()};
auto input_small_int = column_wrapper<int64_t>(small_int.begin(), small_int.end());
auto input_less_equal_int64_max =
column_wrapper<int64_t>(less_equal_int64_max.begin(), less_equal_int64_max.end());
auto input_greater_equal_int64_min =
column_wrapper<int64_t>(greater_equal_int64_min.begin(), greater_equal_int64_min.end());
auto input_greater_int64_max =
column_wrapper<uint64_t>(greater_int64_max.begin(), greater_int64_max.end());
auto input_less_equal_uint64_max =
column_wrapper<uint64_t>(less_equal_uint64_max.begin(), less_equal_uint64_max.end());
auto small_int_append_zeros = prepend_zeros(small_int, 32, true);
auto less_equal_int64_max_append_zeros = prepend_zeros(less_equal_int64_max, 32, true);
auto greater_equal_int64_min_append_zeros = prepend_zeros(greater_equal_int64_min, 17);
auto greater_int64_max_append_zeros = prepend_zeros(greater_int64_max, 5);
auto less_equal_uint64_max_append_zeros = prepend_zeros(less_equal_uint64_max, 8, true);
auto filepath = temp_env->get_temp_dir() + "ParseInRangeIntegers.json";
{
std::ostringstream line;
if (row_orient) {
for (int i = 0; i < num_rows; ++i) {
line << "[" << small_int[i] << "," << less_equal_int64_max[i] << ","
<< greater_equal_int64_min[i] << "," << greater_int64_max[i] << ","
<< less_equal_uint64_max[i] << "," << small_int_append_zeros[i] << ","
<< less_equal_int64_max_append_zeros[i] << ","
<< greater_equal_int64_min_append_zeros[i] << "," << greater_int64_max_append_zeros[i]
<< "," << less_equal_uint64_max_append_zeros[i] << "]\n";
}
} else {
std::vector<std::map<std::string, std::string>> records;
for (int i = 0; i < num_rows; ++i) {
records.push_back({
{"0", std::to_string(small_int[i])}, //
{"1", std::to_string(less_equal_int64_max[i])}, //
{"2", std::to_string(greater_equal_int64_min[i])}, //
{"3", std::to_string(greater_int64_max[i])}, //
{"4", std::to_string(less_equal_uint64_max[i])}, //
{"5", small_int_append_zeros[i]}, //
{"6", less_equal_int64_max_append_zeros[i]}, //
{"7", greater_equal_int64_min_append_zeros[i]}, //
{"8", greater_int64_max_append_zeros[i]}, //
{"9", less_equal_uint64_max_append_zeros[i]}, //
});
}
line << to_records_orient(records, "\n");
}
std::ofstream outfile(filepath, std::ofstream::out);
outfile << line.str();
}
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{filepath})
.lines(true)
.legacy(is_legacy_test(test_opt));
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
auto const view = result.tbl->view();
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_small_int, view.column(0));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_less_equal_int64_max, view.column(1));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_greater_equal_int64_min, view.column(2));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_greater_int64_max, view.column(3));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_less_equal_uint64_max, view.column(4));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_small_int, view.column(5));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_less_equal_int64_max, view.column(6));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_greater_equal_int64_min, view.column(7));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_greater_int64_max, view.column(8));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_less_equal_uint64_max, view.column(9));
}
TEST_P(JsonReaderParamTest, ParseOutOfRangeIntegers)
{
auto const test_opt = GetParam();
bool const row_orient = is_row_orient_test(test_opt);
constexpr auto num_rows = 4;
std::vector<std::string> out_of_range_positive = {"111111111111111111111",
"2222222222222222222222",
"33333333333333333333333",
"444444444444444444444444"};
std::vector<std::string> out_of_range_negative = {"-111111111111111111111",
"-2222222222222222222222",
"-33333333333333333333333",
"-444444444444444444444444"};
std::vector<std::string> greater_uint64_max = {
"18446744073709551615", "18446744073709551616", "18446744073709551617", "18446744073709551618"};
std::vector<std::string> less_int64_min = {
"-9223372036854775807", "-9223372036854775808", "-9223372036854775809", "-9223372036854775810"};
std::vector<std::string> mixed_range = {
"18446744073709551613", "18446744073709551614", "18446744073709551615", "-5"};
auto input_out_of_range_positive =
column_wrapper<cudf::string_view>(out_of_range_positive.begin(), out_of_range_positive.end());
auto input_out_of_range_negative =
column_wrapper<cudf::string_view>(out_of_range_negative.begin(), out_of_range_negative.end());
auto input_greater_uint64_max =
column_wrapper<cudf::string_view>(greater_uint64_max.begin(), greater_uint64_max.end());
auto input_less_int64_min =
column_wrapper<cudf::string_view>(less_int64_min.begin(), less_int64_min.end());
auto input_mixed_range =
column_wrapper<cudf::string_view>(mixed_range.begin(), mixed_range.end());
auto out_of_range_positive_append_zeros = prepend_zeros(out_of_range_positive, 32, true);
auto out_of_range_negative_append_zeros = prepend_zeros(out_of_range_negative, 5);
auto greater_uint64_max_append_zeros = prepend_zeros(greater_uint64_max, 8, true);
auto less_int64_min_append_zeros = prepend_zeros(less_int64_min, 17);
auto mixed_range_append_zeros = prepend_zeros(mixed_range, 2, true);
auto input_out_of_range_positive_append = column_wrapper<cudf::string_view>(
out_of_range_positive_append_zeros.begin(), out_of_range_positive_append_zeros.end());
auto input_out_of_range_negative_append = column_wrapper<cudf::string_view>(
out_of_range_negative_append_zeros.begin(), out_of_range_negative_append_zeros.end());
auto input_greater_uint64_max_append = column_wrapper<cudf::string_view>(
greater_uint64_max_append_zeros.begin(), greater_uint64_max_append_zeros.end());
auto input_less_int64_min_append = column_wrapper<cudf::string_view>(
less_int64_min_append_zeros.begin(), less_int64_min_append_zeros.end());
auto input_mixed_range_append = column_wrapper<cudf::string_view>(
mixed_range_append_zeros.begin(), mixed_range_append_zeros.end());
auto filepath = temp_env->get_temp_dir() + "ParseOutOfRangeIntegers.json";
{
std::ostringstream line;
if (row_orient) {
for (int i = 0; i < num_rows; ++i) {
line << "[" << out_of_range_positive[i] << "," << out_of_range_negative[i] << ","
<< greater_uint64_max[i] << "," << less_int64_min[i] << "," << mixed_range[i] << ","
<< out_of_range_positive_append_zeros[i] << ","
<< out_of_range_negative_append_zeros[i] << "," << greater_uint64_max_append_zeros[i]
<< "," << less_int64_min_append_zeros[i] << "," << mixed_range_append_zeros[i]
<< "]\n";
}
} else {
std::vector<std::map<std::string, std::string>> records;
for (int i = 0; i < num_rows; ++i) {
records.push_back({
{"0", out_of_range_positive[i]}, //
{"1", out_of_range_negative[i]}, //
{"2", greater_uint64_max[i]}, //
{"3", less_int64_min[i]}, //
{"4", mixed_range[i]}, //
{"5", out_of_range_positive_append_zeros[i]}, //
{"6", out_of_range_negative_append_zeros[i]}, //
{"7", greater_uint64_max_append_zeros[i]}, //
{"8", less_int64_min_append_zeros[i]}, //
{"9", mixed_range_append_zeros[i]}, //
});
}
line << to_records_orient(records, "\n");
}
std::ofstream outfile(filepath, std::ofstream::out);
outfile << line.str();
}
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{filepath})
.lines(true)
.legacy(is_legacy_test(test_opt));
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
auto const view = result.tbl->view();
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_out_of_range_positive, view.column(0));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_out_of_range_negative, view.column(1));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_greater_uint64_max, view.column(2));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_less_int64_min, view.column(3));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_mixed_range, view.column(4));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_out_of_range_positive_append, view.column(5));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_out_of_range_negative_append, view.column(6));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_greater_uint64_max_append, view.column(7));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_less_int64_min_append, view.column(8));
CUDF_TEST_EXPECT_COLUMNS_EQUIVALENT(input_mixed_range_append, view.column(9));
}
TEST_P(JsonReaderParamTest, JsonLinesMultipleFileInputs)
{
auto const test_opt = GetParam();
std::vector<std::string> row_orient{"[11, 1.1]\n[22, 2.2]\n", "[33, 3.3]\n[44, 4.4]"};
std::vector<std::string> record_orient{
to_records_orient({{{"0", "11"}, {"1", "1.1"}}, {{"0", "22"}, {"1", "2.2"}}}, "\n") + "\n",
to_records_orient({{{"0", "33"}, {"1", "3.3"}}, {{"0", "44"}, {"1", "4.4"}}}, "\n") + "\n"};
auto const& data = is_row_orient_test(test_opt) ? row_orient : record_orient;
const std::string file1 = temp_env->get_temp_dir() + "JsonLinesFileTest1.json";
std::ofstream outfile(file1, std::ofstream::out);
outfile << data[0];
outfile.close();
const std::string file2 = temp_env->get_temp_dir() + "JsonLinesFileTest2.json";
std::ofstream outfile2(file2, std::ofstream::out);
outfile2 << data[1];
outfile2.close();
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{{file1, file2}})
.lines(true)
.legacy(is_legacy_test(test_opt));
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
EXPECT_EQ(result.tbl->num_columns(), 2);
EXPECT_EQ(result.tbl->num_rows(), 4);
EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::INT64);
EXPECT_EQ(result.tbl->get_column(1).type().id(), cudf::type_id::FLOAT64);
EXPECT_EQ(result.metadata.schema_info[0].name, "0");
EXPECT_EQ(result.metadata.schema_info[1].name, "1");
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0),
int64_wrapper{{11, 22, 33, 44}, validity});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(1),
float64_wrapper{{1.1, 2.2, 3.3, 4.4}, validity});
}
TEST_P(JsonReaderNoLegacy, JsonLinesMultipleFileInputsNoNL)
{
auto const test_opt = GetParam();
// Strings for the two separate input files in row-orient that do not end with a newline
std::vector<std::string> row_orient{"[11, 1.1]\n[22, 2.2]", "[33, 3.3]\n[44, 4.4]"};
// Strings for the two separate input files in record-orient that do not end with a newline
std::vector<std::string> record_orient{
to_records_orient({{{"0", "11"}, {"1", "1.1"}}, {{"0", "22"}, {"1", "2.2"}}}, "\n"),
to_records_orient({{{"0", "33"}, {"1", "3.3"}}, {{"0", "44"}, {"1", "4.4"}}}, "\n")};
auto const& data = is_row_orient_test(test_opt) ? row_orient : record_orient;
const std::string file1 = temp_env->get_temp_dir() + "JsonLinesFileTest1.json";
std::ofstream outfile(file1, std::ofstream::out);
outfile << data[0];
outfile.close();
const std::string file2 = temp_env->get_temp_dir() + "JsonLinesFileTest2.json";
std::ofstream outfile2(file2, std::ofstream::out);
outfile2 << data[1];
outfile2.close();
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{{file1, file2}})
.lines(true)
.legacy(is_legacy_test(test_opt));
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
EXPECT_EQ(result.tbl->num_columns(), 2);
EXPECT_EQ(result.tbl->num_rows(), 4);
EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::INT64);
EXPECT_EQ(result.tbl->get_column(1).type().id(), cudf::type_id::FLOAT64);
EXPECT_EQ(result.metadata.schema_info[0].name, "0");
EXPECT_EQ(result.metadata.schema_info[1].name, "1");
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0),
int64_wrapper{{11, 22, 33, 44}, validity});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(1),
float64_wrapper{{1.1, 2.2, 3.3, 4.4}, validity});
}
TEST_F(JsonReaderTest, BadDtypeParams)
{
std::string buffer = "[1,2,3,4]";
cudf::io::json_reader_options options_vec =
cudf::io::json_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.lines(true)
.dtypes({dtype<int8_t>()})
.legacy(true);
// should throw because there are four columns and only one dtype
EXPECT_THROW(cudf::io::read_json(options_vec), cudf::logic_error);
cudf::io::json_reader_options options_map =
cudf::io::json_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.lines(true)
.legacy(true)
.dtypes(std::map<std::string, cudf::data_type>{{"0", dtype<int8_t>()},
{"1", dtype<int8_t>()},
{"2", dtype<int8_t>()},
{"wrong_name", dtype<int8_t>()}});
// should throw because one of the columns is not in the dtype map
EXPECT_THROW(cudf::io::read_json(options_map), cudf::logic_error);
}
TEST_F(JsonReaderTest, JsonExperimentalBasic)
{
std::string const fname = temp_env->get_temp_dir() + "JsonExperimentalBasic.json";
std::ofstream outfile(fname, std::ofstream::out);
outfile << R"([{"a":"11", "b":"1.1"},{"a":"22", "b":"2.2"}])";
outfile.close();
cudf::io::json_reader_options options =
cudf::io::json_reader_options::builder(cudf::io::source_info{fname});
auto result = cudf::io::read_json(options);
EXPECT_EQ(result.tbl->num_columns(), 2);
EXPECT_EQ(result.tbl->num_rows(), 2);
EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::STRING);
EXPECT_EQ(result.tbl->get_column(1).type().id(), cudf::type_id::STRING);
EXPECT_EQ(result.metadata.schema_info[0].name, "a");
EXPECT_EQ(result.metadata.schema_info[1].name, "b");
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0),
cudf::test::strings_column_wrapper({"11", "22"}));
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(1),
cudf::test::strings_column_wrapper({"1.1", "2.2"}));
}
TEST_F(JsonReaderTest, JsonExperimentalLines)
{
std::string const json_string =
R"({"a":"a0"}
{"a":"a1"}
{"a":"a2", "b":"b2"}
{"a":"a3", "c":"c3"}
{"a":"a4"})";
// Initialize parsing options (reading json lines)
cudf::io::json_reader_options json_lines_options =
cudf::io::json_reader_options::builder(
cudf::io::source_info{json_string.c_str(), json_string.size()})
.lines(true);
// Read test data via nested JSON reader
auto const table = cudf::io::read_json(json_lines_options);
// Read test data via legacy, non-nested JSON lines reader
json_lines_options.enable_legacy(true);
auto const legacy_reader_table = cudf::io::read_json(json_lines_options);
// Verify that the data read via non-nested JSON lines reader matches the data read via nested
// JSON reader
CUDF_TEST_EXPECT_TABLES_EQUAL(legacy_reader_table.tbl->view(), table.tbl->view());
}
TEST_F(JsonReaderTest, JsonLongString)
{
// Unicode
// 0000-FFFF Basic Multilingual Plane
// 10000-10FFFF Supplementary Plane
cudf::test::strings_column_wrapper col1{
{
"\"\\/\b\f\n\r\t",
"\"",
"\\",
"/",
"\b",
"\f\n",
"\r\t",
"$€",
"ராபிட்ஸ்",
"C𝞵𝓓𝒻",
"", // null
"", // null
"கார்த்தி",
"CႮ≪ㇳ䍏凹沦王辿龸ꁗ믜스폶ﴠ", // 0000-FFFF
"𐀀𑿪𒐦𓃰𔙆 𖦆𗿿𘳕𚿾[↳] 𜽆𝓚𞤁🄰", // 10000-1FFFF
"𠘨𡥌𢗉𣇊𤊩𥅽𦉱𧴱𨁲𩁹𪐢𫇭𬬭𭺷𮊦屮", // 20000-2FFFF
"𰾑𱔈𲍉", // 30000-3FFFF
R"("$€ \u0024\u20ac \\u0024\\u20ac \\\u0024\\\u20ac \\\\u0024\\\\u20ac)",
R"( \\\\\\\\\\\\\\\\)",
R"(\\\\\\\\\\\\\\\\)",
R"(\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\)",
R"( \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\)",
R"( \\abcd)",
R"( \\\\\\\\\\\\\\\\ \\\\\\\\\\\\\\\\)",
R"( \\\\\\\\\\\\\\\\ \\\\\\\\\\\\\\\\)",
},
cudf::test::iterators::nulls_at({10, 11})};
cudf::test::fixed_width_column_wrapper<int16_t> repeat_times{
{1, 2, 3, 4, 5, 6, 7, 8, 9, 13, 19, 37, 81, 161, 323, 631, 1279, 10, 1, 2, 1, 100, 1000, 1, 3},
cudf::test::iterators::no_nulls()};
auto d_col2 = cudf::strings::repeat_strings(cudf::strings_column_view{col1}, repeat_times);
auto col2 = d_col2->view();
cudf::table_view const tbl_view{{col1, col2, repeat_times}};
cudf::io::table_metadata mt{{{"col1"}, {"col2"}, {"int16"}}};
std::vector<char> out_buffer;
auto destination = cudf::io::sink_info(&out_buffer);
auto options_builder = cudf::io::json_writer_options_builder(destination, tbl_view)
.include_nulls(true)
.metadata(mt)
.lines(true)
.na_rep("null");
cudf::io::write_json(options_builder.build(),
cudf::test::get_default_stream(),
rmm::mr::get_current_device_resource());
cudf::table_view const expected = tbl_view;
std::map<std::string, data_type> types;
types["col1"] = data_type{type_id::STRING};
types["col2"] = data_type{type_id::STRING};
types["int16"] = data_type{type_id::INT16};
// Initialize parsing options (reading json lines)
cudf::io::json_reader_options json_lines_options =
cudf::io::json_reader_options::builder(
cudf::io::source_info{out_buffer.data(), out_buffer.size()})
.lines(true)
.dtypes(types);
// Read test data via nested JSON reader
auto const table = cudf::io::read_json(json_lines_options);
CUDF_TEST_EXPECT_TABLES_EQUAL(expected, table.tbl->view());
}
TEST_F(JsonReaderTest, ErrorStrings)
{
  // Cases of invalid escape characters and invalid unicode encodings.
  // Error strings decode to nulls.
auto const buffer = std::string{R"(
{"col0": "\"\a"}
{"col0": "\u"}
{"col0": "\u0"}
{"col0": "\u0b"}
{"col0": "\u00b"}
{"col0": "\u00bz"}
{"col0": "\t34567890123456\t9012345678901\ug0bc"}
{"col0": "\t34567890123456\t90123456789012\u0hbc"}
{"col0": "\t34567890123456\t90123456789012\u00ic"}
{"col0": "\u0b95\u0bbe\u0bb0\u0bcd\u0ba4\u0bcd\u0ba4\u0bbfகார்த்தி"}
)"};
  // The last entry is not an error case; it shows that unicode in JSON is copied to the string
  // column output.
cudf::io::json_reader_options const in_opts =
cudf::io::json_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.dtypes({data_type{cudf::type_id::STRING}})
.lines(true)
.legacy(false);
auto const result = cudf::io::read_json(in_opts);
auto const result_view = result.tbl->view().column(0);
EXPECT_EQ(result.metadata.schema_info[0].name, "col0");
EXPECT_EQ(result_view.null_count(), 9);
cudf::test::strings_column_wrapper expected{
{"",
"",
"",
"",
"",
"",
"",
"",
"",
"கார்த்தி\xe0\xae\x95\xe0\xae\xbe\xe0\xae\xb0\xe0\xaf\x8d\xe0\xae\xa4\xe0\xaf\x8d\xe0\xae\xa4"
"\xe0\xae\xbf"},
// unicode hex 0xe0 0xae 0x95 0xe0 0xae 0xbe 0xe0 0xae 0xb0 0xe0 0xaf 0x8d
// 0xe0 0xae 0xa4 0xe0 0xaf 0x8d 0xe0 0xae 0xa4 0xe0 0xae 0xbf
cudf::test::iterators::nulls_at({0, 1, 2, 3, 4, 5, 6, 7, 8})};
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result_view, expected);
}
TEST_F(JsonReaderTest, TokenAllocation)
{
std::array<std::string const, 3> const json_inputs{
R"({"":1})",
"{}\n{}\n{}",
R"({"":{"":{"":{"":{"":{"":{"":{"":{"":{"":{"":{"":1}}}}}}}}}}}})",
};
for (auto const& json_string : json_inputs) {
// Initialize parsing options (reading json lines)
cudf::io::json_reader_options json_lines_options =
cudf::io::json_reader_options::builder(
cudf::io::source_info{json_string.c_str(), json_string.size()})
.lines(true);
EXPECT_NO_THROW(cudf::io::read_json(json_lines_options));
}
}
TEST_F(JsonReaderTest, ExperimentalLinesNoOmissions)
{
std::array<std::string const, 4> const json_inputs
// single column
{R"({"a":"a0"}
{"a":"a1"}
{"a":"a2"}
{"a":"a3"}
{"a":"a4"})",
// single column, single row
R"({"a":"a0"})",
// single row
R"({"a":"a0", "b":"b0"})",
// two column, two rows
R"({"a":"a0", "b":"b0"}
{"a":"a1", "b":"b1"})"};
for (auto const& json_string : json_inputs) {
// Initialize parsing options (reading json lines)
cudf::io::json_reader_options json_lines_options =
cudf::io::json_reader_options::builder(
cudf::io::source_info{json_string.c_str(), json_string.size()})
.lines(true);
// Read test data via nested JSON reader
auto const table = cudf::io::read_json(json_lines_options);
// Read test data via legacy, non-nested JSON lines reader
json_lines_options.enable_legacy(true);
auto const legacy_reader_table = cudf::io::read_json(json_lines_options);
// Verify that the data read via non-nested JSON lines reader matches the data read via
// nested JSON reader
CUDF_TEST_EXPECT_TABLES_EQUAL(legacy_reader_table.tbl->view(), table.tbl->view());
}
}
TEST_F(JsonReaderTest, TestColumnOrder)
{
std::string const json_string =
// Expected order:
// root: b, c, a, d
// a: 2, 0, 1
{R"({"b":"b0"}
{"c":"c1","a":{"2":null}}
{"d":"d2","a":{"0":"a2.0", "2":"a2.2"}}
{"b":"b3","a":{"1":null, "2":"a3.2"}})"};
std::vector<std::string> const root_col_names{"b", "c", "a", "d"};
std::vector<std::string> const a_child_col_names{"2", "0", "1"};
// Initialize parsing options (reading json lines)
cudf::io::json_reader_options json_lines_options =
cudf::io::json_reader_options::builder(
cudf::io::source_info{json_string.c_str(), json_string.size()})
.lines(true);
// Read in data using nested JSON reader
cudf::io::table_with_metadata new_reader_table = cudf::io::read_json(json_lines_options);
// Verify root column order (assert to avoid OOB access)
ASSERT_EQ(new_reader_table.metadata.schema_info.size(), root_col_names.size());
  for (std::size_t i = 0; i < root_col_names.size(); i++) {
auto const& root_col_name = root_col_names[i];
EXPECT_EQ(new_reader_table.metadata.schema_info[i].name, root_col_name);
}
// Verify nested child column order (assert to avoid OOB access)
ASSERT_EQ(new_reader_table.metadata.schema_info[2].children.size(), a_child_col_names.size());
for (std::size_t i = 0; i < a_child_col_names.size(); i++) {
auto const& a_child_col_name = a_child_col_names[i];
EXPECT_EQ(new_reader_table.metadata.schema_info[2].children[i].name, a_child_col_name);
}
// Verify data of root columns
ASSERT_EQ(root_col_names.size(), new_reader_table.tbl->num_columns());
column_wrapper<cudf::string_view> root_col_data_b{{"b0", "", "", "b3"},
{true, false, false, true}};
column_wrapper<cudf::string_view> root_col_data_c{{"", "c1", "", ""},
{false, true, false, false}};
column_wrapper<cudf::string_view> root_col_data_d{{"", "", "d2", ""},
{false, false, true, false}};
CUDF_TEST_EXPECT_COLUMNS_EQUAL(root_col_data_b, new_reader_table.tbl->get_column(0));
CUDF_TEST_EXPECT_COLUMNS_EQUAL(root_col_data_c, new_reader_table.tbl->get_column(1));
CUDF_TEST_EXPECT_COLUMNS_EQUAL(root_col_data_d, new_reader_table.tbl->get_column(3));
// Verify data of child columns of column 'a'
auto const col_a = new_reader_table.tbl->get_column(2);
ASSERT_EQ(a_child_col_names.size(), col_a.num_children());
column_wrapper<cudf::string_view> col_a2{{"", "", "a2.2", "a3.2"}, {false, false, true, true}};
column_wrapper<cudf::string_view> col_a0{{"", "", "a2.0", ""}, {false, false, true, false}};
// col a.1 is inferred as all-null
int8_wrapper col_a1{{0, 0, 0, 0}, {false, false, false, false}};
CUDF_TEST_EXPECT_COLUMNS_EQUAL(col_a2, col_a.child(0));
CUDF_TEST_EXPECT_COLUMNS_EQUAL(col_a0, col_a.child(1));
CUDF_TEST_EXPECT_COLUMNS_EQUAL(col_a1, col_a.child(2));
}
TEST_P(JsonReaderParamTest, JsonDtypeSchema)
{
auto const test_opt = GetParam();
std::string row_orient = "[1, 1.1, \"aa \"]\n[2, 2.2, \" bbb\"]";
std::string record_orient = to_records_orient({{{"0", "1"}, {"1", "1.1"}, {"2", R"("aa ")"}},
{{"0", "2"}, {"1", "2.2"}, {"2", R"(" bbb")"}}},
"\n");
std::string data = is_row_orient_test(test_opt) ? row_orient : record_orient;
std::map<std::string, cudf::io::schema_element> dtype_schema{
{"2", {dtype<cudf::string_view>()}}, {"0", {dtype<int32_t>()}}, {"1", {dtype<double>()}}};
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{data.data(), data.size()})
.dtypes(dtype_schema)
.lines(true)
.legacy(is_legacy_test(test_opt));
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
EXPECT_EQ(result.tbl->num_columns(), 3);
EXPECT_EQ(result.tbl->num_rows(), 2);
EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::INT32);
EXPECT_EQ(result.tbl->get_column(1).type().id(), cudf::type_id::FLOAT64);
EXPECT_EQ(result.tbl->get_column(2).type().id(), cudf::type_id::STRING);
EXPECT_EQ(result.metadata.schema_info[0].name, "0");
EXPECT_EQ(result.metadata.schema_info[1].name, "1");
EXPECT_EQ(result.metadata.schema_info[2].name, "2");
auto validity = cudf::detail::make_counting_transform_iterator(0, [](auto i) { return true; });
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0), int_wrapper{{1, 2}, validity});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(1), float64_wrapper{{1.1, 2.2}, validity});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(2),
cudf::test::strings_column_wrapper({"aa ", " bbb"}));
}
TEST_F(JsonReaderTest, JsonNestedDtypeSchema)
{
std::string json_string = R"( [{"a":[123, {"0": 123}], "b":1.0}, {"b":1.1}, {"b":2.1}])";
std::map<std::string, cudf::io::schema_element> dtype_schema{
{"a",
{
data_type{cudf::type_id::LIST},
{{"element", {data_type{cudf::type_id::STRUCT}, {{"0", {dtype<float>()}}}}}},
}},
{"b", {dtype<int32_t>()}},
};
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(
cudf::io::source_info{json_string.data(), json_string.size()})
.dtypes(dtype_schema)
.lines(false);
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
// Make sure we have columns "a" and "b"
ASSERT_EQ(result.tbl->num_columns(), 2);
ASSERT_EQ(result.metadata.schema_info.size(), 2);
EXPECT_EQ(result.metadata.schema_info[0].name, "a");
EXPECT_EQ(result.metadata.schema_info[1].name, "b");
// Make sure column "a" is a list column (offsets and elements)
ASSERT_EQ(result.tbl->get_column(0).num_children(), 2);
ASSERT_EQ(result.metadata.schema_info[0].children.size(), 2);
// Make sure column "b" is a leaf column
ASSERT_EQ(result.tbl->get_column(1).num_children(), 0);
ASSERT_EQ(result.metadata.schema_info[1].children.size(), 0);
// Offsets child with no other child columns
ASSERT_EQ(result.tbl->get_column(0).child(0).num_children(), 0);
ASSERT_EQ(result.metadata.schema_info[0].children[0].children.size(), 0);
EXPECT_EQ(result.metadata.schema_info[0].children[0].name, "offsets");
// Elements is the struct column with a single child column "0"
ASSERT_EQ(result.tbl->get_column(0).child(1).num_children(), 1);
ASSERT_EQ(result.metadata.schema_info[0].children[1].children.size(), 1);
EXPECT_EQ(result.metadata.schema_info[0].children[1].name, "element");
// Verify column "a" being a list column
EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::LIST);
// Verify column "a->element->0" is a float column
EXPECT_EQ(result.tbl->get_column(0).child(1).child(0).type().id(), cudf::type_id::FLOAT32);
// Verify column "b" is an int column
EXPECT_EQ(result.tbl->get_column(1).type().id(), cudf::type_id::INT32);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0).child(0), int_wrapper{{0, 2, 2, 2}});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0).child(1).child(0),
float_wrapper{{0.0, 123.0}, {false, true}});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(1),
int_wrapper{{1, 1, 2}, {true, true, true}});
// List column expected
auto leaf_child = float_wrapper{{0.0, 123.0}, {false, true}};
auto const validity = {1, 0, 0};
auto [null_mask, null_count] =
cudf::test::detail::make_null_mask(validity.begin(), validity.end());
auto expected = cudf::make_lists_column(
3,
int_wrapper{{0, 2, 2, 2}}.release(),
cudf::test::structs_column_wrapper{{leaf_child}, {false, true}}.release(),
null_count,
std::move(null_mask));
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0), *expected);
}
TEST_P(JsonReaderParamTest, JsonDtypeParsing)
{
auto const test_opt = GetParam();
// All corner cases of dtype parsing
// 0, "0", " 0", 1, "1", " 1", "a", "z", null, true, false, "null", "true", "false", nan, "nan"
// Test for dtypes: bool, int, float, str, duration, timestamp
std::string row_orient =
"[0]\n[\"0\"]\n[\" 0\"]\n[1]\n[\"1\"]\n[\" 1\"]\n[\"a\"]\n[\"z\"]\n"
"[null]\n[true]\n[false]\n[\"null\"]\n[\"true\"]\n[\"false\"]\n[nan]\n[\"nan\"]\n";
std::string record_orient = to_records_orient({{{"0", "0"}},
{{"0", "\"0\""}},
{{"0", "\" 0\""}},
{{"0", "1"}},
{{"0", "\"1\""}},
{{"0", "\" 1\""}},
{{"0", "\"a\""}},
{{"0", "\"z\""}},
{{"0", "null"}},
{{"0", "true"}},
{{"0", "false"}},
{{"0", "\"null\""}},
{{"0", "\"true\""}},
{{"0", "\"false\""}},
{{"0", "nan"}},
{{"0", "\"nan\""}}},
"\n");
std::string data = is_row_orient_test(test_opt) ? row_orient : record_orient;
auto make_validity = [](std::vector<int> const& validity) {
return cudf::detail::make_counting_transform_iterator(
0, [&](auto i) -> bool { return static_cast<bool>(validity[i]); });
};
constexpr int int_ignore{};
constexpr double double_ignore{};
constexpr bool bool_ignore{};
std::vector<int> const validity = {1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0};
auto int_col = int_wrapper{{0,
0,
int_ignore,
1,
1,
int_ignore,
int_ignore,
int_ignore,
int_ignore,
1,
0,
int_ignore,
1,
0,
int_ignore,
int_ignore},
make_validity(validity)};
auto float_col = float_wrapper{{0.0,
0.0,
double_ignore,
1.0,
1.0,
double_ignore,
double_ignore,
double_ignore,
double_ignore,
1.0,
0.0,
double_ignore,
1.0,
0.0,
double_ignore,
double_ignore},
make_validity(validity)};
auto str_col =
cudf::test::strings_column_wrapper{// clang-format off
{"0", "0", " 0", "1", "1", " 1", "a", "z", "", "true", "false", "null", "true", "false", "nan", "nan"},
cudf::test::iterators::nulls_at(std::vector<int>{8})};
// clang-format on
auto bool_col = bool_wrapper{{false,
false,
bool_ignore,
true,
true,
bool_ignore,
bool_ignore,
bool_ignore,
bool_ignore,
true,
false,
bool_ignore,
true,
false,
bool_ignore,
bool_ignore},
make_validity(validity)};
// Types to test
const std::vector<data_type> dtypes = {
dtype<int32_t>(), dtype<float>(), dtype<cudf::string_view>(), dtype<bool>()};
const std::vector<cudf::column_view> cols{cudf::column_view(int_col),
cudf::column_view(float_col),
cudf::column_view(str_col),
cudf::column_view(bool_col)};
for (size_t col_type = 0; col_type < cols.size(); col_type++) {
std::map<std::string, cudf::io::schema_element> dtype_schema{{"0", {dtypes[col_type]}}};
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{data.data(), data.size()})
.dtypes(dtype_schema)
.lines(true)
.legacy(is_legacy_test(test_opt));
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
EXPECT_EQ(result.tbl->num_columns(), 1);
EXPECT_EQ(result.tbl->num_rows(), 16);
EXPECT_EQ(result.metadata.schema_info[0].name, "0");
EXPECT_EQ(result.tbl->get_column(0).type().id(), dtypes[col_type].id());
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0), cols[col_type]);
}
}
TYPED_TEST(JsonValidFixedPointReaderTest, SingleColumnNegativeScale)
{
this->run_tests({"1.23", "876e-2", "5.43e1", "-0.12", "0.25", "-0.23", "-0.27", "0.00", "0.00"},
numeric::scale_type{-2});
}
TYPED_TEST(JsonValidFixedPointReaderTest, SingleColumnNoScale)
{
this->run_tests({"123", "-87600e-2", "54.3e1", "-12", "25", "-23", "-27", "0", "0"},
numeric::scale_type{0});
}
TYPED_TEST(JsonValidFixedPointReaderTest, SingleColumnPositiveScale)
{
this->run_tests(
{"123000", "-87600000e-2", "54300e1", "-12000", "25000", "-23000", "-27000", "0000", "0000"},
numeric::scale_type{3});
}
TYPED_TEST(JsonFixedPointReaderTest, EmptyValues)
{
auto const buffer = std::string{"{\"col0\":}"};
cudf::io::json_reader_options const in_opts =
cudf::io::json_reader_options::builder(cudf::io::source_info{buffer.c_str(), buffer.size()})
.dtypes({data_type{type_to_id<TypeParam>(), 0}})
.lines(true)
.legacy(true); // Legacy behavior; not aligned with JSON specs
auto const result = cudf::io::read_json(in_opts);
auto const result_view = result.tbl->view();
ASSERT_EQ(result_view.num_columns(), 1);
EXPECT_EQ(result_view.num_rows(), 1);
EXPECT_EQ(result.metadata.schema_info[0].name, "col0");
EXPECT_EQ(result_view.column(0).null_count(), 1);
}
TEST_F(JsonReaderTest, UnsupportedMultipleFileInputs)
{
std::string const data = "{\"col\":0}";
auto const buffer = cudf::io::host_buffer{data.data(), data.size()};
auto const src = cudf::io::source_info{{buffer, buffer}};
cudf::io::json_reader_options const not_lines_opts = cudf::io::json_reader_options::builder(src);
EXPECT_THROW(cudf::io::read_json(not_lines_opts), cudf::logic_error);
cudf::io::json_reader_options const comp_exp_opts =
cudf::io::json_reader_options::builder(src).compression(cudf::io::compression_type::GZIP).lines(true);
EXPECT_THROW(cudf::io::read_json(comp_exp_opts), cudf::logic_error);
cudf::io::json_reader_options const comp_opts =
cudf::io::json_reader_options::builder(src).compression(cudf::io::compression_type::GZIP);
EXPECT_THROW(cudf::io::read_json(comp_opts), cudf::logic_error);
}
TEST_F(JsonReaderTest, TrailingCommas)
{
std::vector<std::string> const json_lines_valid{
R"({"a":"a0",}
{"a":"a2", "b":"b2",}
{"a":"a4",})",
R"({"a":"a0"}
{"a":"a2", "b": [1, 2,]})",
R"({"a":"a0",}
{"a":"a2", "b": [1, 2,],})",
};
for (size_t i = 0; i < json_lines_valid.size(); i++) {
auto const& json_string = json_lines_valid[i];
// Initialize parsing options (reading json lines)
cudf::io::json_reader_options json_parser_options =
cudf::io::json_reader_options::builder(
cudf::io::source_info{json_string.c_str(), json_string.size()})
.lines(true);
EXPECT_NO_THROW(cudf::io::read_json(json_parser_options)) << "Failed on test case " << i;
}
std::vector<std::string> const json_valid{
R"([{"a":"a0",}, {"a":"a2", "b":"b2",}, {"a":"a4"},])",
R"([{"a":"a0"}, {"a":"a2", "b": [1, 2,]}])",
R"([{"a":"a0",}, {"a":"a2", "b": [1, 2,],}])",
R"([{"a": 1,}, {"a": null, "b": [null,],}])",
};
for (size_t i = 0; i < json_valid.size(); i++) {
auto const& json_string = json_valid[i];
cudf::io::json_reader_options json_parser_options = cudf::io::json_reader_options::builder(
cudf::io::source_info{json_string.c_str(), json_string.size()});
EXPECT_NO_THROW(cudf::io::read_json(json_parser_options)) << "Failed on test case " << i;
}
std::vector<std::string> const json_invalid{
R"([{"a":"a0",,}])",
R"([{"a":"a0"},,])",
R"([,{"a":"a0"}])",
R"([{,"a":"a0"}])",
R"([{,}])",
R"([,])",
R"([,,])",
R"([{,,}])",
};
for (size_t i = 0; i < json_invalid.size(); i++) {
auto const& json_string = json_invalid[i];
cudf::io::json_reader_options json_parser_options = cudf::io::json_reader_options::builder(
cudf::io::source_info{json_string.c_str(), json_string.size()});
EXPECT_THROW(cudf::io::read_json(json_parser_options), cudf::logic_error)
<< "Failed on test case " << i;
}
}
TEST_F(JsonReaderTest, JSONLinesRecovering)
{
std::string data =
// 0 -> a: -2 (valid)
R"({"a":-2})"
"\n"
// 1 -> (invalid)
R"({"a":])"
"\n"
// 2 -> (invalid)
R"({"b":{"a":[321})"
"\n"
// 3 -> c: 1.2 (valid)
R"({"c":1.2})"
"\n"
"\n"
// 4 -> a: 4 (valid)
R"({"a":4})"
"\n"
// 5 -> (invalid)
R"({"a":5)"
"\n"
// 6 -> (invalid)
R"({"a":6 )"
"\n"
// 7 -> (invalid)
R"({"b":[7 )"
"\n"
// 8 -> a: 8 (valid)
R"({"a":8})"
"\n"
// 9 -> (invalid)
R"({"d":{"unterminated_field_name)"
"\n"
// 10 -> (invalid)
R"({"d":{)"
"\n"
// 11 -> (invalid)
R"({"d":{"123",)"
"\n"
// 12 -> a: 12 (valid)
R"({"a":12})";
auto filepath = temp_env->get_temp_dir() + "RecoveringLines.json";
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << data;
}
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{filepath})
.lines(true)
.recovery_mode(cudf::io::json_recovery_mode_t::RECOVER_WITH_NULL);
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
EXPECT_EQ(result.tbl->num_columns(), 2);
EXPECT_EQ(result.tbl->num_rows(), 13);
EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::INT64);
EXPECT_EQ(result.tbl->get_column(1).type().id(), cudf::type_id::FLOAT64);
std::vector<bool> a_validity{
true, false, false, false, true, false, false, false, true, false, false, false, true};
std::vector<bool> c_validity{
false, false, false, true, false, false, false, false, false, false, false, false, false};
CUDF_TEST_EXPECT_COLUMNS_EQUAL(
result.tbl->get_column(0),
int64_wrapper{{-2, 0, 0, 0, 4, 0, 0, 0, 8, 0, 0, 0, 12}, a_validity.cbegin()});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(
result.tbl->get_column(1),
float64_wrapper{{0.0, 0.0, 0.0, 1.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0},
c_validity.cbegin()});
}
TEST_F(JsonReaderTest, JSONLinesRecoveringIgnoreExcessChars)
{
/**
* @brief Spark has the specific need to ignore extra characters that come after the first record
* on a JSON line
*/
std::string data =
// 0 -> a: -2 (valid)
R"({"a":-2}{})"
"\n"
// 1 -> (invalid)
R"({"b":{}should_be_invalid})"
"\n"
// 2 -> b (valid)
R"({"b":{"a":3} })"
"\n"
// 3 -> c: (valid)
R"({"c":1.2 } )"
"\n"
"\n"
// 4 -> (valid)
R"({"a":4} 123)"
"\n"
// 5 -> (valid)
R"({"a":5}//Comment after record)"
"\n"
// 6 -> (valid)
R"({"a":6} //Comment after whitespace)"
"\n"
// 7 -> (invalid)
R"({"a":5 //Invalid Comment within record})";
auto filepath = temp_env->get_temp_dir() + "RecoveringLinesExcessChars.json";
{
std::ofstream outfile(filepath, std::ofstream::out);
outfile << data;
}
cudf::io::json_reader_options in_options =
cudf::io::json_reader_options::builder(cudf::io::source_info{filepath})
.lines(true)
.recovery_mode(cudf::io::json_recovery_mode_t::RECOVER_WITH_NULL);
cudf::io::table_with_metadata result = cudf::io::read_json(in_options);
EXPECT_EQ(result.tbl->num_columns(), 3);
EXPECT_EQ(result.tbl->num_rows(), 8);
EXPECT_EQ(result.tbl->get_column(0).type().id(), cudf::type_id::INT64);
EXPECT_EQ(result.tbl->get_column(1).type().id(), cudf::type_id::STRUCT);
EXPECT_EQ(result.tbl->get_column(2).type().id(), cudf::type_id::FLOAT64);
std::vector<bool> a_validity{true, false, false, false, true, true, true, false};
std::vector<bool> b_validity{false, false, true, false, false, false, false, false};
std::vector<bool> c_validity{false, false, false, true, false, false, false, false};
// Child column b->a
auto b_a_col = int64_wrapper({0, 0, 3, 0, 0, 0, 0, 0});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(result.tbl->get_column(0),
int64_wrapper{{-2, 0, 0, 0, 4, 5, 6, 0}, a_validity.cbegin()});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(
result.tbl->get_column(1), cudf::test::structs_column_wrapper({b_a_col}, b_validity.cbegin()));
CUDF_TEST_EXPECT_COLUMNS_EQUAL(
result.tbl->get_column(2),
float64_wrapper{{0.0, 0.0, 0.0, 1.2, 0.0, 0.0, 0.0, 0.0}, c_validity.cbegin()});
}
CUDF_TEST_PROGRAM_MAIN()
| 0 |
rapidsai_public_repos/cudf/cpp/tests
|
rapidsai_public_repos/cudf/cpp/tests/io/json_chunked_reader.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/table_utilities.hpp>
#include <io/json/read_json.hpp>
/**
* @brief Base test fixture for JSON reader tests
*/
struct JsonReaderTest : public cudf::test::BaseFixture {};
// Helper that finds the first delimiter in each chunk, collates those positions to form a
// byte_range for each chunk, and then parses each byte range separately.
std::vector<cudf::io::table_with_metadata> skeleton_for_parallel_chunk_reader(
cudf::host_span<std::unique_ptr<cudf::io::datasource>> sources,
cudf::io::json_reader_options const& reader_opts,
int32_t chunk_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using namespace cudf::io::json::detail;
using cudf::size_type;
// Note: this helper assumes a single source; sizes are summed here for generality.
size_t total_source_size = 0;
for (auto const& source : sources) {
total_source_size += source->size();
}
size_t num_chunks = (total_source_size + chunk_size - 1) / chunk_size;
constexpr size_type no_min_value = -1;
// Get the first delimiter in each chunk.
std::vector<size_type> first_delimiter_index(num_chunks);
auto reader_opts_chunk = reader_opts;
for (size_t i = 0; i < num_chunks; i++) {
auto const chunk_start = i * chunk_size;
reader_opts_chunk.set_byte_range_offset(chunk_start);
reader_opts_chunk.set_byte_range_size(chunk_size);
first_delimiter_index[i] =
find_first_delimiter_in_chunk(sources, reader_opts_chunk, '\n', stream);
if (first_delimiter_index[i] != no_min_value) { first_delimiter_index[i] += chunk_start; }
}
// Process and allocate record start, end for each worker.
using record_range = std::pair<size_type, size_type>;
std::vector<record_range> record_ranges;
record_ranges.reserve(num_chunks);
first_delimiter_index[0] = 0;
auto prev = first_delimiter_index[0];
for (size_t i = 1; i < num_chunks; i++) {
if (first_delimiter_index[i] == no_min_value) continue;
record_ranges.push_back({prev, first_delimiter_index[i]});
prev = first_delimiter_index[i];
}
record_ranges.push_back({prev, total_source_size});
std::vector<cudf::io::table_with_metadata> tables;
// Process each chunk in parallel.
for (auto const& [chunk_start, chunk_end] : record_ranges) {
if (chunk_start == -1 or chunk_end == -1) continue;
reader_opts_chunk.set_byte_range_offset(chunk_start);
reader_opts_chunk.set_byte_range_size(chunk_end - chunk_start);
tables.push_back(read_json(sources, reader_opts_chunk, stream, mr));
}
// Assumes all records have the same number of columns and infer to the same types (or that a
// schema is passed).
// TODO: add a preceding step to merge all columns and types and infer the final schema.
return tables;
}
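// Informal example of the chunking above (hypothetical numbers, for illustration only): with a
// 100-byte source and chunk_size 40, the chunk offsets are 0, 40 and 80; if the first newlines
// found in those chunks are at bytes 0, 57 and 93, the record ranges become [0, 57), [57, 93)
// and [93, 100), so every record is parsed exactly once even when records straddle chunk
// boundaries.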
TEST_F(JsonReaderTest, ByteRange)
{
std::string const json_string = R"(
{ "a": { "y" : 6}, "b" : [1, 2, 3], "c": 11 }
{ "a": { "y" : 6}, "b" : [4, 5 ], "c": 12 }
{ "a": { "y" : 6}, "b" : [6 ], "c": 13 }
{ "a": { "y" : 6}, "b" : [7 ], "c": 14 })";
// Initialize parsing options (reading json lines)
cudf::io::json_reader_options json_lines_options =
cudf::io::json_reader_options::builder(
cudf::io::source_info{json_string.c_str(), json_string.size()})
.compression(cudf::io::compression_type::NONE)
.lines(true);
// Read full test data via existing, nested JSON lines reader
cudf::io::table_with_metadata current_reader_table = cudf::io::read_json(json_lines_options);
auto datasources = cudf::io::datasource::create(json_lines_options.get_source().host_buffers());
// Test for different chunk sizes
for (auto chunk_size : {7, 10, 15, 20, 40, 50, 100, 200, 500}) {
auto const tables = skeleton_for_parallel_chunk_reader(datasources,
json_lines_options,
chunk_size,
cudf::get_default_stream(),
rmm::mr::get_current_device_resource());
auto table_views = std::vector<cudf::table_view>(tables.size());
std::transform(tables.begin(), tables.end(), table_views.begin(), [](auto& table) {
return table.tbl->view();
});
auto result = cudf::concatenate(table_views);
// Verify that the data read via chunked reader matches the data read via nested JSON reader
// cannot use EQUAL due to concatenate removing null mask
CUDF_TEST_EXPECT_TABLES_EQUIVALENT(current_reader_table.tbl->view(), result->view());
}
}
| 0 |
rapidsai_public_repos/cudf/cpp/tests
|
rapidsai_public_repos/cudf/cpp/tests/io/arrow_io_source_test.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/table_utilities.hpp>
#include <cudf_test/type_lists.hpp>
#include <cudf/io/arrow_io_source.hpp>
#include <cudf/io/json.hpp>
#include <cudf/io/parquet.hpp>
#include <arrow/filesystem/filesystem.h>
#include <arrow/filesystem/s3fs.h>
#include <arrow/io/api.h>
#include <arrow/util/config.h>
#include <fstream>
#include <memory>
#include <string>
// Global environment for temporary files
auto const temp_env = static_cast<cudf::test::TempDirTestEnvironment*>(
::testing::AddGlobalTestEnvironment(new cudf::test::TempDirTestEnvironment));
// Base test fixture for tests
struct ArrowIOTest : public cudf::test::BaseFixture {};
TEST_F(ArrowIOTest, URIFileSystem)
{
const std::string file_name = temp_env->get_temp_dir() + "JsonLinesFileTest.json";
std::ofstream outfile(file_name, std::ofstream::out);
outfile << "{\"a\":11, \"b\":1.1}\n{\"a\":22, \"b\":2.2}";
outfile.close();
std::string file_uri = "file://" + file_name;
auto datasource = std::make_unique<cudf::io::arrow_io_source>(file_uri);
// Populate the JSON Reader Options
cudf::io::json_reader_options options =
cudf::io::json_reader_options::builder(cudf::io::source_info(datasource.get())).lines(true);
// Read the JSON file from the LocalFileSystem
cudf::io::table_with_metadata tbl = cudf::io::read_json(options);
ASSERT_EQ(2, tbl.tbl->num_columns());
ASSERT_EQ(2, tbl.tbl->num_rows());
}
TEST_F(ArrowIOTest, S3FileSystem)
{
std::string s3_uri = "s3://rapidsai-data/cudf/test/tips.parquet?region=us-east-2";
// Check to see if Arrow was built with support for S3. If not, ensure this
// test throws. If so, validate the S3 file contents.
auto const s3_unsupported = arrow::fs::FileSystemFromUri(s3_uri).status().IsNotImplemented();
if (s3_unsupported) {
EXPECT_THROW(std::make_unique<cudf::io::arrow_io_source>(s3_uri), cudf::logic_error);
} else {
auto datasource = std::make_unique<cudf::io::arrow_io_source>(s3_uri);
// Populate the Parquet Reader Options
cudf::io::source_info src(datasource.get());
std::vector<std::string> single_column;
single_column.insert(single_column.begin(), "total_bill");
cudf::io::parquet_reader_options_builder builder(src);
cudf::io::parquet_reader_options options = builder.columns(single_column).build();
// Read the Parquet file from S3
cudf::io::table_with_metadata tbl = cudf::io::read_parquet(options);
ASSERT_EQ(1, tbl.tbl->num_columns()); // Only single column specified in reader_options
ASSERT_EQ(244, tbl.tbl->num_rows()); // known number of rows from the S3 file
}
#ifdef ARROW_S3
if (!s3_unsupported) {
// Verify that we are using Arrow with S3, and call finalize
// https://github.com/apache/arrow/issues/36974
// This needs to be in a separate conditional to ensure we call
// finalize after all arrow_io_source instances have been deleted.
[[maybe_unused]] auto _ = arrow::fs::EnsureS3Finalized();
}
#endif
}
CUDF_TEST_PROGRAM_MAIN()
| 0 |
rapidsai_public_repos/cudf/cpp/tests/io
|
rapidsai_public_repos/cudf/cpp/tests/io/comp/decomp_test.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <io/comp/gpuinflate.hpp>
#include <io/utilities/hostdevice_vector.hpp>
#include <src/io/comp/nvcomp_adapter.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf_test/base_fixture.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <vector>
using cudf::device_span;
/**
* @brief Base test fixture for decompression
*
* Calls into Decompressor fixture to dispatch actual decompression work,
* whose interface and setup is different for each codec.
*/
template <typename Decompressor>
struct DecompressTest : public cudf::test::BaseFixture {
std::vector<uint8_t> vector_from_string(char const* str) const
{
return std::vector<uint8_t>(reinterpret_cast<uint8_t const*>(str),
reinterpret_cast<uint8_t const*>(str) + strlen(str));
}
void Decompress(std::vector<uint8_t>* decompressed,
uint8_t const* compressed,
size_t compressed_size)
{
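// Stage the compressed bytes on the device, describe the single input/output buffer pair with
// hostdevice_vectors of spans, dispatch the codec-specific kernel, then copy the decompressed
// bytes back to the host and check the reported status.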
auto stream = cudf::get_default_stream();
rmm::device_buffer src{compressed, compressed_size, stream};
rmm::device_uvector<uint8_t> dst{decompressed->size(), stream};
cudf::detail::hostdevice_vector<device_span<uint8_t const>> inf_in(1, stream);
inf_in[0] = {static_cast<uint8_t const*>(src.data()), src.size()};
inf_in.host_to_device_async(stream);
cudf::detail::hostdevice_vector<device_span<uint8_t>> inf_out(1, stream);
inf_out[0] = dst;
inf_out.host_to_device_async(stream);
cudf::detail::hostdevice_vector<cudf::io::compression_result> inf_stat(1, stream);
inf_stat[0] = {};
inf_stat.host_to_device_async(stream);
static_cast<Decompressor*>(this)->dispatch(inf_in, inf_out, inf_stat);
CUDF_CUDA_TRY(cudaMemcpyAsync(
decompressed->data(), dst.data(), dst.size(), cudaMemcpyDefault, stream.value()));
inf_stat.device_to_host_sync(stream);
ASSERT_EQ(inf_stat[0].status, cudf::io::compression_status::SUCCESS);
}
};
/**
* @brief Derived fixture for GZIP decompression
*/
struct GzipDecompressTest : public DecompressTest<GzipDecompressTest> {
void dispatch(device_span<device_span<uint8_t const>> d_inf_in,
device_span<device_span<uint8_t>> d_inf_out,
device_span<cudf::io::compression_result> d_inf_stat)
{
cudf::io::gpuinflate(d_inf_in,
d_inf_out,
d_inf_stat,
cudf::io::gzip_header_included::YES,
cudf::get_default_stream());
}
};
/**
* @brief Derived fixture for Snappy decompression
*/
struct SnappyDecompressTest : public DecompressTest<SnappyDecompressTest> {
void dispatch(device_span<device_span<uint8_t const>> d_inf_in,
device_span<device_span<uint8_t>> d_inf_out,
device_span<cudf::io::compression_result> d_inf_stat)
{
cudf::io::gpu_unsnap(d_inf_in, d_inf_out, d_inf_stat, cudf::get_default_stream());
}
};
/**
* @brief Derived fixture for Brotli decompression
*/
struct BrotliDecompressTest : public DecompressTest<BrotliDecompressTest> {
void dispatch(device_span<device_span<uint8_t const>> d_inf_in,
device_span<device_span<uint8_t>> d_inf_out,
device_span<cudf::io::compression_result> d_inf_stat)
{
rmm::device_buffer d_scratch{cudf::io::get_gpu_debrotli_scratch_size(1),
cudf::get_default_stream()};
cudf::io::gpu_debrotli(d_inf_in,
d_inf_out,
d_inf_stat,
d_scratch.data(),
d_scratch.size(),
cudf::get_default_stream());
}
};
struct NvcompConfigTest : public cudf::test::BaseFixture {};
TEST_F(GzipDecompressTest, HelloWorld)
{
constexpr char uncompressed[] = "hello world";
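// Informal layout of the byte stream below: 1f 8b (gzip magic), 08 (deflate), 00 (flags),
// a 4-byte mtime, XFL, OS, the raw deflate data, then the CRC32 field and the 4-byte
// uncompressed size (0x0b == 11, the length of "hello world").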
constexpr uint8_t compressed[] = {
0x1f, 0x8b, 0x8, 0x0, 0x9, 0x63, 0x99, 0x5c, 0x2, 0xff, 0xcb, 0x48, 0xcd, 0xc9, 0xc9, 0x57,
0x28, 0xcf, 0x2f, 0xca, 0x49, 0x1, 0x0, 0x85, 0x11, 0x4a, 0xd, 0xb, 0x0, 0x0, 0x0};
std::vector<uint8_t> input = vector_from_string(uncompressed);
std::vector<uint8_t> output(input.size());
Decompress(&output, compressed, sizeof(compressed));
EXPECT_EQ(output, input);
}
TEST_F(SnappyDecompressTest, HelloWorld)
{
constexpr char uncompressed[] = "hello world";
constexpr uint8_t compressed[] = {
0xb, 0x28, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64};
std::vector<uint8_t> input = vector_from_string(uncompressed);
std::vector<uint8_t> output(input.size());
Decompress(&output, compressed, sizeof(compressed));
EXPECT_EQ(output, input);
}
TEST_F(SnappyDecompressTest, ShortLiteralAfterLongCopyAtStartup)
{
constexpr char uncompressed[] = "Aaaaaaaaaaaah!";
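// Informal reading of the frame below: 14 (uncompressed length), a one-byte literal 'A',
// a one-byte literal 'a', a copy of length 10 at offset 1 (the overlapping copy that repeats
// 'a'), and finally a two-byte literal "h!".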
constexpr uint8_t compressed[] = {14, 0x0, 'A', 0x0, 'a', (10 - 4) * 4 + 1, 1, 0x4, 'h', '!'};
std::vector<uint8_t> input = vector_from_string(uncompressed);
std::vector<uint8_t> output(input.size());
Decompress(&output, compressed, sizeof(compressed));
EXPECT_EQ(output, input);
}
TEST_F(BrotliDecompressTest, HelloWorld)
{
constexpr char uncompressed[] = "hello world";
constexpr uint8_t compressed[] = {
0xb, 0x5, 0x80, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x3};
std::vector<uint8_t> input = vector_from_string(uncompressed);
std::vector<uint8_t> output(input.size());
Decompress(&output, compressed, sizeof(compressed));
EXPECT_EQ(output, input);
}
TEST_F(NvcompConfigTest, Compression)
{
using cudf::io::nvcomp::compression_type;
auto const& comp_disabled = cudf::io::nvcomp::is_compression_disabled;
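// The brace-init arguments passed to these nvcomp feature checks appear to be, in order: the
// nvcomp major/minor/patch version, whether all integrations are enabled, whether stable
// integrations are enabled, and the device's compute capability major version (informal reading
// based on the comments accompanying the checks below).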
EXPECT_FALSE(comp_disabled(compression_type::DEFLATE, {2, 5, 0, true, true, 0}));
// version 2.5 required
EXPECT_TRUE(comp_disabled(compression_type::DEFLATE, {2, 4, 0, true, true, 0}));
// all integrations enabled required
EXPECT_TRUE(comp_disabled(compression_type::DEFLATE, {2, 5, 0, false, true, 0}));
EXPECT_FALSE(comp_disabled(compression_type::ZSTD, {2, 4, 0, true, true, 0}));
EXPECT_FALSE(comp_disabled(compression_type::ZSTD, {2, 4, 0, false, true, 0}));
// 2.4 version required
EXPECT_TRUE(comp_disabled(compression_type::ZSTD, {2, 3, 1, false, true, 0}));
// stable integrations enabled required
EXPECT_TRUE(comp_disabled(compression_type::ZSTD, {2, 4, 0, false, false, 0}));
EXPECT_FALSE(comp_disabled(compression_type::SNAPPY, {2, 5, 0, true, true, 0}));
EXPECT_FALSE(comp_disabled(compression_type::SNAPPY, {2, 4, 0, false, true, 0}));
// stable integrations enabled required
EXPECT_TRUE(comp_disabled(compression_type::SNAPPY, {2, 3, 0, false, false, 0}));
}
TEST_F(NvcompConfigTest, Decompression)
{
using cudf::io::nvcomp::compression_type;
auto const& decomp_disabled = cudf::io::nvcomp::is_decompression_disabled;
EXPECT_FALSE(decomp_disabled(compression_type::DEFLATE, {2, 5, 0, true, true, 7}));
// version 2.5 required
EXPECT_TRUE(decomp_disabled(compression_type::DEFLATE, {2, 4, 0, true, true, 7}));
// all integrations enabled required
EXPECT_TRUE(decomp_disabled(compression_type::DEFLATE, {2, 5, 0, false, true, 7}));
EXPECT_FALSE(decomp_disabled(compression_type::ZSTD, {2, 4, 0, true, true, 7}));
EXPECT_FALSE(decomp_disabled(compression_type::ZSTD, {2, 3, 2, false, true, 6}));
EXPECT_FALSE(decomp_disabled(compression_type::ZSTD, {2, 3, 0, true, true, 6}));
// 2.3.1 and earlier requires all integrations to be enabled
EXPECT_TRUE(decomp_disabled(compression_type::ZSTD, {2, 3, 1, false, true, 7}));
// 2.3 version required
EXPECT_TRUE(decomp_disabled(compression_type::ZSTD, {2, 2, 0, true, true, 7}));
// stable integrations enabled required
EXPECT_TRUE(decomp_disabled(compression_type::ZSTD, {2, 4, 0, false, false, 7}));
// 2.4.0 disabled on Pascal
EXPECT_TRUE(decomp_disabled(compression_type::ZSTD, {2, 4, 0, true, true, 6}));
EXPECT_FALSE(decomp_disabled(compression_type::SNAPPY, {2, 4, 0, true, true, 7}));
EXPECT_FALSE(decomp_disabled(compression_type::SNAPPY, {2, 3, 0, false, true, 7}));
EXPECT_FALSE(decomp_disabled(compression_type::SNAPPY, {2, 2, 0, false, true, 7}));
// stable integrations enabled required
EXPECT_TRUE(decomp_disabled(compression_type::SNAPPY, {2, 2, 0, false, false, 7}));
}
CUDF_TEST_PROGRAM_MAIN()
| 0 |
rapidsai_public_repos/cudf/cpp/tests/io
|
rapidsai_public_repos/cudf/cpp/tests/io/fst/logical_stack_test.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_test/base_fixture.hpp>
#include <cudf/types.hpp>
#include <io/utilities/hostdevice_vector.hpp>
#include <src/io/fst/logical_stack.cuh>
#include <rmm/cuda_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <cstdlib>
#include <iostream>
#include <iterator>
#include <stack>
#include <vector>
namespace {
namespace fst = cudf::io::fst;
/**
* @brief Generates the sparse representation of stack operations to feed into the logical
* stack
*
* @param begin Forward input iterator to the first item of symbols that are checked for whether
* they push or pop
* @param end Forward input iterator to one past the last item of symbols that are checked for
* whether they push or pop
* @param to_stack_op A function object that takes an instance of InputItT's value type and
* returns the kind of stack operation such item represents (i.e., of type stack_op_type)
* @param stack_symbol_out Forward output iterator to which symbols that either push or pop are
* assigned
* @param stack_op_index_out Forward output iterator to which the indexes of symbols that either
* push or pop are assigned
* @return Pair of iterators to one past the last item of the items written to \p stack_symbol_out
* and \p stack_op_index_out, respectively
*/
template <typename InputItT,
typename ToStackOpTypeT,
typename StackSymbolOutItT,
typename StackOpIndexOutItT>
std::pair<StackSymbolOutItT, StackOpIndexOutItT> to_sparse_stack_symbols(
InputItT begin,
InputItT end,
ToStackOpTypeT to_stack_op,
StackSymbolOutItT stack_symbol_out,
StackOpIndexOutItT stack_op_index_out)
{
std::size_t index = 0;
for (auto it = begin; it < end; it++) {
fst::stack_op_type op_type = to_stack_op(*it);
if (op_type == fst::stack_op_type::PUSH || op_type == fst::stack_op_type::POP) {
*stack_symbol_out = *it;
*stack_op_index_out = index;
stack_symbol_out++;
stack_op_index_out++;
}
index++;
}
return std::make_pair(stack_symbol_out, stack_op_index_out);
}
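// Informal example: applied to the input "{a[b]}" with a functor that treats braces and brackets
// as push/pop (like JSONToStackOp below), this produces the stack symbols "{[]}" at indexes
// 0, 2, 4 and 5; the non-structural symbols 'a' and 'b' are skipped.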
/**
* @brief Reads in a sequence of items that represent stack operations, applies these operations to
* a stack, and, for every operation being read in, outputs what was the symbol on top of the stack
* before the operation was applied. In case the stack is empty before an operation,
* \p empty_stack will be output instead.
*
* @tparam InputItT Forward input iterator type to items representing stack operations
* @tparam ToStackOpTypeT A transform function object class that maps an item representing a stack
* operation to the stack_op_type of such item
* @tparam StackSymbolT Type representing items being pushed onto the stack
* @tparam TopOfStackOutItT A forward output iterator type being assigned items of StackSymbolT
* @param[in] begin Forward iterator to the beginning of the items representing stack operations
* @param[in] end Iterator to one past the last item representing the stack operation
* @param[in] to_stack_op A function object that takes an instance of InputItT's value type and
* returns the kind of stack operation such item represents (i.e., of type stack_op_type)
* @param[in] empty_stack A symbol that will be written to top_of_stack_out_it whenever the stack
* was empty
* @param[out] top_of_stack_out_it The output iterator to which the top-of-stack items are written
* @return TopOfStackOutItT Iterator to one past the last element that was written
*/
template <typename InputItT,
typename ToStackOpTypeT,
typename StackSymbolT,
typename TopOfStackOutItT>
TopOfStackOutItT to_top_of_stack(InputItT begin,
InputItT end,
ToStackOpTypeT to_stack_op,
StackSymbolT empty_stack,
TopOfStackOutItT top_of_stack_out_it)
{
// This is the data structure that keeps track of the full stack state for each input symbol
std::stack<StackSymbolT> stack_state;
for (auto it = begin; it < end; it++) {
// Write what is currently on top of the stack when reading in the current symbol
*top_of_stack_out_it = stack_state.empty() ? empty_stack : stack_state.top();
top_of_stack_out_it++;
auto const& current = *it;
fst::stack_op_type op_type = to_stack_op(current);
// Check whether this symbol corresponds to a push or pop operation and modify the stack
// accordingly
if (op_type == fst::stack_op_type::PUSH) {
stack_state.push(current);
} else if (op_type == fst::stack_op_type::POP) {
stack_state.pop();
}
}
return top_of_stack_out_it;
}
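// Informal example: for the same input "{a[b]}" with empty_stack '_', this reference produces
// "_{{[[{" -- '_' while the stack is still empty, then the symbol that was on top of the stack
// just before each subsequent input symbol was processed.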
/**
* @brief Function object used to filter for brackets and braces that represent push and pop
* operations
*
*/
struct JSONToStackOp {
template <typename StackSymbolT>
constexpr CUDF_HOST_DEVICE fst::stack_op_type operator()(StackSymbolT const& stack_symbol) const
{
return (stack_symbol == '{' || stack_symbol == '[') ? fst::stack_op_type::PUSH
: (stack_symbol == '}' || stack_symbol == ']') ? fst::stack_op_type::POP
: fst::stack_op_type::READ;
}
};
} // namespace
// Base test fixture for tests
struct LogicalStackTest : public cudf::test::BaseFixture {};
TEST_F(LogicalStackTest, GroundTruth)
{
// Type sufficient to cover any stack level (must be a signed type)
using StackLevelT = int8_t;
using SymbolT = char;
using SymbolOffsetT = uint32_t;
// The stack symbol used to fill every position where there's nothing on the stack
constexpr SymbolT empty_stack_symbol = '_';
// The read symbol just has to be a stack symbol that cannot be confused with one that pushes or pops
constexpr SymbolT read_symbol = 'x';
// Prepare cuda stream for data transfers & kernels
rmm::cuda_stream stream{};
rmm::cuda_stream_view stream_view(stream);
// Test input,
std::string input = R"( {)"
R"(category": "reference",)"
R"("index:" [4,12,42],)"
R"("author": "Nigel Rees",)"
R"("title": "Sayings of the Century",)"
R"("price": 8.95)"
R"(} )"
R"({)"
R"("category": "reference",)"
R"("index:" [4,{},null,{"a":[]}],)"
R"("author": "Nigel Rees",)"
R"("title": "Sayings of the Century",)"
R"("price": 8.95)"
R"(} {} [] [ ])";
// Repeat input sample 1024x
for (std::size_t i = 0; i < 10; i++)
input += input;
// Input's size
std::size_t string_size = input.size();
// Getting the symbols that actually modify the stack (i.e., symbols that push or pop)
std::string stack_symbols{};
std::vector<SymbolOffsetT> stack_op_indexes;
stack_op_indexes.reserve(string_size);
// Get the sparse representation of stack operations
to_sparse_stack_symbols(std::cbegin(input),
std::cend(input),
JSONToStackOp{},
std::back_inserter(stack_symbols),
std::back_inserter(stack_op_indexes));
rmm::device_uvector<SymbolT> d_stack_ops{stack_symbols.size(), stream_view};
rmm::device_uvector<SymbolOffsetT> d_stack_op_indexes{stack_op_indexes.size(), stream_view};
cudf::detail::hostdevice_vector<SymbolT> top_of_stack_gpu{string_size, stream_view};
cudf::device_span<SymbolOffsetT> d_stack_op_idx_span{d_stack_op_indexes};
CUDF_CUDA_TRY(cudaMemcpyAsync(d_stack_ops.data(),
stack_symbols.data(),
stack_symbols.size() * sizeof(SymbolT),
cudaMemcpyDefault,
stream.value()));
CUDF_CUDA_TRY(cudaMemcpyAsync(d_stack_op_indexes.data(),
stack_op_indexes.data(),
stack_op_indexes.size() * sizeof(SymbolOffsetT),
cudaMemcpyDefault,
stream.value()));
// Run algorithm
fst::sparse_stack_op_to_top_of_stack<fst::stack_op_support::NO_RESET_SUPPORT, StackLevelT>(
d_stack_ops.data(),
d_stack_op_idx_span,
JSONToStackOp{},
top_of_stack_gpu.device_ptr(),
empty_stack_symbol,
read_symbol,
string_size,
stream.value());
// Async copy results from device to host
top_of_stack_gpu.device_to_host_async(stream_view);
// Get CPU-side results for verification
std::string top_of_stack_cpu{};
top_of_stack_cpu.reserve(string_size);
to_top_of_stack(std::cbegin(input),
std::cend(input),
JSONToStackOp{},
empty_stack_symbol,
std::back_inserter(top_of_stack_cpu));
// Make sure results have been copied back to host
stream.synchronize();
// Verify results
ASSERT_EQ(string_size, top_of_stack_cpu.size());
ASSERT_EQ(top_of_stack_gpu.size(), top_of_stack_cpu.size());
CUDF_TEST_EXPECT_VECTOR_EQUAL(top_of_stack_gpu.host_ptr(), top_of_stack_cpu, string_size);
}
CUDF_TEST_PROGRAM_MAIN()
| 0 |
rapidsai_public_repos/cudf/cpp/tests/io
|
rapidsai_public_repos/cudf/cpp/tests/io/fst/fst_test.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <io/fst/lookup_tables.cuh>
#include <io/utilities/hostdevice_vector.hpp>
#include <tests/io/fst/common.hpp>
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/strings/repeat_strings.hpp>
#include <cudf/types.hpp>
#include <rmm/cuda_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <cstdlib>
#include <vector>
namespace {
//------------------------------------------------------------------------------
// CPU-BASED IMPLEMENTATIONS FOR VERIFICATION
//------------------------------------------------------------------------------
/**
* @brief CPU-based implementation of a finite-state transducer (FST).
*
* @tparam InputItT Forward input iterator type to symbols fed into the FST
* @tparam StateT Type representing states of the finite-state machine
* @tparam SymbolGroupLutT Sequence container of symbol groups. Each symbol group is a sequence
* container to symbols within that group.
* @tparam TransitionTableT Two-dimensional container type
* @tparam TransducerTableT Two-dimensional container type
* @tparam OutputItT Forward output iterator type
* @tparam IndexOutputItT Forward output iterator type
* @param[in] begin Forward iterator to the beginning of the symbol sequence
* @param[in] end Forward iterator to one past the last element of the symbol sequence
* @param[in] init_state The starting state of the finite-state machine
* @param[in] symbol_group_lut Sequence container of symbol groups. Each symbol group is a sequence
* container to symbols within that group. The index of the symbol group containing a symbol being
* read will be used as symbol_gid of the transition and translation tables.
* @param[in] transition_table The two-dimensional transition table, i.e.,
* transition_table[state][symbol_gid] -> new_state
* @param[in] translation_table The two-dimensional transducer table, i.e.,
* translation_table[state][symbol_gid] -> range_of_output_symbols
* @param[out] out_tape A forward output iterator to which the transduced input will be written
* @param[out] out_index_tape A forward output iterator to which indexes of the symbols that
* actually caused some output are written to
* @return A pair of iterators to one past the last element of (1) the transduced output symbol
* sequence and (2) the indexes of the input symbols that caused output to be written
*/
template <typename InputItT,
typename StateT,
typename SymbolGroupLutT,
typename TransitionTableT,
typename TransducerTableT,
typename OutputItT,
typename IndexOutputItT>
static std::pair<OutputItT, IndexOutputItT> fst_baseline(InputItT begin,
InputItT end,
StateT const& init_state,
SymbolGroupLutT symbol_group_lut,
TransitionTableT transition_table,
TransducerTableT translation_table,
OutputItT out_tape,
IndexOutputItT out_index_tape)
{
// Initialize "FSM" with starting state
StateT state = init_state;
// To track the symbol offset within the input that caused the FST to output
std::size_t in_offset = 0;
for (auto it = begin; it < end; it++) {
// The symbol currently being read
auto const& symbol = *it;
// Iterate over symbol groups and search for the first symbol group containing the current
// symbol, if no match is found we use cend(symbol_group_lut) as the "catch-all" symbol group
auto symbol_group_it =
std::find_if(std::cbegin(symbol_group_lut), std::cend(symbol_group_lut), [symbol](auto& sg) {
return std::find(std::cbegin(sg), std::cend(sg), symbol) != std::cend(sg);
});
auto symbol_group = std::distance(std::cbegin(symbol_group_lut), symbol_group_it);
// Output the translated symbols to the output tape
out_tape = std::copy(std::cbegin(translation_table[state][symbol_group]),
std::cend(translation_table[state][symbol_group]),
out_tape);
auto out_size = std::distance(std::cbegin(translation_table[state][symbol_group]),
std::cend(translation_table[state][symbol_group]));
out_index_tape = std::fill_n(out_index_tape, out_size, in_offset);
// Transition the state of the finite-state machine
state = static_cast<char>(transition_table[state][symbol_group]);
// Continue with next symbol from input tape
in_offset++;
}
return {out_tape, out_index_tape};
}
} // namespace
// Base test fixture for tests
struct FstTest : public cudf::test::BaseFixture {};
TEST_F(FstTest, GroundTruth)
{
// Type used to represent the atomic symbol type used within the finite-state machine
using SymbolT = char;
// Type sufficiently large to index symbols within the input and output (may be unsigned)
using SymbolOffsetT = uint32_t;
// Prepare cuda stream for data transfers & kernels
rmm::cuda_stream stream{};
rmm::cuda_stream_view stream_view(stream);
// Test input
std::string input = R"( {)"
R"("category": "reference",)"
R"("index:" [4,12,42],)"
R"("author": "Nigel Rees",)"
R"("title": "Sayings of the Century",)"
R"("price": 8.95)"
R"(} )"
R"({)"
R"("category": "reference",)"
R"("index:" [4,{},null,{"a":[]}],)"
R"("author": "Nigel Rees",)"
R"("title": "Sayings of the Century",)"
R"("price": 8.95)"
R"(} {} [] [ ])";
size_t string_size = input.size() * (1 << 10);
auto d_input_scalar = cudf::make_string_scalar(input);
auto& d_string_scalar = static_cast<cudf::string_scalar&>(*d_input_scalar);
const cudf::size_type repeat_times = string_size / input.size();
auto d_input_string = cudf::strings::repeat_string(d_string_scalar, repeat_times);
auto& d_input = static_cast<cudf::scalar_type_t<std::string>&>(*d_input_string);
input = d_input.to_string(stream);
// Prepare input & output buffers
constexpr std::size_t single_item = 1;
cudf::detail::hostdevice_vector<SymbolT> output_gpu(input.size(), stream_view);
cudf::detail::hostdevice_vector<SymbolOffsetT> output_gpu_size(single_item, stream_view);
cudf::detail::hostdevice_vector<SymbolOffsetT> out_indexes_gpu(input.size(), stream_view);
// Run algorithm
auto parser = cudf::io::fst::detail::make_fst(
cudf::io::fst::detail::make_symbol_group_lut(pda_sgs),
cudf::io::fst::detail::make_transition_table(pda_state_tt),
cudf::io::fst::detail::make_translation_table<TT_NUM_STATES * NUM_SYMBOL_GROUPS>(pda_out_tt),
stream);
// Allocate device-side temporary storage & run algorithm
parser.Transduce(d_input.data(),
static_cast<SymbolOffsetT>(d_input.size()),
output_gpu.device_ptr(),
out_indexes_gpu.device_ptr(),
output_gpu_size.device_ptr(),
start_state,
stream.value());
// Async copy results from device to host
output_gpu.device_to_host_async(stream.view());
out_indexes_gpu.device_to_host_async(stream.view());
output_gpu_size.device_to_host_async(stream.view());
// Prepare CPU-side results for verification
std::string output_cpu{};
std::vector<SymbolOffsetT> out_index_cpu{};
output_cpu.reserve(input.size());
out_index_cpu.reserve(input.size());
// Run CPU-side algorithm
fst_baseline(std::begin(input),
std::end(input),
start_state,
pda_sgs,
pda_state_tt,
pda_out_tt,
std::back_inserter(output_cpu),
std::back_inserter(out_index_cpu));
// Make sure results have been copied back to host
stream.synchronize();
// Verify results
ASSERT_EQ(output_gpu_size[0], output_cpu.size());
CUDF_TEST_EXPECT_VECTOR_EQUAL(output_gpu, output_cpu, output_cpu.size());
CUDF_TEST_EXPECT_VECTOR_EQUAL(out_indexes_gpu, out_index_cpu, output_cpu.size());
}
CUDF_TEST_PROGRAM_MAIN()
| 0 |
rapidsai_public_repos/cudf/cpp/tests/io
|
rapidsai_public_repos/cudf/cpp/tests/io/fst/common.hpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <array>
#include <string>
#include <vector>
//------------------------------------------------------------------------------
// TEST FST SPECIFICATIONS
//------------------------------------------------------------------------------
// FST to check for brackets and braces outside of pairs of quotes
enum class dfa_states : char {
// The state being active while being outside of a string. When encountering an opening bracket or
// curly brace, we push it onto the stack. When encountering a closing bracket or brace, we pop it
// from the stack.
TT_OOS = 0U,
// The state being active while being within a string (e.g., field name or a string value). We do
// not push or pop from the stack while being in this state.
TT_STR,
// The state being active after encountering an escape symbol (e.g., '\') while being in the
// TT_STR state.
TT_ESC,
// Total number of states
TT_NUM_STATES
};
/**
* @brief Definition of the symbol groups
*/
enum class dfa_symbol_group_id : uint32_t {
OPENING_BRACE, ///< Opening brace SG: {
OPENING_BRACKET, ///< Opening bracket SG: [
CLOSING_BRACE, ///< Closing brace SG: }
CLOSING_BRACKET, ///< Closing bracket SG: ]
QUOTE_CHAR, ///< Quote character SG: "
ESCAPE_CHAR, ///< Escape character SG: '\'
OTHER_SYMBOLS, ///< SG implicitly matching all other characters
NUM_SYMBOL_GROUPS ///< Total number of symbol groups
};
// Aliases for readability of the transition table
constexpr auto TT_OOS = dfa_states::TT_OOS;
constexpr auto TT_STR = dfa_states::TT_STR;
constexpr auto TT_ESC = dfa_states::TT_ESC;
constexpr auto TT_NUM_STATES = static_cast<char>(dfa_states::TT_NUM_STATES);
constexpr auto NUM_SYMBOL_GROUPS = static_cast<uint32_t>(dfa_symbol_group_id::NUM_SYMBOL_GROUPS);
// Transition table
std::array<std::array<dfa_states, NUM_SYMBOL_GROUPS>, TT_NUM_STATES> const pda_state_tt{
{/* IN_STATE { [ } ] " \ OTHER */
/* TT_OOS */ {{TT_OOS, TT_OOS, TT_OOS, TT_OOS, TT_STR, TT_OOS, TT_OOS}},
/* TT_STR */ {{TT_STR, TT_STR, TT_STR, TT_STR, TT_OOS, TT_ESC, TT_STR}},
/* TT_ESC */ {{TT_STR, TT_STR, TT_STR, TT_STR, TT_STR, TT_STR, TT_STR}}}};
// Translation table (i.e., for each transition, what are the symbols that we output)
std::array<std::array<std::vector<char>, NUM_SYMBOL_GROUPS>, TT_NUM_STATES> const pda_out_tt{
{/* IN_STATE { [ } ] " \ OTHER */
/* TT_OOS */ {{{'{'}, {'['}, {'}'}, {']'}, {'x'}, {'x'}, {'x'}}},
/* TT_STR */ {{{'x'}, {'x'}, {'x'}, {'x'}, {'x'}, {'x'}, {'x'}}},
/* TT_ESC */ {{{'x'}, {'x'}, {'x'}, {'x'}, {'x'}, {'x'}, {'x'}}}}};
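// Informal reading of the two tables above: outside of a string (TT_OOS) the braces and brackets
// are passed through unchanged and a quote switches to TT_STR; inside a string every symbol is
// translated to the filler 'x', a quote returns to TT_OOS, and a backslash moves to TT_ESC for
// exactly one symbol, so an escaped quote does not terminate the string.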
// The i-th string represents all the characters of the i-th symbol group
std::array<std::string, NUM_SYMBOL_GROUPS - 1> const pda_sgs{"{", "[", "}", "]", "\"", "\\"};
// The DFA's starting state
constexpr char start_state = static_cast<char>(dfa_states::TT_OOS);
| 0 |
rapidsai_public_repos/cudf/cpp/tests/io
|
rapidsai_public_repos/cudf/cpp/tests/io/text/multibyte_split_test.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <io/utilities/output_builder.cuh>
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/table_utilities.hpp>
#include <cudf_test/type_lists.hpp>
#include <cudf/concatenate.hpp>
#include <cudf/io/text/byte_range_info.hpp>
#include <cudf/io/text/data_chunk_source_factories.hpp>
#include <cudf/io/text/multibyte_split.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
using cudf::test::strings_column_wrapper;
// 😀 | F0 9F 98 80 | 11110000 10011111 10011000 10000000
// 😎 | F0 9F 98 8E | 11110000 10011111 10011000 10001110
struct MultibyteSplitTest : public cudf::test::BaseFixture {};
TEST_F(MultibyteSplitTest, Simple)
{
auto delimiter = std::string(":");
auto host_input = std::string("abc:def");
auto expected = strings_column_wrapper{"abc:", "def"};
auto source = cudf::io::text::make_source(host_input);
auto out = cudf::io::text::multibyte_split(*source, delimiter);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *out);
}
TEST_F(MultibyteSplitTest, NondeterministicMatching)
{
auto delimiter = std::string("abac");
auto host_input = std::string("ababacabacab");
auto expected = strings_column_wrapper{"ababac", "abac", "ab"};
auto source = cudf::io::text::make_source(host_input);
auto out = cudf::io::text::multibyte_split(*source, delimiter);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *out);
}
TEST_F(MultibyteSplitTest, NoDelimiter)
{
auto delimiter = std::string(":");
auto host_input = std::string("abcdefg");
auto expected = strings_column_wrapper{"abcdefg"};
auto source = cudf::io::text::make_source(host_input);
auto out = cudf::io::text::multibyte_split(*source, delimiter);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *out);
}
TEST_F(MultibyteSplitTest, DelimiterAtEnd)
{
auto delimiter = std::string(":");
auto host_input = std::string("abcdefg:");
auto expected = strings_column_wrapper{"abcdefg:"};
auto source = cudf::io::text::make_source(host_input);
auto out = cudf::io::text::multibyte_split(*source, delimiter);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *out);
}
TEST_F(MultibyteSplitTest, DelimiterAtEndByteRange)
{
auto delimiter = std::string(":");
auto host_input = std::string("abcdefg:");
auto expected = strings_column_wrapper{"abcdefg:"};
auto source = cudf::io::text::make_source(host_input);
auto out = cudf::io::text::multibyte_split(
*source,
delimiter,
cudf::io::text::byte_range_info{0, static_cast<int64_t>(host_input.size())});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *out);
}
TEST_F(MultibyteSplitTest, DelimiterAtEndByteRange2)
{
auto delimiter = std::string(":");
auto host_input = std::string("abcdefg:");
auto expected = strings_column_wrapper{"abcdefg:"};
auto source = cudf::io::text::make_source(host_input);
auto out = cudf::io::text::multibyte_split(
*source,
delimiter,
cudf::io::text::byte_range_info{0, static_cast<int64_t>(host_input.size() - 1)});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *out);
}
TEST_F(MultibyteSplitTest, LargeInputSparse)
{
auto host_input = std::string(1024 * 1024 * 32, '.');
auto host_expected = std::vector<std::string>();
host_input[host_input.size() / 2] = '|';
host_expected.emplace_back(host_input.substr(0, host_input.size() / 2 + 1));
host_expected.emplace_back(host_input.substr(host_input.size() / 2 + 1));
auto expected = strings_column_wrapper{host_expected.begin(), host_expected.end()};
auto delimiter = std::string("|");
auto source = cudf::io::text::make_source(host_input);
auto out = cudf::io::text::multibyte_split(*source, delimiter);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *out);
}
TEST_F(MultibyteSplitTest, LargeInput)
{
auto host_input = std::string();
auto host_expected = std::vector<std::string>();
for (auto i = 0; i < (2 * 32 * 128 * 1024); i++) {
host_input += "...:|";
host_expected.emplace_back(std::string("...:|"));
}
auto expected = strings_column_wrapper{host_expected.begin(), host_expected.end()};
auto delimiter = std::string("...:|");
auto source = cudf::io::text::make_source(host_input);
auto out = cudf::io::text::multibyte_split(*source, delimiter);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *out);
}
TEST_F(MultibyteSplitTest, OverlappingMatchErasure)
{
auto delimiter = "::";
auto host_input = std::string(
":::::"
":::::");
auto expected = strings_column_wrapper{":::::", ":::::"};
auto source = cudf::io::text::make_source(host_input);
auto out = cudf::io::text::multibyte_split(*source, delimiter);
// CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *out); // this use case is not yet supported.
}
TEST_F(MultibyteSplitTest, DelimiterErasure)
{
auto delimiter = "\r\n";
auto host_input = std::string("line\r\nanother line\r\nthird line\r\n");
auto expected = strings_column_wrapper{"line", "another line", "third line"};
cudf::io::text::parse_options options;
options.strip_delimiters = true;
auto source = cudf::io::text::make_source(host_input);
auto out = cudf::io::text::multibyte_split(*source, delimiter, options);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *out);
}
TEST_F(MultibyteSplitTest, DelimiterErasureByteRange)
{
auto delimiter = "\r\n";
auto host_input = std::string("line\r\nanother line\r\nthird line\r\n");
auto expected = strings_column_wrapper{"line", "another line", "third line"};
cudf::io::text::parse_options options;
options.strip_delimiters = true;
options.byte_range = cudf::io::text::byte_range_info(0, host_input.size() - 1);
auto source = cudf::io::text::make_source(host_input);
auto out = cudf::io::text::multibyte_split(*source, delimiter, options);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *out);
}
TEST_F(MultibyteSplitTest, DelimiterErasureOverlap)
{
auto delimiter = "::";
auto host_input = std::string("::a:::b::c::::d");
auto expected = strings_column_wrapper{"", "a", "", "b", "c", "", "", "d"};
cudf::io::text::parse_options options;
options.strip_delimiters = true;
auto source = cudf::io::text::make_source(host_input);
auto out = cudf::io::text::multibyte_split(*source, delimiter, options);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *out);
}
TEST_F(MultibyteSplitTest, HandpickedInput)
{
auto delimiters = "::|";
auto host_input = std::string(
"aaa::|"
"bbb::|"
"ccc::|"
"ddd::|"
"eee::|"
"fff::|"
"ggg::|"
"hhh::|"
"___::|"
"here::|"
"is::|"
"another::|"
"simple::|"
"text::|"
"separated::|"
"by::|"
"emojis::|"
"which::|"
"are::|"
"multiple::|"
"bytes::|"
"and::|"
"used::|"
"as::|"
"delimiters.::|"
"::|"
"::|"
"::|");
auto expected = strings_column_wrapper{
"aaa::|", "bbb::|", "ccc::|", "ddd::|", "eee::|", "fff::|",
"ggg::|", "hhh::|", "___::|", "here::|", "is::|", "another::|",
"simple::|", "text::|", "separated::|", "by::|", "emojis::|", "which::|",
"are::|", "multiple::|", "bytes::|", "and::|", "used::|", "as::|",
"delimiters.::|", "::|", "::|", "::|"};
auto source = cudf::io::text::make_source(host_input);
auto out = cudf::io::text::multibyte_split(*source, delimiters);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *out, cudf::test::debug_output_level::ALL_ERRORS);
}
TEST_F(MultibyteSplitTest, LargeInputMultipleRange)
{
auto host_input = std::string();
for (auto i = 0; i < (2 * 32 * 128 * 1024); i++) {
host_input += "...:|";
}
auto delimiter = std::string("...:|");
auto source = cudf::io::text::make_source(host_input);
auto byte_ranges = cudf::io::text::create_byte_range_infos_consecutive(host_input.size(), 3);
auto out0 = cudf::io::text::multibyte_split(*source, delimiter, byte_ranges[0]);
auto out1 = cudf::io::text::multibyte_split(*source, delimiter, byte_ranges[1]);
auto out2 = cudf::io::text::multibyte_split(*source, delimiter, byte_ranges[2]);
auto out_views = std::vector<cudf::column_view>({out0->view(), out1->view(), out2->view()});
auto out = cudf::concatenate(out_views);
auto expected = cudf::io::text::multibyte_split(*source, delimiter);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(
expected->view(), *out, cudf::test::debug_output_level::ALL_ERRORS);
}
TEST_F(MultibyteSplitTest, LargeInputSparseMultipleRange)
{
auto host_input = std::string();
for (auto i = 0; i < (2 * 32 * 128 * 1024); i++) {
host_input += ".....";
}
auto delimiter = std::string("...:|");
host_input[host_input.size() / 2] = ':';
host_input[host_input.size() / 2 + 1] = '|';
auto source = cudf::io::text::make_source(host_input);
auto byte_ranges = cudf::io::text::create_byte_range_infos_consecutive(host_input.size(), 3);
auto out0 = cudf::io::text::multibyte_split(*source, delimiter, byte_ranges[0]);
auto out1 = cudf::io::text::multibyte_split(*source, delimiter, byte_ranges[1]);
auto out2 = cudf::io::text::multibyte_split(*source, delimiter, byte_ranges[2]);
auto out_views = std::vector<cudf::column_view>({out0->view(), out1->view(), out2->view()});
auto out = cudf::concatenate(out_views);
auto expected = cudf::io::text::multibyte_split(*source, delimiter);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(
expected->view(), *out, cudf::test::debug_output_level::ALL_ERRORS);
}
TEST_F(MultibyteSplitTest, LargeInputMultipleRangeSingleByte)
{
auto host_input = std::string();
for (auto i = 0; i < (2 * 32 * 128 * 1024); i++) {
host_input += "...:|";
}
auto delimiter = std::string("|");
auto source = cudf::io::text::make_source(host_input);
auto byte_ranges = cudf::io::text::create_byte_range_infos_consecutive(host_input.size(), 3);
auto out0 = cudf::io::text::multibyte_split(*source, delimiter, byte_ranges[0]);
auto out1 = cudf::io::text::multibyte_split(*source, delimiter, byte_ranges[1]);
auto out2 = cudf::io::text::multibyte_split(*source, delimiter, byte_ranges[2]);
auto out_views = std::vector<cudf::column_view>({out0->view(), out1->view(), out2->view()});
auto out = cudf::concatenate(out_views);
auto expected = cudf::io::text::multibyte_split(*source, delimiter);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(
expected->view(), *out, cudf::test::debug_output_level::ALL_ERRORS);
}
TEST_F(MultibyteSplitTest, LargeInputSparseMultipleRangeSingleByte)
{
auto host_input = std::string();
for (auto i = 0; i < (2 * 32 * 128 * 1024); i++) {
host_input += ".....";
}
auto delimiter = std::string("|");
host_input[host_input.size() / 2] = '|';
auto source = cudf::io::text::make_source(host_input);
auto byte_ranges = cudf::io::text::create_byte_range_infos_consecutive(host_input.size(), 3);
auto out0 = cudf::io::text::multibyte_split(*source, delimiter, byte_ranges[0]);
auto out1 = cudf::io::text::multibyte_split(*source, delimiter, byte_ranges[1]);
auto out2 = cudf::io::text::multibyte_split(*source, delimiter, byte_ranges[2]);
auto out_views = std::vector<cudf::column_view>({out0->view(), out1->view(), out2->view()});
auto out = cudf::concatenate(out_views);
auto expected = cudf::io::text::multibyte_split(*source, delimiter);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(
expected->view(), *out, cudf::test::debug_output_level::ALL_ERRORS);
}
TEST_F(MultibyteSplitTest, SmallInputAllPossibleRanges)
{
using namespace cudf::io::text;
auto host_input = std::string();
for (auto i = 0; i < 5; i++) {
host_input += "::";
}
auto delimiter = std::string("::");
auto source = make_source(host_input);
// for all possible ways to split the input, check that each field is only output once
int size = static_cast<int>(host_input.size());
for (int split1 = 1; split1 < size; split1++) {
SCOPED_TRACE(split1);
for (int split2 = split1 + 1; split2 < size; split2++) {
SCOPED_TRACE(split2);
auto out1 = multibyte_split(*source, delimiter, byte_range_info{0, split1});
auto out2 = multibyte_split(*source, delimiter, byte_range_info{split1, split2 - split1});
auto out3 = multibyte_split(*source, delimiter, byte_range_info{split2, size - split2});
auto out_views = std::vector<cudf::column_view>({out1->view(), out2->view(), out3->view()});
auto out = cudf::concatenate(out_views);
auto expected = multibyte_split(*source, delimiter);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(
expected->view(), *out, cudf::test::debug_output_level::ALL_ERRORS);
}
}
}
TEST_F(MultibyteSplitTest, SmallInputAllPossibleRangesSingleByte)
{
using namespace cudf::io::text;
auto host_input = std::string();
for (auto i = 0; i < 5; i++) {
host_input += std::to_string(i) + ":";
}
auto delimiter = std::string(":");
auto source = make_source(host_input);
// for all possible ways to split the input, check that each field is only output once
int size = static_cast<int>(host_input.size());
for (int split1 = 1; split1 < size; split1++) {
SCOPED_TRACE(split1);
for (int split2 = split1 + 1; split2 < size; split2++) {
SCOPED_TRACE(split2);
auto out1 = multibyte_split(*source, delimiter, byte_range_info{0, split1});
auto out2 = multibyte_split(*source, delimiter, byte_range_info{split1, split2 - split1});
auto out3 = multibyte_split(*source, delimiter, byte_range_info{split2, size - split2});
auto out_views = std::vector<cudf::column_view>({out1->view(), out2->view(), out3->view()});
auto out = cudf::concatenate(out_views);
auto expected = multibyte_split(*source, delimiter);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(
expected->view(), *out, cudf::test::debug_output_level::ALL_ERRORS);
}
}
}
TEST_F(MultibyteSplitTest, SingletonRangeAtEnd)
{
// we want a delimiter at the end of the file to not create a new empty row even if it is the only
// character in the byte range
using namespace cudf::io::text;
auto host_input = std::string("ab:cd:");
auto delimiter = std::string(":");
auto source = make_source(host_input);
auto expected = strings_column_wrapper{};
auto out = multibyte_split(*source, delimiter, byte_range_info{5, 1});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *out, cudf::test::debug_output_level::ALL_ERRORS);
}
TEST_F(MultibyteSplitTest, EmptyInput)
{
using namespace cudf::io::text;
auto host_input = std::string();
auto delimiter = std::string("::");
auto source = make_source(host_input);
auto expected = strings_column_wrapper{};
auto out = multibyte_split(*source, delimiter);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *out, cudf::test::debug_output_level::ALL_ERRORS);
}
TEST_F(MultibyteSplitTest, EmptyInputSingleByte)
{
using namespace cudf::io::text;
auto host_input = std::string();
auto delimiter = std::string(":");
auto source = make_source(host_input);
auto expected = strings_column_wrapper{};
auto out = multibyte_split(*source, delimiter);
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *out, cudf::test::debug_output_level::ALL_ERRORS);
}
TEST_F(MultibyteSplitTest, EmptyRange)
{
using namespace cudf::io::text;
auto host_input = std::string("ab::cd");
auto delimiter = std::string("::");
auto source = make_source(host_input);
auto expected = strings_column_wrapper{};
auto out = multibyte_split(*source, delimiter, byte_range_info{4, 0});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *out, cudf::test::debug_output_level::ALL_ERRORS);
}
TEST_F(MultibyteSplitTest, EmptyRangeSingleByte)
{
using namespace cudf::io::text;
auto host_input = std::string("ab:cd");
auto delimiter = std::string(":");
auto source = make_source(host_input);
auto expected = strings_column_wrapper{};
auto out = multibyte_split(*source, delimiter, byte_range_info{3, 0});
CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, *out, cudf::test::debug_output_level::ALL_ERRORS);
}
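// The next two tests cover cudf::split_device_span directly. As the assertions
// below suggest, it presents two contiguous pieces -- head() and tail() -- as a
// single logical range, with indexing and iteration spanning both pieces.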
TEST_F(MultibyteSplitTest, EmptySplitDeviceSpan)
{
cudf::split_device_span<int> span;
ASSERT_EQ(span.size(), 0);
ASSERT_EQ(span.head().size(), 0);
ASSERT_EQ(span.head().data(), nullptr);
ASSERT_EQ(span.tail().size(), 0);
ASSERT_EQ(span.tail().data(), nullptr);
}
TEST_F(MultibyteSplitTest, SplitDeviceSpan)
{
int i = 0;
int j = 1;
cudf::split_device_span<int> span{{&i, 1}, {&j, 1}};
ASSERT_EQ(span.size(), 2);
ASSERT_EQ(span.head().size(), 1);
ASSERT_EQ(span.head().data(), &i);
ASSERT_EQ(span.tail().size(), 1);
ASSERT_EQ(span.tail().data(), &j);
ASSERT_EQ(&span[0], &i);
ASSERT_EQ(&span[1], &j);
ASSERT_EQ(&*span.begin(), &i);
ASSERT_EQ(&*(span.begin() + 1), &j);
ASSERT_NE(span.begin() + 1, span.end());
ASSERT_EQ(span.begin() + 2, span.end());
}
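// Exercises cudf::output_builder: judging by the checks below, next_output()
// returns a split_device_span whose head is the unused remainder of the current
// buffer and whose tail is the next (possibly larger) buffer, while
// advance_output(n) commits n elements and grows the total size().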
TEST_F(MultibyteSplitTest, OutputBuilder)
{
auto const stream = cudf::get_default_stream();
cudf::output_builder<char> builder{10, 4, stream};
auto const output = builder.next_output(stream);
ASSERT_GE(output.size(), 10);
ASSERT_EQ(output.tail().size(), 0);
ASSERT_EQ(output.tail().data(), nullptr);
ASSERT_EQ(builder.size(), 0);
builder.advance_output(1, stream);
ASSERT_EQ(builder.size(), 1);
auto const output2 = builder.next_output(stream);
ASSERT_EQ(output2.head().data(), output.head().data() + 1);
builder.advance_output(10, stream);
ASSERT_EQ(builder.size(), 11);
auto const output3 = builder.next_output(stream);
ASSERT_EQ(output3.head().size(), 9);
ASSERT_EQ(output3.head().data(), output.head().data() + 11);
ASSERT_EQ(output3.tail().size(), 40);
builder.advance_output(9, stream);
ASSERT_EQ(builder.size(), 20);
auto const output4 = builder.next_output(stream);
ASSERT_EQ(output4.head().size(), 0);
ASSERT_EQ(output4.tail().size(), output3.tail().size());
ASSERT_EQ(output4.tail().data(), output3.tail().data());
builder.advance_output(1, stream);
auto const output5 = builder.next_output(stream);
ASSERT_EQ(output5.head().size(), 39);
ASSERT_EQ(output5.head().data(), output4.tail().data() + 1);
ASSERT_EQ(output5.tail().size(), 0);
ASSERT_EQ(output5.tail().data(), nullptr);
}
CUDF_TEST_PROGRAM_MAIN()
rapidsai_public_repos/cudf/cpp/tests/io/text/data_chunk_source_test.cpp
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf/io/text/data_chunk_source_factories.hpp>
#include <cudf/io/text/detail/bgzip_utils.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <fstream>
#include <random>
auto const temp_env = static_cast<cudf::test::TempDirTestEnvironment*>(
::testing::AddGlobalTestEnvironment(new cudf::test::TempDirTestEnvironment));
struct DataChunkSourceTest : public cudf::test::BaseFixture {};
std::string chunk_to_host(cudf::io::text::device_data_chunk const& chunk)
{
std::string result(chunk.size(), '\0');
CUDF_CUDA_TRY(cudaMemcpy(result.data(), chunk.data(), chunk.size(), cudaMemcpyDefault));
return result;
}
void test_source(std::string const& content, cudf::io::text::data_chunk_source const& source)
{
{
// full contents
auto reader = source.create_reader();
auto const chunk = reader->get_next_chunk(content.size(), cudf::get_default_stream());
ASSERT_EQ(chunk->size(), content.size());
ASSERT_EQ(chunk_to_host(*chunk), content);
}
{
// skipping contents
auto reader = source.create_reader();
reader->skip_bytes(4);
auto const chunk = reader->get_next_chunk(content.size(), cudf::get_default_stream());
ASSERT_EQ(chunk->size(), content.size() - 4);
ASSERT_EQ(chunk_to_host(*chunk), content.substr(4));
}
{
// reading multiple chunks, starting with a small one
auto reader = source.create_reader();
auto const chunk1 = reader->get_next_chunk(5, cudf::get_default_stream());
auto const chunk2 = reader->get_next_chunk(content.size() - 5, cudf::get_default_stream());
ASSERT_EQ(chunk1->size(), 5);
ASSERT_EQ(chunk2->size(), content.size() - 5);
ASSERT_EQ(chunk_to_host(*chunk1), content.substr(0, 5));
ASSERT_EQ(chunk_to_host(*chunk2), content.substr(5));
}
{
// reading multiple chunks
auto reader = source.create_reader();
auto const chunk1 = reader->get_next_chunk(content.size() / 2, cudf::get_default_stream());
auto const chunk2 =
reader->get_next_chunk(content.size() - content.size() / 2, cudf::get_default_stream());
ASSERT_EQ(chunk1->size(), content.size() / 2);
ASSERT_EQ(chunk2->size(), content.size() - content.size() / 2);
ASSERT_EQ(chunk_to_host(*chunk1), content.substr(0, content.size() / 2));
ASSERT_EQ(chunk_to_host(*chunk2), content.substr(content.size() / 2));
}
{
// reading too many bytes
auto reader = source.create_reader();
auto const chunk = reader->get_next_chunk(content.size() + 10, cudf::get_default_stream());
ASSERT_EQ(chunk->size(), content.size());
ASSERT_EQ(chunk_to_host(*chunk), content);
auto next_chunk = reader->get_next_chunk(1, cudf::get_default_stream());
ASSERT_EQ(next_chunk->size(), 0);
}
{
// skipping past the end
auto reader = source.create_reader();
reader->skip_bytes(content.size() + 10);
auto const next_chunk = reader->get_next_chunk(1, cudf::get_default_stream());
ASSERT_EQ(next_chunk->size(), 0);
}
}
TEST_F(DataChunkSourceTest, DataSourceHost)
{
std::string const content = "host buffer source";
auto const datasource =
cudf::io::datasource::create(cudf::io::host_buffer{content.data(), content.size()});
auto const source = cudf::io::text::make_source(*datasource);
test_source(content, *source);
}
TEST_F(DataChunkSourceTest, DataSourceFile)
{
std::string content = "file datasource";
// make it big enough to have is_device_read_preferred return true
content.reserve(content.size() << 20);
for (int i = 0; i < 20; i++) {
content += content;
}
auto const filename = temp_env->get_temp_filepath("file_source");
{
std::ofstream file{filename};
file << content;
}
auto const datasource = cudf::io::datasource::create(filename);
auto const source = cudf::io::text::make_source(*datasource);
test_source(content, *source);
}
TEST_F(DataChunkSourceTest, Device)
{
std::string const content = "device buffer source";
cudf::string_scalar scalar(content);
auto const source = cudf::io::text::make_source(scalar);
test_source(content, *source);
}
TEST_F(DataChunkSourceTest, File)
{
std::string const content = "file source";
auto const filename = temp_env->get_temp_filepath("file_source");
{
std::ofstream file{filename};
file << content;
}
auto const source = cudf::io::text::make_source_from_file(filename);
test_source(content, *source);
}
TEST_F(DataChunkSourceTest, Host)
{
std::string const content = "host buffer source";
auto const source = cudf::io::text::make_source(content);
test_source(content, *source);
}
enum class compression { ENABLED, DISABLED };
enum class eof { ADD_EOF_BLOCK, NO_EOF_BLOCK };
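// BGZF-style virtual offsets pack the compressed (file) offset of a block into
// the upper 48 bits and the uncompressed offset within that block into the
// lower 16 bits, e.g. virtual_offset(100, 7) == (100 << 16) | 7.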
uint64_t virtual_offset(std::size_t block_offset, std::size_t local_offset)
{
return (block_offset << 16) | local_offset;
}
void write_bgzip(std::ostream& output_stream,
cudf::host_span<char const> data,
std::default_random_engine& rng,
compression compress,
eof add_eof)
{
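// Each gzip extra subfield is two subfield-ID ("magic") bytes, a two-byte
// little-endian payload length, and the payload itself. The garbage fields
// below are well-formed but unknown subfields, so the reader must skip them.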
std::vector<char> const extra_garbage_fields1{{13, // magic number
37, // magic number
7, // field length
0, // field length
1,
2,
3,
4,
5,
6,
7}};
std::vector<char> const extra_garbage_fields2{{12, // magic number
34, // magic number
2, // field length
0, // field length
1, 2,
56, // magic number
78, // magic number
1, // field length
0, // field length
3, //
90, // magic number
12, // magic number
8, // field length
0, // field length
1, 2, 3, 4, 5, 6, 7, 8}};
// make sure the block size with header stays below 65536
std::uniform_int_distribution<std::size_t> block_size_dist{1, 65000};
auto begin = data.begin();
auto const end = data.end();
int i = 0;
while (begin < end) {
using cudf::host_span;
auto len = std::min<std::size_t>(end - begin, block_size_dist(rng));
host_span<char const> const garbage_before =
i & 1 ? extra_garbage_fields1 : host_span<char const>{};
host_span<char const> const garbage_after =
i & 2 ? extra_garbage_fields2 : host_span<char const>{};
if (compress == compression::ENABLED) {
cudf::io::text::detail::bgzip::write_compressed_block(
output_stream, {begin, len}, garbage_before, garbage_after);
} else {
cudf::io::text::detail::bgzip::write_uncompressed_block(
output_stream, {begin, len}, garbage_before, garbage_after);
}
begin += len;
i++;
}
if (add_eof == eof::ADD_EOF_BLOCK) {
cudf::io::text::detail::bgzip::write_uncompressed_block(output_stream, {});
}
}
TEST_F(DataChunkSourceTest, BgzipSource)
{
auto const filename = temp_env->get_temp_filepath("bgzip_source");
std::string input{"bananarama"};
input.reserve(input.size() << 25);
for (int i = 0; i < 24; i++) {
input = input + input;
}
{
std::ofstream output_stream{filename};
std::default_random_engine rng{};
write_bgzip(output_stream, input, rng, compression::DISABLED, eof::ADD_EOF_BLOCK);
}
auto const source = cudf::io::text::make_source_from_bgzip_file(filename);
test_source(input, *source);
}
TEST_F(DataChunkSourceTest, BgzipSourceVirtualOffsets)
{
auto const filename = temp_env->get_temp_filepath("bgzip_source_offsets");
std::string input{"bananarama"};
input.reserve(input.size() << 25);
for (int i = 0; i < 24; i++) {
input = input + input;
}
std::string const padding_garbage(10000, 'g');
std::string const data_garbage{"GARBAGE"};
std::string const begininput{"begin of bananarama"};
std::string const endinput{"end of bananarama"};
std::size_t begin_compressed_offset{};
std::size_t end_compressed_offset{};
std::size_t const begin_local_offset{data_garbage.size()};
std::size_t const end_local_offset{endinput.size()};
{
std::ofstream output_stream{filename};
output_stream.write(padding_garbage.data(), padding_garbage.size());
std::default_random_engine rng{};
begin_compressed_offset = output_stream.tellp();
cudf::io::text::detail::bgzip::write_uncompressed_block(output_stream,
data_garbage + begininput);
write_bgzip(output_stream, input, rng, compression::DISABLED, eof::NO_EOF_BLOCK);
end_compressed_offset = output_stream.tellp();
cudf::io::text::detail::bgzip::write_uncompressed_block(output_stream,
endinput + data_garbage + data_garbage);
cudf::io::text::detail::bgzip::write_uncompressed_block(output_stream, {});
output_stream.write(padding_garbage.data(), padding_garbage.size());
}
input = begininput + input + endinput;
auto const source = cudf::io::text::make_source_from_bgzip_file(
filename,
virtual_offset(begin_compressed_offset, begin_local_offset),
virtual_offset(end_compressed_offset, end_local_offset));
test_source(input, *source);
}
TEST_F(DataChunkSourceTest, BgzipSourceVirtualOffsetsSingleGZipBlock)
{
auto const filename = temp_env->get_temp_filepath("bgzip_source_offsets_single_block");
std::string const input{"collection unit brings"};
std::string const head_garbage{"garbage"};
std::string const tail_garbage{"GARBAGE"};
std::size_t const begin_local_offset{head_garbage.size()};
std::size_t const end_local_offset{head_garbage.size() + input.size()};
{
std::ofstream output_stream{filename};
cudf::io::text::detail::bgzip::write_uncompressed_block(output_stream,
head_garbage + input + tail_garbage);
cudf::io::text::detail::bgzip::write_uncompressed_block(output_stream, {});
}
auto const source = cudf::io::text::make_source_from_bgzip_file(
filename, virtual_offset(0, begin_local_offset), virtual_offset(0, end_local_offset));
test_source(input, *source);
}
TEST_F(DataChunkSourceTest, BgzipSourceVirtualOffsetsSingleChunk)
{
auto const filename = temp_env->get_temp_filepath("bgzip_source_offsets_single_chunk");
std::string const input{"collection unit brings"};
std::string const head_garbage{"garbage"};
std::string const tail_garbage{"GARBAGE"};
std::size_t end_compressed_offset{};
std::size_t const begin_local_offset{head_garbage.size()};
std::size_t const end_local_offset{input.size() - 10};
{
std::ofstream output_stream{filename};
cudf::io::text::detail::bgzip::write_uncompressed_block(output_stream,
head_garbage + input.substr(0, 10));
end_compressed_offset = output_stream.tellp();
cudf::io::text::detail::bgzip::write_uncompressed_block(output_stream,
input.substr(10) + tail_garbage);
cudf::io::text::detail::bgzip::write_uncompressed_block(output_stream, {});
}
auto const source = cudf::io::text::make_source_from_bgzip_file(
filename,
virtual_offset(0, begin_local_offset),
virtual_offset(end_compressed_offset, end_local_offset));
test_source(input, *source);
}
TEST_F(DataChunkSourceTest, BgzipCompressedSourceVirtualOffsets)
{
auto const filename = temp_env->get_temp_filepath("bgzip_source_compressed_offsets");
std::string input{"bananarama"};
input.reserve(input.size() << 25);
for (int i = 0; i < 24; i++) {
input = input + input;
}
std::string const padding_garbage(10000, 'g');
std::string const data_garbage{"GARBAGE"};
std::string const begininput{"begin of bananarama"};
std::string const endinput{"end of bananarama"};
std::size_t begin_compressed_offset{};
std::size_t end_compressed_offset{};
std::size_t const begin_local_offset{data_garbage.size()};
std::size_t const end_local_offset{endinput.size()};
{
std::ofstream output_stream{filename};
output_stream.write(padding_garbage.data(), padding_garbage.size());
std::default_random_engine rng{};
begin_compressed_offset = output_stream.tellp();
cudf::io::text::detail::bgzip::write_compressed_block(output_stream, data_garbage + begininput);
write_bgzip(output_stream, input, rng, compression::ENABLED, eof::NO_EOF_BLOCK);
end_compressed_offset = output_stream.tellp();
cudf::io::text::detail::bgzip::write_compressed_block(output_stream,
endinput + data_garbage + data_garbage);
cudf::io::text::detail::bgzip::write_uncompressed_block(output_stream, {});
output_stream.write(padding_garbage.data(), padding_garbage.size());
}
input = begininput + input + endinput;
auto source = cudf::io::text::make_source_from_bgzip_file(
filename,
virtual_offset(begin_compressed_offset, begin_local_offset),
virtual_offset(end_compressed_offset, end_local_offset));
test_source(input, *source);
}
TEST_F(DataChunkSourceTest, BgzipSourceVirtualOffsetsSingleCompressedGZipBlock)
{
auto const filename = temp_env->get_temp_filepath("bgzip_source_offsets_single_compressed_block");
std::string const input{"collection unit brings"};
std::string const head_garbage(10000, 'g');
std::string const tail_garbage{"GARBAGE"};
std::size_t const begin_local_offset{head_garbage.size()};
std::size_t const end_local_offset{head_garbage.size() + input.size()};
{
std::ofstream output_stream{filename};
cudf::io::text::detail::bgzip::write_compressed_block(output_stream,
head_garbage + input + tail_garbage);
cudf::io::text::detail::bgzip::write_uncompressed_block(output_stream, {});
}
auto const source = cudf::io::text::make_source_from_bgzip_file(
filename, virtual_offset(0, begin_local_offset), virtual_offset(0, end_local_offset));
test_source(input, *source);
}
CUDF_TEST_PROGRAM_MAIN()
rapidsai_public_repos/cudf/cpp/scripts/gdb-pretty-printers.py
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gdb
global_locals = locals()
if not all(
name in global_locals
for name in (
"HostIterator",
"DeviceIterator",
"is_template_type_not_alias",
"template_match",
)
):
raise NameError(
"This file expects the RMM pretty-printers to be loaded already. "
"Either load them manually, or use the generated load-pretty-printers "
"script in the build directory"
)
class CudfHostSpanPrinter(gdb.printing.PrettyPrinter):
"""Print a cudf::host_span"""
def __init__(self, val):
self.val = val
self.pointer = val["_data"]
self.size = int(val["_size"])
def children(self):
return HostIterator(self.pointer, self.size)
def to_string(self):
return f"{self.val.type} of length {self.size} at {hex(self.pointer)}"
def display_hint(self):
return "array"
class CudfDeviceSpanPrinter(gdb.printing.PrettyPrinter):
"""Print a cudf::device_span"""
def __init__(self, val):
self.val = val
self.pointer = val["_data"]
self.size = int(val["_size"])
def children(self):
return DeviceIterator(self.pointer, self.size)
def to_string(self):
return f"{self.val.type} of length {self.size} at {hex(self.pointer)}"
def display_hint(self):
return "array"
def lookup_cudf_type(val):
if not str(val.type.unqualified()).startswith("cudf::"):
return None
suffix = str(val.type.unqualified())[6:]
if not is_template_type_not_alias(suffix):
return None
if template_match(suffix, "host_span"):
return CudfHostSpanPrinter(val)
if template_match(suffix, "device_span"):
return CudfDeviceSpanPrinter(val)
return None
gdb.pretty_printers.append(lookup_cudf_type)
rapidsai_public_repos/cudf/cpp/scripts/load-pretty-printers.in
source @Thrust_SOURCE_DIR@/scripts/gdb-pretty-printers.py
source @rmm_SOURCE_DIR@/scripts/gdb-pretty-printers.py
source @PROJECT_SOURCE_DIR@/scripts/gdb-pretty-printers.py
rapidsai_public_repos/cudf/cpp/scripts/run-cmake-format.sh
#!/bin/bash
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
# This script is a wrapper for cmakelang that may be used with pre-commit. The
# wrapping is necessary because RAPIDS libraries split configuration for
# cmakelang linters between a local config file and a second config file that's
# shared across all of RAPIDS via rapids-cmake. In order to keep it up to date
# this file is only maintained in one place (the rapids-cmake repo) and
# pulled down during builds. We need a way to invoke CMake linting commands
# without causing pre-commit failures (which could block local commits or CI),
# while also being sufficiently flexible to allow users to maintain the config
# file independently of a build directory.
#
# This script provides the minimal functionality to enable those use cases. It
# searches in a number of predefined locations for the rapids-cmake config file
# and exits gracefully if the file is not found. If a user wishes to specify a
# config file at a nonstandard location, they may do so by setting the
# environment variable RAPIDS_CMAKE_FORMAT_FILE.
#
# This script can be invoked directly anywhere within the project repository.
# Alternatively, it may be invoked as a pre-commit hook via
# `pre-commit run (cmake-format)|(cmake-lint)`.
#
# Usage:
# bash run-cmake-format.sh {cmake-format,cmake-lint} infile [infile ...]
status=0
if [ -z ${CUDF_ROOT:+PLACEHOLDER} ]; then
CUDF_BUILD_DIR=$(git rev-parse --show-toplevel 2>&1)/cpp/build
status=$?
else
CUDF_BUILD_DIR=${CUDF_ROOT}
fi
if ! [ ${status} -eq 0 ]; then
if [[ ${CUDF_BUILD_DIR} == *"not a git repository"* ]]; then
echo "This script must be run inside the cudf repository, or the CUDF_ROOT environment variable must be set."
else
echo "Script failed with unknown error attempting to determine project root:"
echo ${CUDF_BUILD_DIR}
fi
exit 1
fi
DEFAULT_FORMAT_FILE_LOCATIONS=(
"${CUDF_BUILD_DIR:-${HOME}}/_deps/rapids-cmake-src/cmake-format-rapids-cmake.json"
"cpp/libcudf_kafka/build/_deps/rapids-cmake-src/cmake-format-rapids-cmake.json"
)
if [ -z ${RAPIDS_CMAKE_FORMAT_FILE:+PLACEHOLDER} ]; then
for file_path in ${DEFAULT_FORMAT_FILE_LOCATIONS[@]}; do
if [ -f ${file_path} ]; then
RAPIDS_CMAKE_FORMAT_FILE=${file_path}
break
fi
done
fi
if [ -z ${RAPIDS_CMAKE_FORMAT_FILE:+PLACEHOLDER} ]; then
echo "The rapids-cmake cmake-format configuration file was not found at any of the default search locations: "
echo ""
( IFS=$'\n'; echo "${DEFAULT_FORMAT_FILE_LOCATIONS[*]}" )
echo ""
echo "Try setting the environment variable RAPIDS_CMAKE_FORMAT_FILE to the path to the config file."
exit 0
else
echo "Using format file ${RAPIDS_CMAKE_FORMAT_FILE}"
fi
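# $1 selects which cmakelang tool to run; the remaining arguments (${@:2}) are the files to
# process, as forwarded by pre-commit.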
if [[ $1 == "cmake-format" ]]; then
cmake-format -i --config-files cpp/cmake/config.json ${RAPIDS_CMAKE_FORMAT_FILE} -- ${@:2}
elif [[ $1 == "cmake-lint" ]]; then
# Since the pre-commit hook is verbose, we have to be careful to only
# present cmake-lint's output (which is quite verbose) if we actually
# observe a failure.
OUTPUT=$(cmake-lint --config-files cpp/cmake/config.json ${RAPIDS_CMAKE_FORMAT_FILE} -- ${@:2})
status=$?
if ! [ ${status} -eq 0 ]; then
echo "${OUTPUT}"
fi
exit ${status}
fi
rapidsai_public_repos/cudf/cpp/scripts/run-clang-tidy.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import os
import subprocess
import argparse
import json
import multiprocessing as mp
import shutil
EXPECTED_VERSION = "16.0.6"
VERSION_REGEX = re.compile(r" LLVM version ([0-9.]+)")
GPU_ARCH_REGEX = re.compile(r"sm_(\d+)")
SPACES = re.compile(r"\s+")
SEPARATOR = "-" * 16
def parse_args():
argparser = argparse.ArgumentParser("Runs clang-tidy on a project")
argparser.add_argument("-cdb", type=str,
# TODO This is a hack, needs to be fixed
default="cpp/build/cuda-11.5.0/clang-tidy/release/compile_commands.clangd.json",
help="Path to cmake-generated compilation database"
" file. It is always found inside the root of the "
"cmake build folder. So make sure that `cmake` has "
"been run once before running this script!")
argparser.add_argument("-exe", type=str, default="clang-tidy",
help="Path to clang-tidy exe")
argparser.add_argument("-ignore", type=str, default="[.]cu$|examples/kmeans/",
help="Regex used to ignore files from checking")
argparser.add_argument("-select", type=str, default=None,
help="Regex used to select files for checking")
argparser.add_argument("-j", type=int, default=-1,
help="Number of parallel jobs to launch.")
args = argparser.parse_args()
if args.j <= 0:
args.j = mp.cpu_count()
args.ignore_compiled = re.compile(args.ignore) if args.ignore else None
args.select_compiled = re.compile(args.select) if args.select else None
ret = subprocess.check_output("%s --version" % args.exe, shell=True)
ret = ret.decode("utf-8")
version = VERSION_REGEX.search(ret)
if version is None:
raise Exception("Failed to figure out clang-tidy version!")
version = version.group(1)
if version != EXPECTED_VERSION:
raise Exception("clang-tidy exe must be v%s found '%s'" % \
(EXPECTED_VERSION, version))
if not os.path.exists(args.cdb):
raise Exception("Compilation database '%s' missing" % args.cdb)
return args
def get_all_commands(cdb):
with open(cdb) as fp:
return json.load(fp)
def get_gpu_archs(command):
archs = []
for loc in range(len(command)):
if command[loc] != "-gencode":
continue
arch_flag = command[loc + 1]
match = GPU_ARCH_REGEX.search(arch_flag)
if match is not None:
archs.append("--cuda-gpu-arch=sm_%s" % match.group(1))
return archs
def get_index(arr, item):
try:
return arr.index(item)
except ValueError:
return -1
def remove_item(arr, item):
loc = get_index(arr, item)
if loc >= 0:
del arr[loc]
return loc
def remove_item_plus_one(arr, item):
loc = get_index(arr, item)
if loc >= 0:
del arr[loc + 1]
del arr[loc]
return loc
def get_clang_includes(exe):
dir = os.getenv("CONDA_PREFIX")
if dir is None:
ret = subprocess.check_output("which %s 2>&1" % exe, shell=True)
ret = ret.decode("utf-8")
dir = os.path.dirname(os.path.dirname(ret))
header = os.path.join(dir, "include", "ClangHeaders")
return ["-I", header]
def get_tidy_args(cmd, exe):
command, file = cmd["command"], cmd["file"]
is_cuda = file.endswith(".cu")
command = re.split(SPACES, command)
# compiler is always clang++!
command[0] = "clang++"
# remove compilation and output targets from the original command
remove_item_plus_one(command, "-c")
remove_item_plus_one(command, "-o")
if is_cuda:
# replace nvcc's "-gencode ..." with clang's "--cuda-gpu-arch ..."
archs = get_gpu_archs(command)
command.extend(archs)
while True:
loc = remove_item_plus_one(command, "-gencode")
if loc < 0:
break
# "-x cuda" is the right usage in clang
loc = get_index(command, "-x")
if loc >= 0:
command[loc + 1] = "cuda"
remove_item_plus_one(command, "-ccbin")
remove_item(command, "--expt-extended-lambda")
remove_item(command, "--diag_suppress=unrecognized_gcc_pragma")
command.extend(get_clang_includes(exe))
return command, is_cuda
def run_clang_tidy_command(tidy_cmd):
cmd = " ".join(tidy_cmd)
result = subprocess.run(cmd, check=False, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
status = result.returncode == 0
if status:
out = ""
else:
out = "CMD: " + cmd
out += result.stdout.decode("utf-8").rstrip()
return status, out
def run_clang_tidy(cmd, args):
command, is_cuda = get_tidy_args(cmd, args.exe)
tidy_cmd = [args.exe,
"-header-filter='.*cudf/cpp/(src|include|bench|comms).*'",
cmd["file"], "--", ]
tidy_cmd.extend(command)
status = True
out = ""
if is_cuda:
tidy_cmd.append("--cuda-device-only")
tidy_cmd.append(cmd["file"])
ret, out1 = run_clang_tidy_command(tidy_cmd)
out += out1
out += "%s" % SEPARATOR
if not ret:
status = ret
tidy_cmd[-2] = "--cuda-host-only"
ret, out1 = run_clang_tidy_command(tidy_cmd)
if not ret:
status = ret
out += out1
else:
tidy_cmd.append(cmd["file"])
ret, out1 = run_clang_tidy_command(tidy_cmd)
if not ret:
status = ret
out += out1
return status, out, cmd["file"]
# yikes! global var :(
results = []
def collect_result(result):
global results
results.append(result)
def print_result(passed, stdout, file):
status_str = "PASSED" if passed else "FAILED"
print(f"{SEPARATOR} File:{file} {status_str} {SEPARATOR}")
if stdout:
print(stdout)
print(f"{SEPARATOR} File:{file} ENDS {SEPARATOR}")
def print_results():
global results
status = True
for passed, stdout, file in results:
print_result(passed, stdout, file)
if not passed:
status = False
return status
def run_tidy_for_all_files(args, all_files):
pool = None if args.j == 1 else mp.Pool(args.j)
# actual tidy checker
for cmd in all_files:
# skip files that we don't want to look at
if args.ignore_compiled is not None and \
re.search(args.ignore_compiled, cmd["file"]) is not None:
continue
if args.select_compiled is not None and \
re.search(args.select_compiled, cmd["file"]) is None:
continue
if pool is not None:
pool.apply_async(run_clang_tidy, args=(cmd, args),
callback=collect_result)
else:
passed, stdout, file = run_clang_tidy(cmd, args)
collect_result((passed, stdout, file))
if pool is not None:
pool.close()
pool.join()
return print_results()
def main():
args = parse_args()
# Make sure this script is always run from the root of the repo
if not os.path.exists(".git"):
raise Exception("This needs to always be run from the root of repo")
# Check whether the requested clang-tidy executable exists
if shutil.which(args.exe) is None:
print("clang-tidy not found. Exiting...")
return
all_files = get_all_commands(args.cdb)
status = run_tidy_for_all_files(args, all_files)
if not status:
raise Exception("clang-tidy failed! Refer to the errors above.")
if __name__ == "__main__":
main()
rapidsai_public_repos/cudf/cpp/scripts/sort_ninja_log.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
import argparse
import os
import sys
import xml.etree.ElementTree as ET
from pathlib import Path
from xml.dom import minidom
parser = argparse.ArgumentParser()
parser.add_argument(
"log_file", type=str, default=".ninja_log", help=".ninja_log file"
)
parser.add_argument(
"--fmt",
type=str,
default="csv",
choices=["csv", "xml", "html"],
help="output format (to stdout)",
)
parser.add_argument(
"--msg",
type=str,
default=None,
help="optional text file to include at the top of the html output",
)
parser.add_argument(
"--cmp_log",
type=str,
default=None,
help="optional baseline ninja_log to compare results",
)
args = parser.parse_args()
log_file = args.log_file
output_fmt = args.fmt
cmp_file = args.cmp_log
# build a map of the log entries
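# A .ninja_log (format v5) entry is expected to be tab-separated:
#   start_ms  end_ms  restat_mtime  output_path  command_hash
# entry[0]/entry[1] are the times, entry[3] the output file, and entry[4] the
# command hash used to de-duplicate repeated build edges.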
def build_log_map(log_file):
entries = {}
log_path = os.path.dirname(os.path.abspath(log_file))
with open(log_file) as log:
last = 0
files = {}
for line in log:
entry = line.split()
if len(entry) > 4:
obj_file = entry[3]
obj_path = os.path.join(log_path, obj_file)
file_size = (
os.path.getsize(obj_path) if os.path.exists(obj_path) else 0
)
start = int(entry[0])
end = int(entry[1])
# logic based on ninjatracing
if end < last:
files = {}
last = end
files.setdefault(entry[4], (entry[3], start, end, file_size))
# build entries from files dict
for entry in files.values():
entries[entry[0]] = (entry[1], entry[2], entry[3])
return entries
# output results in XML format
def output_xml(entries, sorted_list, args):
root = ET.Element("testsuites")
testsuite = ET.Element(
"testsuite",
attrib={
"name": "build-time",
"tests": str(len(sorted_list)),
"failures": str(0),
"errors": str(0),
},
)
root.append(testsuite)
for name in sorted_list:
entry = entries[name]
build_time = float(entry[1] - entry[0]) / 1000
item = ET.Element(
"testcase",
attrib={
"classname": "BuildTime",
"name": name,
"time": str(build_time),
},
)
testsuite.append(item)
tree = ET.ElementTree(root)
xmlstr = minidom.parseString(ET.tostring(root)).toprettyxml(indent=" ")
print(xmlstr)
# utility converts a millisecond value to a column width in pixels
def time_to_width(value, end):
# map a value from (0,end) to (0,1000)
r = (float(value) / float(end)) * 1000.0
return int(r)
# assign each entry to a thread by analyzing the start/end times and
# slotting them into thread buckets where they fit
def assign_entries_to_threads(entries):
# first sort the entries' keys by end timestamp
sorted_keys = sorted(
list(entries.keys()), key=lambda k: entries[k][1], reverse=True
)
# build the chart data by assigning entries to threads
results = {}
threads = []
for name in sorted_keys:
entry = entries[name]
# assign this entry by finding the first available thread identified
# by the thread's current start time greater than the entry's end time
tid = -1
for t in range(len(threads)):
if threads[t] >= entry[1]:
threads[t] = entry[0]
tid = t
break
# if no current thread found, create a new one with this entry
if tid < 0:
threads.append(entry[0])
tid = len(threads) - 1
# add entry name to the array associated with this tid
if tid not in results.keys():
results[tid] = []
results[tid].append(name)
# first entry has the last end time
end_time = entries[sorted_keys[0]][1]
# return the threaded entries and the last end time
return (results, end_time)
# format the build-time
def format_build_time(input_time):
build_time = abs(input_time)
build_time_str = str(build_time) + " ms"
if build_time > 120000: # 2 minutes
minutes = int(build_time / 60000)
seconds = int(((build_time / 60000) - minutes) * 60)
build_time_str = "{:d}:{:02d} min".format(minutes, seconds)
elif build_time > 1000:
build_time_str = "{:.3f} s".format(build_time / 1000)
if input_time < 0:
build_time_str = "-" + build_time_str
return build_time_str
# format file size
def format_file_size(input_size):
file_size = abs(input_size)
file_size_str = ""
if file_size > 1000000:
file_size_str = "{:.3f} MB".format(file_size / 1000000)
elif file_size > 1000:
file_size_str = "{:.3f} KB".format(file_size / 1000)
elif file_size > 0:
file_size_str = str(file_size) + " bytes"
if input_size < 0:
file_size_str = "-" + file_size_str
return file_size_str
# Output chart results in HTML format
# Builds a standalone html file with no javascript or styles
def output_html(entries, sorted_list, cmp_entries, args):
print("<html><head><title>Build Metrics Report</title>")
print("</head><body>")
if args.msg is not None:
msg_file = Path(args.msg)
if msg_file.is_file():
msg = msg_file.read_text()
print("<p>", msg, "</p>")
# map entries to threads
# the end_time is used to scale all the entries to a fixed output width
threads, end_time = assign_entries_to_threads(entries)
# color ranges for build times
summary = {"red": 0, "yellow": 0, "green": 0, "white": 0}
red = "bgcolor='#FFBBD0'"
yellow = "bgcolor='#FFFF80'"
green = "bgcolor='#AAFFBD'"
white = "bgcolor='#FFFFFF'"
# create the build-time chart
print("<table id='chart' width='1000px' bgcolor='#BBBBBB'>")
for tid in range(len(threads)):
names = threads[tid]
# sort the names for this thread by start time
names = sorted(names, key=lambda k: entries[k][0])
# use the last entry's end time as the total row size
# (this is an estimate and does not have to be exact)
last_entry = entries[names[len(names) - 1]]
last_time = time_to_width(last_entry[1], end_time)
print(
"<tr><td><table width='",
last_time,
"px' border='0' cellspacing='1' cellpadding='0'><tr>",
sep="",
)
prev_end = 0 # used for spacing between entries
# write out each entry for this thread as a column for a single row
for name in names:
entry = entries[name]
start = entry[0]
end = entry[1]
# this handles minor gaps between end of the
# previous entry and the start of the next
if prev_end > 0 and start > prev_end:
size = time_to_width(start - prev_end, end_time)
print("<td width='", size, "px'></td>")
# adjust for the cellspacing
prev_end = end + int(end_time / 500)
build_time = end - start
build_time_str = format_build_time(build_time)
# assign color and accumulate legend values
color = white
if build_time > 300000: # 5 minutes
color = red
summary["red"] += 1
elif build_time > 120000: # 2 minutes
color = yellow
summary["yellow"] += 1
elif build_time > 1000: # 1 second
color = green
summary["green"] += 1
else:
summary["white"] += 1
# compute the pixel width based on build-time
size = max(time_to_width(build_time, end_time), 2)
# output the column for this entry
print("<td height='20px' width='", size, "px' ", sep="", end="")
# title text is shown as hover-text by most browsers
print(color, "title='", end="")
print(name, "\n", build_time_str, "' ", sep="", end="")
# centers the name if it fits in the box
print("align='center' nowrap>", end="")
# use a slightly smaller, fixed-width font
print("<font size='-2' face='courier'>", end="")
# add the file-name if it fits, otherwise, truncate the name
file_name = os.path.basename(name)
if len(file_name) + 3 > size / 7:
abbr_size = int(size / 7) - 3
if abbr_size > 1:
print(file_name[:abbr_size], "...", sep="", end="")
else:
print(file_name, end="")
# done with this entry
print("</font></td>")
# update the entry with just the computed output info
entries[name] = (build_time, color, entry[2])
# add a filler column at the end of each row
print("<td width='*'></td></tr></table></td></tr>")
# done with the chart
print("</table><br/>")
# output detail table in build-time descending order
print("<table id='detail' bgcolor='#EEEEEE'>")
print(
"<tr><th>File</th>", "<th>Compile time</th>", "<th>Size</th>", sep=""
)
if cmp_entries:
print("<th>t-cmp</th>", sep="")
print("</tr>")
for name in sorted_list:
entry = entries[name]
build_time = entry[0]
color = entry[1]
file_size = entry[2]
build_time_str = format_build_time(build_time)
file_size_str = format_file_size(file_size)
# output entry row
print("<tr ", color, "><td>", name, "</td>", sep="", end="")
print("<td align='right'>", build_time_str, "</td>", sep="", end="")
print("<td align='right'>", file_size_str, "</td>", sep="", end="")
# output diff column
cmp_entry = (
cmp_entries[name] if cmp_entries and name in cmp_entries else None
)
if cmp_entry:
diff_time = build_time - (cmp_entry[1] - cmp_entry[0])
diff_time_str = format_build_time(diff_time)
diff_color = white
diff_percent = int((diff_time / build_time) * 100)
if build_time > 60000:
if diff_percent > 20:
diff_color = red
diff_time_str = "<b>" + diff_time_str + "</b>"
elif diff_percent < -20:
diff_color = green
diff_time_str = "<b>" + diff_time_str + "</b>"
elif diff_percent > 0:
diff_color = yellow
print(
"<td align='right' ",
diff_color,
">",
diff_time_str,
"</td>",
sep="",
end="",
)
print("</tr>")
print("</table><br/>")
# include summary table with color legend
print("<table id='legend' border='2' bgcolor='#EEEEEE'>")
print("<tr><td", red, ">time > 5 minutes</td>")
print("<td align='right'>", summary["red"], "</td></tr>")
print("<tr><td", yellow, ">2 minutes < time < 5 minutes</td>")
print("<td align='right'>", summary["yellow"], "</td></tr>")
print("<tr><td", green, ">1 second < time < 2 minutes</td>")
print("<td align='right'>", summary["green"], "</td></tr>")
print("<tr><td", white, ">time < 1 second</td>")
print("<td align='right'>", summary["white"], "</td></tr>")
print("</table>")
if cmp_entries:
print("<table id='legend' border='2' bgcolor='#EEEEEE'>")
print("<tr><td", red, ">time increase > 20%</td></tr>")
print("<tr><td", yellow, ">time increase > 0</td></tr>")
print("<tr><td", green, ">time decrease > 20%</td></tr>")
print(
"<tr><td",
white,
time change">
">time change < 20% or build time < 1 minute</td></tr>",
)
print("</table>")
print("</body></html>")
# output results in CSV format
def output_csv(entries, sorted_list, cmp_entries, args):
print("time,size,file", end="")
if cmp_entries:
print(",diff", end="")
print()
for name in sorted_list:
entry = entries[name]
build_time = entry[1] - entry[0]
file_size = entry[2]
cmp_entry = (
cmp_entries[name] if cmp_entries and name in cmp_entries else None
)
print(build_time, file_size, name, sep=",", end="")
if cmp_entry:
diff_time = build_time - (cmp_entry[1] - cmp_entry[0])
print(",", diff_time, sep="", end="")
print()
# parse log file into map
entries = build_log_map(log_file)
if len(entries) == 0:
print("Could not parse", log_file)
exit()
# sort the entries by build-time (descending order)
sorted_list = sorted(
list(entries.keys()),
key=lambda k: entries[k][1] - entries[k][0],
reverse=True,
)
# load the comparison build log if available
cmp_entries = build_log_map(cmp_file) if cmp_file else None
if output_fmt == "xml":
output_xml(entries, sorted_list, args)
elif output_fmt == "html":
output_html(entries, sorted_list, cmp_entries, args)
else:
output_csv(entries, sorted_list, cmp_entries, args)
rapidsai_public_repos/cudf/cpp/benchmarks/CMakeLists.txt
# =============================================================================
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
find_package(Threads REQUIRED)
add_library(cudf_datagen STATIC common/generate_input.cu)
target_compile_features(cudf_datagen PUBLIC cxx_std_17 cuda_std_17)
target_compile_options(
cudf_datagen PUBLIC "$<$<COMPILE_LANGUAGE:CXX>:${CUDF_CXX_FLAGS}>"
"$<$<COMPILE_LANGUAGE:CUDA>:${CUDF_CUDA_FLAGS}>"
)
target_link_libraries(
cudf_datagen
PUBLIC GTest::gmock GTest::gtest benchmark::benchmark nvbench::nvbench Threads::Threads cudf
cudftestutil
PRIVATE $<TARGET_NAME_IF_EXISTS:conda_env>
)
target_include_directories(
cudf_datagen
PUBLIC "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>" "$<BUILD_INTERFACE:${CUDF_SOURCE_DIR}>"
"$<BUILD_INTERFACE:${CUDF_SOURCE_DIR}/src>"
)
# ##################################################################################################
# * compiler function -----------------------------------------------------------------------------
# Use an OBJECT library so these helper source files are only compiled once
add_library(
cudf_benchmark_common OBJECT "${CUDF_SOURCE_DIR}/tests/utilities/base_fixture.cpp"
synchronization/synchronization.cpp io/cuio_common.cpp
)
target_link_libraries(cudf_benchmark_common PRIVATE cudf_datagen $<TARGET_NAME_IF_EXISTS:conda_env>)
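# CUDF_BENCHMARKS names a single custom command: the initial command below creates the results
# directory, each ConfigureBench call APPENDs its benchmark invocation to it, and the
# run_benchmarks target at the end of this file drives the whole chain.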
add_custom_command(
OUTPUT CUDF_BENCHMARKS
COMMAND echo Running benchmarks
COMMAND mkdir -p results
VERBATIM
COMMENT "Running cudf benchmarks."
USES_TERMINAL
)
# This function takes in a benchmark name and benchmark source and handles setting all of the
# associated properties and linking to build the benchmark
function(ConfigureBench CMAKE_BENCH_NAME)
add_executable(${CMAKE_BENCH_NAME} ${ARGN})
set_target_properties(
${CMAKE_BENCH_NAME}
PROPERTIES RUNTIME_OUTPUT_DIRECTORY "$<BUILD_INTERFACE:${CUDF_BINARY_DIR}/benchmarks>"
INSTALL_RPATH "\$ORIGIN/../../../lib"
CXX_STANDARD 17
CXX_STANDARD_REQUIRED ON
# For std:: support of __int128_t. Can be removed once using cuda::std
CXX_EXTENSIONS ON
CUDA_STANDARD 17
CUDA_STANDARD_REQUIRED ON
)
target_link_libraries(
${CMAKE_BENCH_NAME} PRIVATE cudf_benchmark_common cudf_datagen benchmark::benchmark_main
$<TARGET_NAME_IF_EXISTS:conda_env>
)
add_custom_command(
OUTPUT CUDF_BENCHMARKS
COMMAND ${CMAKE_BENCH_NAME} --benchmark_out_format=json
--benchmark_out=results/${CMAKE_BENCH_NAME}.json
APPEND
COMMENT "Adding ${CMAKE_BENCH_NAME}"
)
install(
TARGETS ${CMAKE_BENCH_NAME}
COMPONENT testing
DESTINATION bin/benchmarks/libcudf
EXCLUDE_FROM_ALL
)
endfunction()
# This function takes in a benchmark name and benchmark source for nvbench benchmarks and handles
# setting all of the associated properties and linking to build the benchmark
function(ConfigureNVBench CMAKE_BENCH_NAME)
add_executable(${CMAKE_BENCH_NAME} ${ARGN} fixture/nvbench_main.cpp)
set_target_properties(
${CMAKE_BENCH_NAME}
PROPERTIES RUNTIME_OUTPUT_DIRECTORY "$<BUILD_INTERFACE:${CUDF_BINARY_DIR}/benchmarks>"
INSTALL_RPATH "\$ORIGIN/../../../lib"
)
target_link_libraries(
${CMAKE_BENCH_NAME} PRIVATE cudf_benchmark_common cudf_datagen nvbench::nvbench
$<TARGET_NAME_IF_EXISTS:conda_env>
)
install(
TARGETS ${CMAKE_BENCH_NAME}
COMPONENT testing
DESTINATION bin/benchmarks/libcudf
EXCLUDE_FROM_ALL
)
endfunction()
# ##################################################################################################
# * column benchmarks -----------------------------------------------------------------------------
ConfigureBench(COLUMN_CONCAT_BENCH column/concatenate.cpp)
# ##################################################################################################
# * gather benchmark ------------------------------------------------------------------------------
ConfigureBench(GATHER_BENCH copying/gather.cu)
# ##################################################################################################
# * scatter benchmark -----------------------------------------------------------------------------
ConfigureBench(SCATTER_BENCH copying/scatter.cu)
# ##################################################################################################
# * lists scatter benchmark -----------------------------------------------------------------------
ConfigureBench(SCATTER_LISTS_BENCH lists/copying/scatter_lists.cu)
# ##################################################################################################
# * Other list-related operations benchmark -------------------------------------------------------
ConfigureNVBench(SET_OPS_NVBENCH lists/set_operations.cpp)
# ##################################################################################################
# * contiguous_split benchmark -------------------------------------------------------------------
ConfigureBench(CONTIGUOUS_SPLIT_BENCH copying/contiguous_split.cu)
# ##################################################################################################
# * shift benchmark -------------------------------------------------------------------------------
ConfigureBench(SHIFT_BENCH copying/shift.cu)
# ##################################################################################################
# * copy-if-else benchmark
# -----------------------------------------------------------------------------
ConfigureBench(COPY_IF_ELSE_BENCH copying/copy_if_else.cpp)
# ##################################################################################################
# * transpose benchmark ---------------------------------------------------------------------------
ConfigureBench(TRANSPOSE_BENCH transpose/transpose.cpp)
# ##################################################################################################
# * apply_boolean_mask benchmark ------------------------------------------------------------------
ConfigureBench(APPLY_BOOLEAN_MASK_BENCH stream_compaction/apply_boolean_mask.cpp)
# ##################################################################################################
# * stream_compaction benchmark -------------------------------------------------------------------
ConfigureNVBench(
STREAM_COMPACTION_NVBENCH
stream_compaction/distinct.cpp
stream_compaction/distinct_count.cpp
stream_compaction/stable_distinct.cpp
stream_compaction/unique.cpp
stream_compaction/unique_count.cpp
)
# ##################################################################################################
# * join benchmark --------------------------------------------------------------------------------
ConfigureBench(JOIN_BENCH join/left_join.cu join/conditional_join.cu)
ConfigureNVBench(JOIN_NVBENCH join/join.cu join/mixed_join.cu)
# ##################################################################################################
# * iterator benchmark ----------------------------------------------------------------------------
ConfigureBench(ITERATOR_BENCH iterator/iterator.cu)
# ##################################################################################################
# * search benchmark ------------------------------------------------------------------------------
ConfigureBench(SEARCH_BENCH search/search.cpp)
ConfigureNVBench(SEARCH_NVBENCH search/contains_scalar.cpp search/contains_table.cpp)
# ##################################################################################################
# * sort benchmark --------------------------------------------------------------------------------
ConfigureBench(SORT_BENCH sort/rank.cpp sort/sort.cpp sort/sort_strings.cpp)
ConfigureNVBench(
SORT_NVBENCH sort/rank_lists.cpp sort/rank_structs.cpp sort/segmented_sort.cpp
sort/sort_lists.cpp sort/sort_structs.cpp
)
# ##################################################################################################
# * quantiles benchmark
# --------------------------------------------------------------------------------
ConfigureBench(QUANTILES_BENCH quantiles/quantiles.cpp)
# ##################################################################################################
# * type_dispatcher benchmark ---------------------------------------------------------------------
ConfigureBench(TYPE_DISPATCHER_BENCH type_dispatcher/type_dispatcher.cu)
# ##################################################################################################
# * reduction benchmark ---------------------------------------------------------------------------
ConfigureBench(
REDUCTION_BENCH reduction/anyall.cpp reduction/dictionary.cpp reduction/minmax.cpp
reduction/reduce.cpp reduction/scan.cpp
)
ConfigureNVBench(
REDUCTION_NVBENCH reduction/rank.cpp reduction/scan_structs.cpp reduction/segmented_reduce.cpp
)
# ##################################################################################################
# * replace benchmark -----------------------------------------------------------------------------
ConfigureBench(REPLACE_BENCH replace/clamp.cpp replace/nans.cpp)
# ##################################################################################################
# * filling benchmark -----------------------------------------------------------------------------
ConfigureBench(FILL_BENCH filling/repeat.cpp)
# ##################################################################################################
# * groupby benchmark -----------------------------------------------------------------------------
ConfigureBench(
GROUPBY_BENCH groupby/group_sum.cpp groupby/group_nth.cpp groupby/group_shift.cpp
groupby/group_struct_values.cpp groupby/group_no_requests.cpp groupby/group_scan.cpp
)
ConfigureNVBench(
GROUPBY_NVBENCH groupby/group_max.cpp groupby/group_nunique.cpp groupby/group_rank.cpp
groupby/group_struct_keys.cpp
)
# ##################################################################################################
# * hashing benchmark -----------------------------------------------------------------------------
ConfigureBench(HASHING_BENCH hashing/partition.cpp)
ConfigureNVBench(HASHING_NVBENCH hashing/hash.cpp)
# ##################################################################################################
# * merge benchmark -------------------------------------------------------------------------------
ConfigureBench(MERGE_BENCH merge/merge.cpp)
ConfigureNVBench(MERGE_NVBENCH merge/merge_structs.cpp merge/merge_lists.cpp)
# ##################################################################################################
# * null_mask benchmark ---------------------------------------------------------------------------
ConfigureBench(NULLMASK_BENCH null_mask/set_null_mask.cpp)
# ##################################################################################################
# * parquet writer benchmark ----------------------------------------------------------------------
ConfigureNVBench(
PARQUET_WRITER_NVBENCH io/parquet/parquet_writer.cpp io/parquet/parquet_writer_chunks.cpp
)
# ##################################################################################################
# * parquet reader benchmark ----------------------------------------------------------------------
ConfigureNVBench(
PARQUET_READER_NVBENCH io/parquet/parquet_reader_input.cpp io/parquet/parquet_reader_options.cpp
)
# ##################################################################################################
# * orc reader benchmark --------------------------------------------------------------------------
ConfigureNVBench(ORC_READER_NVBENCH io/orc/orc_reader_input.cpp io/orc/orc_reader_options.cpp)
# ##################################################################################################
# * csv reader benchmark --------------------------------------------------------------------------
ConfigureNVBench(CSV_READER_NVBENCH io/csv/csv_reader_input.cpp io/csv/csv_reader_options.cpp)
# ##################################################################################################
# * orc writer benchmark --------------------------------------------------------------------------
ConfigureNVBench(ORC_WRITER_NVBENCH io/orc/orc_writer.cpp io/orc/orc_writer_chunks.cpp)
# ##################################################################################################
# * csv writer benchmark --------------------------------------------------------------------------
ConfigureNVBench(CSV_WRITER_NVBENCH io/csv/csv_writer.cpp)
# ##################################################################################################
# * ast benchmark ---------------------------------------------------------------------------------
ConfigureBench(AST_BENCH ast/transform.cpp)
# ##################################################################################################
# * binaryop benchmark ----------------------------------------------------------------------------
ConfigureBench(BINARYOP_BENCH binaryop/binaryop.cpp binaryop/compiled_binaryop.cpp)
# ##################################################################################################
# * nvtext benchmark -------------------------------------------------------------------------------
ConfigureBench(TEXT_BENCH text/ngrams.cpp text/subword.cpp)
ConfigureNVBench(
TEXT_NVBENCH text/edit_distance.cpp text/hash_ngrams.cpp text/jaccard.cpp text/minhash.cpp
text/normalize.cpp text/replace.cpp text/tokenize.cpp text/vocab.cpp
)
# ##################################################################################################
# * strings benchmark ------------------------------------------------------------------------------
ConfigureBench(
STRINGS_BENCH
string/combine.cpp
string/convert_datetime.cpp
string/convert_durations.cpp
string/convert_fixed_point.cpp
string/convert_numerics.cpp
string/copy.cu
string/factory.cu
string/filter.cpp
string/find.cpp
string/repeat_strings.cpp
string/replace.cpp
string/slice.cpp
string/translate.cpp
string/url_decode.cu
)
ConfigureNVBench(
STRINGS_NVBENCH
string/case.cpp
string/char_types.cpp
string/contains.cpp
string/count.cpp
string/extract.cpp
string/gather.cpp
string/join_strings.cpp
string/lengths.cpp
string/like.cpp
string/replace_re.cpp
string/reverse.cpp
string/split.cpp
string/split_re.cpp
)
# ##################################################################################################
# * json benchmark ---------------------------------------------------------------------------------
ConfigureBench(JSON_BENCH json/json.cu)
ConfigureNVBench(FST_NVBENCH io/fst.cu)
ConfigureNVBench(JSON_READER_NVBENCH io/json/nested_json.cpp io/json/json_reader_input.cpp)
ConfigureNVBench(JSON_WRITER_NVBENCH io/json/json_writer.cpp)
# ##################################################################################################
# * io benchmark -----------------------------------------------------------------------------------
ConfigureNVBench(MULTIBYTE_SPLIT_NVBENCH io/text/multibyte_split.cpp)
target_link_libraries(MULTIBYTE_SPLIT_NVBENCH PRIVATE ZLIB::ZLIB)
add_custom_target(
run_benchmarks
DEPENDS CUDF_BENCHMARKS
COMMENT "Custom command for running cudf benchmarks."
)
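# ##################################################################################################
# Note: a new benchmark would be registered with the same helpers used above. The target name and
# source path in the commented line below are hypothetical placeholders for illustration only and
# are not part of the build:
#
# ConfigureNVBench(EXAMPLE_NVBENCH example/example_benchmark.cpp)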
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/replace/nans.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/replace.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/table/table.hpp>
#include <cudf/types.hpp>
class ReplaceNans : public cudf::benchmark {};
template <typename type>
static void BM_replace_nans(benchmark::State& state, bool include_nulls)
{
cudf::size_type const n_rows{(cudf::size_type)state.range(0)};
auto const dtype = cudf::type_to_id<type>();
auto const input = create_random_column(dtype, row_count{n_rows});
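  // for the no-nulls variants, drop the null mask entirely so every row is valid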
if (!include_nulls) input->set_null_mask(rmm::device_buffer{}, 0);
auto zero = cudf::make_fixed_width_scalar<type>(0);
for (auto _ : state) {
cuda_event_timer timer(state, true);
auto result = cudf::replace_nans(*input, *zero);
}
}
#define NANS_BENCHMARK_DEFINE(name, type, nulls) \
BENCHMARK_DEFINE_F(ReplaceNans, name) \
(::benchmark::State & state) { BM_replace_nans<type>(state, nulls); } \
BENCHMARK_REGISTER_F(ReplaceNans, name) \
->UseManualTime() \
->Arg(10000) /* 10k */ \
->Arg(100000) /* 100k */ \
->Arg(1000000) /* 1M */ \
->Arg(10000000) /* 10M */ \
->Arg(100000000); /* 100M */
NANS_BENCHMARK_DEFINE(float32_nulls, float, true);
NANS_BENCHMARK_DEFINE(float64_nulls, double, true);
NANS_BENCHMARK_DEFINE(float32_no_nulls, float, false);
NANS_BENCHMARK_DEFINE(float64_no_nulls, double, false);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/replace/clamp.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/reduction.hpp>
#include <cudf/replace.hpp>
#include <cudf/table/table.hpp>
#include <cudf/types.hpp>
class ReplaceClamp : public cudf::benchmark {};
template <typename type>
static void BM_clamp(benchmark::State& state, bool include_nulls)
{
cudf::size_type const n_rows{(cudf::size_type)state.range(0)};
auto const dtype = cudf::type_to_id<type>();
auto const input = create_random_column(dtype, row_count{n_rows});
if (!include_nulls) input->set_null_mask(rmm::device_buffer{}, 0);
auto [low_scalar, high_scalar] = cudf::minmax(*input);
  // set the clamp bounds to a narrow band [mid - 10, mid + 10] around the column midpoint
{
using ScalarType = cudf::scalar_type_t<type>;
auto lvalue = static_cast<ScalarType*>(low_scalar.get());
auto hvalue = static_cast<ScalarType*>(high_scalar.get());
// super heavy clamp
auto mid = lvalue->value() + (hvalue->value() - lvalue->value()) / 2;
lvalue->set_value(mid - 10);
hvalue->set_value(mid + 10);
}
for (auto _ : state) {
cuda_event_timer timer(state, true);
auto result = cudf::clamp(*input, *low_scalar, *high_scalar);
}
}
#define CLAMP_BENCHMARK_DEFINE(name, type, nulls) \
BENCHMARK_DEFINE_F(ReplaceClamp, name) \
(::benchmark::State & state) { BM_clamp<type>(state, nulls); } \
BENCHMARK_REGISTER_F(ReplaceClamp, name) \
->UseManualTime() \
->Arg(10000) /* 10k */ \
->Arg(100000) /* 100k */ \
->Arg(1000000) /* 1M */ \
->Arg(10000000) /* 10M */ \
->Arg(100000000); /* 100M */
CLAMP_BENCHMARK_DEFINE(int8_no_nulls, int8_t, false);
CLAMP_BENCHMARK_DEFINE(int32_no_nulls, int32_t, false);
CLAMP_BENCHMARK_DEFINE(uint64_no_nulls, uint64_t, false);
CLAMP_BENCHMARK_DEFINE(float_no_nulls, float, false);
CLAMP_BENCHMARK_DEFINE(int16_nulls, int16_t, true);
CLAMP_BENCHMARK_DEFINE(uint32_nulls, uint32_t, true);
CLAMP_BENCHMARK_DEFINE(double_nulls, double, true);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/binaryop/compiled_binaryop.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/binaryop.hpp>
class COMPILED_BINARYOP : public cudf::benchmark {};
template <typename TypeLhs, typename TypeRhs, typename TypeOut>
void BM_compiled_binaryop(benchmark::State& state, cudf::binary_operator binop)
{
auto const column_size{static_cast<cudf::size_type>(state.range(0))};
auto const source_table = create_random_table(
{cudf::type_to_id<TypeLhs>(), cudf::type_to_id<TypeRhs>()}, row_count{column_size});
auto lhs = cudf::column_view(source_table->get_column(0));
auto rhs = cudf::column_view(source_table->get_column(1));
auto output_dtype = cudf::data_type(cudf::type_to_id<TypeOut>());
// Call once for hot cache.
cudf::binary_operation(lhs, rhs, binop, output_dtype);
for (auto _ : state) {
cuda_event_timer timer(state, true);
cudf::binary_operation(lhs, rhs, binop, output_dtype);
}
// use number of bytes read and written to global memory
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * column_size *
(sizeof(TypeLhs) + sizeof(TypeRhs) + sizeof(TypeOut)));
}
// TODO tparam boolean for null.
#define BM_BINARYOP_BENCHMARK_DEFINE(name, lhs, rhs, bop, tout) \
BENCHMARK_DEFINE_F(COMPILED_BINARYOP, name) \
(::benchmark::State & st) \
{ \
BM_compiled_binaryop<lhs, rhs, tout>(st, cudf::binary_operator::bop); \
} \
BENCHMARK_REGISTER_F(COMPILED_BINARYOP, name) \
->Unit(benchmark::kMicrosecond) \
->UseManualTime() \
->Arg(10000) /* 10k */ \
->Arg(100000) /* 100k */ \
->Arg(1000000) /* 1M */ \
->Arg(10000000) /* 10M */ \
->Arg(100000000); /* 100M */
#define build_name(a, b, c, d) a##_##b##_##c##_##d
#define BINARYOP_BENCHMARK_DEFINE(lhs, rhs, bop, tout) \
BM_BINARYOP_BENCHMARK_DEFINE(build_name(bop, lhs, rhs, tout), lhs, rhs, bop, tout)
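// Each BINARYOP_BENCHMARK_DEFINE line below expands to a registered fixture whose name is built by
// token-pasting the operator and types, e.g. ADD_float_int64_t_int32_t for the first entry.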
using cudf::duration_D;
using cudf::duration_ms;
using cudf::duration_ns;
using cudf::duration_s;
using cudf::timestamp_D;
using cudf::timestamp_ms;
using cudf::timestamp_s;
using numeric::decimal32;
// clang-format off
BINARYOP_BENCHMARK_DEFINE(float, int64_t, ADD, int32_t);
BINARYOP_BENCHMARK_DEFINE(float, float, ADD, float);
BINARYOP_BENCHMARK_DEFINE(timestamp_s, duration_s, ADD, timestamp_s);
BINARYOP_BENCHMARK_DEFINE(duration_s, duration_D, SUB, duration_ms);
BINARYOP_BENCHMARK_DEFINE(int64_t, int64_t, SUB, int64_t);
BINARYOP_BENCHMARK_DEFINE(float, float, MUL, int64_t);
BINARYOP_BENCHMARK_DEFINE(duration_s, int64_t, MUL, duration_s);
BINARYOP_BENCHMARK_DEFINE(int64_t, int64_t, DIV, int64_t);
BINARYOP_BENCHMARK_DEFINE(duration_ms, int32_t, DIV, duration_ms);
BINARYOP_BENCHMARK_DEFINE(int64_t, int64_t, TRUE_DIV, int64_t);
BINARYOP_BENCHMARK_DEFINE(int64_t, int64_t, FLOOR_DIV, int64_t);
BINARYOP_BENCHMARK_DEFINE(double, double, MOD, double);
BINARYOP_BENCHMARK_DEFINE(duration_ms, int64_t, MOD, duration_ms);
BINARYOP_BENCHMARK_DEFINE(int32_t, int64_t, PMOD, double);
BINARYOP_BENCHMARK_DEFINE(int32_t, uint8_t, PYMOD, int64_t);
BINARYOP_BENCHMARK_DEFINE(int64_t, int64_t, POW, double);
BINARYOP_BENCHMARK_DEFINE(float, double, LOG_BASE, double);
BINARYOP_BENCHMARK_DEFINE(float, double, ATAN2, double);
BINARYOP_BENCHMARK_DEFINE(int, int, SHIFT_LEFT, int);
BINARYOP_BENCHMARK_DEFINE(int16_t, int64_t, SHIFT_RIGHT, int);
BINARYOP_BENCHMARK_DEFINE(int64_t, int32_t, SHIFT_RIGHT_UNSIGNED, int64_t);
BINARYOP_BENCHMARK_DEFINE(int64_t, int32_t, BITWISE_AND, int16_t);
BINARYOP_BENCHMARK_DEFINE(int16_t, int32_t, BITWISE_OR, int64_t);
BINARYOP_BENCHMARK_DEFINE(int16_t, int64_t, BITWISE_XOR, int32_t);
BINARYOP_BENCHMARK_DEFINE(double, int8_t, LOGICAL_AND, bool);
BINARYOP_BENCHMARK_DEFINE(int16_t, int64_t, LOGICAL_OR, bool);
BINARYOP_BENCHMARK_DEFINE(int32_t, int64_t, EQUAL, bool);
BINARYOP_BENCHMARK_DEFINE(duration_ms, duration_ns, EQUAL, bool);
BINARYOP_BENCHMARK_DEFINE(decimal32, decimal32, NOT_EQUAL, bool);
BINARYOP_BENCHMARK_DEFINE(timestamp_s, timestamp_s, LESS, bool);
BINARYOP_BENCHMARK_DEFINE(timestamp_ms, timestamp_s, GREATER, bool);
BINARYOP_BENCHMARK_DEFINE(duration_ms, duration_ns, NULL_EQUALS, bool);
BINARYOP_BENCHMARK_DEFINE(decimal32, decimal32, NULL_MAX, decimal32);
BINARYOP_BENCHMARK_DEFINE(timestamp_D, timestamp_s, NULL_MIN, timestamp_s);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/binaryop/binaryop.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/binaryop.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <algorithm>
#include <vector>
// This set of benchmarks is designed to be a comparison for the AST benchmarks
enum class TreeType {
IMBALANCED_LEFT // All operator expressions have a left child operator expression and a right
// child column reference
};
template <typename key_type, TreeType tree_type, bool reuse_columns>
class BINARYOP : public cudf::benchmark {};
template <typename key_type, TreeType tree_type, bool reuse_columns>
static void BM_binaryop_transform(benchmark::State& state)
{
auto const table_size{static_cast<cudf::size_type>(state.range(0))};
auto const tree_levels{static_cast<cudf::size_type>(state.range(1))};
// Create table data
auto const n_cols = reuse_columns ? 1 : tree_levels + 1;
auto const source_table = create_sequence_table(
cycle_dtypes({cudf::type_to_id<key_type>()}, n_cols), row_count{table_size});
cudf::table_view table{*source_table};
// Execute benchmark
for (auto _ : state) {
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
// Execute tree that chains additions like (((a + b) + c) + d)
auto const op = cudf::binary_operator::ADD;
auto const result_data_type = cudf::data_type(cudf::type_to_id<key_type>());
if (reuse_columns) {
auto result = cudf::binary_operation(table.column(0), table.column(0), op, result_data_type);
for (cudf::size_type i = 0; i < tree_levels - 1; i++) {
result = cudf::binary_operation(result->view(), table.column(0), op, result_data_type);
}
} else {
auto result = cudf::binary_operation(table.column(0), table.column(1), op, result_data_type);
std::for_each(std::next(table.begin(), 2), table.end(), [&](auto const& col) {
result = cudf::binary_operation(result->view(), col, op, result_data_type);
});
}
}
// Use the number of bytes read from global memory
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * state.range(0) *
(tree_levels + 1) * sizeof(key_type));
}
#define BINARYOP_TRANSFORM_BENCHMARK_DEFINE(name, key_type, tree_type, reuse_columns) \
BENCHMARK_TEMPLATE_DEFINE_F(BINARYOP, name, key_type, tree_type, reuse_columns) \
(::benchmark::State & st) { BM_binaryop_transform<key_type, tree_type, reuse_columns>(st); }
BINARYOP_TRANSFORM_BENCHMARK_DEFINE(binaryop_int32_imbalanced_unique,
int32_t,
TreeType::IMBALANCED_LEFT,
false);
BINARYOP_TRANSFORM_BENCHMARK_DEFINE(binaryop_int32_imbalanced_reuse,
int32_t,
TreeType::IMBALANCED_LEFT,
true);
BINARYOP_TRANSFORM_BENCHMARK_DEFINE(binaryop_double_imbalanced_unique,
double,
TreeType::IMBALANCED_LEFT,
false);
static void CustomRanges(benchmark::internal::Benchmark* b)
{
auto row_counts = std::vector<cudf::size_type>{100'000, 1'000'000, 10'000'000, 100'000'000};
auto operation_counts = std::vector<cudf::size_type>{1, 2, 5, 10};
for (auto const& row_count : row_counts) {
for (auto const& operation_count : operation_counts) {
b->Args({row_count, operation_count});
}
}
}
BENCHMARK_REGISTER_F(BINARYOP, binaryop_int32_imbalanced_unique)
->Apply(CustomRanges)
->Unit(benchmark::kMillisecond)
->UseManualTime();
BENCHMARK_REGISTER_F(BINARYOP, binaryop_int32_imbalanced_reuse)
->Apply(CustomRanges)
->Unit(benchmark::kMillisecond)
->UseManualTime();
BENCHMARK_REGISTER_F(BINARYOP, binaryop_double_imbalanced_unique)
->Apply(CustomRanges)
->Unit(benchmark::kMillisecond)
->UseManualTime();
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/hashing/hash.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf/hashing.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
#include <optional>
static void bench_hash(nvbench::state& state)
{
auto const num_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const nulls = state.get_float64("nulls");
// disable null bitmask if probability is exactly 0.0
bool const no_nulls = nulls == 0.0;
auto const hash_name = state.get_string("hash_name");
data_profile const profile =
data_profile_builder().null_probability(no_nulls ? std::nullopt : std::optional<double>{nulls});
auto const data = create_random_table(
{cudf::type_id::INT64, cudf::type_id::STRING}, row_count{num_rows}, profile);
auto stream = cudf::get_default_stream();
state.set_cuda_stream(nvbench::make_cuda_stream_view(stream.value()));
// collect statistics
cudf::strings_column_view input(data->get_column(1).view());
auto const chars_size = input.chars_size();
// add memory read from string column
state.add_global_memory_reads<nvbench::int8_t>(chars_size);
// add memory read from int64_t column
state.add_global_memory_reads<nvbench::int64_t>(num_rows);
  // add memory read from the null bitmasks
if (!no_nulls) {
state.add_global_memory_reads<nvbench::int8_t>(2 *
cudf::bitmask_allocation_size_bytes(num_rows));
}
// memory written depends on used hash
if (hash_name == "murmurhash3_x86_32") {
state.add_global_memory_writes<nvbench::uint32_t>(num_rows);
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto result = cudf::hashing::murmurhash3_x86_32(data->view());
});
} else if (hash_name == "md5") {
// md5 creates a 32-byte string
state.add_global_memory_writes<nvbench::int8_t>(32 * num_rows);
state.exec(nvbench::exec_tag::sync,
[&](nvbench::launch& launch) { auto result = cudf::hashing::md5(data->view()); });
} else if (hash_name == "spark_murmurhash3_x86_32") {
state.add_global_memory_writes<nvbench::int32_t>(num_rows);
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto result = cudf::hashing::spark_murmurhash3_x86_32(data->view());
});
} else {
state.skip(hash_name + ": unknown hash name");
}
}
NVBENCH_BENCH(bench_hash)
.set_name("hashing")
.add_int64_axis("num_rows", {65536, 16777216})
.add_float64_axis("nulls", {0.0, 0.1})
.add_string_axis("hash_name", {"murmurhash3_x86_32", "md5", "spark_murmurhash3_x86_32"});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/hashing/partition.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/partitioning.hpp>
#include <algorithm>
class Hashing : public cudf::benchmark {};
template <class T>
void BM_hash_partition(benchmark::State& state)
{
auto const num_rows = state.range(0);
auto const num_cols = state.range(1);
auto const num_partitions = state.range(2);
// Create owning columns
auto input_table = create_sequence_table(cycle_dtypes({cudf::type_to_id<T>()}, num_cols),
row_count{static_cast<cudf::size_type>(num_rows)});
auto input = cudf::table_view(*input_table);
auto columns_to_hash = std::vector<cudf::size_type>(num_cols);
std::iota(columns_to_hash.begin(), columns_to_hash.end(), 0);
for (auto _ : state) {
cuda_event_timer timer(state, true);
auto output = cudf::hash_partition(input, columns_to_hash, num_partitions);
}
auto const bytes_read = num_rows * num_cols * sizeof(T);
auto const bytes_written = num_rows * num_cols * sizeof(T);
auto const partition_bytes = num_partitions * sizeof(cudf::size_type);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) *
(bytes_read + bytes_written + partition_bytes));
}
BENCHMARK_DEFINE_F(Hashing, hash_partition)
(::benchmark::State& state) { BM_hash_partition<double>(state); }
static void CustomRanges(benchmark::internal::Benchmark* b)
{
for (int columns = 1; columns <= 256; columns *= 16) {
for (int partitions = 64; partitions <= 1024; partitions *= 2) {
for (int rows = 1 << 17; rows <= 1 << 21; rows *= 2) {
b->Args({rows, columns, partitions});
}
}
}
}
BENCHMARK_REGISTER_F(Hashing, hash_partition)
->Apply(CustomRanges)
->Unit(benchmark::kMillisecond)
->UseManualTime();
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/json/json.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/json/json.hpp>
#include <cudf/strings/detail/strings_children.cuh>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/strings/string_view.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/types.hpp>
#include <thrust/random.h>
class JsonPath : public cudf::benchmark {};
std::vector<std::string> const Books{
R"json({
"category": "reference",
"author": "Nigel Rees",
"title": "Sayings of the Century",
"price": 8.95
})json",
R"json({
"category": "fiction",
"author": "Evelyn Waugh",
"title": "Sword of Honour",
"price": 12.99
})json",
R"json({
"category": "fiction",
"author": "Herman Melville",
"title": "Moby Dick",
"isbn": "0-553-21311-3",
"price": 8.99
})json",
R"json({
"category": "fiction",
"author": "J. R. R. Tolkien",
"title": "The Lord of the Rings",
"isbn": "0-395-19395-8",
"price": 22.99
})json"};
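// rough serialized size (in bytes) of one book entry; used to apportion desired_bytes across rows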
constexpr int Approx_book_size = 110;
std::vector<std::string> const Bicycles{
R"json({"color": "red", "price": 9.95})json",
R"json({"color": "green", "price": 29.95})json",
R"json({"color": "blue", "price": 399.95})json",
R"json({"color": "yellow", "price": 99.95})json",
R"json({"color": "mauve", "price": 199.95})json",
};
constexpr int Approx_bicycle_size = 33;
std::string Misc{"\n\"expensive\": 10\n"};
struct json_benchmark_row_builder {
int const desired_bytes;
cudf::size_type const num_rows;
cudf::column_device_view const d_books_bicycles[2]; // Books, Bicycles strings
cudf::column_device_view const d_book_pct; // Book percentage
cudf::column_device_view const d_misc_order; // Misc-Store order
cudf::column_device_view const d_store_order; // Books-Bicycles order
int32_t* d_offsets{};
char* d_chars{};
thrust::minstd_rand rng{5236};
thrust::uniform_int_distribution<int> dist{};
// internal data structure for {bytes, out_ptr} with operator+=
struct bytes_and_ptr {
cudf::size_type bytes;
char* ptr;
__device__ bytes_and_ptr& operator+=(cudf::string_view const& str_append)
{
bytes += str_append.size_bytes();
if (ptr) { ptr = cudf::strings::detail::copy_string(ptr, str_append); }
return *this;
}
};
__device__ inline void copy_items(int this_idx,
cudf::size_type num_items,
bytes_and_ptr& output_str)
{
using param_type = thrust::uniform_int_distribution<int>::param_type;
dist.param(param_type{0, d_books_bicycles[this_idx].size() - 1});
cudf::string_view comma(",\n", 2);
for (int i = 0; i < num_items; i++) {
if (i > 0) { output_str += comma; }
int idx = dist(rng);
auto item = d_books_bicycles[this_idx].element<cudf::string_view>(idx);
output_str += item;
}
}
__device__ void operator()(cudf::size_type idx)
{
int num_books = 2;
int num_bicycles = 2;
int remaining_bytes = max(
0, desired_bytes - ((num_books * Approx_book_size) + (num_bicycles * Approx_bicycle_size)));
// divide up the remainder between books and bikes
auto book_pct = d_book_pct.element<float>(idx);
// {Misc, store} OR {store, Misc}
// store: {books, bicycles} OR store: {bicycles, books}
float bicycle_pct = 1.0f - book_pct;
num_books += (remaining_bytes * book_pct) / Approx_book_size;
num_bicycles += (remaining_bytes * bicycle_pct) / Approx_bicycle_size;
char* out_ptr = d_chars ? d_chars + d_offsets[idx] : nullptr;
bytes_and_ptr output_str{0, out_ptr};
//
cudf::string_view comma(",\n", 2);
cudf::string_view brace1("{\n", 2);
cudf::string_view store_member_start[2]{{"\"book\": [\n", 10}, {"\"bicycle\": [\n", 13}};
cudf::string_view store("\"store\": {\n", 11);
cudf::string_view Misc{"\"expensive\": 10", 15};
cudf::string_view brace2("\n}", 2);
cudf::string_view square2{"\n]", 2};
output_str += brace1;
if (d_misc_order.element<bool>(idx)) { // Misc. first.
output_str += Misc;
output_str += comma;
}
output_str += store;
for (int store_order = 0; store_order < 2; store_order++) {
if (store_order > 0) { output_str += comma; }
int this_idx = (d_store_order.element<bool>(idx) == store_order);
auto& mem_start = store_member_start[this_idx];
output_str += mem_start;
copy_items(this_idx, this_idx == 0 ? num_books : num_bicycles, output_str);
output_str += square2;
}
output_str += brace2;
if (!d_misc_order.element<bool>(idx)) { // Misc, if not first.
output_str += comma;
output_str += Misc;
}
output_str += brace2;
if (!output_str.ptr) d_offsets[idx] = output_str.bytes;
}
};
auto build_json_string_column(int desired_bytes, int num_rows)
{
data_profile const profile = data_profile_builder().cardinality(0).no_validity().distribution(
cudf::type_id::FLOAT32, distribution_id::UNIFORM, 0.0, 1.0);
auto float_2bool_columns =
create_random_table({cudf::type_id::FLOAT32, cudf::type_id::BOOL8, cudf::type_id::BOOL8},
row_count{num_rows},
profile);
cudf::test::strings_column_wrapper books(Books.begin(), Books.end());
cudf::test::strings_column_wrapper bicycles(Bicycles.begin(), Bicycles.end());
auto d_books = cudf::column_device_view::create(books);
auto d_bicycles = cudf::column_device_view::create(bicycles);
auto d_book_pct = cudf::column_device_view::create(float_2bool_columns->get_column(0));
auto d_misc_order = cudf::column_device_view::create(float_2bool_columns->get_column(1));
auto d_store_order = cudf::column_device_view::create(float_2bool_columns->get_column(2));
json_benchmark_row_builder jb{
desired_bytes, num_rows, {*d_books, *d_bicycles}, *d_book_pct, *d_misc_order, *d_store_order};
auto children = cudf::strings::detail::make_strings_children(
jb, num_rows, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
return cudf::make_strings_column(
num_rows, std::move(children.first), std::move(children.second), 0, {});
}
void BM_case(benchmark::State& state, std::string query_arg)
{
srand(5236);
int num_rows = state.range(0);
int desired_bytes = state.range(1);
auto input = build_json_string_column(desired_bytes, num_rows);
cudf::strings_column_view scv(input->view());
size_t num_chars = scv.chars().size();
std::string json_path(query_arg);
for (auto _ : state) {
cuda_event_timer raii(state, true);
auto result = cudf::get_json_object(scv, json_path);
CUDF_CUDA_TRY(cudaStreamSynchronize(0));
}
  // This isn't strictly accurate: a given query won't necessarily visit every single incoming
  // character, but it is a reasonable approximation of the bytes processed.
state.SetBytesProcessed(state.iterations() * num_chars);
}
#define JSON_BENCHMARK_DEFINE(name, query) \
BENCHMARK_DEFINE_F(JsonPath, name)(::benchmark::State & state) { BM_case(state, query); } \
BENCHMARK_REGISTER_F(JsonPath, name) \
->ArgsProduct({{100, 1000, 100000, 400000}, {300, 600, 4096}}) \
->UseManualTime() \
->Unit(benchmark::kMillisecond);
JSON_BENCHMARK_DEFINE(query0, "$");
JSON_BENCHMARK_DEFINE(query1, "$.store");
JSON_BENCHMARK_DEFINE(query2, "$.store.book");
JSON_BENCHMARK_DEFINE(query3, "$.store.*");
JSON_BENCHMARK_DEFINE(query4, "$.store.book[*]");
JSON_BENCHMARK_DEFINE(query5, "$.store.book[*].category");
JSON_BENCHMARK_DEFINE(query6, "$.store['bicycle']");
JSON_BENCHMARK_DEFINE(query7, "$.store.book[*]['isbn']");
JSON_BENCHMARK_DEFINE(query8, "$.store.bicycle[1]");
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/lists/set_operations.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf/lists/set_operations.hpp>
#include <nvbench/nvbench.cuh>
namespace {
constexpr auto max_list_size = 20;
auto generate_random_lists(cudf::size_type num_rows, cudf::size_type depth, double null_freq)
{
auto builder =
data_profile_builder()
.cardinality(0)
.distribution(cudf::type_id::LIST, distribution_id::UNIFORM, 0, max_list_size)
.list_depth(depth)
.null_probability(null_freq > 0 ? std::optional<double>{null_freq} : std::nullopt);
auto data_table =
create_random_table({cudf::type_id::LIST}, row_count{num_rows}, data_profile{builder});
return std::move(data_table->release().front());
}
template <typename BenchFuncPtr>
void nvbench_set_op(nvbench::state& state, BenchFuncPtr bfunc)
{
auto const num_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const depth = static_cast<cudf::size_type>(state.get_int64("depth"));
auto const null_freq = state.get_float64("null_frequency");
auto const lhs = generate_random_lists(num_rows, depth, null_freq);
auto const rhs = generate_random_lists(num_rows, depth, null_freq);
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
bfunc(cudf::lists_column_view{*lhs},
cudf::lists_column_view{*rhs},
cudf::null_equality::EQUAL,
cudf::nan_equality::ALL_EQUAL,
cudf::get_default_stream(),
rmm::mr::get_current_device_resource());
});
}
} // namespace
void nvbench_have_overlap(nvbench::state& state)
{
nvbench_set_op(state, &cudf::lists::have_overlap);
}
void nvbench_intersect_distinct(nvbench::state& state)
{
nvbench_set_op(state, &cudf::lists::intersect_distinct);
}
NVBENCH_BENCH(nvbench_have_overlap)
.set_name("have_overlap")
.add_int64_power_of_two_axis("num_rows", {10, 13, 16})
.add_int64_axis("depth", {1, 4})
.add_float64_axis("null_frequency", {0, 0.2, 0.8});
NVBENCH_BENCH(nvbench_intersect_distinct)
.set_name("intersect_distinct")
.add_int64_power_of_two_axis("num_rows", {10, 13, 16})
.add_int64_axis("depth", {1, 4})
.add_float64_axis("null_frequency", {0, 0.2, 0.8});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks/lists
|
rapidsai_public_repos/cudf/cpp/benchmarks/lists/copying/scatter_lists.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/exec_policy.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <thrust/random.h>
#include <thrust/sequence.h>
#include <thrust/shuffle.h>
#include <cmath>
class ScatterLists : public cudf::benchmark {};
template <class TypeParam, bool coalesce>
void BM_lists_scatter(::benchmark::State& state)
{
auto stream = cudf::get_default_stream();
auto mr = rmm::mr::get_current_device_resource();
cudf::size_type const base_size{(cudf::size_type)state.range(0)};
cudf::size_type const num_elements_per_row{(cudf::size_type)state.range(1)};
auto const num_rows = (cudf::size_type)ceil(double(base_size) / num_elements_per_row);
auto source_base_col = make_fixed_width_column(cudf::data_type{cudf::type_to_id<TypeParam>()},
base_size,
cudf::mask_state::UNALLOCATED,
stream,
mr);
auto target_base_col = make_fixed_width_column(cudf::data_type{cudf::type_to_id<TypeParam>()},
base_size,
cudf::mask_state::UNALLOCATED,
stream,
mr);
thrust::sequence(rmm::exec_policy(stream),
source_base_col->mutable_view().begin<TypeParam>(),
source_base_col->mutable_view().end<TypeParam>());
thrust::sequence(rmm::exec_policy(stream),
target_base_col->mutable_view().begin<TypeParam>(),
target_base_col->mutable_view().end<TypeParam>());
auto source_offsets =
make_fixed_width_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()},
num_rows + 1,
cudf::mask_state::UNALLOCATED,
stream,
mr);
auto target_offsets =
make_fixed_width_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()},
num_rows + 1,
cudf::mask_state::UNALLOCATED,
stream,
mr);
thrust::sequence(rmm::exec_policy(stream),
source_offsets->mutable_view().begin<cudf::size_type>(),
source_offsets->mutable_view().end<cudf::size_type>(),
0,
num_elements_per_row);
thrust::sequence(rmm::exec_policy(stream),
target_offsets->mutable_view().begin<cudf::size_type>(),
target_offsets->mutable_view().end<cudf::size_type>(),
0,
num_elements_per_row);
auto source = make_lists_column(num_rows,
std::move(source_offsets),
std::move(source_base_col),
0,
cudf::create_null_mask(num_rows, cudf::mask_state::UNALLOCATED),
stream,
mr);
auto target = make_lists_column(num_rows,
std::move(target_offsets),
std::move(target_base_col),
0,
cudf::create_null_mask(num_rows, cudf::mask_state::UNALLOCATED),
stream,
mr);
auto scatter_map = make_fixed_width_column(cudf::data_type{cudf::type_to_id<cudf::size_type>()},
num_rows,
cudf::mask_state::UNALLOCATED,
stream,
mr);
auto m_scatter_map = scatter_map->mutable_view();
thrust::sequence(rmm::exec_policy(stream),
m_scatter_map.begin<cudf::size_type>(),
m_scatter_map.end<cudf::size_type>(),
num_rows - 1,
-1);
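  // the map currently holds a reversed sequence; for the non-coalesced variant, shuffle it so
  // scatter writes land at random target rows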
if (not coalesce) {
thrust::default_random_engine g;
thrust::shuffle(rmm::exec_policy(stream),
m_scatter_map.begin<cudf::size_type>(),
                    m_scatter_map.end<cudf::size_type>(),
g);
}
for (auto _ : state) {
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
scatter(cudf::table_view{{*source}},
*scatter_map,
cudf::table_view{{*target}},
cudf::get_default_stream(),
mr);
}
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * state.range(0) * 2 *
sizeof(TypeParam));
}
#define SBM_BENCHMARK_DEFINE(name, type, coalesce) \
BENCHMARK_DEFINE_F(ScatterLists, name)(::benchmark::State & state) \
{ \
BM_lists_scatter<type, coalesce>(state); \
} \
BENCHMARK_REGISTER_F(ScatterLists, name) \
->RangeMultiplier(8) \
    ->Ranges({{1 << 10, 1 << 25}, {64, 2048}}) /* 1K-32M elements, 64-2048 per row */  \
->UseManualTime();
SBM_BENCHMARK_DEFINE(double_type_coalesce_o, double, true);
SBM_BENCHMARK_DEFINE(double_type_coalesce_x, double, false);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/filling/repeat.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/filling.hpp>
class Repeat : public cudf::benchmark {};
template <class TypeParam, bool nulls>
void BM_repeat(benchmark::State& state)
{
auto const n_rows = static_cast<cudf::size_type>(state.range(0));
auto const n_cols = static_cast<cudf::size_type>(state.range(1));
auto const input_table =
create_sequence_table(cycle_dtypes({cudf::type_to_id<TypeParam>()}, n_cols),
row_count{n_rows},
nulls ? std::optional<double>{1.0} : std::nullopt);
// Create table view
auto input = cudf::table_view(*input_table);
// repeat counts
using sizeT = cudf::size_type;
data_profile const profile = data_profile_builder().cardinality(0).no_validity().distribution(
cudf::type_to_id<sizeT>(), distribution_id::UNIFORM, 0, 3);
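  // each input row is repeated between 0 and 3 times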
auto repeat_count = create_random_column(cudf::type_to_id<sizeT>(), row_count{n_rows}, profile);
// warm up
auto output = cudf::repeat(input, *repeat_count);
for (auto _ : state) {
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
cudf::repeat(input, *repeat_count);
}
auto data_bytes =
(input.num_columns() * input.num_rows() + output->num_columns() * output->num_rows()) *
sizeof(TypeParam);
auto null_bytes =
nulls ? input.num_columns() * cudf::bitmask_allocation_size_bytes(input.num_rows()) +
output->num_columns() * cudf::bitmask_allocation_size_bytes(output->num_rows())
: 0;
state.SetBytesProcessed(state.iterations() * (data_bytes + null_bytes));
}
#define REPEAT_BENCHMARK_DEFINE(name, type, nulls) \
BENCHMARK_DEFINE_F(Repeat, name)(::benchmark::State & state) { BM_repeat<type, nulls>(state); } \
BENCHMARK_REGISTER_F(Repeat, name) \
->RangeMultiplier(8) \
->Ranges({{1 << 10, 1 << 26}, {1, 8}}) \
->UseManualTime() \
->Unit(benchmark::kMillisecond);
REPEAT_BENCHMARK_DEFINE(double_nulls, double, true);
REPEAT_BENCHMARK_DEFINE(double_no_nulls, double, false);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/iterator/iterator.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/device_operators.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/device_uvector.hpp>
#include <cub/device/device_reduce.cuh>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/pair.h>
#include <thrust/reduce.h>
#include <random>
template <typename T>
T random_int(T min, T max)
{
static unsigned seed = 13377331;
static std::mt19937 engine{seed};
static std::uniform_int_distribution<T> uniform{min, max};
return uniform(engine);
}
// -----------------------------------------------------------------------------
template <typename InputIterator, typename OutputIterator, typename T>
inline auto reduce_by_cub(OutputIterator result, InputIterator d_in, int num_items, T init)
{
size_t temp_storage_bytes = 0;
cub::DeviceReduce::Reduce(
nullptr, temp_storage_bytes, d_in, result, num_items, cudf::DeviceSum{}, init);
// Allocate temporary storage
rmm::device_buffer d_temp_storage(temp_storage_bytes, cudf::get_default_stream());
// Run reduction
cub::DeviceReduce::Reduce(
d_temp_storage.data(), temp_storage_bytes, d_in, result, num_items, cudf::DeviceSum{}, init);
return temp_storage_bytes;
}
// -----------------------------------------------------------------------------
template <typename T>
void raw_stream_bench_cub(cudf::column_view& col, rmm::device_uvector<T>& result)
{
// std::cout << "raw stream cub: " << "\t";
T init{0};
auto begin = col.data<T>();
int num_items = col.size();
reduce_by_cub(result.begin(), begin, num_items, init);
};
template <typename T, bool has_null>
void iterator_bench_cub(cudf::column_view& col, rmm::device_uvector<T>& result)
{
// std::cout << "iterator cub " << ( (has_null) ? "<true>: " : "<false>: " ) << "\t";
T init{0};
auto d_col = cudf::column_device_view::create(col);
int num_items = col.size();
if (has_null) {
auto begin = cudf::detail::make_null_replacement_iterator(*d_col, init);
reduce_by_cub(result.begin(), begin, num_items, init);
} else {
auto begin = d_col->begin<T>();
reduce_by_cub(result.begin(), begin, num_items, init);
}
}
// -----------------------------------------------------------------------------
template <typename T>
void raw_stream_bench_thrust(cudf::column_view& col, rmm::device_uvector<T>& result)
{
  // std::cout << "raw stream thrust: " << "\t\t";
T init{0};
auto d_in = col.data<T>();
auto d_end = d_in + col.size();
thrust::reduce(thrust::device, d_in, d_end, init, cudf::DeviceSum{});
}
template <typename T, bool has_null>
void iterator_bench_thrust(cudf::column_view& col, rmm::device_uvector<T>& result)
{
  // std::cout << "iterator thrust " << ( (has_null) ? "<true>: " : "<false>: " ) << "\t";
T init{0};
auto d_col = cudf::column_device_view::create(col);
if (has_null) {
auto d_in = cudf::detail::make_null_replacement_iterator(*d_col, init);
auto d_end = d_in + col.size();
thrust::reduce(thrust::device, d_in, d_end, init, cudf::DeviceSum{});
} else {
auto d_in = d_col->begin<T>();
auto d_end = d_in + col.size();
thrust::reduce(thrust::device, d_in, d_end, init, cudf::DeviceSum{});
}
}
// -----------------------------------------------------------------------------
class Iterator : public cudf::benchmark {};
template <class TypeParam, bool cub_or_thrust, bool raw_or_iterator>
void BM_iterator(benchmark::State& state)
{
cudf::size_type const column_size{(cudf::size_type)state.range(0)};
using T = TypeParam;
auto num_gen = thrust::counting_iterator<cudf::size_type>(0);
cudf::test::fixed_width_column_wrapper<T> wrap_hasnull_F(num_gen, num_gen + column_size);
cudf::column_view hasnull_F = wrap_hasnull_F;
  // Initialize dev_result to zero
auto dev_result = cudf::detail::make_zeroed_device_uvector_sync<TypeParam>(
1, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
for (auto _ : state) {
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
if (cub_or_thrust) {
if (raw_or_iterator) {
raw_stream_bench_cub<T>(hasnull_F, dev_result); // driven by raw pointer
} else {
iterator_bench_cub<T, false>(hasnull_F, dev_result); // driven by riterator without nulls
}
} else {
if (raw_or_iterator) {
raw_stream_bench_thrust<T>(hasnull_F, dev_result); // driven by raw pointer
} else {
iterator_bench_thrust<T, false>(hasnull_F,
dev_result); // driven by riterator without nulls
}
}
}
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * column_size *
sizeof(TypeParam));
}
// operator+ defined for pair iterator reduction
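// the bool member is the validity flag: multiplying by it zeroes out invalid values, and the
// summed flags leave the result valid if either input was valid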
template <typename T>
__device__ thrust::pair<T, bool> operator+(thrust::pair<T, bool> lhs, thrust::pair<T, bool> rhs)
{
return thrust::pair<T, bool>{lhs.first * lhs.second + rhs.first * rhs.second,
lhs.second + rhs.second};
}
// -----------------------------------------------------------------------------
template <typename T, bool has_null>
void pair_iterator_bench_cub(cudf::column_view& col,
rmm::device_uvector<thrust::pair<T, bool>>& result)
{
thrust::pair<T, bool> init{0, false};
auto d_col = cudf::column_device_view::create(col);
int num_items = col.size();
auto begin = d_col->pair_begin<T, has_null>();
reduce_by_cub(result.begin(), begin, num_items, init);
}
template <typename T, bool has_null>
void pair_iterator_bench_thrust(cudf::column_view& col,
rmm::device_uvector<thrust::pair<T, bool>>& result)
{
thrust::pair<T, bool> init{0, false};
auto d_col = cudf::column_device_view::create(col);
auto d_in = d_col->pair_begin<T, has_null>();
auto d_end = d_in + col.size();
thrust::reduce(thrust::device, d_in, d_end, init, cudf::DeviceSum{});
}
template <class TypeParam, bool cub_or_thrust>
void BM_pair_iterator(benchmark::State& state)
{
cudf::size_type const column_size{(cudf::size_type)state.range(0)};
using T = TypeParam;
auto num_gen = thrust::counting_iterator<cudf::size_type>(0);
auto null_gen =
thrust::make_transform_iterator(num_gen, [](cudf::size_type row) { return row % 2 == 0; });
cudf::test::fixed_width_column_wrapper<T> wrap_hasnull_F(num_gen, num_gen + column_size);
cudf::test::fixed_width_column_wrapper<T> wrap_hasnull_T(
num_gen, num_gen + column_size, null_gen);
cudf::column_view hasnull_F = wrap_hasnull_F;
cudf::column_view hasnull_T = wrap_hasnull_T;
  // Initialize dev_result to {0, false}
auto dev_result = cudf::detail::make_zeroed_device_uvector_sync<thrust::pair<T, bool>>(
1, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
for (auto _ : state) {
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
    if (cub_or_thrust) {
      pair_iterator_bench_cub<T, true>(hasnull_T,
                                       dev_result);  // driven by pair iterator with nulls
    } else {
      pair_iterator_bench_thrust<T, true>(hasnull_T,
                                          dev_result);  // driven by pair iterator with nulls
    }
}
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * column_size *
sizeof(TypeParam));
}
#define ITER_BM_BENCHMARK_DEFINE(name, type, cub_or_thrust, raw_or_iterator) \
BENCHMARK_DEFINE_F(Iterator, name)(::benchmark::State & state) \
{ \
BM_iterator<type, cub_or_thrust, raw_or_iterator>(state); \
} \
BENCHMARK_REGISTER_F(Iterator, name) \
->RangeMultiplier(10) \
->Range(1000, 10000000) \
->UseManualTime() \
->Unit(benchmark::kMillisecond);
ITER_BM_BENCHMARK_DEFINE(double_cub_raw, double, true, true);
ITER_BM_BENCHMARK_DEFINE(double_cub_iter, double, true, false);
ITER_BM_BENCHMARK_DEFINE(double_thrust_raw, double, false, true);
ITER_BM_BENCHMARK_DEFINE(double_thrust_iter, double, false, false);
#define PAIRITER_BM_BENCHMARK_DEFINE(name, type, cub_or_thrust) \
BENCHMARK_DEFINE_F(Iterator, name)(::benchmark::State & state) \
{ \
BM_pair_iterator<type, cub_or_thrust>(state); \
} \
BENCHMARK_REGISTER_F(Iterator, name) \
->RangeMultiplier(10) \
->Range(1000, 10000000) \
->UseManualTime() \
->Unit(benchmark::kMillisecond);
PAIRITER_BM_BENCHMARK_DEFINE(double_cub_pair, double, true);
PAIRITER_BM_BENCHMARK_DEFINE(double_thrust_pair, double, false);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/sort/sort_strings.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/sorting.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
class Sort : public cudf::benchmark {};
static void BM_sort(benchmark::State& state)
{
cudf::size_type const n_rows{(cudf::size_type)state.range(0)};
auto const table = create_random_table({cudf::type_id::STRING}, row_count{n_rows});
for (auto _ : state) {
cuda_event_timer raii(state, true, cudf::get_default_stream());
cudf::sort(table->view());
}
}
#define SORT_BENCHMARK_DEFINE(name) \
BENCHMARK_DEFINE_F(Sort, name) \
(::benchmark::State & st) { BM_sort(st); } \
BENCHMARK_REGISTER_F(Sort, name) \
->RangeMultiplier(8) \
->Ranges({{1 << 10, 1 << 24}}) \
->UseManualTime() \
->Unit(benchmark::kMillisecond);
SORT_BENCHMARK_DEFINE(strings)
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/sort/sort_lists.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_nested_types.hpp>
#include <cudf/detail/sorting.hpp>
#include <nvbench/nvbench.cuh>
namespace {
constexpr cudf::size_type min_val = 0;
constexpr cudf::size_type max_val = 100;
void sort_multiple_lists(nvbench::state& state)
{
auto const num_columns = static_cast<cudf::size_type>(state.get_int64("num_columns"));
auto const input_table = create_lists_data(state, num_columns, min_val, max_val);
auto const stream = cudf::get_default_stream();
state.set_cuda_stream(nvbench::make_cuda_stream_view(stream.value()));
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
cudf::detail::sorted_order(
*input_table, {}, {}, stream, rmm::mr::get_current_device_resource());
});
}
void sort_lists_of_structs(nvbench::state& state)
{
auto const num_columns = static_cast<cudf::size_type>(state.get_int64("num_columns"));
auto const lists_table = create_lists_data(state, num_columns, min_val, max_val);
// After having a table of (multiple) lists columns, convert those lists columns into lists of
// structs columns. The children of these structs columns are also children of the original lists
// columns.
// Such resulted lists-of-structs columns are very similar to the original lists-of-integers
// columns so their benchmarks can be somewhat comparable.
std::vector<cudf::column_view> lists_of_structs;
for (auto const& col : lists_table->view()) {
auto const child = col.child(cudf::lists_column_view::child_column_index);
// Put the child column under a struct column having the same null mask/null count.
auto const new_child = cudf::column_view{cudf::data_type{cudf::type_id::STRUCT},
child.size(),
nullptr,
child.null_mask(),
child.null_count(),
child.offset(),
{child}};
auto const converted_col =
cudf::column_view{cudf::data_type{cudf::type_id::LIST},
col.size(),
nullptr,
col.null_mask(),
col.null_count(),
col.offset(),
{col.child(cudf::lists_column_view::offsets_column_index), new_child}};
lists_of_structs.push_back(converted_col);
}
auto const input_table = cudf::table_view{lists_of_structs};
auto const stream = cudf::get_default_stream();
state.set_cuda_stream(nvbench::make_cuda_stream_view(stream.value()));
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
rmm::cuda_stream_view stream_view{launch.get_stream()};
cudf::detail::sorted_order(input_table, {}, {}, stream, rmm::mr::get_current_device_resource());
});
}
} // namespace
void nvbench_sort_lists(nvbench::state& state)
{
auto const has_lists_of_structs = state.get_int64("lists_of_structs") > 0;
if (has_lists_of_structs) {
sort_lists_of_structs(state);
} else {
sort_multiple_lists(state);
}
}
NVBENCH_BENCH(nvbench_sort_lists)
.set_name("sort_list")
.add_int64_power_of_two_axis("size_bytes", {10, 18, 24, 28})
.add_int64_axis("depth", {1, 4})
.add_int64_axis("num_columns", {1})
.add_int64_axis("lists_of_structs", {0, 1})
.add_float64_axis("null_frequency", {0, 0.2});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/sort/rank_types_common.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/aggregation.hpp>
#include <nvbench/nvbench.cuh>
enum class rank_method : int32_t {};
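// Register display strings for cudf::rank_method axis values: the first lambda supplies the short
// name used for the axis value, the second a longer description.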
NVBENCH_DECLARE_ENUM_TYPE_STRINGS(
cudf::rank_method,
[](cudf::rank_method value) {
switch (value) {
case cudf::rank_method::FIRST: return "FIRST";
case cudf::rank_method::AVERAGE: return "AVERAGE";
case cudf::rank_method::MIN: return "MIN";
case cudf::rank_method::MAX: return "MAX";
case cudf::rank_method::DENSE: return "DENSE";
default: return "unknown";
}
},
[](cudf::rank_method value) {
switch (value) {
case cudf::rank_method::FIRST: return "cudf::rank_method::FIRST";
case cudf::rank_method::AVERAGE: return "cudf::rank_method::AVERAGE";
case cudf::rank_method::MIN: return "cudf::rank_method::MIN";
case cudf::rank_method::MAX: return "cudf::rank_method::MAX";
case cudf::rank_method::DENSE: return "cudf::rank_method::DENSE";
default: return "unknown";
}
})
using methods = nvbench::enum_type_list<cudf::rank_method::AVERAGE,
cudf::rank_method::DENSE,
cudf::rank_method::FIRST,
cudf::rank_method::MAX,
cudf::rank_method::MIN>;
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/sort/rank.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/sorting.hpp>
#include <cudf/utilities/default_stream.hpp>
class Rank : public cudf::benchmark {};
static void BM_rank(benchmark::State& state, bool nulls)
{
using Type = int;
cudf::size_type const n_rows{(cudf::size_type)state.range(0)};
  // Create a column with values in the range [0,100)
data_profile profile = data_profile_builder().cardinality(0).distribution(
cudf::type_to_id<Type>(), distribution_id::UNIFORM, 0, 100);
profile.set_null_probability(nulls ? std::optional{0.2} : std::nullopt);
auto keys = create_random_column(cudf::type_to_id<Type>(), row_count{n_rows}, profile);
for (auto _ : state) {
cuda_event_timer raii(state, true, cudf::get_default_stream());
auto result = cudf::rank(keys->view(),
cudf::rank_method::FIRST,
cudf::order::ASCENDING,
nulls ? cudf::null_policy::INCLUDE : cudf::null_policy::EXCLUDE,
cudf::null_order::AFTER,
false);
}
}
#define RANK_BENCHMARK_DEFINE(name, nulls) \
BENCHMARK_DEFINE_F(Rank, name) \
(::benchmark::State & st) { BM_rank(st, nulls); } \
BENCHMARK_REGISTER_F(Rank, name) \
->RangeMultiplier(8) \
->Ranges({{1 << 10, 1 << 26}}) \
->UseManualTime() \
->Unit(benchmark::kMillisecond);
RANK_BENCHMARK_DEFINE(no_nulls, false)
RANK_BENCHMARK_DEFINE(nulls, true)
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/sort/sort_structs.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_nested_types.hpp>
#include <cudf/detail/sorting.hpp>
#include <nvbench/nvbench.cuh>
void nvbench_sort_struct(nvbench::state& state)
{
auto const input = create_structs_data(state);
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
rmm::cuda_stream_view stream_view{launch.get_stream()};
cudf::detail::sorted_order(*input, {}, {}, stream_view, rmm::mr::get_current_device_resource());
});
}
NVBENCH_BENCH(nvbench_sort_struct)
.set_name("sort_struct")
.add_int64_power_of_two_axis("NumRows", {10, 18, 26})
.add_int64_axis("Depth", {0, 1, 8})
.add_int64_axis("Nulls", {0, 1});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/sort/rank_lists.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "rank_types_common.hpp"
#include <benchmarks/common/generate_nested_types.hpp>
#include <cudf/sorting.hpp>
#include <cudf_test/column_utilities.hpp>
#include <nvbench/nvbench.cuh>
template <cudf::rank_method method>
void nvbench_rank_lists(nvbench::state& state, nvbench::type_list<nvbench::enum_type<method>>)
{
auto const table = create_lists_data(state);
auto const null_frequency{state.get_float64("null_frequency")};
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
cudf::rank(table->view().column(0),
method,
cudf::order::ASCENDING,
null_frequency ? cudf::null_policy::INCLUDE : cudf::null_policy::EXCLUDE,
cudf::null_order::AFTER,
rmm::mr::get_current_device_resource());
});
}
NVBENCH_BENCH_TYPES(nvbench_rank_lists, NVBENCH_TYPE_AXES(methods))
.set_name("rank_lists")
.add_int64_power_of_two_axis("size_bytes", {10, 18, 24, 28})
.add_int64_axis("depth", {1, 4})
.add_float64_axis("null_frequency", {0, 0.2});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/sort/segmented_sort.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf/filling.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/sorting.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
void nvbench_segmented_sort(nvbench::state& state)
{
auto const stable = static_cast<bool>(state.get_int64("stable"));
auto const dtype = cudf::type_to_id<int32_t>();
auto const size_bytes = static_cast<size_t>(state.get_int64("size_bytes"));
auto const null_freq = state.get_float64("null_frequency");
auto const row_width = static_cast<cudf::size_type>(state.get_int64("row_width"));
data_profile const table_profile =
data_profile_builder().null_probability(null_freq).distribution(
dtype, distribution_id::UNIFORM, 0, 10);
auto const input =
create_random_table({cudf::type_id::INT32}, table_size_bytes{size_bytes}, table_profile);
auto const rows = input->num_rows();
auto const segments = cudf::sequence((rows / row_width) + 1,
cudf::numeric_scalar<int32_t>(0),
cudf::numeric_scalar<int32_t>(row_width));
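  // `segments` holds the offsets {0, row_width, 2 * row_width, ...}; each consecutive pair of
  // offsets delimits one segment of `row_width` rows for the segmented sort.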
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.add_element_count(size_bytes, "bytes");
state.add_global_memory_reads<nvbench::int32_t>(rows * row_width);
state.add_global_memory_writes<nvbench::int32_t>(rows);
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
if (stable)
cudf::stable_segmented_sorted_order(*input, *segments);
else
cudf::segmented_sorted_order(*input, *segments);
});
}
NVBENCH_BENCH(nvbench_segmented_sort)
.set_name("segmented_sort")
.add_int64_axis("stable", {0, 1})
.add_int64_power_of_two_axis("size_bytes", {16, 18, 20, 22, 24, 28})
.add_float64_axis("null_frequency", {0, 0.1})
.add_int64_axis("row_width", {16, 128, 1024});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/sort/sort.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/sorting.hpp>
#include <cudf/utilities/default_stream.hpp>
template <bool stable>
class Sort : public cudf::benchmark {};
template <bool stable>
static void BM_sort(benchmark::State& state, bool nulls)
{
using Type = int;
auto const dtype = cudf::type_to_id<Type>();
cudf::size_type const n_rows{(cudf::size_type)state.range(0)};
cudf::size_type const n_cols{(cudf::size_type)state.range(1)};
// Create table with values in the range [0,100)
data_profile const profile = data_profile_builder()
.cardinality(0)
.null_probability(nulls ? std::optional{0.01} : std::nullopt)
.distribution(dtype, distribution_id::UNIFORM, 0, 100);
auto input_table = create_random_table(cycle_dtypes({dtype}, n_cols), row_count{n_rows}, profile);
cudf::table_view input{*input_table};
for (auto _ : state) {
cuda_event_timer raii(state, true, cudf::get_default_stream());
auto result = (stable) ? cudf::stable_sorted_order(input) : cudf::sorted_order(input);
}
}
#define SORT_BENCHMARK_DEFINE(name, stable, nulls) \
BENCHMARK_TEMPLATE_DEFINE_F(Sort, name, stable) \
(::benchmark::State & st) { BM_sort<stable>(st, nulls); } \
BENCHMARK_REGISTER_F(Sort, name) \
->RangeMultiplier(8) \
->Ranges({{1 << 10, 1 << 26}, {1, 8}}) \
->UseManualTime() \
->Unit(benchmark::kMillisecond);
SORT_BENCHMARK_DEFINE(unstable_no_nulls, false, false)
SORT_BENCHMARK_DEFINE(stable_no_nulls, true, false)
SORT_BENCHMARK_DEFINE(unstable, false, true)
SORT_BENCHMARK_DEFINE(stable, true, true)
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/sort/rank_structs.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "rank_types_common.hpp"
#include <benchmarks/common/generate_nested_types.hpp>
#include <cudf/sorting.hpp>
#include <nvbench/nvbench.cuh>
template <cudf::rank_method method>
void nvbench_rank_structs(nvbench::state& state, nvbench::type_list<nvbench::enum_type<method>>)
{
auto const table = create_structs_data(state);
bool const nulls{static_cast<bool>(state.get_int64("Nulls"))};
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
cudf::rank(table->view().column(0),
method,
cudf::order::ASCENDING,
nulls ? cudf::null_policy::INCLUDE : cudf::null_policy::EXCLUDE,
cudf::null_order::AFTER,
rmm::mr::get_current_device_resource());
});
}
NVBENCH_BENCH_TYPES(nvbench_rank_structs, NVBENCH_TYPE_AXES(methods))
.set_name("rank_structs")
.add_int64_power_of_two_axis("NumRows", {10, 18, 26})
.add_int64_axis("Depth", {0, 1, 8})
.add_int64_axis("Nulls", {0, 1});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/null_mask/set_null_mask.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/null_mask.hpp>
class SetNullmask : public cudf::benchmark {};
void BM_setnullmask(benchmark::State& state)
{
cudf::size_type const size{(cudf::size_type)state.range(0)};
rmm::device_buffer mask = cudf::create_null_mask(size, cudf::mask_state::UNINITIALIZED);
auto begin = 0, end = size;
for (auto _ : state) {
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
cudf::set_null_mask(static_cast<cudf::bitmask_type*>(mask.data()), begin, end, true);
}
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * size / 8);
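  // Each bit in the null mask covers one row, so roughly size / 8 bytes are written per iteration.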
}
#define NBM_BENCHMARK_DEFINE(name) \
BENCHMARK_DEFINE_F(SetNullmask, name)(::benchmark::State & state) { BM_setnullmask(state); } \
BENCHMARK_REGISTER_F(SetNullmask, name) \
->RangeMultiplier(1 << 10) \
->Range(1 << 10, 1 << 30) \
->UseManualTime();
NBM_BENCHMARK_DEFINE(SetNullMaskKernel);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/synchronization/synchronization.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "synchronization.hpp"
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
cuda_event_timer::cuda_event_timer(benchmark::State& state,
bool flush_l2_cache,
rmm::cuda_stream_view stream)
: stream(stream), p_state(&state)
{
// flush all of L2$
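  // Writing a buffer the size of the L2 cache evicts any previously cached data, so every timed
  // iteration starts from a cold cache.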
if (flush_l2_cache) {
int current_device = 0;
CUDF_CUDA_TRY(cudaGetDevice(¤t_device));
int l2_cache_bytes = 0;
CUDF_CUDA_TRY(cudaDeviceGetAttribute(&l2_cache_bytes, cudaDevAttrL2CacheSize, current_device));
if (l2_cache_bytes > 0) {
int const memset_value = 0;
rmm::device_buffer l2_cache_buffer(l2_cache_bytes, stream);
CUDF_CUDA_TRY(
cudaMemsetAsync(l2_cache_buffer.data(), memset_value, l2_cache_bytes, stream.value()));
}
}
CUDF_CUDA_TRY(cudaEventCreate(&start));
CUDF_CUDA_TRY(cudaEventCreate(&stop));
CUDF_CUDA_TRY(cudaEventRecord(start, stream.value()));
}
cuda_event_timer::~cuda_event_timer()
{
CUDF_CUDA_TRY(cudaEventRecord(stop, stream.value()));
CUDF_CUDA_TRY(cudaEventSynchronize(stop));
float milliseconds = 0.0f;
CUDF_CUDA_TRY(cudaEventElapsedTime(&milliseconds, start, stop));
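  // benchmark::State::SetIterationTime expects seconds, so convert from milliseconds.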
p_state->SetIterationTime(milliseconds / (1000.0f));
CUDF_CUDA_TRY(cudaEventDestroy(start));
CUDF_CUDA_TRY(cudaEventDestroy(stop));
}
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/synchronization/synchronization.hpp
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file synchronization.hpp
* @brief This is the header file for `cuda_event_timer`.
*/
/**
* @brief This class serves as a wrapper for using `cudaEvent_t` as the user
* defined timer within the framework of google benchmark
* (https://github.com/google/benchmark).
*
* It is built on top of the idea of Resource acquisition is initialization
* (RAII). In the following we show a minimal example of how to use this class.
#include <benchmark/benchmark.h>
#include <cudf/utilities/default_stream.hpp>
static void sample_cuda_benchmark(benchmark::State& state) {
for (auto _ : state){
// default stream, could be another stream
rmm::cuda_stream_view stream{cudf::get_default_stream()};
// Create (Construct) an object of this class. You HAVE to pass in the
// benchmark::State object you are using. It measures the time from its
// creation to its destruction that is spent on the specified CUDA stream.
// It also clears the L2 cache by cudaMemset'ing a device buffer that is of
// the size of the L2 cache (if flush_l2_cache is set to true and there is
// an L2 cache on the current device).
cuda_event_timer raii(state, true, stream); // flush_l2_cache = true
    // Now perform the operations that are to be benchmarked
sample_kernel<<<1, 256, 0, stream.value()>>>(); // Possibly launching a CUDA kernel
}
}
// Register the function as a benchmark. You will need to set the `UseManualTime()`
// flag in order to use the timer embedded in this class.
BENCHMARK(sample_cuda_benchmark)->UseManualTime();
*/
#pragma once
// Google Benchmark library
#include <benchmark/benchmark.h>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <driver_types.h>
class cuda_event_timer {
public:
/**
* @brief This c'tor clears the L2$ by cudaMemset'ing a buffer of L2$ size
* and starts the timer.
*
* @param[in,out] state This is the benchmark::State whose timer we are going
* to update.
   * @param[in] flush_l2_cache whether or not to flush the L2 cache before
   * every iteration.
   * @param[in] stream The CUDA stream we are measuring time on.
*/
cuda_event_timer(benchmark::State& state,
bool flush_l2_cache,
rmm::cuda_stream_view stream = cudf::get_default_stream());
// The user must provide a benchmark::State object to set
// the timer so we disable the default c'tor.
cuda_event_timer() = delete;
// The d'tor stops the timer and performs a synchronization.
// Time of the benchmark::State object provided to the c'tor
// will be set to the value given by `cudaEventElapsedTime`.
~cuda_event_timer();
private:
cudaEvent_t start;
cudaEvent_t stop;
rmm::cuda_stream_view stream;
benchmark::State* p_state;
};
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/quantiles/quantiles.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/quantiles.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <thrust/execution_policy.h>
#include <thrust/tabulate.h>
class Quantiles : public cudf::benchmark {};
static void BM_quantiles(benchmark::State& state, bool nulls)
{
using Type = int;
cudf::size_type const n_rows{(cudf::size_type)state.range(0)};
cudf::size_type const n_cols{(cudf::size_type)state.range(1)};
cudf::size_type const n_quantiles{(cudf::size_type)state.range(2)};
// Create columns with values in the range [0,100)
data_profile profile = data_profile_builder().cardinality(0).distribution(
cudf::type_to_id<Type>(), distribution_id::UNIFORM, 0, 100);
  profile.set_null_probability(nulls ? std::optional{0.01}
                                     : std::nullopt);  // 1% nulls or no null mask
auto input_table = create_random_table(
cycle_dtypes({cudf::type_to_id<Type>()}, n_cols), row_count{n_rows}, profile);
auto input = cudf::table_view(*input_table);
std::vector<double> q(n_quantiles);
thrust::tabulate(
thrust::seq, q.begin(), q.end(), [n_quantiles](auto i) { return i * (1.0f / n_quantiles); });
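  // q holds n_quantiles evenly spaced fractions {0, 1/n, 2/n, ...} in [0, 1).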
for (auto _ : state) {
cuda_event_timer raii(state, true, cudf::get_default_stream());
auto result = cudf::quantiles(input, q);
}
}
#define QUANTILES_BENCHMARK_DEFINE(name, nulls) \
BENCHMARK_DEFINE_F(Quantiles, name) \
(::benchmark::State & st) { BM_quantiles(st, nulls); } \
BENCHMARK_REGISTER_F(Quantiles, name) \
->RangeMultiplier(4) \
->Ranges({{1 << 16, 1 << 26}, {1, 8}, {1, 12}}) \
->UseManualTime() \
->Unit(benchmark::kMillisecond);
QUANTILES_BENCHMARK_DEFINE(no_nulls, false)
QUANTILES_BENCHMARK_DEFINE(nulls, true)
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/groupby/group_struct_keys.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/groupby.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
#include <random>
void bench_groupby_struct_keys(nvbench::state& state)
{
using Type = int;
using column_wrapper = cudf::test::fixed_width_column_wrapper<Type>;
std::default_random_engine generator;
std::uniform_int_distribution<int> distribution(0, 100);
cudf::size_type const n_rows{static_cast<cudf::size_type>(state.get_int64("NumRows"))};
cudf::size_type const n_cols{1};
cudf::size_type const depth{static_cast<cudf::size_type>(state.get_int64("Depth"))};
bool const nulls{static_cast<bool>(state.get_int64("Nulls"))};
// Create columns with values in the range [0,100)
std::vector<column_wrapper> columns;
columns.reserve(n_cols);
std::generate_n(std::back_inserter(columns), n_cols, [&]() {
auto const elements = cudf::detail::make_counting_transform_iterator(
0, [&](auto row) { return distribution(generator); });
if (!nulls) return column_wrapper(elements, elements + n_rows);
auto valids =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 10 != 0; });
return column_wrapper(elements, elements + n_rows, valids);
});
std::vector<std::unique_ptr<cudf::column>> cols;
std::transform(columns.begin(), columns.end(), std::back_inserter(cols), [](column_wrapper& col) {
return col.release();
});
std::vector<std::unique_ptr<cudf::column>> child_cols = std::move(cols);
// Add some layers
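  // Each iteration wraps the current children in a new STRUCT column with its own validity,
  // yielding keys that are nested `depth` struct levels deep.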
for (int i = 0; i < depth; i++) {
std::vector<bool> struct_validity;
std::uniform_int_distribution<int> bool_distribution(0, 100 * (i + 1));
std::generate_n(
std::back_inserter(struct_validity), n_rows, [&]() { return bool_distribution(generator); });
cudf::test::structs_column_wrapper struct_col(std::move(child_cols), struct_validity);
child_cols = std::vector<std::unique_ptr<cudf::column>>{};
child_cols.push_back(struct_col.release());
}
data_profile const profile = data_profile_builder().cardinality(0).no_validity().distribution(
cudf::type_to_id<int64_t>(), distribution_id::UNIFORM, 0, 100);
auto const keys_table = cudf::table(std::move(child_cols));
auto const vals = create_random_column(cudf::type_to_id<int64_t>(), row_count{n_rows}, profile);
cudf::groupby::groupby gb_obj(keys_table.view());
std::vector<cudf::groupby::aggregation_request> requests;
requests.emplace_back(cudf::groupby::aggregation_request());
requests[0].values = vals->view();
requests[0].aggregations.push_back(cudf::make_min_aggregation<cudf::groupby_aggregation>());
// Set up nvbench default stream
auto stream = cudf::get_default_stream();
state.set_cuda_stream(nvbench::make_cuda_stream_view(stream.value()));
state.exec(nvbench::exec_tag::sync,
[&](nvbench::launch& launch) { auto const result = gb_obj.aggregate(requests); });
}
NVBENCH_BENCH(bench_groupby_struct_keys)
.set_name("groupby_struct_keys")
.add_int64_power_of_two_axis("NumRows", {10, 16, 20})
.add_int64_axis("Depth", {0, 1, 8})
.add_int64_axis("Nulls", {0, 1});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/groupby/group_no_requests.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/groupby/group_common.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/groupby.hpp>
#include <cudf/sorting.hpp>
class Groupby : public cudf::benchmark {};
void BM_basic_no_requests(benchmark::State& state)
{
cudf::size_type const column_size{(cudf::size_type)state.range(0)};
data_profile const profile = data_profile_builder().cardinality(0).no_validity().distribution(
cudf::type_to_id<int64_t>(), distribution_id::UNIFORM, 0, 100);
auto keys_table =
create_random_table({cudf::type_to_id<int64_t>()}, row_count{column_size}, profile);
std::vector<cudf::groupby::aggregation_request> requests;
for (auto _ : state) {
cuda_event_timer timer(state, true);
cudf::groupby::groupby gb_obj(*keys_table);
auto result = gb_obj.aggregate(requests);
}
}
BENCHMARK_DEFINE_F(Groupby, BasicNoRequest)(::benchmark::State& state)
{
BM_basic_no_requests(state);
}
BENCHMARK_REGISTER_F(Groupby, BasicNoRequest)
->UseManualTime()
->Unit(benchmark::kMillisecond)
->Arg(10000)
->Arg(1000000)
->Arg(10000000)
->Arg(100000000);
void BM_pre_sorted_no_requests(benchmark::State& state)
{
cudf::size_type const column_size{(cudf::size_type)state.range(0)};
data_profile const profile = data_profile_builder().cardinality(0).no_validity().distribution(
cudf::type_to_id<int64_t>(), distribution_id::UNIFORM, 0, 100);
auto keys_table =
create_random_table({cudf::type_to_id<int64_t>()}, row_count{column_size}, profile);
auto sort_order = cudf::sorted_order(*keys_table);
auto sorted_keys = cudf::gather(*keys_table, *sort_order);
// No need to sort values using sort_order because they were generated randomly
std::vector<cudf::groupby::aggregation_request> requests;
for (auto _ : state) {
cuda_event_timer timer(state, true);
cudf::groupby::groupby gb_obj(*sorted_keys, cudf::null_policy::EXCLUDE, cudf::sorted::YES);
auto result = gb_obj.aggregate(requests);
}
}
BENCHMARK_DEFINE_F(Groupby, PreSortedNoRequests)(::benchmark::State& state)
{
BM_pre_sorted_no_requests(state);
}
BENCHMARK_REGISTER_F(Groupby, PreSortedNoRequests)
->UseManualTime()
->Unit(benchmark::kMillisecond)
->Arg(1000000)
->Arg(10000000)
->Arg(100000000);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/groupby/group_sum.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/groupby/group_common.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/groupby.hpp>
#include <cudf/sorting.hpp>
class Groupby : public cudf::benchmark {};
void BM_basic_sum(benchmark::State& state)
{
cudf::size_type const column_size{(cudf::size_type)state.range(0)};
data_profile const profile = data_profile_builder().cardinality(0).no_validity().distribution(
cudf::type_to_id<int64_t>(), distribution_id::UNIFORM, 0, 100);
auto keys = create_random_column(cudf::type_to_id<int64_t>(), row_count{column_size}, profile);
auto vals = create_random_column(cudf::type_to_id<int64_t>(), row_count{column_size}, profile);
cudf::groupby::groupby gb_obj(cudf::table_view({keys->view(), keys->view(), keys->view()}));
std::vector<cudf::groupby::aggregation_request> requests;
requests.emplace_back(cudf::groupby::aggregation_request());
requests[0].values = vals->view();
requests[0].aggregations.push_back(cudf::make_sum_aggregation<cudf::groupby_aggregation>());
for (auto _ : state) {
cuda_event_timer timer(state, true);
auto result = gb_obj.aggregate(requests);
}
}
BENCHMARK_DEFINE_F(Groupby, Basic)(::benchmark::State& state) { BM_basic_sum(state); }
BENCHMARK_REGISTER_F(Groupby, Basic)
->UseManualTime()
->Unit(benchmark::kMillisecond)
->Arg(10000)
->Arg(1000000)
->Arg(10000000)
->Arg(100000000);
void BM_pre_sorted_sum(benchmark::State& state)
{
cudf::size_type const column_size{(cudf::size_type)state.range(0)};
data_profile profile = data_profile_builder().cardinality(0).no_validity().distribution(
cudf::type_to_id<int64_t>(), distribution_id::UNIFORM, 0, 100);
auto keys_table =
create_random_table({cudf::type_to_id<int64_t>()}, row_count{column_size}, profile);
profile.set_null_probability(0.1);
auto vals = create_random_column(cudf::type_to_id<int64_t>(), row_count{column_size}, profile);
auto sort_order = cudf::sorted_order(*keys_table);
auto sorted_keys = cudf::gather(*keys_table, *sort_order);
// No need to sort values using sort_order because they were generated randomly
cudf::groupby::groupby gb_obj(*sorted_keys, cudf::null_policy::EXCLUDE, cudf::sorted::YES);
std::vector<cudf::groupby::aggregation_request> requests;
requests.emplace_back(cudf::groupby::aggregation_request());
requests[0].values = vals->view();
requests[0].aggregations.push_back(cudf::make_sum_aggregation<cudf::groupby_aggregation>());
for (auto _ : state) {
cuda_event_timer timer(state, true);
auto result = gb_obj.aggregate(requests);
}
}
BENCHMARK_DEFINE_F(Groupby, PreSorted)(::benchmark::State& state) { BM_pre_sorted_sum(state); }
BENCHMARK_REGISTER_F(Groupby, PreSorted)
->UseManualTime()
->Unit(benchmark::kMillisecond)
->Arg(1000000)
->Arg(10000000)
->Arg(100000000);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/groupby/group_nth.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/groupby/group_common.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/groupby.hpp>
#include <cudf/sorting.hpp>
class Groupby : public cudf::benchmark {};
void BM_pre_sorted_nth(benchmark::State& state)
{
cudf::size_type const column_size{(cudf::size_type)state.range(0)};
data_profile const profile = data_profile_builder().cardinality(0).no_validity().distribution(
cudf::type_to_id<int64_t>(), distribution_id::UNIFORM, 0, 100);
auto keys_table =
create_random_table({cudf::type_to_id<int64_t>()}, row_count{column_size}, profile);
auto vals = create_random_column(cudf::type_to_id<int64_t>(), row_count{column_size}, profile);
auto sort_order = cudf::sorted_order(*keys_table);
auto sorted_keys = cudf::gather(*keys_table, *sort_order);
// No need to sort values using sort_order because they were generated randomly
cudf::groupby::groupby gb_obj(*sorted_keys, cudf::null_policy::EXCLUDE, cudf::sorted::YES);
std::vector<cudf::groupby::aggregation_request> requests;
requests.emplace_back(cudf::groupby::aggregation_request());
requests[0].values = vals->view();
requests[0].aggregations.push_back(
cudf::make_nth_element_aggregation<cudf::groupby_aggregation>(-1));
for (auto _ : state) {
cuda_event_timer timer(state, true);
auto result = gb_obj.aggregate(requests);
}
}
BENCHMARK_DEFINE_F(Groupby, PreSortedNth)(::benchmark::State& state) { BM_pre_sorted_nth(state); }
BENCHMARK_REGISTER_F(Groupby, PreSortedNth)
->UseManualTime()
->Unit(benchmark::kMillisecond)
->Arg(1000000) /* 1M */
->Arg(10000000) /* 10M */
->Arg(100000000); /* 100M */
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/groupby/group_common.hpp
|
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <random>
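// Returns a uniformly distributed random integer. Note: the engine and distribution are static,
// so the seed is fixed (reproducible runs) and the [min, max] bounds are captured on the first
// call only.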
template <typename T>
T random_int(T min, T max)
{
static unsigned seed = 13377331;
static std::mt19937 engine{seed};
static std::uniform_int_distribution<T> uniform{min, max};
return uniform(engine);
}
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/groupby/group_nunique.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf/groupby.hpp>
#include <nvbench/nvbench.cuh>
namespace {
template <typename... Args>
auto make_aggregation_request_vector(cudf::column_view const& values, Args&&... args)
{
std::vector<std::unique_ptr<cudf::groupby_aggregation>> aggregations;
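  // C++17 fold expression: appends each forwarded aggregation argument to the vector in order.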
(aggregations.emplace_back(std::forward<Args>(args)), ...);
std::vector<cudf::groupby::aggregation_request> requests;
requests.emplace_back(cudf::groupby::aggregation_request{values, std::move(aggregations)});
return requests;
}
} // namespace
template <typename Type>
void bench_groupby_nunique(nvbench::state& state, nvbench::type_list<Type>)
{
auto const size = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const keys = [&] {
data_profile profile = data_profile_builder().cardinality(0).no_validity().distribution(
cudf::type_to_id<int32_t>(), distribution_id::UNIFORM, 0, 100);
return create_random_column(cudf::type_to_id<int32_t>(), row_count{size}, profile);
}();
auto const vals = [&] {
data_profile profile = data_profile_builder().cardinality(0).distribution(
cudf::type_to_id<Type>(), distribution_id::UNIFORM, 0, 1000);
if (const auto null_freq = state.get_float64("null_probability"); null_freq > 0) {
profile.set_null_probability(null_freq);
} else {
profile.set_null_probability(std::nullopt);
}
return create_random_column(cudf::type_to_id<Type>(), row_count{size}, profile);
}();
auto gb_obj =
cudf::groupby::groupby(cudf::table_view({keys->view(), keys->view(), keys->view()}));
auto const requests = make_aggregation_request_vector(
*vals, cudf::make_nunique_aggregation<cudf::groupby_aggregation>());
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::sync,
[&](nvbench::launch& launch) { auto const result = gb_obj.aggregate(requests); });
}
NVBENCH_BENCH_TYPES(bench_groupby_nunique, NVBENCH_TYPE_AXES(nvbench::type_list<int32_t, int64_t>))
.set_name("groupby_nunique")
.add_int64_power_of_two_axis("num_rows", {12, 16, 20, 24})
.add_float64_axis("null_probability", {0, 0.5});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/groupby/group_rank.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/groupby.hpp>
#include <cudf/sorting.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <nvbench/nvbench.cuh>
template <cudf::rank_method method>
static void nvbench_groupby_rank(nvbench::state& state,
nvbench::type_list<nvbench::enum_type<method>>)
{
constexpr auto dtype = cudf::type_to_id<int64_t>();
bool const is_sorted = state.get_int64("is_sorted");
cudf::size_type const column_size = state.get_int64("data_size");
constexpr int num_groups = 100;
data_profile const profile = data_profile_builder().cardinality(0).no_validity().distribution(
dtype, distribution_id::UNIFORM, 0, num_groups);
auto source_table = create_random_table({dtype, dtype}, row_count{column_size}, profile);
// values to be pre-sorted too for groupby rank
if (is_sorted) source_table = cudf::sort(*source_table);
cudf::table_view keys{{source_table->view().column(0)}};
cudf::column_view order_by{source_table->view().column(1)};
auto agg = cudf::make_rank_aggregation<cudf::groupby_scan_aggregation>(method);
std::vector<cudf::groupby::scan_request> requests;
requests.emplace_back(cudf::groupby::scan_request());
requests[0].values = order_by;
requests[0].aggregations.push_back(std::move(agg));
cudf::groupby::groupby gb_obj(
keys, cudf::null_policy::EXCLUDE, is_sorted ? cudf::sorted::YES : cudf::sorted::NO);
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
rmm::cuda_stream_view stream_view{launch.get_stream()};
// groupby scan uses sort implementation
auto result = gb_obj.scan(requests);
});
}
enum class rank_method : int32_t {};
NVBENCH_DECLARE_ENUM_TYPE_STRINGS(
cudf::rank_method,
[](cudf::rank_method value) {
switch (value) {
case cudf::rank_method::FIRST: return "FIRST";
case cudf::rank_method::AVERAGE: return "AVERAGE";
case cudf::rank_method::MIN: return "MIN";
case cudf::rank_method::MAX: return "MAX";
case cudf::rank_method::DENSE: return "DENSE";
default: return "unknown";
}
},
[](cudf::rank_method value) {
switch (value) {
case cudf::rank_method::FIRST: return "cudf::rank_method::FIRST";
case cudf::rank_method::AVERAGE: return "cudf::rank_method::AVERAGE";
case cudf::rank_method::MIN: return "cudf::rank_method::MIN";
case cudf::rank_method::MAX: return "cudf::rank_method::MAX";
case cudf::rank_method::DENSE: return "cudf::rank_method::DENSE";
default: return "unknown";
}
})
using methods = nvbench::enum_type_list<cudf::rank_method::AVERAGE,
cudf::rank_method::DENSE,
cudf::rank_method::FIRST,
cudf::rank_method::MAX,
cudf::rank_method::MIN>;
NVBENCH_BENCH_TYPES(nvbench_groupby_rank, NVBENCH_TYPE_AXES(methods))
.set_type_axes_names({"rank_method"})
.set_name("groupby_rank")
.add_int64_axis("data_size",
{
1000000, // 1M
10000000, // 10M
100000000, // 100M
})
.add_int64_axis("is_sorted", {0, 1});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/groupby/group_max.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf/groupby.hpp>
#include <nvbench/nvbench.cuh>
template <typename Type>
void bench_groupby_max(nvbench::state& state, nvbench::type_list<Type>)
{
auto const size = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const keys = [&] {
data_profile const profile = data_profile_builder().cardinality(0).no_validity().distribution(
cudf::type_to_id<int32_t>(), distribution_id::UNIFORM, 0, 100);
return create_random_column(cudf::type_to_id<int32_t>(), row_count{size}, profile);
}();
auto const vals = [&] {
auto builder = data_profile_builder().cardinality(0).distribution(
cudf::type_to_id<Type>(), distribution_id::UNIFORM, 0, 1000);
if (const auto null_freq = state.get_float64("null_probability"); null_freq > 0) {
builder.null_probability(null_freq);
} else {
builder.no_validity();
}
return create_random_column(cudf::type_to_id<Type>(), row_count{size}, data_profile{builder});
}();
auto keys_view = keys->view();
auto gb_obj = cudf::groupby::groupby(cudf::table_view({keys_view, keys_view, keys_view}));
std::vector<cudf::groupby::aggregation_request> requests;
requests.emplace_back(cudf::groupby::aggregation_request());
requests[0].values = vals->view();
requests[0].aggregations.push_back(cudf::make_max_aggregation<cudf::groupby_aggregation>());
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::sync,
[&](nvbench::launch& launch) { auto const result = gb_obj.aggregate(requests); });
}
NVBENCH_BENCH_TYPES(bench_groupby_max,
NVBENCH_TYPE_AXES(nvbench::type_list<int32_t, int64_t, float, double>))
.set_name("groupby_max")
.add_int64_power_of_two_axis("num_rows", {12, 18, 24})
.add_float64_axis("null_probability", {0, 0.1, 0.9});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/groupby/group_scan.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/groupby/group_common.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/groupby.hpp>
#include <cudf/sorting.hpp>
#include <cudf/table/table.hpp>
class Groupby : public cudf::benchmark {};
void BM_basic_sum_scan(benchmark::State& state)
{
cudf::size_type const column_size{(cudf::size_type)state.range(0)};
data_profile const profile = data_profile_builder().cardinality(0).no_validity().distribution(
cudf::type_to_id<int64_t>(), distribution_id::UNIFORM, 0, 100);
auto keys = create_random_column(cudf::type_to_id<int64_t>(), row_count{column_size}, profile);
auto vals = create_random_column(cudf::type_to_id<int64_t>(), row_count{column_size}, profile);
cudf::groupby::groupby gb_obj(cudf::table_view({keys->view(), keys->view(), keys->view()}));
std::vector<cudf::groupby::scan_request> requests;
requests.emplace_back(cudf::groupby::scan_request());
requests[0].values = vals->view();
requests[0].aggregations.push_back(cudf::make_sum_aggregation<cudf::groupby_scan_aggregation>());
for (auto _ : state) {
cuda_event_timer timer(state, true);
auto result = gb_obj.scan(requests);
}
}
BENCHMARK_DEFINE_F(Groupby, BasicSumScan)(::benchmark::State& state) { BM_basic_sum_scan(state); }
BENCHMARK_REGISTER_F(Groupby, BasicSumScan)
->UseManualTime()
->Unit(benchmark::kMillisecond)
->Arg(1000000)
->Arg(10000000)
->Arg(100000000);
void BM_pre_sorted_sum_scan(benchmark::State& state)
{
cudf::size_type const column_size{(cudf::size_type)state.range(0)};
data_profile profile = data_profile_builder().cardinality(0).no_validity().distribution(
cudf::type_to_id<int64_t>(), distribution_id::UNIFORM, 0, 100);
auto keys_table =
create_random_table({cudf::type_to_id<int64_t>()}, row_count{column_size}, profile);
profile.set_null_probability(0.1);
auto vals = create_random_column(cudf::type_to_id<int64_t>(), row_count{column_size}, profile);
auto sort_order = cudf::sorted_order(*keys_table);
auto sorted_keys = cudf::gather(*keys_table, *sort_order);
// No need to sort values using sort_order because they were generated randomly
cudf::groupby::groupby gb_obj(*sorted_keys, cudf::null_policy::EXCLUDE, cudf::sorted::YES);
std::vector<cudf::groupby::scan_request> requests;
requests.emplace_back(cudf::groupby::scan_request());
requests[0].values = vals->view();
requests[0].aggregations.push_back(cudf::make_sum_aggregation<cudf::groupby_scan_aggregation>());
for (auto _ : state) {
cuda_event_timer timer(state, true);
auto result = gb_obj.scan(requests);
}
}
BENCHMARK_DEFINE_F(Groupby, PreSortedSumScan)(::benchmark::State& state)
{
BM_pre_sorted_sum_scan(state);
}
BENCHMARK_REGISTER_F(Groupby, PreSortedSumScan)
->UseManualTime()
->Unit(benchmark::kMillisecond)
->Arg(1000000)
->Arg(10000000)
->Arg(100000000);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/groupby/group_shift.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/groupby/group_common.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/groupby.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
class Groupby : public cudf::benchmark {};
void BM_group_shift(benchmark::State& state)
{
cudf::size_type const column_size{(cudf::size_type)state.range(0)};
int const num_groups = 100;
data_profile const profile =
data_profile_builder().cardinality(0).null_probability(0.01).distribution(
cudf::type_to_id<int64_t>(), distribution_id::UNIFORM, 0, num_groups);
auto keys_table =
create_random_table({cudf::type_to_id<int64_t>()}, row_count{column_size}, profile);
auto vals_table =
create_random_table({cudf::type_to_id<int64_t>()}, row_count{column_size}, profile);
cudf::groupby::groupby gb_obj(*keys_table);
std::vector<cudf::size_type> offsets{
static_cast<cudf::size_type>(column_size / float(num_groups) * 0.5)}; // forward shift half way
// null fill value
auto fill_value = cudf::make_default_constructed_scalar(cudf::data_type(cudf::type_id::INT64));
// non null fill value
// auto fill_value = cudf::make_fixed_width_scalar(static_cast<int64_t>(42));
for (auto _ : state) {
cuda_event_timer timer(state, true);
auto result = gb_obj.shift(*vals_table, offsets, {*fill_value});
}
}
BENCHMARK_DEFINE_F(Groupby, Shift)(::benchmark::State& state) { BM_group_shift(state); }
BENCHMARK_REGISTER_F(Groupby, Shift)
->Arg(1000000)
->Arg(10000000)
->Arg(100000000)
->UseManualTime()
->Unit(benchmark::kMillisecond);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/groupby/group_struct_values.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/aggregation.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/groupby.hpp>
#include <cudf/structs/structs_column_view.hpp>
static constexpr cudf::size_type num_struct_members = 8;
static constexpr cudf::size_type max_int = 100;
static constexpr cudf::size_type max_str_length = 32;
static auto create_data_table(cudf::size_type n_rows)
{
data_profile const table_profile =
data_profile_builder()
.distribution(cudf::type_id::INT32, distribution_id::UNIFORM, 0, max_int)
.distribution(cudf::type_id::STRING, distribution_id::NORMAL, 0, max_str_length);
// The first two struct members are int32 and string.
// The first column is also used as keys in groupby.
// The subsequent struct members are int32 and string again.
return create_random_table(
cycle_dtypes({cudf::type_id::INT32, cudf::type_id::STRING}, num_struct_members),
row_count{n_rows},
table_profile);
}
// Max aggregation/scan technically has the same performance as min.
template <typename OpType>
void BM_groupby_min_struct(benchmark::State& state)
{
auto const n_rows = static_cast<cudf::size_type>(state.range(0));
auto data_cols = create_data_table(n_rows)->release();
auto const keys_view = data_cols.front()->view();
auto const values =
cudf::make_structs_column(keys_view.size(), std::move(data_cols), 0, rmm::device_buffer());
using RequestType = std::conditional_t<std::is_same_v<OpType, cudf::groupby_aggregation>,
cudf::groupby::aggregation_request,
cudf::groupby::scan_request>;
auto gb_obj = cudf::groupby::groupby(cudf::table_view({keys_view}));
auto requests = std::vector<RequestType>();
requests.emplace_back(RequestType());
requests.front().values = values->view();
requests.front().aggregations.push_back(cudf::make_min_aggregation<OpType>());
for (auto _ : state) {
[[maybe_unused]] auto const timer = cuda_event_timer(state, true);
if constexpr (std::is_same_v<OpType, cudf::groupby_aggregation>) {
[[maybe_unused]] auto const result = gb_obj.aggregate(requests);
} else {
[[maybe_unused]] auto const result = gb_obj.scan(requests);
}
}
}
class Groupby : public cudf::benchmark {};
#define MIN_RANGE 10'000
#define MAX_RANGE 10'000'000
#define REGISTER_BENCHMARK(name, op_type) \
BENCHMARK_DEFINE_F(Groupby, name)(::benchmark::State & state) \
{ \
BM_groupby_min_struct<op_type>(state); \
} \
BENCHMARK_REGISTER_F(Groupby, name) \
->UseManualTime() \
->Unit(benchmark::kMillisecond) \
->RangeMultiplier(4) \
->Ranges({{MIN_RANGE, MAX_RANGE}});
REGISTER_BENCHMARK(Aggregation, cudf::groupby_aggregation)
REGISTER_BENCHMARK(Scan, cudf::groupby_scan_aggregation)
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/join/join.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/join/join_common.hpp>
template <typename key_type, typename payload_type, bool Nullable>
void nvbench_inner_join(nvbench::state& state,
nvbench::type_list<key_type, payload_type, nvbench::enum_type<Nullable>>)
{
skip_helper(state);
auto join = [](cudf::table_view const& left_input,
cudf::table_view const& right_input,
cudf::null_equality compare_nulls,
rmm::cuda_stream_view stream) {
auto const has_nulls = cudf::has_nested_nulls(left_input) || cudf::has_nested_nulls(right_input)
? cudf::nullable_join::YES
: cudf::nullable_join::NO;
cudf::hash_join hj_obj(left_input, has_nulls, compare_nulls, stream);
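    // Build the hash table from `left_input` once, then probe it with `right_input`.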
return hj_obj.inner_join(right_input, std::nullopt, stream);
};
BM_join<key_type, payload_type, Nullable>(state, join);
}
template <typename key_type, typename payload_type, bool Nullable>
void nvbench_left_join(nvbench::state& state,
nvbench::type_list<key_type, payload_type, nvbench::enum_type<Nullable>>)
{
skip_helper(state);
auto join = [](cudf::table_view const& left_input,
cudf::table_view const& right_input,
cudf::null_equality compare_nulls,
rmm::cuda_stream_view stream) {
auto const has_nulls = cudf::has_nested_nulls(left_input) || cudf::has_nested_nulls(right_input)
? cudf::nullable_join::YES
: cudf::nullable_join::NO;
cudf::hash_join hj_obj(left_input, has_nulls, compare_nulls, stream);
return hj_obj.left_join(right_input, std::nullopt, stream);
};
BM_join<key_type, payload_type, Nullable>(state, join);
}
template <typename key_type, typename payload_type, bool Nullable>
void nvbench_full_join(nvbench::state& state,
nvbench::type_list<key_type, payload_type, nvbench::enum_type<Nullable>>)
{
skip_helper(state);
auto join = [](cudf::table_view const& left_input,
cudf::table_view const& right_input,
cudf::null_equality compare_nulls,
rmm::cuda_stream_view stream) {
auto const has_nulls = cudf::has_nested_nulls(left_input) || cudf::has_nested_nulls(right_input)
? cudf::nullable_join::YES
: cudf::nullable_join::NO;
cudf::hash_join hj_obj(left_input, has_nulls, compare_nulls, stream);
return hj_obj.full_join(right_input, std::nullopt, stream);
};
BM_join<key_type, payload_type, Nullable>(state, join);
}
// inner join -----------------------------------------------------------------------
NVBENCH_BENCH_TYPES(nvbench_inner_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int32_t>,
nvbench::type_list<nvbench::int32_t>,
nvbench::enum_type_list<false>))
.set_name("inner_join_32bit")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {100'000, 10'000'000, 80'000'000, 100'000'000})
.add_int64_axis("Probe Table Size",
{100'000, 400'000, 10'000'000, 40'000'000, 100'000'000, 240'000'000});
NVBENCH_BENCH_TYPES(nvbench_inner_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int64_t>,
nvbench::type_list<nvbench::int64_t>,
nvbench::enum_type_list<false>))
.set_name("inner_join_64bit")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {40'000'000, 50'000'000})
.add_int64_axis("Probe Table Size", {50'000'000, 120'000'000});
NVBENCH_BENCH_TYPES(nvbench_inner_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int32_t>,
nvbench::type_list<nvbench::int32_t>,
nvbench::enum_type_list<true>))
.set_name("inner_join_32bit_nulls")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {100'000, 10'000'000, 80'000'000, 100'000'000})
.add_int64_axis("Probe Table Size",
{100'000, 400'000, 10'000'000, 40'000'000, 100'000'000, 240'000'000});
NVBENCH_BENCH_TYPES(nvbench_inner_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int64_t>,
nvbench::type_list<nvbench::int64_t>,
nvbench::enum_type_list<true>))
.set_name("inner_join_64bit_nulls")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {40'000'000, 50'000'000})
.add_int64_axis("Probe Table Size", {50'000'000, 120'000'000});
// left join ------------------------------------------------------------------------
NVBENCH_BENCH_TYPES(nvbench_left_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int32_t>,
nvbench::type_list<nvbench::int32_t>,
nvbench::enum_type_list<false>))
.set_name("left_join_32bit")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {100'000, 10'000'000, 80'000'000, 100'000'000})
.add_int64_axis("Probe Table Size",
{100'000, 400'000, 10'000'000, 40'000'000, 100'000'000, 240'000'000});
NVBENCH_BENCH_TYPES(nvbench_left_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int64_t>,
nvbench::type_list<nvbench::int64_t>,
nvbench::enum_type_list<false>))
.set_name("left_join_64bit")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {40'000'000, 50'000'000})
.add_int64_axis("Probe Table Size", {50'000'000, 120'000'000});
NVBENCH_BENCH_TYPES(nvbench_left_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int32_t>,
nvbench::type_list<nvbench::int32_t>,
nvbench::enum_type_list<true>))
.set_name("left_join_32bit_nulls")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {100'000, 10'000'000, 80'000'000, 100'000'000})
.add_int64_axis("Probe Table Size",
{100'000, 400'000, 10'000'000, 40'000'000, 100'000'000, 240'000'000});
NVBENCH_BENCH_TYPES(nvbench_left_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int64_t>,
nvbench::type_list<nvbench::int64_t>,
nvbench::enum_type_list<true>))
.set_name("left_join_64bit_nulls")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {40'000'000, 50'000'000})
.add_int64_axis("Probe Table Size", {50'000'000, 120'000'000});
// full join ------------------------------------------------------------------------
NVBENCH_BENCH_TYPES(nvbench_full_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int32_t>,
nvbench::type_list<nvbench::int32_t>,
nvbench::enum_type_list<false>))
.set_name("full_join_32bit")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {100'000, 10'000'000, 80'000'000, 100'000'000})
.add_int64_axis("Probe Table Size",
{100'000, 400'000, 10'000'000, 40'000'000, 100'000'000, 240'000'000});
NVBENCH_BENCH_TYPES(nvbench_full_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int64_t>,
nvbench::type_list<nvbench::int64_t>,
nvbench::enum_type_list<false>))
.set_name("full_join_64bit")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {40'000'000, 50'000'000})
.add_int64_axis("Probe Table Size", {50'000'000, 120'000'000});
NVBENCH_BENCH_TYPES(nvbench_full_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int32_t>,
nvbench::type_list<nvbench::int32_t>,
nvbench::enum_type_list<true>))
.set_name("full_join_32bit_nulls")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {100'000, 10'000'000, 80'000'000, 100'000'000})
.add_int64_axis("Probe Table Size",
{100'000, 400'000, 10'000'000, 40'000'000, 100'000'000, 240'000'000});
NVBENCH_BENCH_TYPES(nvbench_full_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int64_t>,
nvbench::type_list<nvbench::int64_t>,
nvbench::enum_type_list<true>))
.set_name("full_join_64bit_nulls")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {40'000'000, 50'000'000})
.add_int64_axis("Probe Table Size", {50'000'000, 120'000'000});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/join/join_common.hpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "generate_input_tables.cuh"
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/ast/expressions.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/valid_if.cuh>
#include <cudf/filling.hpp>
#include <cudf/join.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <nvbench/nvbench.cuh>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random/uniform_int_distribution.h>
#include <vector>
struct null75_generator {
thrust::minstd_rand engine;
thrust::uniform_int_distribution<unsigned> rand_gen;
null75_generator() : engine(), rand_gen() {}
__device__ bool operator()(size_t i)
{
engine.discard(i);
    // true (valid) for about 25% of rows; since valid_if treats true as valid, roughly 75% of rows end up null
return (rand_gen(engine) & 3) == 0;
}
};
enum class join_t { CONDITIONAL, MIXED, HASH };
inline void skip_helper(nvbench::state& state)
{
auto const build_table_size = state.get_int64("Build Table Size");
auto const probe_table_size = state.get_int64("Probe Table Size");
if (build_table_size > probe_table_size) {
state.skip("Large build tables are skipped.");
return;
}
if (build_table_size * 100 <= probe_table_size) {
state.skip("Large probe tables are skipped.");
return;
}
}
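// BM_join is shared by both the google-benchmark and nvbench fixtures (dispatched on
// state_type below). It generates a build table and a probe table, each with two key
// columns and one payload column, using a fixed selectivity of 0.3 and multiplicity of 1,
// and then times the join callable passed in as JoinFunc.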
template <typename key_type,
typename payload_type,
bool Nullable,
join_t join_type = join_t::HASH,
typename state_type,
typename Join>
void BM_join(state_type& state, Join JoinFunc)
{
auto const build_table_size = [&]() {
if constexpr (std::is_same_v<state_type, benchmark::State>) {
return static_cast<cudf::size_type>(state.range(0));
}
if constexpr (std::is_same_v<state_type, nvbench::state>) {
return static_cast<cudf::size_type>(state.get_int64("Build Table Size"));
}
}();
auto const probe_table_size = [&]() {
if constexpr (std::is_same_v<state_type, benchmark::State>) {
return static_cast<cudf::size_type>(state.range(1));
}
if constexpr (std::is_same_v<state_type, nvbench::state>) {
return static_cast<cudf::size_type>(state.get_int64("Probe Table Size"));
}
}();
double const selectivity = 0.3;
int const multiplicity = 1;
// Generate build and probe tables
auto build_random_null_mask = [](int size) {
// roughly 75% nulls
auto validity =
thrust::make_transform_iterator(thrust::make_counting_iterator(0), null75_generator{});
return cudf::detail::valid_if(validity,
validity + size,
thrust::identity<bool>{},
cudf::get_default_stream(),
rmm::mr::get_current_device_resource());
};
std::unique_ptr<cudf::column> build_key_column0 = [&]() {
auto [null_mask, null_count] = build_random_null_mask(build_table_size);
return Nullable ? cudf::make_numeric_column(cudf::data_type(cudf::type_to_id<key_type>()),
build_table_size,
std::move(null_mask),
null_count)
: cudf::make_numeric_column(cudf::data_type(cudf::type_to_id<key_type>()),
build_table_size);
}();
std::unique_ptr<cudf::column> probe_key_column0 = [&]() {
auto [null_mask, null_count] = build_random_null_mask(probe_table_size);
return Nullable ? cudf::make_numeric_column(cudf::data_type(cudf::type_to_id<key_type>()),
probe_table_size,
std::move(null_mask),
null_count)
: cudf::make_numeric_column(cudf::data_type(cudf::type_to_id<key_type>()),
probe_table_size);
}();
generate_input_tables<key_type, cudf::size_type>(
build_key_column0->mutable_view().data<key_type>(),
build_table_size,
probe_key_column0->mutable_view().data<key_type>(),
probe_table_size,
selectivity,
multiplicity);
// Copy build_key_column0 and probe_key_column0 into new columns.
// If Nullable, the new columns will be assigned new nullmasks.
auto const build_key_column1 = [&]() {
auto col = std::make_unique<cudf::column>(build_key_column0->view());
if (Nullable) {
auto [null_mask, null_count] = build_random_null_mask(build_table_size);
col->set_null_mask(std::move(null_mask), null_count);
}
return col;
}();
auto const probe_key_column1 = [&]() {
auto col = std::make_unique<cudf::column>(probe_key_column0->view());
if (Nullable) {
auto [null_mask, null_count] = build_random_null_mask(probe_table_size);
col->set_null_mask(std::move(null_mask), null_count);
}
return col;
}();
auto init = cudf::make_fixed_width_scalar<payload_type>(static_cast<payload_type>(0));
auto build_payload_column = cudf::sequence(build_table_size, *init);
auto probe_payload_column = cudf::sequence(probe_table_size, *init);
CUDF_CHECK_CUDA(0);
cudf::table_view build_table(
{build_key_column0->view(), build_key_column1->view(), *build_payload_column});
cudf::table_view probe_table(
{probe_key_column0->view(), probe_key_column1->view(), *probe_payload_column});
// Setup join parameters and result table
[[maybe_unused]] std::vector<cudf::size_type> columns_to_join = {0};
// Benchmark the inner join operation
if constexpr (std::is_same_v<state_type, benchmark::State> and
(join_type != join_t::CONDITIONAL)) {
for (auto _ : state) {
cuda_event_timer raii(state, true, cudf::get_default_stream());
auto result = JoinFunc(probe_table.select(columns_to_join),
build_table.select(columns_to_join),
cudf::null_equality::UNEQUAL);
}
}
if constexpr (std::is_same_v<state_type, nvbench::state> and (join_type != join_t::CONDITIONAL)) {
if constexpr (join_type == join_t::MIXED) {
auto const col_ref_left_0 = cudf::ast::column_reference(0);
auto const col_ref_right_0 =
cudf::ast::column_reference(0, cudf::ast::table_reference::RIGHT);
auto left_zero_eq_right_zero =
cudf::ast::operation(cudf::ast::ast_operator::EQUAL, col_ref_left_0, col_ref_right_0);
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
rmm::cuda_stream_view stream_view{launch.get_stream()};
auto result = JoinFunc(probe_table.select(columns_to_join),
build_table.select(columns_to_join),
probe_table.select({1}),
build_table.select({1}),
left_zero_eq_right_zero,
cudf::null_equality::UNEQUAL,
stream_view);
});
}
if constexpr (join_type == join_t::HASH) {
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
rmm::cuda_stream_view stream_view{launch.get_stream()};
auto result = JoinFunc(probe_table.select(columns_to_join),
build_table.select(columns_to_join),
cudf::null_equality::UNEQUAL,
stream_view);
});
}
}
// Benchmark conditional join
if constexpr (std::is_same_v<state_type, benchmark::State> and join_type == join_t::CONDITIONAL) {
// Common column references.
auto const col_ref_left_0 = cudf::ast::column_reference(0);
auto const col_ref_right_0 = cudf::ast::column_reference(0, cudf::ast::table_reference::RIGHT);
auto left_zero_eq_right_zero =
cudf::ast::operation(cudf::ast::ast_operator::EQUAL, col_ref_left_0, col_ref_right_0);
for (auto _ : state) {
cuda_event_timer raii(state, true, cudf::get_default_stream());
auto result =
JoinFunc(probe_table, build_table, left_zero_eq_right_zero, cudf::null_equality::UNEQUAL);
}
}
}
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/join/mixed_join.cu
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/join/join_common.hpp>
template <typename key_type, typename payload_type, bool Nullable>
void nvbench_mixed_inner_join(
nvbench::state& state, nvbench::type_list<key_type, payload_type, nvbench::enum_type<Nullable>>)
{
skip_helper(state);
auto join = [](cudf::table_view const& left_equality_input,
cudf::table_view const& right_equality_input,
cudf::table_view const& left_conditional_input,
cudf::table_view const& right_conditional_input,
cudf::ast::operation binary_pred,
cudf::null_equality compare_nulls,
rmm::cuda_stream_view stream) {
return cudf::mixed_inner_join(left_equality_input,
right_equality_input,
left_conditional_input,
right_conditional_input,
binary_pred,
compare_nulls);
};
BM_join<key_type, payload_type, Nullable, join_t::MIXED>(state, join);
}
template <typename key_type, typename payload_type, bool Nullable>
void nvbench_mixed_left_join(
nvbench::state& state, nvbench::type_list<key_type, payload_type, nvbench::enum_type<Nullable>>)
{
skip_helper(state);
auto join = [](cudf::table_view const& left_equality_input,
cudf::table_view const& right_equality_input,
cudf::table_view const& left_conditional_input,
cudf::table_view const& right_conditional_input,
cudf::ast::operation binary_pred,
cudf::null_equality compare_nulls,
rmm::cuda_stream_view stream) {
return cudf::mixed_left_join(left_equality_input,
right_equality_input,
left_conditional_input,
right_conditional_input,
binary_pred,
compare_nulls);
};
BM_join<key_type, payload_type, Nullable, join_t::MIXED>(state, join);
}
template <typename key_type, typename payload_type, bool Nullable>
void nvbench_mixed_full_join(
nvbench::state& state, nvbench::type_list<key_type, payload_type, nvbench::enum_type<Nullable>>)
{
skip_helper(state);
auto join = [](cudf::table_view const& left_equality_input,
cudf::table_view const& right_equality_input,
cudf::table_view const& left_conditional_input,
cudf::table_view const& right_conditional_input,
cudf::ast::operation binary_pred,
cudf::null_equality compare_nulls,
rmm::cuda_stream_view stream) {
return cudf::mixed_full_join(left_equality_input,
right_equality_input,
left_conditional_input,
right_conditional_input,
binary_pred,
compare_nulls);
};
BM_join<key_type, payload_type, Nullable, join_t::MIXED>(state, join);
}
template <typename key_type, typename payload_type, bool Nullable>
void nvbench_mixed_left_semi_join(
nvbench::state& state, nvbench::type_list<key_type, payload_type, nvbench::enum_type<Nullable>>)
{
skip_helper(state);
auto join = [](cudf::table_view const& left_equality_input,
cudf::table_view const& right_equality_input,
cudf::table_view const& left_conditional_input,
cudf::table_view const& right_conditional_input,
cudf::ast::operation binary_pred,
cudf::null_equality compare_nulls,
rmm::cuda_stream_view stream) {
return cudf::mixed_left_semi_join(left_equality_input,
right_equality_input,
left_conditional_input,
right_conditional_input,
binary_pred,
compare_nulls);
};
BM_join<key_type, payload_type, Nullable, join_t::MIXED>(state, join);
}
template <typename key_type, typename payload_type, bool Nullable>
void nvbench_mixed_left_anti_join(
nvbench::state& state, nvbench::type_list<key_type, payload_type, nvbench::enum_type<Nullable>>)
{
skip_helper(state);
auto join = [](cudf::table_view const& left_equality_input,
cudf::table_view const& right_equality_input,
cudf::table_view const& left_conditional_input,
cudf::table_view const& right_conditional_input,
cudf::ast::operation binary_pred,
cudf::null_equality compare_nulls,
rmm::cuda_stream_view stream) {
return cudf::mixed_left_anti_join(left_equality_input,
right_equality_input,
left_conditional_input,
right_conditional_input,
binary_pred,
compare_nulls);
};
BM_join<key_type, payload_type, Nullable, join_t::MIXED>(state, join);
}
// inner join -----------------------------------------------------------------------
NVBENCH_BENCH_TYPES(nvbench_mixed_inner_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int32_t>,
nvbench::type_list<nvbench::int32_t>,
nvbench::enum_type_list<false>))
.set_name("mixed_inner_join_32bit")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {100'000, 10'000'000, 80'000'000, 100'000'000})
.add_int64_axis("Probe Table Size",
{100'000, 400'000, 10'000'000, 40'000'000, 100'000'000, 240'000'000});
NVBENCH_BENCH_TYPES(nvbench_mixed_inner_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int64_t>,
nvbench::type_list<nvbench::int64_t>,
nvbench::enum_type_list<false>))
.set_name("mixed_inner_join_64bit")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {40'000'000, 50'000'000})
.add_int64_axis("Probe Table Size", {50'000'000, 120'000'000});
NVBENCH_BENCH_TYPES(nvbench_mixed_inner_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int32_t>,
nvbench::type_list<nvbench::int32_t>,
nvbench::enum_type_list<true>))
.set_name("mixed_inner_join_32bit_nulls")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {100'000, 10'000'000, 80'000'000, 100'000'000})
.add_int64_axis("Probe Table Size",
{100'000, 400'000, 10'000'000, 40'000'000, 100'000'000, 240'000'000});
NVBENCH_BENCH_TYPES(nvbench_mixed_inner_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int64_t>,
nvbench::type_list<nvbench::int64_t>,
nvbench::enum_type_list<true>))
.set_name("mixed_inner_join_64bit_nulls")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {40'000'000, 50'000'000})
.add_int64_axis("Probe Table Size", {50'000'000, 120'000'000});
// left join ------------------------------------------------------------------------
NVBENCH_BENCH_TYPES(nvbench_mixed_left_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int32_t>,
nvbench::type_list<nvbench::int32_t>,
nvbench::enum_type_list<false>))
.set_name("mixed_left_join_32bit")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {100'000, 10'000'000, 80'000'000, 100'000'000})
.add_int64_axis("Probe Table Size",
{100'000, 400'000, 10'000'000, 40'000'000, 100'000'000, 240'000'000});
NVBENCH_BENCH_TYPES(nvbench_mixed_left_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int64_t>,
nvbench::type_list<nvbench::int64_t>,
nvbench::enum_type_list<false>))
.set_name("mixed_left_join_64bit")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {40'000'000, 50'000'000})
.add_int64_axis("Probe Table Size", {50'000'000, 120'000'000});
NVBENCH_BENCH_TYPES(nvbench_mixed_left_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int32_t>,
nvbench::type_list<nvbench::int32_t>,
nvbench::enum_type_list<true>))
.set_name("mixed_left_join_32bit_nulls")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {100'000, 10'000'000, 80'000'000, 100'000'000})
.add_int64_axis("Probe Table Size",
{100'000, 400'000, 10'000'000, 40'000'000, 100'000'000, 240'000'000});
NVBENCH_BENCH_TYPES(nvbench_mixed_left_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int64_t>,
nvbench::type_list<nvbench::int64_t>,
nvbench::enum_type_list<true>))
.set_name("mixed_left_join_64bit_nulls")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {40'000'000, 50'000'000})
.add_int64_axis("Probe Table Size", {50'000'000, 120'000'000});
// full join ------------------------------------------------------------------------
NVBENCH_BENCH_TYPES(nvbench_mixed_full_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int32_t>,
nvbench::type_list<nvbench::int32_t>,
nvbench::enum_type_list<false>))
.set_name("mixed_full_join_32bit")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {100'000, 10'000'000, 80'000'000, 100'000'000})
.add_int64_axis("Probe Table Size",
{100'000, 400'000, 10'000'000, 40'000'000, 100'000'000, 240'000'000});
NVBENCH_BENCH_TYPES(nvbench_mixed_full_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int64_t>,
nvbench::type_list<nvbench::int64_t>,
nvbench::enum_type_list<false>))
.set_name("mixed_full_join_64bit")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {40'000'000, 50'000'000})
.add_int64_axis("Probe Table Size", {50'000'000, 120'000'000});
NVBENCH_BENCH_TYPES(nvbench_mixed_full_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int32_t>,
nvbench::type_list<nvbench::int32_t>,
nvbench::enum_type_list<true>))
.set_name("mixed_full_join_32bit_nulls")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {100'000, 10'000'000, 80'000'000, 100'000'000})
.add_int64_axis("Probe Table Size",
{100'000, 400'000, 10'000'000, 40'000'000, 100'000'000, 240'000'000});
NVBENCH_BENCH_TYPES(nvbench_mixed_full_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int64_t>,
nvbench::type_list<nvbench::int64_t>,
nvbench::enum_type_list<true>))
.set_name("mixed_full_join_64bit_nulls")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {40'000'000, 50'000'000})
.add_int64_axis("Probe Table Size", {50'000'000, 120'000'000});
// left semi join ------------------------------------------------------------------------
NVBENCH_BENCH_TYPES(nvbench_mixed_left_semi_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int32_t>,
nvbench::type_list<nvbench::int32_t>,
nvbench::enum_type_list<false>))
.set_name("mixed_left_semi_join_32bit")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {100'000, 10'000'000, 80'000'000, 100'000'000})
.add_int64_axis("Probe Table Size",
{100'000, 400'000, 10'000'000, 40'000'000, 100'000'000, 240'000'000});
NVBENCH_BENCH_TYPES(nvbench_mixed_left_semi_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int64_t>,
nvbench::type_list<nvbench::int64_t>,
nvbench::enum_type_list<false>))
.set_name("mixed_left_semi_join_64bit")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {40'000'000, 50'000'000})
.add_int64_axis("Probe Table Size", {50'000'000, 120'000'000});
NVBENCH_BENCH_TYPES(nvbench_mixed_left_semi_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int32_t>,
nvbench::type_list<nvbench::int32_t>,
nvbench::enum_type_list<true>))
.set_name("mixed_left_semi_join_32bit_nulls")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {100'000, 10'000'000, 80'000'000, 100'000'000})
.add_int64_axis("Probe Table Size",
{100'000, 400'000, 10'000'000, 40'000'000, 100'000'000, 240'000'000});
NVBENCH_BENCH_TYPES(nvbench_mixed_left_semi_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int64_t>,
nvbench::type_list<nvbench::int64_t>,
nvbench::enum_type_list<true>))
.set_name("mixed_left_semi_join_64bit_nulls")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {40'000'000, 50'000'000})
.add_int64_axis("Probe Table Size", {50'000'000, 120'000'000});
// left anti join ------------------------------------------------------------------------
NVBENCH_BENCH_TYPES(nvbench_mixed_left_anti_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int32_t>,
nvbench::type_list<nvbench::int32_t>,
nvbench::enum_type_list<false>))
.set_name("mixed_left_anti_join_32bit")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {100'000, 10'000'000, 80'000'000, 100'000'000})
.add_int64_axis("Probe Table Size",
{100'000, 400'000, 10'000'000, 40'000'000, 100'000'000, 240'000'000});
NVBENCH_BENCH_TYPES(nvbench_mixed_left_anti_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int64_t>,
nvbench::type_list<nvbench::int64_t>,
nvbench::enum_type_list<false>))
.set_name("mixed_left_anti_join_64bit")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {40'000'000, 50'000'000})
.add_int64_axis("Probe Table Size", {50'000'000, 120'000'000});
NVBENCH_BENCH_TYPES(nvbench_mixed_left_anti_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int32_t>,
nvbench::type_list<nvbench::int32_t>,
nvbench::enum_type_list<true>))
.set_name("mixed_left_anti_join_32bit_nulls")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {100'000, 10'000'000, 80'000'000, 100'000'000})
.add_int64_axis("Probe Table Size",
{100'000, 400'000, 10'000'000, 40'000'000, 100'000'000, 240'000'000});
NVBENCH_BENCH_TYPES(nvbench_mixed_left_anti_join,
NVBENCH_TYPE_AXES(nvbench::type_list<nvbench::int64_t>,
nvbench::type_list<nvbench::int64_t>,
nvbench::enum_type_list<true>))
.set_name("mixed_left_anti_join_64bit_nulls")
.set_type_axes_names({"Key Type", "Payload Type", "Nullable"})
.add_int64_axis("Build Table Size", {40'000'000, 50'000'000})
.add_int64_axis("Probe Table Size", {50'000'000, 120'000'000});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/join/left_join.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/join/join_common.hpp>
template <typename key_type, typename payload_type>
class Join : public cudf::benchmark {};
#define LEFT_ANTI_JOIN_BENCHMARK_DEFINE(name, key_type, payload_type, nullable) \
BENCHMARK_TEMPLATE_DEFINE_F(Join, name, key_type, payload_type) \
(::benchmark::State & st) \
{ \
auto join = [](cudf::table_view const& left, \
cudf::table_view const& right, \
cudf::null_equality compare_nulls) { \
return cudf::left_anti_join(left, right, compare_nulls); \
}; \
BM_join<key_type, payload_type, nullable>(st, join); \
}
LEFT_ANTI_JOIN_BENCHMARK_DEFINE(left_anti_join_32bit, int32_t, int32_t, false);
LEFT_ANTI_JOIN_BENCHMARK_DEFINE(left_anti_join_64bit, int64_t, int64_t, false);
LEFT_ANTI_JOIN_BENCHMARK_DEFINE(left_anti_join_32bit_nulls, int32_t, int32_t, true);
LEFT_ANTI_JOIN_BENCHMARK_DEFINE(left_anti_join_64bit_nulls, int64_t, int64_t, true);
#define LEFT_SEMI_JOIN_BENCHMARK_DEFINE(name, key_type, payload_type, nullable) \
BENCHMARK_TEMPLATE_DEFINE_F(Join, name, key_type, payload_type) \
(::benchmark::State & st) \
{ \
auto join = [](cudf::table_view const& left, \
cudf::table_view const& right, \
cudf::null_equality compare_nulls) { \
return cudf::left_semi_join(left, right, compare_nulls); \
}; \
BM_join<key_type, payload_type, nullable>(st, join); \
}
LEFT_SEMI_JOIN_BENCHMARK_DEFINE(left_semi_join_32bit, int32_t, int32_t, false);
LEFT_SEMI_JOIN_BENCHMARK_DEFINE(left_semi_join_64bit, int64_t, int64_t, false);
LEFT_SEMI_JOIN_BENCHMARK_DEFINE(left_semi_join_32bit_nulls, int32_t, int32_t, true);
LEFT_SEMI_JOIN_BENCHMARK_DEFINE(left_semi_join_64bit_nulls, int64_t, int64_t, true);
// left anti-join -------------------------------------------------------------
BENCHMARK_REGISTER_F(Join, left_anti_join_32bit)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({100'000, 1'000'000})
->Args({10'000'000, 10'000'000})
->Args({10'000'000, 40'000'000})
->Args({10'000'000, 100'000'000})
->Args({100'000'000, 100'000'000})
->Args({80'000'000, 240'000'000})
->UseManualTime();
BENCHMARK_REGISTER_F(Join, left_anti_join_64bit)
->Unit(benchmark::kMillisecond)
->Args({50'000'000, 50'000'000})
->Args({40'000'000, 120'000'000})
->UseManualTime();
BENCHMARK_REGISTER_F(Join, left_anti_join_32bit_nulls)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({100'000, 1'000'000})
->Args({10'000'000, 10'000'000})
->Args({10'000'000, 40'000'000})
->Args({10'000'000, 100'000'000})
->Args({100'000'000, 100'000'000})
->Args({80'000'000, 240'000'000})
->UseManualTime();
BENCHMARK_REGISTER_F(Join, left_anti_join_64bit_nulls)
->Unit(benchmark::kMillisecond)
->Args({50'000'000, 50'000'000})
->Args({40'000'000, 120'000'000})
->UseManualTime();
// left semi-join -------------------------------------------------------------
BENCHMARK_REGISTER_F(Join, left_semi_join_32bit)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({100'000, 1'000'000})
->Args({10'000'000, 10'000'000})
->Args({10'000'000, 40'000'000})
->Args({10'000'000, 100'000'000})
->Args({100'000'000, 100'000'000})
->Args({80'000'000, 240'000'000})
->UseManualTime();
BENCHMARK_REGISTER_F(Join, left_semi_join_64bit)
->Unit(benchmark::kMillisecond)
->Args({50'000'000, 50'000'000})
->Args({40'000'000, 120'000'000})
->UseManualTime();
BENCHMARK_REGISTER_F(Join, left_semi_join_32bit_nulls)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({100'000, 1'000'000})
->Args({10'000'000, 10'000'000})
->Args({10'000'000, 40'000'000})
->Args({10'000'000, 100'000'000})
->Args({100'000'000, 100'000'000})
->Args({80'000'000, 240'000'000})
->UseManualTime();
BENCHMARK_REGISTER_F(Join, left_semi_join_64bit_nulls)
->Unit(benchmark::kMillisecond)
->Args({50'000'000, 50'000'000})
->Args({40'000'000, 120'000'000})
->UseManualTime();
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/join/generate_input_tables.cuh
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/detail/utilities/device_atomics.cuh>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/distance.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/sequence.h>
#include <curand.h>
#include <curand_kernel.h>
#include <cassert>
__global__ static void init_curand(curandState* state, int const nstates)
{
int ithread = threadIdx.x + blockIdx.x * blockDim.x;
if (ithread < nstates) { curand_init(1234ULL, ithread, 0, state + ithread); }
}
template <typename key_type, typename size_type>
__global__ static void init_build_tbl(key_type* const build_tbl,
size_type const build_tbl_size,
int const multiplicity,
curandState* state,
int const num_states)
{
auto const start_idx = blockIdx.x * blockDim.x + threadIdx.x;
auto const stride = blockDim.x * gridDim.x;
assert(start_idx < num_states);
curandState localState = state[start_idx];
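  // Each build key is drawn uniformly over roughly build_tbl_size / multiplicity distinct
  // values, so on average `multiplicity` build rows share each key value.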
for (size_type idx = start_idx; idx < build_tbl_size; idx += stride) {
double const x = curand_uniform_double(&localState);
build_tbl[idx] = static_cast<key_type>(x * (build_tbl_size / multiplicity));
}
state[start_idx] = localState;
}
template <typename key_type, typename size_type>
__global__ void init_probe_tbl(key_type* const probe_tbl,
size_type const probe_tbl_size,
size_type const build_tbl_size,
key_type const rand_max,
double const selectivity,
int const multiplicity,
curandState* state,
int const num_states)
{
auto const start_idx = blockIdx.x * blockDim.x + threadIdx.x;
auto const stride = blockDim.x * gridDim.x;
assert(start_idx < num_states);
curandState localState = state[start_idx];
for (size_type idx = start_idx; idx < probe_tbl_size; idx += stride) {
key_type val;
double x = curand_uniform_double(&localState);
if (x <= selectivity) {
// x <= selectivity means this key in the probe table should be present in the build table, so
// we pick a key from [0, build_tbl_size / multiplicity]
x = curand_uniform_double(&localState);
val = static_cast<key_type>(x * (build_tbl_size / multiplicity));
} else {
// This key in the probe table should not be present in the build table, so we pick a key from
// [build_tbl_size, rand_max].
x = curand_uniform_double(&localState);
val = static_cast<key_type>(x * (rand_max - build_tbl_size) + build_tbl_size);
}
probe_tbl[idx] = val;
}
state[start_idx] = localState;
}
/**
 * generate_input_tables generates random integer input tables for database benchmarks.
 *
 * generate_input_tables generates two random integer input tables for database benchmarks,
 * mainly designed to benchmark join operations. The template parameters key_type and size_type
 * need to be built-in integer types (e.g. short, int, long long), and key_type needs to be
 * signed, as the lottery used internally relies on being able to use negative values to mark
 * drawn numbers. The tables need to be preallocated in a memory region accessible by the GPU
 * (e.g. device memory, zero-copy memory or unified memory). Each value in the build table
 * will be from [0, rand_max], and if uniq_build_tbl_keys is true it is ensured that each value
 * will be unique in the build table. Each value in the probe table will also be present in the
 * build table with probability selectivity, and otherwise will be a random number from
 * [0, rand_max] \setminus \{build_tbl\}.
 *
 * @param[out] build_tbl The build table to generate. Usually the smaller table used to
 * "build" the hash table in a hash based join implementation.
 * @param[in] build_tbl_size number of keys in the build table
 * @param[out] probe_tbl The probe table to generate. Usually the larger table used to
 * probe into the hash table created from the build table.
 * @param[in] probe_tbl_size number of keys in the probe table
 * @param[in] selectivity probability with which an element of the probe table is
 * present in the build table.
 * @param[in] multiplicity number of matches for each key.
 */
template <typename key_type, typename size_type>
void generate_input_tables(key_type* const build_tbl,
size_type const build_tbl_size,
key_type* const probe_tbl,
size_type const probe_tbl_size,
double const selectivity,
int const multiplicity)
{
  // With large values of rand_max, a lot of temporary storage is needed for the lottery. A more
  // memory-efficient implementation, at the expense of applying the selectivity less accurately,
  // would be to partition the random numbers into two intervals and let one table choose random
  // numbers from only one interval, while the other table selects from the same interval with
  // probability `selectivity` and from the other interval otherwise.
constexpr int block_size = 128;
// Maximize exposed parallelism while minimizing storage for curand state
int num_blocks_init_build_tbl{-1};
CUDF_CUDA_TRY(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&num_blocks_init_build_tbl, init_build_tbl<key_type, size_type>, block_size, 0));
int num_blocks_init_probe_tbl{-1};
CUDF_CUDA_TRY(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&num_blocks_init_probe_tbl, init_probe_tbl<key_type, size_type>, block_size, 0));
int dev_id{-1};
CUDF_CUDA_TRY(cudaGetDevice(&dev_id));
int num_sms{-1};
CUDF_CUDA_TRY(cudaDeviceGetAttribute(&num_sms, cudaDevAttrMultiProcessorCount, dev_id));
int const num_states =
num_sms * std::max(num_blocks_init_build_tbl, num_blocks_init_probe_tbl) * block_size;
rmm::device_uvector<curandState> devStates(num_states, cudf::get_default_stream());
init_curand<<<(num_states - 1) / block_size + 1, block_size>>>(devStates.data(), num_states);
CUDF_CHECK_CUDA(0);
init_build_tbl<key_type, size_type><<<num_sms * num_blocks_init_build_tbl, block_size>>>(
build_tbl, build_tbl_size, multiplicity, devStates.data(), num_states);
CUDF_CHECK_CUDA(0);
auto const rand_max = std::numeric_limits<key_type>::max();
init_probe_tbl<key_type, size_type>
<<<num_sms * num_blocks_init_build_tbl, block_size>>>(probe_tbl,
probe_tbl_size,
build_tbl_size,
rand_max,
selectivity,
multiplicity,
devStates.data(),
num_states);
CUDF_CHECK_CUDA(0);
}
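// Usage sketch (illustrative only, not part of the benchmark sources): generate build and
// probe keys with 30% selectivity into preallocated device buffers. The buffer names and
// sizes below are assumptions made for this example.
//
//   rmm::device_uvector<int32_t> build_keys(1'000'000, cudf::get_default_stream());
//   rmm::device_uvector<int32_t> probe_keys(4'000'000, cudf::get_default_stream());
//   generate_input_tables<int32_t, cudf::size_type>(
//     build_keys.data(), 1'000'000, probe_keys.data(), 4'000'000, 0.3, 1);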
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/join/conditional_join.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/join/join_common.hpp>
template <typename key_type, typename payload_type>
class ConditionalJoin : public cudf::benchmark {};
// For compatibility with the shared logic for equality (hash) joins, all of
// the join lambdas defined by these macros accept a null_equality parameter
// but ignore it (don't forward it to the underlying join implementation)
// because conditional joins do not use this parameter.
#define CONDITIONAL_INNER_JOIN_BENCHMARK_DEFINE(name, key_type, payload_type, nullable) \
BENCHMARK_TEMPLATE_DEFINE_F(ConditionalJoin, name, key_type, payload_type) \
(::benchmark::State & st) \
{ \
auto join = [](cudf::table_view const& left, \
cudf::table_view const& right, \
cudf::ast::operation binary_pred, \
cudf::null_equality compare_nulls) { \
return cudf::conditional_inner_join(left, right, binary_pred); \
}; \
BM_join<key_type, payload_type, nullable, join_t::CONDITIONAL>(st, join); \
}
CONDITIONAL_INNER_JOIN_BENCHMARK_DEFINE(conditional_inner_join_32bit, int32_t, int32_t, false);
CONDITIONAL_INNER_JOIN_BENCHMARK_DEFINE(conditional_inner_join_64bit, int64_t, int64_t, false);
CONDITIONAL_INNER_JOIN_BENCHMARK_DEFINE(conditional_inner_join_32bit_nulls, int32_t, int32_t, true);
CONDITIONAL_INNER_JOIN_BENCHMARK_DEFINE(conditional_inner_join_64bit_nulls, int64_t, int64_t, true);
#define CONDITIONAL_LEFT_JOIN_BENCHMARK_DEFINE(name, key_type, payload_type, nullable) \
BENCHMARK_TEMPLATE_DEFINE_F(ConditionalJoin, name, key_type, payload_type) \
(::benchmark::State & st) \
{ \
auto join = [](cudf::table_view const& left, \
cudf::table_view const& right, \
cudf::ast::operation binary_pred, \
cudf::null_equality compare_nulls) { \
return cudf::conditional_left_join(left, right, binary_pred); \
}; \
BM_join<key_type, payload_type, nullable, join_t::CONDITIONAL>(st, join); \
}
CONDITIONAL_LEFT_JOIN_BENCHMARK_DEFINE(conditional_left_join_32bit, int32_t, int32_t, false);
CONDITIONAL_LEFT_JOIN_BENCHMARK_DEFINE(conditional_left_join_64bit, int64_t, int64_t, false);
CONDITIONAL_LEFT_JOIN_BENCHMARK_DEFINE(conditional_left_join_32bit_nulls, int32_t, int32_t, true);
CONDITIONAL_LEFT_JOIN_BENCHMARK_DEFINE(conditional_left_join_64bit_nulls, int64_t, int64_t, true);
#define CONDITIONAL_FULL_JOIN_BENCHMARK_DEFINE(name, key_type, payload_type, nullable) \
BENCHMARK_TEMPLATE_DEFINE_F(ConditionalJoin, name, key_type, payload_type) \
(::benchmark::State & st) \
{ \
auto join = [](cudf::table_view const& left, \
cudf::table_view const& right, \
cudf::ast::operation binary_pred, \
cudf::null_equality compare_nulls) { \
return cudf::conditional_full_join(left, right, binary_pred); \
}; \
BM_join<key_type, payload_type, nullable, join_t::CONDITIONAL>(st, join); \
}
CONDITIONAL_FULL_JOIN_BENCHMARK_DEFINE(conditional_full_join_32bit, int32_t, int32_t, false);
CONDITIONAL_FULL_JOIN_BENCHMARK_DEFINE(conditional_full_join_64bit, int64_t, int64_t, false);
CONDITIONAL_FULL_JOIN_BENCHMARK_DEFINE(conditional_full_join_32bit_nulls, int32_t, int32_t, true);
CONDITIONAL_FULL_JOIN_BENCHMARK_DEFINE(conditional_full_join_64bit_nulls, int64_t, int64_t, true);
#define CONDITIONAL_LEFT_ANTI_JOIN_BENCHMARK_DEFINE(name, key_type, payload_type, nullable) \
BENCHMARK_TEMPLATE_DEFINE_F(ConditionalJoin, name, key_type, payload_type) \
(::benchmark::State & st) \
{ \
auto join = [](cudf::table_view const& left, \
cudf::table_view const& right, \
cudf::ast::operation binary_pred, \
cudf::null_equality compare_nulls) { \
return cudf::conditional_left_anti_join(left, right, binary_pred); \
}; \
BM_join<key_type, payload_type, nullable, join_t::CONDITIONAL>(st, join); \
}
CONDITIONAL_LEFT_ANTI_JOIN_BENCHMARK_DEFINE(conditional_left_anti_join_32bit,
int32_t,
int32_t,
false);
CONDITIONAL_LEFT_ANTI_JOIN_BENCHMARK_DEFINE(conditional_left_anti_join_64bit,
int64_t,
int64_t,
false);
CONDITIONAL_LEFT_ANTI_JOIN_BENCHMARK_DEFINE(conditional_left_anti_join_32bit_nulls,
int32_t,
int32_t,
true);
CONDITIONAL_LEFT_ANTI_JOIN_BENCHMARK_DEFINE(conditional_left_anti_join_64bit_nulls,
int64_t,
int64_t,
true);
#define CONDITIONAL_LEFT_SEMI_JOIN_BENCHMARK_DEFINE(name, key_type, payload_type, nullable) \
BENCHMARK_TEMPLATE_DEFINE_F(ConditionalJoin, name, key_type, payload_type) \
(::benchmark::State & st) \
{ \
auto join = [](cudf::table_view const& left, \
cudf::table_view const& right, \
cudf::ast::operation binary_pred, \
cudf::null_equality compare_nulls) { \
return cudf::conditional_left_semi_join(left, right, binary_pred); \
}; \
BM_join<key_type, payload_type, nullable, join_t::CONDITIONAL>(st, join); \
}
CONDITIONAL_LEFT_SEMI_JOIN_BENCHMARK_DEFINE(conditional_left_semi_join_32bit,
int32_t,
int32_t,
false);
CONDITIONAL_LEFT_SEMI_JOIN_BENCHMARK_DEFINE(conditional_left_semi_join_64bit,
int64_t,
int64_t,
false);
CONDITIONAL_LEFT_SEMI_JOIN_BENCHMARK_DEFINE(conditional_left_semi_join_32bit_nulls,
int32_t,
int32_t,
true);
CONDITIONAL_LEFT_SEMI_JOIN_BENCHMARK_DEFINE(conditional_left_semi_join_64bit_nulls,
int64_t,
int64_t,
true);
// inner join -----------------------------------------------------------------------
BENCHMARK_REGISTER_F(ConditionalJoin, conditional_inner_join_32bit)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({400'000, 100'000})
->Args({100'000, 1'000'000})
->UseManualTime();
BENCHMARK_REGISTER_F(ConditionalJoin, conditional_inner_join_64bit)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({400'000, 100'000})
->Args({100'000, 1'000'000})
->UseManualTime();
BENCHMARK_REGISTER_F(ConditionalJoin, conditional_inner_join_32bit_nulls)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({400'000, 100'000})
->Args({100'000, 1'000'000})
->UseManualTime();
BENCHMARK_REGISTER_F(ConditionalJoin, conditional_inner_join_64bit_nulls)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({400'000, 100'000})
->Args({100'000, 1'000'000})
->UseManualTime();
// left join -----------------------------------------------------------------------
BENCHMARK_REGISTER_F(ConditionalJoin, conditional_left_join_32bit)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({100'000, 1'000'000})
->UseManualTime();
BENCHMARK_REGISTER_F(ConditionalJoin, conditional_left_join_64bit)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({100'000, 1'000'000})
->UseManualTime();
BENCHMARK_REGISTER_F(ConditionalJoin, conditional_left_join_32bit_nulls)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({100'000, 1'000'000})
->UseManualTime();
BENCHMARK_REGISTER_F(ConditionalJoin, conditional_left_join_64bit_nulls)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({100'000, 1'000'000})
->UseManualTime();
// full join -----------------------------------------------------------------------
BENCHMARK_REGISTER_F(ConditionalJoin, conditional_full_join_32bit)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({100'000, 1'000'000})
->UseManualTime();
BENCHMARK_REGISTER_F(ConditionalJoin, conditional_full_join_64bit)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({100'000, 1'000'000})
->UseManualTime();
BENCHMARK_REGISTER_F(ConditionalJoin, conditional_full_join_32bit_nulls)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({100'000, 1'000'000})
->UseManualTime();
BENCHMARK_REGISTER_F(ConditionalJoin, conditional_full_join_64bit_nulls)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({100'000, 1'000'000})
->UseManualTime();
// left anti-join -------------------------------------------------------------
BENCHMARK_REGISTER_F(ConditionalJoin, conditional_left_anti_join_32bit)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({100'000, 1'000'000})
->UseManualTime();
BENCHMARK_REGISTER_F(ConditionalJoin, conditional_left_anti_join_64bit)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({100'000, 1'000'000})
->UseManualTime();
BENCHMARK_REGISTER_F(ConditionalJoin, conditional_left_anti_join_32bit_nulls)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({100'000, 1'000'000})
->UseManualTime();
BENCHMARK_REGISTER_F(ConditionalJoin, conditional_left_anti_join_64bit_nulls)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({100'000, 1'000'000})
->UseManualTime();
// left semi-join -------------------------------------------------------------
BENCHMARK_REGISTER_F(ConditionalJoin, conditional_left_semi_join_32bit)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({100'000, 1'000'000})
->UseManualTime();
BENCHMARK_REGISTER_F(ConditionalJoin, conditional_left_semi_join_64bit)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({100'000, 1'000'000})
->UseManualTime();
BENCHMARK_REGISTER_F(ConditionalJoin, conditional_left_semi_join_32bit_nulls)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({100'000, 1'000'000})
->UseManualTime();
BENCHMARK_REGISTER_F(ConditionalJoin, conditional_left_semi_join_64bit_nulls)
->Unit(benchmark::kMillisecond)
->Args({100'000, 100'000})
->Args({100'000, 400'000})
->Args({100'000, 1'000'000})
->UseManualTime();
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/transpose/transpose.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/table/table.hpp>
#include <cudf/transpose.hpp>
#include <cudf/types.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
static void BM_transpose(benchmark::State& state)
{
auto count = state.range(0);
constexpr auto column_type_id = cudf::type_id::INT32;
auto int_column_generator =
thrust::make_transform_iterator(thrust::counting_iterator(0), [count](int i) {
return cudf::make_numeric_column(
cudf::data_type{column_type_id}, count, cudf::mask_state::ALL_VALID);
});
auto input_table = cudf::table(std::vector(int_column_generator, int_column_generator + count));
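  // The generated table is square: `count` INT32 columns with `count` rows each, all valid.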
auto input = input_table.view();
for (auto _ : state) {
cuda_event_timer raii(state, true);
auto output = cudf::transpose(input);
}
// Collect memory statistics.
auto const bytes_read = static_cast<uint64_t>(input.num_columns()) * input.num_rows() *
sizeof(cudf::id_to_type<column_type_id>);
auto const bytes_written = bytes_read;
// Account for nullability in input and output.
auto const null_bytes = 2 * static_cast<uint64_t>(input.num_columns()) *
cudf::bitmask_allocation_size_bytes(input.num_rows());
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) *
(bytes_read + bytes_written + null_bytes));
}
class Transpose : public cudf::benchmark {};
#define TRANSPOSE_BM_BENCHMARK_DEFINE(name) \
BENCHMARK_DEFINE_F(Transpose, name)(::benchmark::State & state) { BM_transpose(state); } \
BENCHMARK_REGISTER_F(Transpose, name) \
->RangeMultiplier(4) \
->Range(4, 4 << 13) \
->UseManualTime() \
->Unit(benchmark::kMillisecond);
TRANSPOSE_BM_BENCHMARK_DEFINE(transpose_simple);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/stream_compaction/unique.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/lists/list_view.hpp>
#include <cudf/sorting.hpp>
#include <cudf/stream_compaction.hpp>
#include <cudf/types.hpp>
#include <nvbench/nvbench.cuh>
// necessary for custom enum types
// see: https://github.com/NVIDIA/nvbench/blob/main/examples/enums.cu
NVBENCH_DECLARE_ENUM_TYPE_STRINGS(
// Enum type:
cudf::duplicate_keep_option,
// Callable to generate input strings:
[](cudf::duplicate_keep_option option) {
switch (option) {
case cudf::duplicate_keep_option::KEEP_FIRST: return "KEEP_FIRST";
case cudf::duplicate_keep_option::KEEP_LAST: return "KEEP_LAST";
case cudf::duplicate_keep_option::KEEP_NONE: return "KEEP_NONE";
default: return "ERROR";
}
},
// Callable to generate descriptions:
[](auto) { return std::string{}; })
NVBENCH_DECLARE_TYPE_STRINGS(cudf::timestamp_ms, "cudf::timestamp_ms", "cudf::timestamp_ms");
template <typename Type, cudf::duplicate_keep_option Keep>
void nvbench_unique(nvbench::state& state, nvbench::type_list<Type, nvbench::enum_type<Keep>>)
{
// KEEP_FIRST and KEEP_ANY are equivalent for unique
if constexpr (not std::is_same_v<Type, int32_t> and
Keep == cudf::duplicate_keep_option::KEEP_ANY) {
state.skip("Skip unwanted benchmarks.");
}
cudf::size_type const num_rows = state.get_int64("NumRows");
auto const sorting = state.get_int64("Sort");
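  // cudf::unique only drops *consecutive* duplicate rows, so the Sort=1 axis pre-sorts the
  // input (see below) to measure the fully de-duplicating case.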
data_profile profile = data_profile_builder().cardinality(0).null_probability(0.01).distribution(
cudf::type_to_id<Type>(), distribution_id::UNIFORM, 0, num_rows / 100);
auto source_column = create_random_column(cudf::type_to_id<Type>(), row_count{num_rows}, profile);
auto input_column = source_column->view();
auto input_table = cudf::table_view({input_column, input_column, input_column, input_column});
auto const run_bench = [&](auto const input) {
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto result = cudf::unique(input, {0}, Keep, cudf::null_equality::EQUAL);
});
};
if (sorting) {
auto const sort_order = cudf::sorted_order(input_table);
auto const sort_table = cudf::gather(input_table, *sort_order);
run_bench(*sort_table);
} else {
run_bench(input_table);
}
}
using data_type = nvbench::type_list<bool, int8_t, int32_t, int64_t, float, cudf::timestamp_ms>;
using keep_option = nvbench::enum_type_list<cudf::duplicate_keep_option::KEEP_FIRST,
cudf::duplicate_keep_option::KEEP_LAST,
cudf::duplicate_keep_option::KEEP_NONE>;
NVBENCH_BENCH_TYPES(nvbench_unique, NVBENCH_TYPE_AXES(data_type, keep_option))
.set_name("unique")
.set_type_axes_names({"Type", "KeepOption"})
.add_int64_axis("NumRows", {10'000, 100'000, 1'000'000, 10'000'000})
.add_int64_axis("Sort", {0, 1});
template <typename Type, cudf::duplicate_keep_option Keep>
void nvbench_unique_list(nvbench::state& state, nvbench::type_list<Type, nvbench::enum_type<Keep>>)
{
// KEEP_FIRST and KEEP_ANY are equivalent for unique
if constexpr (Keep == cudf::duplicate_keep_option::KEEP_ANY) {
state.skip("Skip unwanted benchmarks.");
}
auto const size = state.get_int64("ColumnSize");
auto const dtype = cudf::type_to_id<Type>();
double const null_probability = state.get_float64("null_probability");
auto const sorting = state.get_int64("Sort");
auto builder = data_profile_builder().null_probability(null_probability);
if (dtype == cudf::type_id::LIST) {
builder.distribution(dtype, distribution_id::UNIFORM, 0, 4)
.distribution(cudf::type_id::INT32, distribution_id::UNIFORM, 0, 4)
.list_depth(1);
} else {
// We're comparing unique() on a non-nested column to that on a list column with the same
// number of unique rows. The max list size is 4 and the number of unique values in the
    // list's child is 5. So the number of unique rows in the list = 1 + 5 + 5^2 + 5^3 + 5^4 = 781.
    // We want this column to also have 781 unique values.
builder.distribution(dtype, distribution_id::UNIFORM, 0, 781);
}
auto const input_table = create_random_table(
{dtype}, table_size_bytes{static_cast<size_t>(size)}, data_profile{builder}, 0);
auto const run_bench = [&](auto const input) {
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto result = cudf::unique(input, {0}, Keep, cudf::null_equality::EQUAL);
});
};
if (sorting) {
auto const sort_order = cudf::sorted_order(*input_table);
auto const sort_table = cudf::gather(*input_table, *sort_order);
run_bench(*sort_table);
} else {
run_bench(*input_table);
}
}
NVBENCH_BENCH_TYPES(nvbench_unique_list,
NVBENCH_TYPE_AXES(nvbench::type_list<int32_t, cudf::list_view>, keep_option))
.set_name("unique_list")
.set_type_axes_names({"Type", "KeepOption"})
.add_float64_axis("null_probability", {0.0, 0.1})
.add_int64_axis("ColumnSize", {10'000, 100'000, 1'000'000, 10'000'000, 100'000'000})
.add_int64_axis("Sort", {0, 1});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/stream_compaction/stable_distinct.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/lists/list_view.hpp>
#include <cudf/stream_compaction.hpp>
#include <cudf/types.hpp>
#include <nvbench/nvbench.cuh>
NVBENCH_DECLARE_TYPE_STRINGS(cudf::timestamp_ms, "cudf::timestamp_ms", "cudf::timestamp_ms");
template <typename Type>
void nvbench_stable_distinct(nvbench::state& state, nvbench::type_list<Type>)
{
cudf::size_type const num_rows = state.get_int64("NumRows");
data_profile profile = data_profile_builder().cardinality(0).null_probability(0.01).distribution(
cudf::type_to_id<Type>(), distribution_id::UNIFORM, 0, 100);
auto source_column = create_random_column(cudf::type_to_id<Type>(), row_count{num_rows}, profile);
auto input_column = source_column->view();
auto input_table = cudf::table_view({input_column, input_column, input_column, input_column});
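  // stable_distinct() behaves like distinct() but guarantees that the retained rows keep their
  // original relative order in the output.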
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto result = cudf::stable_distinct(input_table,
{0},
cudf::duplicate_keep_option::KEEP_ANY,
cudf::null_equality::EQUAL,
cudf::nan_equality::ALL_EQUAL);
});
}
using data_type = nvbench::type_list<bool, int8_t, int32_t, int64_t, float, cudf::timestamp_ms>;
NVBENCH_BENCH_TYPES(nvbench_stable_distinct, NVBENCH_TYPE_AXES(data_type))
.set_name("stable_distinct")
.set_type_axes_names({"Type"})
.add_int64_axis("NumRows", {10'000, 100'000, 1'000'000, 10'000'000});
template <typename Type>
void nvbench_stable_distinct_list(nvbench::state& state, nvbench::type_list<Type>)
{
auto const size = state.get_int64("ColumnSize");
auto const dtype = cudf::type_to_id<Type>();
double const null_probability = state.get_float64("null_probability");
auto builder = data_profile_builder().null_probability(null_probability);
if (dtype == cudf::type_id::LIST) {
builder.distribution(dtype, distribution_id::UNIFORM, 0, 4)
.distribution(cudf::type_id::INT32, distribution_id::UNIFORM, 0, 4)
.list_depth(1);
} else {
    // We're comparing stable_distinct() on a non-nested column to that on a list column with the
    // same number of distinct rows. The max list size is 4 and the number of distinct values in
    // the list's child is 5, so the number of distinct rows in the list = 1 + 5 + 5^2 + 5^3 +
    // 5^4 = 781. We want this column to also have 781 distinct values.
builder.distribution(dtype, distribution_id::UNIFORM, 0, 781);
}
auto const table = create_random_table(
{dtype}, table_size_bytes{static_cast<size_t>(size)}, data_profile{builder}, 0);
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto result = cudf::stable_distinct(*table,
{0},
cudf::duplicate_keep_option::KEEP_ANY,
cudf::null_equality::EQUAL,
cudf::nan_equality::ALL_EQUAL);
});
}
NVBENCH_BENCH_TYPES(nvbench_stable_distinct_list,
NVBENCH_TYPE_AXES(nvbench::type_list<int32_t, cudf::list_view>))
.set_name("stable_distinct_list")
.set_type_axes_names({"Type"})
.add_float64_axis("null_probability", {0.0, 0.1})
.add_int64_axis("ColumnSize", {100'000'000});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/stream_compaction/unique_count.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/sorting.hpp>
#include <cudf/stream_compaction.hpp>
#include <cudf/types.hpp>
#include <nvbench/nvbench.cuh>
template <typename Type>
void nvbench_unique_count(nvbench::state& state, nvbench::type_list<Type>)
{
auto const num_rows = static_cast<cudf::size_type>(state.get_int64("NumRows"));
auto const nulls = state.get_float64("NullProbability");
data_profile profile = data_profile_builder().cardinality(0).null_probability(nulls).distribution(
cudf::type_to_id<Type>(), distribution_id::UNIFORM, 0, num_rows / 100);
auto source_column = create_random_column(cudf::type_to_id<Type>(), row_count{num_rows}, profile);
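  // unique_count() only counts boundaries between consecutive equal rows, so the input is sorted
  // first to group duplicates together.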
auto sorted_table = cudf::sort(cudf::table_view({source_column->view()}));
auto input = sorted_table->view();
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
cudf::unique_count(input, cudf::null_equality::EQUAL);
});
}
using data_type = nvbench::type_list<bool, int8_t, int32_t, int64_t, float, cudf::timestamp_ms>;
NVBENCH_BENCH_TYPES(nvbench_unique_count, NVBENCH_TYPE_AXES(data_type))
.set_name("unique_count")
.set_type_axes_names({"Type"})
.add_int64_axis("NumRows", {10'000, 100'000, 1'000'000, 10'000'000})
.add_float64_axis("NullProbability", {0.0, 0.1});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/stream_compaction/distinct.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/lists/list_view.hpp>
#include <cudf/stream_compaction.hpp>
#include <cudf/types.hpp>
#include <nvbench/nvbench.cuh>
NVBENCH_DECLARE_TYPE_STRINGS(cudf::timestamp_ms, "cudf::timestamp_ms", "cudf::timestamp_ms");
template <typename Type>
void nvbench_distinct(nvbench::state& state, nvbench::type_list<Type>)
{
cudf::size_type const num_rows = state.get_int64("NumRows");
data_profile profile = data_profile_builder().cardinality(0).null_probability(0.01).distribution(
cudf::type_to_id<Type>(), distribution_id::UNIFORM, 0, 100);
auto source_column = create_random_column(cudf::type_to_id<Type>(), row_count{num_rows}, profile);
auto input_column = source_column->view();
auto input_table = cudf::table_view({input_column, input_column, input_column, input_column});
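  // KEEP_ANY places no requirement on which duplicate survives, which typically allows the
  // fastest hash-based path through distinct().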
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto result = cudf::distinct(input_table,
{0},
cudf::duplicate_keep_option::KEEP_ANY,
cudf::null_equality::EQUAL,
cudf::nan_equality::ALL_EQUAL);
});
}
using data_type = nvbench::type_list<bool, int8_t, int32_t, int64_t, float, cudf::timestamp_ms>;
NVBENCH_BENCH_TYPES(nvbench_distinct, NVBENCH_TYPE_AXES(data_type))
.set_name("distinct")
.set_type_axes_names({"Type"})
.add_int64_axis("NumRows", {10'000, 100'000, 1'000'000, 10'000'000});
template <typename Type>
void nvbench_distinct_list(nvbench::state& state, nvbench::type_list<Type>)
{
auto const size = state.get_int64("ColumnSize");
auto const dtype = cudf::type_to_id<Type>();
double const null_probability = state.get_float64("null_probability");
auto builder = data_profile_builder().null_probability(null_probability);
if (dtype == cudf::type_id::LIST) {
builder.distribution(dtype, distribution_id::UNIFORM, 0, 4)
.distribution(cudf::type_id::INT32, distribution_id::UNIFORM, 0, 4)
.list_depth(1);
} else {
// We're comparing distinct() on a non-nested column to that on a list column with the same
// number of distinct rows. The max list size is 4 and the number of distinct values in the
// list's child is 5. So the number of distinct rows in the list = 1 + 5 + 5^2 + 5^3 + 5^4 = 781
// We want this column to also have 781 distinct values.
builder.distribution(dtype, distribution_id::UNIFORM, 0, 781);
}
auto const table = create_random_table(
{dtype}, table_size_bytes{static_cast<size_t>(size)}, data_profile{builder}, 0);
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto result = cudf::distinct(*table,
{0},
cudf::duplicate_keep_option::KEEP_ANY,
cudf::null_equality::EQUAL,
cudf::nan_equality::ALL_EQUAL);
});
}
NVBENCH_BENCH_TYPES(nvbench_distinct_list,
NVBENCH_TYPE_AXES(nvbench::type_list<int32_t, cudf::list_view>))
.set_name("distinct_list")
.set_type_axes_names({"Type"})
.add_float64_axis("null_probability", {0.0, 0.1})
.add_int64_axis("ColumnSize", {100'000'000});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/stream_compaction/apply_boolean_mask.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <fixture/benchmark_fixture.hpp>
#include <synchronization/synchronization.hpp>
#include <cudf/stream_compaction.hpp>
namespace {
constexpr cudf::size_type hundredM = 1e8;
constexpr cudf::size_type tenM = 1e7;
constexpr cudf::size_type tenK = 1e4;
constexpr cudf::size_type fifty_percent = 50;
void percent_range(benchmark::internal::Benchmark* b)
{
b->Unit(benchmark::kMillisecond);
for (int percent = 0; percent <= 100; percent += 10)
b->Args({hundredM, percent});
}
void size_range(benchmark::internal::Benchmark* b)
{
b->Unit(benchmark::kMillisecond);
for (int size = tenK; size <= hundredM; size *= 10)
b->Args({size, fifty_percent});
}
template <typename T>
void calculate_bandwidth(benchmark::State& state, cudf::size_type num_columns)
{
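  // Estimate the bytes read and written by apply_boolean_mask so google-benchmark can report an
  // effective bandwidth for this run.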
cudf::size_type const column_size{static_cast<cudf::size_type>(state.range(0))};
cudf::size_type const percent_true{static_cast<cudf::size_type>(state.range(1))};
float const fraction = percent_true / 100.f;
cudf::size_type const column_size_out = fraction * column_size;
int64_t const mask_size =
sizeof(bool) * column_size + cudf::bitmask_allocation_size_bytes(column_size);
int64_t const validity_bytes_in = (fraction >= 1.0f / 32)
? cudf::bitmask_allocation_size_bytes(column_size)
: 4 * column_size_out;
int64_t const validity_bytes_out = cudf::bitmask_allocation_size_bytes(column_size_out);
int64_t const column_bytes_out = sizeof(T) * column_size_out;
int64_t const column_bytes_in = column_bytes_out; // we only read unmasked inputs
int64_t const bytes_read =
(column_bytes_in + validity_bytes_in) * num_columns + // reading columns
mask_size; // reading boolean mask
int64_t const bytes_written =
(column_bytes_out + validity_bytes_out) * num_columns; // writing columns
state.SetItemsProcessed(state.iterations() * column_size * num_columns);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * (bytes_read + bytes_written));
}
} // namespace
template <class T>
void BM_apply_boolean_mask(benchmark::State& state, cudf::size_type num_columns)
{
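  // Build num_columns random columns plus a BOOL8 mask where roughly percent_true percent of the
  // entries are true, then time cudf::apply_boolean_mask on the resulting table.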
cudf::size_type const column_size{static_cast<cudf::size_type>(state.range(0))};
cudf::size_type const percent_true{static_cast<cudf::size_type>(state.range(1))};
data_profile profile = data_profile_builder().cardinality(0).null_probability(0.0).distribution(
cudf::type_to_id<T>(), distribution_id::UNIFORM, 0, 100);
auto source_table = create_random_table(
cycle_dtypes({cudf::type_to_id<T>()}, num_columns), row_count{column_size}, profile);
profile.set_bool_probability_true(percent_true / 100.0);
profile.set_null_probability(std::nullopt); // no null mask
auto mask = create_random_column(cudf::type_id::BOOL8, row_count{column_size}, profile);
for (auto _ : state) {
cuda_event_timer raii(state, true);
auto result = cudf::apply_boolean_mask(*source_table, mask->view());
}
calculate_bandwidth<T>(state, num_columns);
}
template <class T>
class ApplyBooleanMask : public cudf::benchmark {
public:
using TypeParam = T;
};
#define ABM_BENCHMARK_DEFINE(name, type, n_columns) \
BENCHMARK_TEMPLATE_DEFINE_F(ApplyBooleanMask, name, type)(::benchmark::State & st) \
{ \
BM_apply_boolean_mask<TypeParam>(st, n_columns); \
}
ABM_BENCHMARK_DEFINE(float_1_col, float, 1);
ABM_BENCHMARK_DEFINE(float_2_col, float, 2);
ABM_BENCHMARK_DEFINE(float_4_col, float, 4);
// shmoo 1, 2, 4 column float across percentage true
BENCHMARK_REGISTER_F(ApplyBooleanMask, float_1_col)->Apply(percent_range);
BENCHMARK_REGISTER_F(ApplyBooleanMask, float_2_col)->Apply(percent_range);
BENCHMARK_REGISTER_F(ApplyBooleanMask, float_4_col)->Apply(percent_range);
// shmoo 1, 2, 4 column float across column sizes with 50% true
BENCHMARK_REGISTER_F(ApplyBooleanMask, float_1_col)->Apply(size_range);
BENCHMARK_REGISTER_F(ApplyBooleanMask, float_2_col)->Apply(size_range);
BENCHMARK_REGISTER_F(ApplyBooleanMask, float_4_col)->Apply(size_range);
// spot benchmark other types
ABM_BENCHMARK_DEFINE(int8_1_col, int8_t, 1);
ABM_BENCHMARK_DEFINE(int16_1_col, int16_t, 1);
ABM_BENCHMARK_DEFINE(int32_1_col, int32_t, 1);
ABM_BENCHMARK_DEFINE(int64_1_col, int64_t, 1);
ABM_BENCHMARK_DEFINE(double_1_col, double, 1);
BENCHMARK_REGISTER_F(ApplyBooleanMask, int8_1_col)->Args({tenM, fifty_percent});
BENCHMARK_REGISTER_F(ApplyBooleanMask, int16_1_col)->Args({tenM, fifty_percent});
BENCHMARK_REGISTER_F(ApplyBooleanMask, int32_1_col)->Args({tenM, fifty_percent});
BENCHMARK_REGISTER_F(ApplyBooleanMask, int64_1_col)->Args({tenM, fifty_percent});
BENCHMARK_REGISTER_F(ApplyBooleanMask, double_1_col)->Args({tenM, fifty_percent});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/stream_compaction/distinct_count.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <cudf/stream_compaction.hpp>
#include <nvbench/nvbench.cuh>
template <typename Type>
static void bench_distinct_count(nvbench::state& state, nvbench::type_list<Type>)
{
auto const dtype = cudf::type_to_id<Type>();
auto const size = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const null_probability = state.get_float64("null_probability");
data_profile profile =
data_profile_builder().distribution(dtype, distribution_id::UNIFORM, 0, size / 100);
if (null_probability > 0) {
profile.set_null_probability({null_probability});
} else {
profile.set_null_probability(std::nullopt);
}
auto const data_table = create_random_table({dtype}, row_count{size}, profile);
auto const& data_column = data_table->get_column(0);
auto const input_table = cudf::table_view{{data_column, data_column, data_column}};
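  // The same column is used three times, which makes row comparisons span multiple columns
  // without changing the number of distinct rows.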
// Collect memory statistics for input and output.
state.add_global_memory_reads<Type>(input_table.num_rows() * input_table.num_columns());
state.add_global_memory_writes<cudf::size_type>(1);
if (null_probability > 0) {
state.add_global_memory_reads<nvbench::int8_t>(
input_table.num_columns() * cudf::bitmask_allocation_size_bytes(input_table.num_rows()));
}
auto mem_stats_logger = cudf::memory_stats_logger(); // init stats logger
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
cudf::distinct_count(input_table, cudf::null_equality::EQUAL);
});
state.add_buffer_size(
mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage");
}
using data_type = nvbench::type_list<int32_t, int64_t, float, double>;
NVBENCH_BENCH_TYPES(bench_distinct_count, NVBENCH_TYPE_AXES(data_type))
.set_name("distinct_count")
.add_int64_axis("num_rows",
{
10000, // 10k
100000, // 100k
1000000, // 1M
10000000, // 10M
100000000, // 100M
})
.add_float64_axis("null_probability", {0, 0.5});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/char_types.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf/strings/char_types/char_types.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
static void bench_char_types(nvbench::state& state)
{
auto const num_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const row_width = static_cast<cudf::size_type>(state.get_int64("row_width"));
auto const api_type = state.get_string("api");
if (static_cast<std::size_t>(num_rows) * static_cast<std::size_t>(row_width) >=
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max())) {
state.skip("Skip benchmarks greater than size_type limit");
}
data_profile const table_profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, row_width);
auto const table =
create_random_table({cudf::type_id::STRING}, row_count{num_rows}, table_profile);
cudf::strings_column_view input(table->view().column(0));
auto input_types = cudf::strings::string_character_types::SPACE;
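  // all_characters_of_type() produces one BOOL8 per row, while filter_characters_of_type()
  // produces a new strings column, hence the different output-size estimates below.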
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
// gather some throughput statistics as well
auto chars_size = input.chars_size();
state.add_global_memory_reads<nvbench::int8_t>(chars_size); // all bytes are read;
if (api_type == "all") {
state.add_global_memory_writes<nvbench::int8_t>(num_rows); // output is a bool8 per row
} else {
state.add_global_memory_writes<nvbench::int8_t>(chars_size);
}
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
if (api_type == "all") {
auto result = cudf::strings::all_characters_of_type(input, input_types);
} else {
auto result = cudf::strings::filter_characters_of_type(input, input_types);
}
});
}
NVBENCH_BENCH(bench_char_types)
.set_name("char_types")
.add_int64_axis("row_width", {32, 64, 128, 256, 512, 1024, 2048, 4096})
.add_int64_axis("num_rows", {4096, 32768, 262144, 2097152, 16777216})
.add_string_axis("api", {"all", "filter"});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/url_decode.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/filling.hpp>
#include <cudf/strings/convert/convert_urls.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/random.h>
#include <thrust/tuple.h>
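// Fills a character buffer with 'a's, randomly replacing some positions with the three-byte
// escape sequence "%20" so that url_decode has escape sequences to decode.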
struct url_string_generator {
char* chars;
double esc_seq_chance;
thrust::minstd_rand engine;
thrust::uniform_real_distribution<float> esc_seq_dist;
url_string_generator(char* c, double esc_seq_chance, thrust::minstd_rand& engine)
: chars(c), esc_seq_chance(esc_seq_chance), engine(engine), esc_seq_dist(0, 1)
{
}
__device__ void operator()(thrust::tuple<cudf::size_type, cudf::size_type> str_begin_end)
{
auto begin = thrust::get<0>(str_begin_end);
auto end = thrust::get<1>(str_begin_end);
engine.discard(begin);
for (auto i = begin; i < end; ++i) {
if (esc_seq_dist(engine) < esc_seq_chance and i < end - 3) {
chars[i] = '%';
chars[i + 1] = '2';
chars[i + 2] = '0';
i += 2;
} else {
chars[i] = 'a';
}
}
}
};
auto generate_column(cudf::size_type num_rows, cudf::size_type chars_per_row, double esc_seq_chance)
{
std::vector<std::string> strings{std::string(chars_per_row, 'a')};
auto col_1a = cudf::test::strings_column_wrapper(strings.begin(), strings.end());
auto table_a = cudf::repeat(cudf::table_view{{col_1a}}, num_rows);
auto result_col = std::move(table_a->release()[0]); // string column with num_rows aaa...
auto chars_col = result_col->child(cudf::strings_column_view::chars_column_index).mutable_view();
auto offset_col = result_col->child(cudf::strings_column_view::offsets_column_index).view();
auto engine = thrust::default_random_engine{};
thrust::for_each_n(thrust::device,
thrust::make_zip_iterator(offset_col.begin<cudf::size_type>(),
offset_col.begin<cudf::size_type>() + 1),
num_rows,
url_string_generator{chars_col.begin<char>(), esc_seq_chance, engine});
return result_col;
}
class UrlDecode : public cudf::benchmark {};
void BM_url_decode(benchmark::State& state, int esc_seq_pct)
{
cudf::size_type const num_rows = state.range(0);
cudf::size_type const chars_per_row = state.range(1);
auto column = generate_column(num_rows, chars_per_row, esc_seq_pct / 100.0);
auto strings_view = cudf::strings_column_view(column->view());
for (auto _ : state) {
cuda_event_timer raii(state, true, cudf::get_default_stream());
auto result = cudf::strings::url_decode(strings_view);
}
state.SetBytesProcessed(state.iterations() * num_rows *
(chars_per_row + sizeof(cudf::size_type)));
}
#define URLD_BENCHMARK_DEFINE(esc_seq_pct) \
BENCHMARK_DEFINE_F(UrlDecode, esc_seq_pct) \
(::benchmark::State & st) { BM_url_decode(st, esc_seq_pct); } \
BENCHMARK_REGISTER_F(UrlDecode, esc_seq_pct) \
->Args({100000000, 10}) \
->Args({10000000, 100}) \
->Args({1000000, 1000}) \
->Unit(benchmark::kMillisecond) \
->UseManualTime();
URLD_BENCHMARK_DEFINE(10)
URLD_BENCHMARK_DEFINE(50)
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/convert_durations.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/strings/convert/convert_durations.hpp>
#include <cudf/types.hpp>
#include <cudf/wrappers/durations.hpp>
#include <algorithm>
#include <random>
class DurationsToString : public cudf::benchmark {};
template <class TypeParam>
void BM_convert_from_durations(benchmark::State& state)
{
cudf::size_type const source_size = state.range(0);
// Every element is valid
auto data = cudf::detail::make_counting_transform_iterator(
0, [source_size](auto i) { return TypeParam{i - source_size / 2}; });
cudf::test::fixed_width_column_wrapper<TypeParam> source_durations(data, data + source_size);
for (auto _ : state) {
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
cudf::strings::from_durations(source_durations, "%D days %H:%M:%S");
}
state.SetBytesProcessed(state.iterations() * source_size * sizeof(TypeParam));
}
class StringToDurations : public cudf::benchmark {};
template <class TypeParam>
void BM_convert_to_durations(benchmark::State& state)
{
cudf::size_type const source_size = state.range(0);
// Every element is valid
auto data = cudf::detail::make_counting_transform_iterator(
0, [source_size](auto i) { return TypeParam{i - source_size / 2}; });
cudf::test::fixed_width_column_wrapper<TypeParam> source_durations(data, data + source_size);
auto results = cudf::strings::from_durations(source_durations, "%D days %H:%M:%S");
cudf::strings_column_view source_string(*results);
auto output_type = cudf::data_type(cudf::type_to_id<TypeParam>());
for (auto _ : state) {
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
cudf::strings::to_durations(source_string, output_type, "%D days %H:%M:%S");
}
state.SetBytesProcessed(state.iterations() * source_size * sizeof(TypeParam));
}
#define DSBM_BENCHMARK_DEFINE(name, type) \
BENCHMARK_DEFINE_F(DurationsToString, name)(::benchmark::State & state) \
{ \
BM_convert_from_durations<type>(state); \
} \
BENCHMARK_REGISTER_F(DurationsToString, name) \
->RangeMultiplier(1 << 5) \
->Range(1 << 10, 1 << 25) \
->UseManualTime() \
->Unit(benchmark::kMicrosecond);
#define SDBM_BENCHMARK_DEFINE(name, type) \
BENCHMARK_DEFINE_F(StringToDurations, name)(::benchmark::State & state) \
{ \
BM_convert_to_durations<type>(state); \
} \
BENCHMARK_REGISTER_F(StringToDurations, name) \
->RangeMultiplier(1 << 5) \
->Range(1 << 10, 1 << 25) \
->UseManualTime() \
->Unit(benchmark::kMicrosecond);
DSBM_BENCHMARK_DEFINE(from_durations_D, cudf::duration_D);
DSBM_BENCHMARK_DEFINE(from_durations_s, cudf::duration_s);
DSBM_BENCHMARK_DEFINE(from_durations_ms, cudf::duration_ms);
DSBM_BENCHMARK_DEFINE(from_durations_us, cudf::duration_us);
DSBM_BENCHMARK_DEFINE(from_durations_ns, cudf::duration_ns);
SDBM_BENCHMARK_DEFINE(to_durations_D, cudf::duration_D);
SDBM_BENCHMARK_DEFINE(to_durations_s, cudf::duration_s);
SDBM_BENCHMARK_DEFINE(to_durations_ms, cudf::duration_ms);
SDBM_BENCHMARK_DEFINE(to_durations_us, cudf::duration_us);
SDBM_BENCHMARK_DEFINE(to_durations_ns, cudf::duration_ns);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/find.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/find.hpp>
#include <cudf/strings/find_multiple.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <limits>
enum FindAPI { find, find_multi, contains, starts_with, ends_with };
class StringFindScalar : public cudf::benchmark {};
static void BM_find_scalar(benchmark::State& state, FindAPI find_api)
{
cudf::size_type const n_rows{static_cast<cudf::size_type>(state.range(0))};
cudf::size_type const max_str_length{static_cast<cudf::size_type>(state.range(1))};
data_profile const profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, max_str_length);
auto const column = create_random_column(cudf::type_id::STRING, row_count{n_rows}, profile);
cudf::strings_column_view input(column->view());
cudf::string_scalar target("+");
cudf::test::strings_column_wrapper targets({"+", "-"});
for (auto _ : state) {
cuda_event_timer raii(state, true, cudf::get_default_stream());
switch (find_api) {
case find: cudf::strings::find(input, target); break;
case find_multi:
cudf::strings::find_multiple(input, cudf::strings_column_view(targets));
break;
case contains: cudf::strings::contains(input, target); break;
case starts_with: cudf::strings::starts_with(input, target); break;
case ends_with: cudf::strings::ends_with(input, target); break;
}
}
state.SetBytesProcessed(state.iterations() * input.chars_size());
}
static void generate_bench_args(benchmark::internal::Benchmark* b)
{
int const min_rows = 1 << 12;
int const max_rows = 1 << 24;
int const row_mult = 8;
int const min_rowlen = 1 << 5;
int const max_rowlen = 1 << 13;
int const len_mult = 2;
for (int row_count = min_rows; row_count <= max_rows; row_count *= row_mult) {
for (int rowlen = min_rowlen; rowlen <= max_rowlen; rowlen *= len_mult) {
// avoid generating combinations that exceed the cudf column limit
size_t total_chars = static_cast<size_t>(row_count) * rowlen;
if (total_chars < static_cast<size_t>(std::numeric_limits<cudf::size_type>::max())) {
b->Args({row_count, rowlen});
}
}
}
}
#define STRINGS_BENCHMARK_DEFINE(name) \
BENCHMARK_DEFINE_F(StringFindScalar, name) \
(::benchmark::State & st) { BM_find_scalar(st, name); } \
BENCHMARK_REGISTER_F(StringFindScalar, name) \
->Apply(generate_bench_args) \
->UseManualTime() \
->Unit(benchmark::kMillisecond);
STRINGS_BENCHMARK_DEFINE(find)
STRINGS_BENCHMARK_DEFINE(find_multi)
STRINGS_BENCHMARK_DEFINE(contains)
STRINGS_BENCHMARK_DEFINE(starts_with)
STRINGS_BENCHMARK_DEFINE(ends_with)
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/split.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/split/split.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
static void bench_split(nvbench::state& state)
{
auto const num_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const row_width = static_cast<cudf::size_type>(state.get_int64("row_width"));
auto const stype = state.get_string("type");
if (static_cast<std::size_t>(num_rows) * static_cast<std::size_t>(row_width) >=
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max())) {
state.skip("Skip benchmarks greater than size_type limit");
}
data_profile const profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, row_width);
auto const column = create_random_column(cudf::type_id::STRING, row_count{num_rows}, profile);
cudf::strings_column_view input(column->view());
cudf::string_scalar target("+");
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
// gather some throughput statistics as well
auto chars_size = input.chars_size();
state.add_element_count(chars_size, "chars_size"); // number of bytes;
state.add_global_memory_reads<nvbench::int8_t>(chars_size); // all bytes are read;
state.add_global_memory_writes<nvbench::int8_t>(chars_size); // all bytes are written
if (stype == "split") {
state.exec(nvbench::exec_tag::sync,
[&](nvbench::launch& launch) { cudf::strings::split(input, target); });
} else if (stype == "split_ws") {
state.exec(nvbench::exec_tag::sync,
[&](nvbench::launch& launch) { cudf::strings::split(input); });
} else if (stype == "record") {
state.exec(nvbench::exec_tag::sync,
[&](nvbench::launch& launch) { cudf::strings::split_record(input, target); });
} else {
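    // covers the "record_ws" axis value: split_record on whitespace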
state.exec(nvbench::exec_tag::sync,
[&](nvbench::launch& launch) { cudf::strings::split_record(input); });
}
}
NVBENCH_BENCH(bench_split)
.set_name("split")
.add_int64_axis("row_width", {32, 64, 128, 256, 512, 1024, 2048})
.add_int64_axis("num_rows", {4096, 32768, 262144, 2097152, 16777216})
.add_string_axis("type", {"split", "split_ws", "record", "record_ws"});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/join_strings.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf/strings/combine.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
static void bench_join(nvbench::state& state)
{
auto const num_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const row_width = static_cast<cudf::size_type>(state.get_int64("row_width"));
if (static_cast<std::size_t>(num_rows) * static_cast<std::size_t>(row_width) >=
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max())) {
state.skip("Skip benchmarks greater than size_type limit");
}
data_profile const table_profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, row_width);
auto const table =
create_random_table({cudf::type_id::STRING}, row_count{num_rows}, table_profile);
cudf::strings_column_view input(table->view().column(0));
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
// gather some throughput statistics as well
auto const chars_size = input.chars_size();
state.add_element_count(chars_size, "chars_size"); // number of bytes;
state.add_global_memory_reads<nvbench::int8_t>(chars_size); // all bytes are read;
state.add_global_memory_writes<nvbench::int8_t>(chars_size); // all bytes are written
std::string separator(":");
std::string narep("null");
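  // join_strings() concatenates all rows into a single string, inserting the separator between
  // rows and substituting narep for null entries.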
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto result = cudf::strings::join_strings(input, separator, narep);
});
}
NVBENCH_BENCH(bench_join)
.set_name("strings_join")
.add_int64_axis("row_width", {32, 64, 128, 256, 512, 1024})
.add_int64_axis("num_rows", {4096, 32768, 262144, 2097152, 16777216});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/copy.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "string_bench_args.hpp"
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/random.h>
#include <thrust/shuffle.h>
class StringCopy : public cudf::benchmark {};
enum copy_type { gather, scatter };
static void BM_copy(benchmark::State& state, copy_type ct)
{
cudf::size_type const n_rows{static_cast<cudf::size_type>(state.range(0))};
cudf::size_type const max_str_length{static_cast<cudf::size_type>(state.range(1))};
data_profile const table_profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, max_str_length);
auto const source =
create_random_table({cudf::type_id::STRING}, row_count{n_rows}, table_profile);
auto const target =
create_random_table({cudf::type_id::STRING}, row_count{n_rows}, table_profile);
// scatter indices
auto index_map_col = make_numeric_column(
cudf::data_type{cudf::type_id::INT32}, n_rows, cudf::mask_state::UNALLOCATED);
auto index_map = index_map_col->mutable_view();
thrust::shuffle_copy(thrust::device,
thrust::counting_iterator<cudf::size_type>(0),
thrust::counting_iterator<cudf::size_type>(n_rows),
index_map.begin<cudf::size_type>(),
thrust::default_random_engine());
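  // The shuffled map is a random permutation, so both gather and scatter see effectively random
  // (non-coalesced) access patterns.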
for (auto _ : state) {
cuda_event_timer raii(state, true, cudf::get_default_stream());
switch (ct) {
case gather: cudf::gather(source->view(), index_map); break;
case scatter: cudf::scatter(source->view(), index_map, target->view()); break;
}
}
state.SetBytesProcessed(state.iterations() *
cudf::strings_column_view(source->view().column(0)).chars_size());
}
static void generate_bench_args(benchmark::internal::Benchmark* b)
{
int const min_rows = 1 << 12;
int const max_rows = 1 << 24;
int const row_mult = 8;
int const min_rowlen = 1 << 5;
int const max_rowlen = 1 << 13;
int const len_mult = 4;
generate_string_bench_args(b, min_rows, max_rows, row_mult, min_rowlen, max_rowlen, len_mult);
// Benchmark for very small strings
b->Args({67108864, 2});
}
#define COPY_BENCHMARK_DEFINE(name) \
BENCHMARK_DEFINE_F(StringCopy, name) \
(::benchmark::State & st) { BM_copy(st, copy_type::name); } \
BENCHMARK_REGISTER_F(StringCopy, name) \
->Apply(generate_bench_args) \
->UseManualTime() \
->Unit(benchmark::kMillisecond);
COPY_BENCHMARK_DEFINE(gather)
COPY_BENCHMARK_DEFINE(scatter)
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/convert_fixed_point.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/strings/convert/convert_fixed_point.hpp>
#include <cudf/strings/convert/convert_floats.hpp>
#include <cudf/types.hpp>
namespace {
std::unique_ptr<cudf::column> get_strings_column(cudf::size_type rows)
{
auto result =
create_random_column(cudf::type_id::FLOAT32, row_count{static_cast<cudf::size_type>(rows)});
return cudf::strings::from_floats(result->view());
}
} // anonymous namespace
class StringsToFixedPoint : public cudf::benchmark {};
template <typename fixed_point_type>
void convert_to_fixed_point(benchmark::State& state)
{
auto const rows = static_cast<cudf::size_type>(state.range(0));
auto const strings_col = get_strings_column(rows);
auto const strings_view = cudf::strings_column_view(strings_col->view());
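  // scale_type{-2} stores two fractional decimal digits in the target decimal type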
auto const dtype = cudf::data_type{cudf::type_to_id<fixed_point_type>(), numeric::scale_type{-2}};
for (auto _ : state) {
cuda_event_timer raii(state, true);
auto volatile results = cudf::strings::to_fixed_point(strings_view, dtype);
}
// bytes_processed = bytes_input + bytes_output
state.SetBytesProcessed(state.iterations() *
(strings_view.chars_size() + rows * cudf::size_of(dtype)));
}
class StringsFromFixedPoint : public cudf::benchmark {};
template <typename fixed_point_type>
void convert_from_fixed_point(benchmark::State& state)
{
auto const rows = static_cast<cudf::size_type>(state.range(0));
auto const strings_col = get_strings_column(rows);
auto const dtype = cudf::data_type{cudf::type_to_id<fixed_point_type>(), numeric::scale_type{-2}};
auto const fp_col =
cudf::strings::to_fixed_point(cudf::strings_column_view(strings_col->view()), dtype);
std::unique_ptr<cudf::column> results = nullptr;
for (auto _ : state) {
cuda_event_timer raii(state, true);
results = cudf::strings::from_fixed_point(fp_col->view());
}
// bytes_processed = bytes_input + bytes_output
state.SetBytesProcessed(
state.iterations() *
(cudf::strings_column_view(results->view()).chars_size() + rows * cudf::size_of(dtype)));
}
#define CONVERT_TO_FIXED_POINT_BMD(name, fixed_point_type) \
BENCHMARK_DEFINE_F(StringsToFixedPoint, name)(::benchmark::State & state) \
{ \
convert_to_fixed_point<fixed_point_type>(state); \
} \
BENCHMARK_REGISTER_F(StringsToFixedPoint, name) \
->RangeMultiplier(4) \
->Range(1 << 12, 1 << 24) \
->UseManualTime() \
->Unit(benchmark::kMicrosecond);
#define CONVERT_FROM_FIXED_POINT_BMD(name, fixed_point_type) \
BENCHMARK_DEFINE_F(StringsFromFixedPoint, name)(::benchmark::State & state) \
{ \
convert_from_fixed_point<fixed_point_type>(state); \
} \
BENCHMARK_REGISTER_F(StringsFromFixedPoint, name) \
->RangeMultiplier(4) \
->Range(1 << 12, 1 << 24) \
->UseManualTime() \
->Unit(benchmark::kMicrosecond);
CONVERT_TO_FIXED_POINT_BMD(strings_to_decimal32, numeric::decimal32);
CONVERT_TO_FIXED_POINT_BMD(strings_to_decimal64, numeric::decimal64);
CONVERT_FROM_FIXED_POINT_BMD(strings_from_decimal32, numeric::decimal32);
CONVERT_FROM_FIXED_POINT_BMD(strings_from_decimal64, numeric::decimal64);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/slice.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "string_bench_args.hpp"
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/slice.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <thrust/iterator/constant_iterator.h>
#include <limits>
class StringSlice : public cudf::benchmark {};
enum slice_type { position, multi_position };
static void BM_slice(benchmark::State& state, slice_type rt)
{
cudf::size_type const n_rows{static_cast<cudf::size_type>(state.range(0))};
cudf::size_type const max_str_length{static_cast<cudf::size_type>(state.range(1))};
data_profile const profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, max_str_length);
auto const column = create_random_column(cudf::type_id::STRING, row_count{n_rows}, profile);
cudf::strings_column_view input(column->view());
auto starts_itr = thrust::constant_iterator<cudf::size_type>(max_str_length / 3);
auto stops_itr = thrust::constant_iterator<cudf::size_type>(max_str_length / 2);
cudf::test::fixed_width_column_wrapper<int32_t> starts(starts_itr, starts_itr + n_rows);
cudf::test::fixed_width_column_wrapper<int32_t> stops(stops_itr, stops_itr + n_rows);
for (auto _ : state) {
cuda_event_timer raii(state, true, cudf::get_default_stream());
switch (rt) {
case position:
cudf::strings::slice_strings(input, max_str_length / 3, max_str_length / 2);
break;
case multi_position: cudf::strings::slice_strings(input, starts, stops); break;
}
}
state.SetBytesProcessed(state.iterations() * input.chars_size());
}
static void generate_bench_args(benchmark::internal::Benchmark* b)
{
int const min_rows = 1 << 12;
int const max_rows = 1 << 24;
int const row_mult = 8;
int const min_rowlen = 1 << 5;
int const max_rowlen = 1 << 13;
int const len_mult = 2;
generate_string_bench_args(b, min_rows, max_rows, row_mult, min_rowlen, max_rowlen, len_mult);
}
#define STRINGS_BENCHMARK_DEFINE(name) \
BENCHMARK_DEFINE_F(StringSlice, name) \
(::benchmark::State & st) { BM_slice(st, slice_type::name); } \
BENCHMARK_REGISTER_F(StringSlice, name) \
->Apply(generate_bench_args) \
->UseManualTime() \
->Unit(benchmark::kMillisecond);
STRINGS_BENCHMARK_DEFINE(position)
STRINGS_BENCHMARK_DEFINE(multi_position)
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/convert_numerics.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/strings/convert/convert_floats.hpp>
#include <cudf/strings/convert/convert_integers.hpp>
#include <cudf/types.hpp>
namespace {
template <typename NumericType>
std::unique_ptr<cudf::column> get_numerics_column(cudf::size_type rows)
{
return create_random_column(cudf::type_to_id<NumericType>(), row_count{rows});
}
template <typename NumericType>
std::unique_ptr<cudf::column> get_strings_column(cudf::size_type rows)
{
auto const numerics_col = get_numerics_column<NumericType>(rows);
if constexpr (std::is_floating_point_v<NumericType>) {
return cudf::strings::from_floats(numerics_col->view());
} else {
return cudf::strings::from_integers(numerics_col->view());
}
}
} // anonymous namespace
class StringsToNumeric : public cudf::benchmark {};
template <typename NumericType>
void convert_to_number(benchmark::State& state)
{
auto const rows = static_cast<cudf::size_type>(state.range(0));
auto const strings_col = get_strings_column<NumericType>(rows);
auto const strings_view = cudf::strings_column_view(strings_col->view());
auto const col_type = cudf::type_to_id<NumericType>();
for (auto _ : state) {
cuda_event_timer raii(state, true);
if constexpr (std::is_floating_point_v<NumericType>) {
cudf::strings::to_floats(strings_view, cudf::data_type{col_type});
} else {
cudf::strings::to_integers(strings_view, cudf::data_type{col_type});
}
}
// bytes_processed = bytes_input + bytes_output
state.SetBytesProcessed(state.iterations() *
(strings_view.chars_size() + rows * sizeof(NumericType)));
}
class StringsFromNumeric : public cudf::benchmark {};
template <typename NumericType>
void convert_from_number(benchmark::State& state)
{
auto const rows = static_cast<cudf::size_type>(state.range(0));
auto const numerics_col = get_numerics_column<NumericType>(rows);
auto const numerics_view = numerics_col->view();
std::unique_ptr<cudf::column> results = nullptr;
for (auto _ : state) {
cuda_event_timer raii(state, true);
if constexpr (std::is_floating_point_v<NumericType>)
results = cudf::strings::from_floats(numerics_view);
else
results = cudf::strings::from_integers(numerics_view);
}
// bytes_processed = bytes_input + bytes_output
state.SetBytesProcessed(
state.iterations() *
(cudf::strings_column_view(results->view()).chars_size() + rows * sizeof(NumericType)));
}
#define CONVERT_TO_NUMERICS_BD(name, type) \
BENCHMARK_DEFINE_F(StringsToNumeric, name)(::benchmark::State & state) \
{ \
convert_to_number<type>(state); \
} \
BENCHMARK_REGISTER_F(StringsToNumeric, name) \
->RangeMultiplier(4) \
->Range(1 << 10, 1 << 17) \
->UseManualTime() \
->Unit(benchmark::kMicrosecond);
#define CONVERT_FROM_NUMERICS_BD(name, type) \
BENCHMARK_DEFINE_F(StringsFromNumeric, name)(::benchmark::State & state) \
{ \
convert_from_number<type>(state); \
} \
BENCHMARK_REGISTER_F(StringsFromNumeric, name) \
->RangeMultiplier(4) \
->Range(1 << 10, 1 << 17) \
->UseManualTime() \
->Unit(benchmark::kMicrosecond);
CONVERT_TO_NUMERICS_BD(strings_to_float32, float);
CONVERT_TO_NUMERICS_BD(strings_to_float64, double);
CONVERT_TO_NUMERICS_BD(strings_to_int32, int32_t);
CONVERT_TO_NUMERICS_BD(strings_to_int64, int64_t);
CONVERT_TO_NUMERICS_BD(strings_to_uint8, uint8_t);
CONVERT_TO_NUMERICS_BD(strings_to_uint16, uint16_t);
CONVERT_FROM_NUMERICS_BD(strings_from_float32, float);
CONVERT_FROM_NUMERICS_BD(strings_from_float64, double);
CONVERT_FROM_NUMERICS_BD(strings_from_int32, int32_t);
CONVERT_FROM_NUMERICS_BD(strings_from_int64, int64_t);
CONVERT_FROM_NUMERICS_BD(strings_from_uint8, uint8_t);
CONVERT_FROM_NUMERICS_BD(strings_from_uint16, uint16_t);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/reverse.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf/strings/reverse.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
static void bench_reverse(nvbench::state& state)
{
auto const num_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const row_width = static_cast<cudf::size_type>(state.get_int64("row_width"));
if (static_cast<std::size_t>(num_rows) * static_cast<std::size_t>(row_width) >=
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max())) {
state.skip("Skip benchmarks greater than size_type limit");
}
data_profile const table_profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, row_width);
auto const table =
create_random_table({cudf::type_id::STRING}, row_count{num_rows}, table_profile);
cudf::strings_column_view input(table->view().column(0));
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
// gather some throughput statistics as well
auto chars_size = input.chars_size();
state.add_element_count(chars_size, "chars_size"); // number of bytes;
state.add_global_memory_reads<nvbench::int8_t>(chars_size); // all bytes are read;
state.add_global_memory_writes<nvbench::int8_t>(chars_size); // all bytes are written
state.exec(nvbench::exec_tag::sync,
[&](nvbench::launch& launch) { auto result = cudf::strings::reverse(input); });
}
NVBENCH_BENCH(bench_reverse)
.set_name("reverse")
.add_int64_axis("row_width", {8, 16, 32, 64, 128})
.add_int64_axis("num_rows", {4096, 32768, 262144, 2097152, 16777216});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/extract.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/strings/extract.hpp>
#include <cudf/strings/regex/regex_program.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
#include <random>
static void bench_extract(nvbench::state& state)
{
auto const num_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const row_width = static_cast<cudf::size_type>(state.get_int64("row_width"));
if (static_cast<std::size_t>(num_rows) * static_cast<std::size_t>(row_width) >=
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max())) {
state.skip("Skip benchmarks greater than size_type limit");
}
auto groups = static_cast<cudf::size_type>(state.get_int64("groups"));
std::default_random_engine generator;
std::uniform_int_distribution<int> words_dist(0, 999);
std::vector<std::string> samples(100); // 100 unique rows of data to reuse
std::generate(samples.begin(), samples.end(), [&]() {
std::string row; // build a row of random tokens
while (static_cast<cudf::size_type>(row.size()) < row_width) {
row += std::to_string(words_dist(generator)) + " ";
}
return row;
});
std::string pattern{""};
while (groups--) {
pattern += "(\\d+) ";
}
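  // each sample row is a sequence of space-separated numeric tokens, so every "(\d+) " group in
  // the pattern should find a match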
cudf::test::strings_column_wrapper samples_column(samples.begin(), samples.end());
data_profile const profile = data_profile_builder().no_validity().distribution(
cudf::type_to_id<cudf::size_type>(), distribution_id::UNIFORM, 0ul, samples.size() - 1);
auto map =
create_random_column(cudf::type_to_id<cudf::size_type>(), row_count{num_rows}, profile);
auto input = cudf::gather(
cudf::table_view{{samples_column}}, map->view(), cudf::out_of_bounds_policy::DONT_CHECK);
cudf::strings_column_view strings_view(input->get_column(0).view());
auto prog = cudf::strings::regex_program::create(pattern);
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
// gather some throughput statistics as well
auto chars_size = strings_view.chars_size();
state.add_element_count(chars_size, "chars_size"); // number of bytes;
state.add_global_memory_reads<nvbench::int8_t>(chars_size); // all bytes are read;
state.add_global_memory_writes<nvbench::int8_t>(chars_size); // all bytes are written
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto result = cudf::strings::extract(strings_view, *prog);
});
}
NVBENCH_BENCH(bench_extract)
.set_name("extract")
.add_int64_axis("row_width", {32, 64, 128, 256, 512, 1024, 2048})
.add_int64_axis("num_rows", {4096, 32768, 262144, 2097152, 16777216})
.add_int64_axis("groups", {1, 2, 4});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/convert_datetime.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/strings/convert/convert_datetime.hpp>
#include <cudf/wrappers/timestamps.hpp>
class StringDateTime : public cudf::benchmark {};
enum class direction { to, from };
template <class TypeParam>
void BM_convert_datetime(benchmark::State& state, direction dir)
{
auto const n_rows = static_cast<cudf::size_type>(state.range(0));
auto const data_type = cudf::data_type(cudf::type_to_id<TypeParam>());
auto const column = create_random_column(data_type.id(), row_count{n_rows});
cudf::column_view input(column->view());
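  // the to_timestamps benchmark needs a strings column to parse; from_timestamps
  // reads the timestamp column directly, so an empty placeholder column suffices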
auto source = dir == direction::to ? cudf::strings::from_timestamps(input, "%Y-%m-%d %H:%M:%S")
: make_empty_column(cudf::data_type{cudf::type_id::STRING});
cudf::strings_column_view source_string(source->view());
for (auto _ : state) {
cuda_event_timer raii(state, true);
if (dir == direction::to)
cudf::strings::to_timestamps(source_string, data_type, "%Y-%m-%d %H:%M:%S");
else
cudf::strings::from_timestamps(input, "%Y-%m-%d %H:%M:%S");
}
auto const bytes = dir == direction::to ? source_string.chars_size() : n_rows * sizeof(TypeParam);
state.SetBytesProcessed(state.iterations() * bytes);
}
#define STR_BENCHMARK_DEFINE(name, type, dir) \
BENCHMARK_DEFINE_F(StringDateTime, name)(::benchmark::State & state) \
{ \
BM_convert_datetime<type>(state, dir); \
} \
BENCHMARK_REGISTER_F(StringDateTime, name) \
->RangeMultiplier(1 << 5) \
->Range(1 << 10, 1 << 25) \
->UseManualTime() \
->Unit(benchmark::kMicrosecond);
STR_BENCHMARK_DEFINE(from_days, cudf::timestamp_D, direction::from);
STR_BENCHMARK_DEFINE(from_seconds, cudf::timestamp_s, direction::from);
STR_BENCHMARK_DEFINE(from_mseconds, cudf::timestamp_ms, direction::from);
STR_BENCHMARK_DEFINE(from_useconds, cudf::timestamp_us, direction::from);
STR_BENCHMARK_DEFINE(from_nseconds, cudf::timestamp_ns, direction::from);
STR_BENCHMARK_DEFINE(to_days, cudf::timestamp_D, direction::to);
STR_BENCHMARK_DEFINE(to_seconds, cudf::timestamp_s, direction::to);
STR_BENCHMARK_DEFINE(to_mseconds, cudf::timestamp_ms, direction::to);
STR_BENCHMARK_DEFINE(to_useconds, cudf::timestamp_us, direction::to);
STR_BENCHMARK_DEFINE(to_nseconds, cudf::timestamp_ns, direction::to);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/split_re.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/strings/regex/regex_program.hpp>
#include <cudf/strings/split/split_re.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
#include <limits>
static void bench_split(nvbench::state& state)
{
auto const num_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const row_width = static_cast<cudf::size_type>(state.get_int64("row_width"));
if (static_cast<std::size_t>(num_rows) * static_cast<std::size_t>(row_width) >=
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max())) {
state.skip("Skip benchmarks greater than size_type limit");
}
auto prog = cudf::strings::regex_program::create("\\d+");
data_profile const profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, row_width);
auto const column = create_random_column(cudf::type_id::STRING, row_count{num_rows}, profile);
cudf::strings_column_view input(column->view());
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
// gather some throughput statistics as well
auto chars_size = input.chars_size();
state.add_element_count(chars_size, "chars_size"); // number of bytes;
state.add_global_memory_reads<nvbench::int8_t>(chars_size); // all bytes are read;
state.add_global_memory_writes<nvbench::int8_t>(chars_size); // all bytes are written
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto result = cudf::strings::split_record_re(input, *prog);
});
}
NVBENCH_BENCH(bench_split)
.set_name("split_re")
.add_int64_axis("row_width", {32, 64, 128, 256, 512, 1024, 2048})
.add_int64_axis("num_rows", {4096, 32768, 262144, 2097152, 16777216});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/replace.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "string_bench_args.hpp"
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/replace.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <limits>
class StringReplace : public cudf::benchmark {};
enum replace_type { scalar, slice, multi };
static void BM_replace(benchmark::State& state, replace_type rt)
{
cudf::size_type const n_rows{static_cast<cudf::size_type>(state.range(0))};
cudf::size_type const max_str_length{static_cast<cudf::size_type>(state.range(1))};
data_profile const profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, max_str_length);
auto const column = create_random_column(cudf::type_id::STRING, row_count{n_rows}, profile);
cudf::strings_column_view input(column->view());
cudf::string_scalar target("+");
cudf::string_scalar repl("");
cudf::test::strings_column_wrapper targets({"+", "-"});
cudf::test::strings_column_wrapper repls({"", ""});
for (auto _ : state) {
cuda_event_timer raii(state, true, cudf::get_default_stream());
switch (rt) {
case scalar: cudf::strings::replace(input, target, repl); break;
case slice: cudf::strings::replace_slice(input, repl, 1, 10); break;
case multi:
cudf::strings::replace(
input, cudf::strings_column_view(targets), cudf::strings_column_view(repls));
break;
}
}
state.SetBytesProcessed(state.iterations() * input.chars_size());
}
static void generate_bench_args(benchmark::internal::Benchmark* b)
{
int const min_rows = 1 << 12;
int const max_rows = 1 << 24;
int const row_mult = 8;
int const min_rowlen = 1 << 5;
int const max_rowlen = 1 << 13;
int const len_mult = 2;
generate_string_bench_args(b, min_rows, max_rows, row_mult, min_rowlen, max_rowlen, len_mult);
}
#define STRINGS_BENCHMARK_DEFINE(name) \
BENCHMARK_DEFINE_F(StringReplace, name) \
(::benchmark::State & st) { BM_replace(st, replace_type::name); } \
BENCHMARK_REGISTER_F(StringReplace, name) \
->Apply(generate_bench_args) \
->UseManualTime() \
->Unit(benchmark::kMillisecond);
STRINGS_BENCHMARK_DEFINE(scalar)
STRINGS_BENCHMARK_DEFINE(slice)
STRINGS_BENCHMARK_DEFINE(multi)
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/factory.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "string_bench_args.hpp"
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/execution_policy.h>
#include <thrust/pair.h>
#include <thrust/transform.h>
#include <limits>
namespace {
using string_pair = thrust::pair<char const*, cudf::size_type>;
struct string_view_to_pair {
__device__ string_pair operator()(thrust::pair<cudf::string_view, bool> const& p)
{
return (p.second) ? string_pair{p.first.data(), p.first.size_bytes()} : string_pair{nullptr, 0};
}
};
} // namespace
class StringsFactory : public cudf::benchmark {};
static void BM_factory(benchmark::State& state)
{
cudf::size_type const n_rows{static_cast<cudf::size_type>(state.range(0))};
cudf::size_type const max_str_length{static_cast<cudf::size_type>(state.range(1))};
data_profile const profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, max_str_length);
auto const column = create_random_column(cudf::type_id::STRING, row_count{n_rows}, profile);
auto d_column = cudf::column_device_view::create(column->view());
rmm::device_uvector<string_pair> pairs(d_column->size(), cudf::get_default_stream());
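  // convert the (string_view, validity) pairs into (char*, length) pairs on the
  // device once, outside the timed loop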
thrust::transform(thrust::device,
d_column->pair_begin<cudf::string_view, true>(),
d_column->pair_end<cudf::string_view, true>(),
pairs.data(),
string_view_to_pair{});
for (auto _ : state) {
cuda_event_timer raii(state, true, cudf::get_default_stream());
cudf::make_strings_column(pairs, cudf::get_default_stream());
}
cudf::strings_column_view input(column->view());
state.SetBytesProcessed(state.iterations() * input.chars_size());
}
static void generate_bench_args(benchmark::internal::Benchmark* b)
{
int const min_rows = 1 << 12;
int const max_rows = 1 << 24;
int const row_mult = 8;
int const min_rowlen = 1 << 5;
int const max_rowlen = 1 << 13;
int const len_mult = 4;
generate_string_bench_args(b, min_rows, max_rows, row_mult, min_rowlen, max_rowlen, len_mult);
}
#define STRINGS_BENCHMARK_DEFINE(name) \
BENCHMARK_DEFINE_F(StringsFactory, name) \
(::benchmark::State & st) { BM_factory(st); } \
BENCHMARK_REGISTER_F(StringsFactory, name) \
->Apply(generate_bench_args) \
->UseManualTime() \
->Unit(benchmark::kMillisecond);
STRINGS_BENCHMARK_DEFINE(factory)
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/case.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf/strings/case.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
#include <limits>
void bench_case(nvbench::state& state)
{
auto const n_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const max_width = static_cast<int32_t>(state.get_int64("row_width"));
auto const encoding = state.get_string("encoding");
if (static_cast<std::size_t>(n_rows) * static_cast<std::size_t>(max_width) >=
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max())) {
state.skip("Skip benchmarks greater than size_type limit");
}
data_profile const profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, max_width);
auto const column = create_random_column(cudf::type_id::STRING, row_count{n_rows}, profile);
auto col_view = column->view();
cudf::column::contents ascii_contents;
if (encoding == "ascii") {
data_profile ascii_profile = data_profile_builder().no_validity().distribution(
cudf::type_id::INT8, distribution_id::UNIFORM, 32, 126); // nice ASCII range
auto input = cudf::strings_column_view(col_view);
auto ascii_column =
create_random_column(cudf::type_id::INT8, row_count{input.chars_size()}, ascii_profile);
auto ascii_data = ascii_column->view();
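    // rebuild the strings column view reusing the original offsets but substituting
    // the random ASCII bytes as the character data child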
col_view = cudf::column_view(col_view.type(),
col_view.size(),
nullptr,
col_view.null_mask(),
col_view.null_count(),
0,
{input.offsets(), ascii_data});
ascii_contents = ascii_column->release();
}
auto input = cudf::strings_column_view(col_view);
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.add_element_count(input.chars_size(), "chars_size");
state.add_global_memory_reads<nvbench::int8_t>(input.chars_size());
state.add_global_memory_writes<nvbench::int8_t>(input.chars_size());
state.exec(nvbench::exec_tag::sync,
[&](nvbench::launch& launch) { auto result = cudf::strings::to_lower(input); });
}
NVBENCH_BENCH(bench_case)
.set_name("case")
.add_int64_axis("row_width", {32, 64, 128, 256, 512, 1024, 2048})
.add_int64_axis("num_rows", {4096, 32768, 262144, 2097152, 16777216})
.add_string_axis("encoding", {"ascii", "utf8"});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/repeat_strings.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "string_bench_args.hpp"
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/strings/repeat_strings.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
static constexpr cudf::size_type default_repeat_times = 16;
static constexpr cudf::size_type min_repeat_times = -16;
static constexpr cudf::size_type max_repeat_times = 16;
static std::unique_ptr<cudf::table> create_data_table(cudf::size_type n_cols,
cudf::size_type n_rows,
cudf::size_type max_str_length)
{
CUDF_EXPECTS(n_cols == 1 || n_cols == 2, "Invalid number of columns.");
std::vector<cudf::type_id> dtype_ids{cudf::type_id::STRING};
auto builder = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, max_str_length);
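  // a second INT32 column provides the per-row repeat counts used by the
  // column_times benchmark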
if (n_cols == 2) {
dtype_ids.push_back(cudf::type_id::INT32);
builder.distribution(
cudf::type_id::INT32, distribution_id::NORMAL, min_repeat_times, max_repeat_times);
}
return create_random_table(dtype_ids, row_count{n_rows}, data_profile{builder});
}
static void BM_repeat_strings_scalar_times(benchmark::State& state)
{
auto const n_rows = static_cast<cudf::size_type>(state.range(0));
auto const max_str_length = static_cast<cudf::size_type>(state.range(1));
auto const table = create_data_table(1, n_rows, max_str_length);
auto const strings_col = cudf::strings_column_view(table->view().column(0));
for ([[maybe_unused]] auto _ : state) {
[[maybe_unused]] cuda_event_timer raii(state, true, cudf::get_default_stream());
cudf::strings::repeat_strings(strings_col, default_repeat_times);
}
state.SetBytesProcessed(state.iterations() * strings_col.chars_size());
}
static void BM_repeat_strings_column_times(benchmark::State& state)
{
auto const n_rows = static_cast<cudf::size_type>(state.range(0));
auto const max_str_length = static_cast<cudf::size_type>(state.range(1));
auto const table = create_data_table(2, n_rows, max_str_length);
auto const strings_col = cudf::strings_column_view(table->view().column(0));
auto const repeat_times_col = table->view().column(1);
for ([[maybe_unused]] auto _ : state) {
[[maybe_unused]] cuda_event_timer raii(state, true, cudf::get_default_stream());
cudf::strings::repeat_strings(strings_col, repeat_times_col);
}
state.SetBytesProcessed(state.iterations() *
(strings_col.chars_size() + repeat_times_col.size() * sizeof(int32_t)));
}
static void generate_bench_args(benchmark::internal::Benchmark* b)
{
int const min_rows = 1 << 8;
int const max_rows = 1 << 18;
int const row_mult = 4;
int const min_strlen = 1 << 4;
int const max_strlen = 1 << 8;
int const len_mult = 4;
generate_string_bench_args(b, min_rows, max_rows, row_mult, min_strlen, max_strlen, len_mult);
}
class RepeatStrings : public cudf::benchmark {};
#define REPEAT_STRINGS_SCALAR_TIMES_BENCHMARK_DEFINE(name) \
BENCHMARK_DEFINE_F(RepeatStrings, name) \
(::benchmark::State & st) { BM_repeat_strings_scalar_times(st); } \
BENCHMARK_REGISTER_F(RepeatStrings, name) \
->Apply(generate_bench_args) \
->UseManualTime() \
->Unit(benchmark::kMillisecond);
#define REPEAT_STRINGS_COLUMN_TIMES_BENCHMARK_DEFINE(name) \
BENCHMARK_DEFINE_F(RepeatStrings, name) \
(::benchmark::State & st) { BM_repeat_strings_column_times(st); } \
BENCHMARK_REGISTER_F(RepeatStrings, name) \
->Apply(generate_bench_args) \
->UseManualTime() \
->Unit(benchmark::kMillisecond);
REPEAT_STRINGS_SCALAR_TIMES_BENCHMARK_DEFINE(scalar_times)
REPEAT_STRINGS_COLUMN_TIMES_BENCHMARK_DEFINE(column_times)
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/lengths.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf/strings/attributes.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
#include <limits>
static void bench_lengths(nvbench::state& state)
{
auto const num_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const row_width = static_cast<cudf::size_type>(state.get_int64("row_width"));
if (static_cast<std::size_t>(num_rows) * static_cast<std::size_t>(row_width) >=
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max())) {
state.skip("Skip benchmarks greater than size_type limit");
}
data_profile const table_profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, row_width);
auto const table =
create_random_table({cudf::type_id::STRING}, row_count{num_rows}, table_profile);
cudf::strings_column_view input(table->view().column(0));
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
// gather some throughput statistics as well
auto chars_size = input.chars_size();
state.add_global_memory_reads<nvbench::int8_t>(chars_size); // all bytes are read;
state.add_global_memory_writes<nvbench::int32_t>(num_rows); // output is an integer per row
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto result = cudf::strings::count_characters(input);
});
}
NVBENCH_BENCH(bench_lengths)
.set_name("lengths")
.add_int64_axis("row_width", {32, 64, 128, 256, 512, 1024, 2048, 4096})
.add_int64_axis("num_rows", {4096, 32768, 262144, 2097152, 16777216});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/gather.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf/copying.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
#include <limits>
static void bench_gather(nvbench::state& state)
{
auto const num_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const row_width = static_cast<cudf::size_type>(state.get_int64("row_width"));
if (static_cast<std::size_t>(num_rows) * static_cast<std::size_t>(row_width) >=
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max())) {
state.skip("Skip benchmarks greater than size_type limit");
}
data_profile const table_profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, row_width);
auto const input_table =
create_random_table({cudf::type_id::STRING}, row_count{num_rows}, table_profile);
data_profile const map_profile = data_profile_builder().no_validity().distribution(
cudf::type_id::INT32, distribution_id::UNIFORM, 0, num_rows);
auto const map_table =
create_random_table({cudf::type_id::INT32}, row_count{num_rows}, map_profile);
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
auto chars_size = cudf::strings_column_view(input_table->view().column(0)).chars_size();
state.add_global_memory_reads<nvbench::int8_t>(chars_size); // all bytes are read;
state.add_global_memory_writes<nvbench::int8_t>(chars_size);
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto result = cudf::gather(
input_table->view(), map_table->view().column(0), cudf::out_of_bounds_policy::NULLIFY);
});
}
NVBENCH_BENCH(bench_gather)
.set_name("gather")
.add_int64_axis("row_width", {32, 64, 128, 256, 512, 1024, 2048, 4096})
.add_int64_axis("num_rows", {4096, 32768, 262144, 2097152, 16777216});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/string_bench_args.hpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <benchmark/benchmark.h>
#include <cudf/types.hpp>
#include <limits>
/**
* @brief Generate row count and row length argument ranges for a string benchmark.
*
* Generates a series of row count and row length arguments for string benchmarks.
* Combinations of row count and row length that would exceed the maximum string character
* column data length are not generated.
*
* @param b Benchmark to update with row count and row length arguments.
* @param min_rows Minimum row count argument to generate.
* @param max_rows Maximum row count argument to generate.
* @param rows_mult Row count multiplier to generate intermediate row count arguments.
* @param min_rowlen Minimum row length argument to generate.
* @param max_rowlen Maximum row length argument to generate.
* @param rowlen_mult Row length multiplier to generate intermediate row length arguments.
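 *
 * For example, with min_rows=4096, max_rows=262144, rows_mult=8, min_rowlen=32,
 * max_rowlen=128, and rowlen_mult=2, the argument pairs added are {4096,32},
 * {4096,64}, {4096,128}, {32768,32}, ..., {262144,128}; any pair whose
 * rows-by-rowlen product would reach the cudf::size_type maximum is omitted.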
*/
inline void generate_string_bench_args(benchmark::internal::Benchmark* b,
int min_rows,
int max_rows,
int rows_mult,
int min_rowlen,
int max_rowlen,
int rowlen_mult)
{
for (int row_count = min_rows; row_count <= max_rows; row_count *= rows_mult) {
for (int rowlen = min_rowlen; rowlen <= max_rowlen; rowlen *= rowlen_mult) {
// avoid generating combinations that exceed the cudf column limit
size_t total_chars = static_cast<size_t>(row_count) * rowlen;
if (total_chars < static_cast<size_t>(std::numeric_limits<cudf::size_type>::max())) {
b->Args({row_count, rowlen});
}
}
}
}
| 0 |