repo_id | file_path | content | __index_level_0__
---|---|---|---|
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/combine.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "string_bench_args.hpp"
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/combine.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
class StringCombine : public cudf::benchmark {};
static void BM_combine(benchmark::State& state)
{
cudf::size_type const n_rows{static_cast<cudf::size_type>(state.range(0))};
cudf::size_type const max_str_length{static_cast<cudf::size_type>(state.range(1))};
data_profile const table_profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, max_str_length);
auto const table = create_random_table(
{cudf::type_id::STRING, cudf::type_id::STRING}, row_count{n_rows}, table_profile);
cudf::strings_column_view input1(table->view().column(0));
cudf::strings_column_view input2(table->view().column(1));
cudf::string_scalar separator("+");
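  // Note: with a separator scalar, cudf::strings::concatenate joins the string columns
  // row-wise, e.g. ["a","b"] and ["x","y"] with "+" produce ["a+x","b+y"].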
for (auto _ : state) {
cuda_event_timer raii(state, true, cudf::get_default_stream());
cudf::strings::concatenate(table->view(), separator);
}
state.SetBytesProcessed(state.iterations() * (input1.chars_size() + input2.chars_size()));
}
static void generate_bench_args(benchmark::internal::Benchmark* b)
{
int const min_rows = 1 << 12;
int const max_rows = 1 << 24;
int const row_mult = 8;
int const min_rowlen = 1 << 4;
int const max_rowlen = 1 << 11;
int const len_mult = 4;
generate_string_bench_args(b, min_rows, max_rows, row_mult, min_rowlen, max_rowlen, len_mult);
}
#define STRINGS_BENCHMARK_DEFINE(name) \
BENCHMARK_DEFINE_F(StringCombine, name) \
(::benchmark::State & st) { BM_combine(st); } \
BENCHMARK_REGISTER_F(StringCombine, name) \
->Apply(generate_bench_args) \
->UseManualTime() \
->Unit(benchmark::kMillisecond);
STRINGS_BENCHMARK_DEFINE(concat)
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/filter.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "string_bench_args.hpp"
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/char_types/char_types.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/strip.hpp>
#include <cudf/strings/translate.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <vector>
enum FilterAPI { filter, filter_chars, strip };
class StringFilterChars : public cudf::benchmark {};
static void BM_filter_chars(benchmark::State& state, FilterAPI api)
{
cudf::size_type const n_rows{static_cast<cudf::size_type>(state.range(0))};
cudf::size_type const max_str_length{static_cast<cudf::size_type>(state.range(1))};
data_profile const profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, max_str_length);
auto const column = create_random_column(cudf::type_id::STRING, row_count{n_rows}, profile);
cudf::strings_column_view input(column->view());
auto const types = cudf::strings::string_character_types::SPACE;
std::vector<std::pair<cudf::char_utf8, cudf::char_utf8>> filter_table{
{cudf::char_utf8{'a'}, cudf::char_utf8{'c'}}};
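  // Under its default keep policy, filter_characters retains only code points inside
  // the given ranges (here ['a','c']); all other characters are removed.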
for (auto _ : state) {
cuda_event_timer raii(state, true, cudf::get_default_stream());
switch (api) {
case filter: cudf::strings::filter_characters_of_type(input, types); break;
case filter_chars: cudf::strings::filter_characters(input, filter_table); break;
case strip: cudf::strings::strip(input); break;
}
}
state.SetBytesProcessed(state.iterations() * input.chars_size());
}
static void generate_bench_args(benchmark::internal::Benchmark* b)
{
int const min_rows = 1 << 12;
int const max_rows = 1 << 24;
int const row_multiplier = 8;
int const min_length = 1 << 5;
int const max_length = 1 << 13;
int const length_multiplier = 2;
generate_string_bench_args(
b, min_rows, max_rows, row_multiplier, min_length, max_length, length_multiplier);
}
#define STRINGS_BENCHMARK_DEFINE(name) \
BENCHMARK_DEFINE_F(StringFilterChars, name) \
(::benchmark::State & st) { BM_filter_chars(st, FilterAPI::name); } \
BENCHMARK_REGISTER_F(StringFilterChars, name) \
->Apply(generate_bench_args) \
->UseManualTime() \
->Unit(benchmark::kMillisecond);
STRINGS_BENCHMARK_DEFINE(filter)
STRINGS_BENCHMARK_DEFINE(filter_chars)
STRINGS_BENCHMARK_DEFINE(strip)
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/contains.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/filling.hpp>
#include <cudf/strings/combine.hpp>
#include <cudf/strings/contains.hpp>
#include <cudf/strings/regex/regex_program.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
std::unique_ptr<cudf::column> build_input_column(cudf::size_type n_rows,
cudf::size_type row_width,
int32_t hit_rate)
{
// build input table using the following data
auto raw_data = cudf::test::strings_column_wrapper(
{
"123 abc 4567890 DEFGHI 0987 5W43", // matches both patterns;
"012345 6789 01234 56789 0123 456", // the rest do not match
"abc 4567890 DEFGHI 0987 Wxyz 123",
"abcdefghijklmnopqrstuvwxyz 01234",
"",
"AbcéDEFGHIJKLMNOPQRSTUVWXYZ 01",
"9876543210,abcdefghijklmnopqrstU",
"9876543210,abcdefghijklmnopqrstU",
"123 édf 4567890 DéFG 0987 X5",
"1",
})
.release();
if (row_width / 32 > 1) {
std::vector<cudf::column_view> columns;
for (int i = 0; i < row_width / 32; ++i) {
columns.push_back(raw_data->view());
}
raw_data = cudf::strings::concatenate(cudf::table_view(columns));
}
auto data_view = raw_data->view();
// compute the number of rows (out of n_rows) that should match
auto matches = static_cast<int32_t>(n_rows * hit_rate) / 100;
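  // e.g. n_rows=1000 and hit_rate=50 yield matches=500: that many rows end up
  // gathering string index 0, the only row that matches the patterns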
// Create a randomized gather-map to build a column out of the strings in data.
data_profile gather_profile =
data_profile_builder().cardinality(0).null_probability(0.0).distribution(
cudf::type_id::INT32, distribution_id::UNIFORM, 1, data_view.size() - 1);
auto gather_table =
create_random_table({cudf::type_id::INT32}, row_count{n_rows}, gather_profile);
gather_table->get_column(0).set_null_mask(rmm::device_buffer{}, 0);
// Create scatter map by placing 0-index values throughout the gather-map
auto scatter_data = cudf::sequence(
matches, cudf::numeric_scalar<int32_t>(0), cudf::numeric_scalar<int32_t>(n_rows / matches));
auto zero_scalar = cudf::numeric_scalar<int32_t>(0);
auto table = cudf::scatter({zero_scalar}, scatter_data->view(), gather_table->view());
auto gather_map = table->view().column(0);
table = cudf::gather(cudf::table_view({data_view}), gather_map);
return std::move(table->release().front());
}
// longer pattern lengths demand more working memory per string
std::string patterns[] = {"^\\d+ [a-z]+", "[A-Z ]+\\d+ +\\d+[A-Z]+\\d+$"};
static void bench_contains(nvbench::state& state)
{
auto const n_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const row_width = static_cast<cudf::size_type>(state.get_int64("row_width"));
auto const pattern_index = static_cast<cudf::size_type>(state.get_int64("pattern"));
auto const hit_rate = static_cast<cudf::size_type>(state.get_int64("hit_rate"));
if (static_cast<std::size_t>(n_rows) * static_cast<std::size_t>(row_width) >=
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max())) {
state.skip("Skip benchmarks greater than size_type limit");
}
auto col = build_input_column(n_rows, row_width, hit_rate);
auto input = cudf::strings_column_view(col->view());
auto pattern = patterns[pattern_index];
auto program = cudf::strings::regex_program::create(pattern);
auto chars_size = input.chars_size();
state.add_element_count(chars_size, "chars_size");
state.add_global_memory_reads<nvbench::int8_t>(chars_size);
state.add_global_memory_writes<nvbench::int32_t>(input.size());
state.exec(nvbench::exec_tag::sync,
[&](nvbench::launch& launch) { cudf::strings::contains_re(input, *program); });
}
NVBENCH_BENCH(bench_contains)
.set_name("contains")
.add_int64_axis("row_width", {32, 64, 128, 256, 512})
.add_int64_axis("num_rows", {32768, 262144, 2097152, 16777216})
.add_int64_axis("hit_rate", {50, 100}) // percentage
.add_int64_axis("pattern", {0, 1});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/like.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/copying.hpp>
#include <cudf/filling.hpp>
#include <cudf/strings/combine.hpp>
#include <cudf/strings/contains.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
namespace {
std::unique_ptr<cudf::column> build_input_column(cudf::size_type n_rows,
cudf::size_type row_width,
int32_t hit_rate)
{
// build input table using the following data
auto raw_data = cudf::test::strings_column_wrapper(
{
"123 abc 4567890 DEFGHI 0987 5W43", // matches always;
"012345 6789 01234 56789 0123 456", // the rest do not match
"abc 4567890 DEFGHI 0987 Wxyz 123",
"abcdefghijklmnopqrstuvwxyz 01234",
"",
"AbcéDEFGHIJKLMNOPQRSTUVWXYZ 01",
"9876543210,abcdefghijklmnopqrstU",
"9876543210,abcdefghijklmnopqrstU",
"123 édf 4567890 DéFG 0987 X5",
"1",
})
.release();
if (row_width / 32 > 1) {
std::vector<cudf::column_view> columns;
for (int i = 0; i < row_width / 32; ++i) {
columns.push_back(raw_data->view());
}
raw_data = cudf::strings::concatenate(cudf::table_view(columns));
}
auto data_view = raw_data->view();
// compute the number of rows (out of n_rows) that should match
auto matches = static_cast<int32_t>(n_rows * hit_rate) / 100;
// Create a randomized gather-map to build a column out of the strings in data.
data_profile gather_profile =
data_profile_builder().cardinality(0).null_probability(0.0).distribution(
cudf::type_id::INT32, distribution_id::UNIFORM, 1, data_view.size() - 1);
auto gather_table =
create_random_table({cudf::type_id::INT32}, row_count{n_rows}, gather_profile);
gather_table->get_column(0).set_null_mask(rmm::device_buffer{}, 0);
// Create scatter map by placing 0-index values throughout the gather-map
auto scatter_data = cudf::sequence(
matches, cudf::numeric_scalar<int32_t>(0), cudf::numeric_scalar<int32_t>(n_rows / matches));
auto zero_scalar = cudf::numeric_scalar<int32_t>(0);
auto table = cudf::scatter({zero_scalar}, scatter_data->view(), gather_table->view());
auto gather_map = table->view().column(0);
table = cudf::gather(cudf::table_view({data_view}), gather_map);
return std::move(table->release().front());
}
} // namespace
static void bench_like(nvbench::state& state)
{
auto const n_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const row_width = static_cast<cudf::size_type>(state.get_int64("row_width"));
auto const hit_rate = static_cast<int32_t>(state.get_int64("hit_rate"));
if (static_cast<std::size_t>(n_rows) * static_cast<std::size_t>(row_width) >=
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max())) {
state.skip("Skip benchmarks greater than size_type limit");
}
auto col = build_input_column(n_rows, row_width, hit_rate);
auto input = cudf::strings_column_view(col->view());
// This pattern forces reading the entire target string (when a match is expected)
auto pattern = std::string("% 5W4_"); // regex equivalent: ".* 5W4.$"
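  // LIKE wildcards: '%' matches any sequence of characters, '_' matches exactly one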
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
// gather some throughput statistics as well
auto chars_size = input.chars_size();
state.add_element_count(chars_size, "chars_size"); // number of bytes;
state.add_global_memory_reads<nvbench::int8_t>(chars_size); // all bytes are read;
state.add_global_memory_writes<nvbench::int8_t>(n_rows); // writes are BOOL8
state.exec(nvbench::exec_tag::sync,
[&](nvbench::launch& launch) { auto result = cudf::strings::like(input, pattern); });
}
NVBENCH_BENCH(bench_like)
.set_name("strings_like")
.add_int64_axis("row_width", {32, 64, 128, 256, 512})
.add_int64_axis("num_rows", {32768, 262144, 2097152, 16777216})
.add_int64_axis("hit_rate", {10, 25, 70, 100});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/translate.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "string_bench_args.hpp"
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/translate.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <algorithm>
#include <vector>
class StringTranslate : public cudf::benchmark {};
using entry_type = std::pair<cudf::char_utf8, cudf::char_utf8>;
static void BM_translate(benchmark::State& state, int entry_count)
{
cudf::size_type const n_rows{static_cast<cudf::size_type>(state.range(0))};
cudf::size_type const max_str_length{static_cast<cudf::size_type>(state.range(1))};
data_profile const profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, max_str_length);
auto const column = create_random_column(cudf::type_id::STRING, row_count{n_rows}, profile);
cudf::strings_column_view input(column->view());
std::vector<entry_type> entries(entry_count);
std::transform(thrust::counting_iterator<int>(0),
thrust::counting_iterator<int>(entry_count),
entries.begin(),
[](auto idx) -> entry_type {
return entry_type{'!' + idx, '~' - idx};
});
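  // entries pair characters from both ends of the printable ASCII range:
  // entry 0 maps '!' -> '~', entry 1 maps '"' -> '}', and so on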
for (auto _ : state) {
cuda_event_timer raii(state, true, cudf::get_default_stream());
cudf::strings::translate(input, entries);
}
state.SetBytesProcessed(state.iterations() * input.chars_size());
}
static void generate_bench_args(benchmark::internal::Benchmark* b)
{
int const min_rows = 1 << 12;
int const max_rows = 1 << 24;
int const row_mult = 8;
int const min_rowlen = 1 << 5;
int const max_rowlen = 1 << 13;
int const len_mult = 4;
generate_string_bench_args(b, min_rows, max_rows, row_mult, min_rowlen, max_rowlen, len_mult);
}
#define STRINGS_BENCHMARK_DEFINE(name, entries) \
BENCHMARK_DEFINE_F(StringTranslate, name) \
(::benchmark::State & st) { BM_translate(st, entries); } \
BENCHMARK_REGISTER_F(StringTranslate, name) \
->Apply(generate_bench_args) \
->UseManualTime() \
->Unit(benchmark::kMillisecond);
STRINGS_BENCHMARK_DEFINE(translate_small, 5)
STRINGS_BENCHMARK_DEFINE(translate_medium, 25)
STRINGS_BENCHMARK_DEFINE(translate_large, 50)
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/replace_re.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <cudf/strings/regex/regex_program.hpp>
#include <cudf/strings/replace_re.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
static void bench_replace(nvbench::state& state)
{
auto const n_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const row_width = static_cast<cudf::size_type>(state.get_int64("row_width"));
auto const rtype = state.get_string("type");
if (static_cast<std::size_t>(n_rows) * static_cast<std::size_t>(row_width) >=
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max())) {
state.skip("Skip benchmarks greater than size_type limit");
}
data_profile const profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, row_width);
auto const column = create_random_column(cudf::type_id::STRING, row_count{n_rows}, profile);
cudf::strings_column_view input(column->view());
auto program = cudf::strings::regex_program::create("(\\d+)");
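  // "(\\d+)" captures each run of digits; the backref replacement "#\\1X" wraps the
  // capture, e.g. "abc123" becomes "abc#123X", while the plain replace substitutes "77"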
auto chars_size = input.chars_size();
state.add_element_count(chars_size, "chars_size");
state.add_global_memory_reads<nvbench::int8_t>(chars_size);
state.add_global_memory_writes<nvbench::int8_t>(chars_size);
if (rtype == "backref") {
auto replacement = std::string("#\\1X");
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
cudf::strings::replace_with_backrefs(input, *program, replacement);
});
} else {
auto replacement = std::string("77");
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
cudf::strings::replace_re(input, *program, replacement);
});
}
}
NVBENCH_BENCH(bench_replace)
.set_name("replace_re")
.add_int64_axis("row_width", {32, 64, 128, 256, 512})
.add_int64_axis("num_rows", {32768, 262144, 2097152, 16777216})
.add_string_axis("type", {"replace", "backref"});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/string/count.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/strings/contains.hpp>
#include <cudf/strings/regex/regex_program.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
static void bench_count(nvbench::state& state)
{
auto const num_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const row_width = static_cast<cudf::size_type>(state.get_int64("row_width"));
if (static_cast<std::size_t>(num_rows) * static_cast<std::size_t>(row_width) >=
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max())) {
state.skip("Skip benchmarks greater than size_type limit");
}
data_profile const table_profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, row_width);
auto const table =
create_random_table({cudf::type_id::STRING}, row_count{num_rows}, table_profile);
cudf::strings_column_view input(table->view().column(0));
std::string pattern = "\\d+";
auto prog = cudf::strings::regex_program::create(pattern);
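  // count_re returns an INT32 column of per-row match counts,
  // e.g. "12ab34" contains two matches of \d+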
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
// gather some throughput statistics as well
auto chars_size = input.chars_size();
state.add_element_count(chars_size, "chars_size");
state.add_global_memory_reads<nvbench::int8_t>(chars_size);
state.add_global_memory_writes<nvbench::int32_t>(input.size());
state.exec(nvbench::exec_tag::sync,
[&](nvbench::launch& launch) { auto result = cudf::strings::count_re(input, *prog); });
}
NVBENCH_BENCH(bench_count)
.set_name("count")
.add_int64_axis("row_width", {32, 64, 128, 256, 512, 1024, 2048})
.add_int64_axis("num_rows", {4096, 32768, 262144, 2097152, 16777216});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/copying/scatter.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/copying.hpp>
#include <cudf/types.hpp>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/reverse.h>
#include <thrust/shuffle.h>
class Scatter : public cudf::benchmark {};
template <class TypeParam, bool coalesce>
void BM_scatter(benchmark::State& state)
{
auto const source_size{static_cast<cudf::size_type>(state.range(0))};
auto const n_cols{static_cast<cudf::size_type>(state.range(1))};
// Scatter indices
auto scatter_map_table =
create_sequence_table({cudf::type_to_id<cudf::size_type>()}, row_count{source_size});
auto scatter_map = scatter_map_table->get_column(0).mutable_view();
if (coalesce) {
thrust::reverse(
thrust::device, scatter_map.begin<cudf::size_type>(), scatter_map.end<cudf::size_type>());
} else {
thrust::shuffle(thrust::device,
scatter_map.begin<cudf::size_type>(),
scatter_map.end<cudf::size_type>(),
thrust::default_random_engine());
}
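  // A reversed map keeps neighboring writes adjacent (coalesced); a shuffled map
  // scatters writes randomly, stressing uncoalesced memory traffic.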
// Every element is valid
auto source_table = create_sequence_table(cycle_dtypes({cudf::type_to_id<TypeParam>()}, n_cols),
row_count{source_size});
auto target_table = create_sequence_table(cycle_dtypes({cudf::type_to_id<TypeParam>()}, n_cols),
row_count{source_size});
for (auto _ : state) {
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
cudf::scatter(*source_table, scatter_map, *target_table);
}
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * state.range(0) * n_cols * 2 *
sizeof(TypeParam));
}
#define SBM_BENCHMARK_DEFINE(name, type, coalesce) \
BENCHMARK_DEFINE_F(Scatter, name)(::benchmark::State & state) \
{ \
BM_scatter<type, coalesce>(state); \
} \
BENCHMARK_REGISTER_F(Scatter, name) \
->RangeMultiplier(2) \
->Ranges({{1 << 10, 1 << 25}, {1, 8}}) \
->UseManualTime();
SBM_BENCHMARK_DEFINE(double_coalesce_x, double, true);
SBM_BENCHMARK_DEFINE(double_coalesce_o, double, false);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/copying/contiguous_split.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/column/column.hpp>
#include <cudf/contiguous_split.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <algorithm>
#include <iterator>
#include <vector>
void contiguous_split(cudf::table_view const& src_table, std::vector<cudf::size_type> const& splits)
{
auto result = cudf::contiguous_split(src_table, splits);
}
void chunked_pack(cudf::table_view const& src_table, std::vector<cudf::size_type> const&)
{
auto const mr = rmm::mr::get_current_device_resource();
auto const stream = cudf::get_default_stream();
auto user_buffer = rmm::device_uvector<std::uint8_t>(100L * 1024 * 1024, stream, mr);
auto chunked_pack = cudf::chunked_pack::create(src_table, user_buffer.size(), mr);
while (chunked_pack->has_next()) {
auto iter_size = chunked_pack->next(user_buffer);
}
stream.synchronize();
}
template <typename T, typename ContigSplitImpl>
void BM_contiguous_split_common(benchmark::State& state,
std::vector<T>& src_cols,
int64_t num_rows,
int64_t num_splits,
int64_t bytes_total,
ContigSplitImpl& impl)
{
// generate splits
std::vector<cudf::size_type> splits;
if (num_splits > 0) {
cudf::size_type const split_stride = num_rows / num_splits;
// start after the first element.
auto iter = thrust::make_counting_iterator(1);
splits.reserve(num_splits);
std::transform(iter,
iter + num_splits,
std::back_inserter(splits),
[split_stride, num_rows](cudf::size_type i) {
return std::min(i * split_stride, static_cast<cudf::size_type>(num_rows));
});
}
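  // e.g. num_rows=100 and num_splits=4 produce splits {25, 50, 75, 100}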
for (auto const& col : src_cols) {
// computing the null count is not a part of the benchmark's target code path, and we want the
// property to be pre-computed so that we measure the performance of only the intended code path
[[maybe_unused]] auto const nulls = col->null_count();
}
auto const src_table = cudf::table(std::move(src_cols));
for (auto _ : state) {
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
impl(src_table, splits);
}
// it's 2x bytes_total because we're both reading and writing.
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * bytes_total * 2);
}
class ContiguousSplit : public cudf::benchmark {};
class ChunkedPack : public cudf::benchmark {};
template <typename ContiguousSplitImpl>
void BM_contiguous_split(benchmark::State& state, ContiguousSplitImpl& impl)
{
int64_t const total_desired_bytes = state.range(0);
cudf::size_type const num_cols = state.range(1);
cudf::size_type const num_splits = state.range(2);
bool const include_validity = state.range(3) != 0;
cudf::size_type el_size = 4; // ints and floats
int64_t const num_rows = total_desired_bytes / (num_cols * el_size);
// generate input table
auto builder = data_profile_builder().cardinality(0).distribution<int>(cudf::type_id::INT32,
distribution_id::UNIFORM);
if (not include_validity) builder.no_validity();
auto src_cols = create_random_table(cycle_dtypes({cudf::type_id::INT32}, num_cols),
row_count{static_cast<cudf::size_type>(num_rows)},
data_profile{builder})
->release();
int64_t const total_bytes =
total_desired_bytes +
(include_validity ? (std::max(int64_t{1}, (num_rows / 32)) * sizeof(cudf::bitmask_type) * num_cols)
: 0);
BM_contiguous_split_common(state, src_cols, num_rows, num_splits, total_bytes, impl);
}
class ContiguousSplitStrings : public cudf::benchmark {};
class ChunkedPackStrings : public cudf::benchmark {};
template <typename ContiguousSplitImpl>
void BM_contiguous_split_strings(benchmark::State& state, ContiguousSplitImpl& impl)
{
int64_t const total_desired_bytes = state.range(0);
cudf::size_type const num_cols = state.range(1);
cudf::size_type const num_splits = state.range(2);
bool const include_validity = state.range(3) != 0;
constexpr int64_t string_len = 8;
std::vector<char const*> h_strings{
"aaaaaaaa", "bbbbbbbb", "cccccccc", "dddddddd", "eeeeeeee", "ffffffff", "gggggggg", "hhhhhhhh"};
int64_t const col_len_bytes = total_desired_bytes / num_cols;
int64_t const num_rows = col_len_bytes / string_len;
// generate input table
data_profile profile = data_profile_builder().no_validity().cardinality(0).distribution(
cudf::type_id::INT32,
distribution_id::UNIFORM,
0ul,
include_validity ? h_strings.size() * 2 : h_strings.size() - 1); // out of bounds nullified
cudf::test::strings_column_wrapper one_col(h_strings.begin(), h_strings.end());
std::vector<std::unique_ptr<cudf::column>> src_cols(num_cols);
for (int64_t idx = 0; idx < num_cols; idx++) {
auto random_indices = create_random_column(
cudf::type_id::INT32, row_count{static_cast<cudf::size_type>(num_rows)}, profile);
auto str_table = cudf::gather(cudf::table_view{{one_col}},
*random_indices,
(include_validity ? cudf::out_of_bounds_policy::NULLIFY
: cudf::out_of_bounds_policy::DONT_CHECK));
src_cols[idx] = std::move(str_table->release()[0]);
}
int64_t const total_bytes =
total_desired_bytes + ((num_rows + 1) * sizeof(cudf::size_type)) +
(include_validity ? (std::max(int64_t{1}, (num_rows / 32)) * sizeof(cudf::bitmask_type) * num_cols)
: 0);
BM_contiguous_split_common(state, src_cols, num_rows, num_splits, total_bytes, impl);
}
#define CSBM_BENCHMARK_DEFINE(name, size, num_columns, num_splits, validity) \
BENCHMARK_DEFINE_F(ContiguousSplit, name)(::benchmark::State & state) \
{ \
BM_contiguous_split(state, contiguous_split); \
} \
BENCHMARK_REGISTER_F(ContiguousSplit, name) \
->Args({size, num_columns, num_splits, validity}) \
->Unit(benchmark::kMillisecond) \
->UseManualTime() \
->Iterations(8)
CSBM_BENCHMARK_DEFINE(6Gb512ColsNoValidity, (int64_t)6 * 1024 * 1024 * 1024, 512, 256, 0);
CSBM_BENCHMARK_DEFINE(6Gb512ColsValidity, (int64_t)6 * 1024 * 1024 * 1024, 512, 256, 1);
CSBM_BENCHMARK_DEFINE(6Gb10ColsNoValidity, (int64_t)6 * 1024 * 1024 * 1024, 10, 256, 0);
CSBM_BENCHMARK_DEFINE(6Gb10ColsValidity, (int64_t)6 * 1024 * 1024 * 1024, 10, 256, 1);
CSBM_BENCHMARK_DEFINE(4Gb512ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 256, 0);
CSBM_BENCHMARK_DEFINE(4Gb512ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 256, 1);
CSBM_BENCHMARK_DEFINE(4Gb10ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 256, 0);
CSBM_BENCHMARK_DEFINE(4Gb10ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 256, 1);
CSBM_BENCHMARK_DEFINE(4Gb4ColsNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 4, 0, 1);
CSBM_BENCHMARK_DEFINE(4Gb4ColsValidityNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 4, 0, 1);
CSBM_BENCHMARK_DEFINE(1Gb512ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 256, 0);
CSBM_BENCHMARK_DEFINE(1Gb512ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 256, 1);
CSBM_BENCHMARK_DEFINE(1Gb10ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 256, 0);
CSBM_BENCHMARK_DEFINE(1Gb10ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 256, 1);
CSBM_BENCHMARK_DEFINE(1Gb1ColNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 1, 0, 1);
CSBM_BENCHMARK_DEFINE(1Gb1ColValidityNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 1, 0, 1);
#define CSBM_STRINGS_BENCHMARK_DEFINE(name, size, num_columns, num_splits, validity) \
BENCHMARK_DEFINE_F(ContiguousSplitStrings, name)(::benchmark::State & state) \
{ \
BM_contiguous_split_strings(state, contiguous_split); \
} \
BENCHMARK_REGISTER_F(ContiguousSplitStrings, name) \
->Args({size, num_columns, num_splits, validity}) \
->Unit(benchmark::kMillisecond) \
->UseManualTime() \
->Iterations(8)
CSBM_STRINGS_BENCHMARK_DEFINE(4Gb512ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 256, 0);
CSBM_STRINGS_BENCHMARK_DEFINE(4Gb512ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 256, 1);
CSBM_STRINGS_BENCHMARK_DEFINE(4Gb10ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 256, 0);
CSBM_STRINGS_BENCHMARK_DEFINE(4Gb10ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 256, 1);
CSBM_STRINGS_BENCHMARK_DEFINE(4Gb4ColsNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 4, 0, 0);
CSBM_STRINGS_BENCHMARK_DEFINE(4Gb4ColsValidityNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 4, 0, 1);
CSBM_STRINGS_BENCHMARK_DEFINE(1Gb512ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 256, 0);
CSBM_STRINGS_BENCHMARK_DEFINE(1Gb512ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 256, 1);
CSBM_STRINGS_BENCHMARK_DEFINE(1Gb10ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 256, 0);
CSBM_STRINGS_BENCHMARK_DEFINE(1Gb10ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 256, 1);
CSBM_STRINGS_BENCHMARK_DEFINE(1Gb1ColNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 1, 0, 0);
CSBM_STRINGS_BENCHMARK_DEFINE(1Gb1ColValidityNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 1, 0, 1);
#define CCSBM_BENCHMARK_DEFINE(name, size, num_columns, num_splits, validity) \
BENCHMARK_DEFINE_F(ChunkedPack, name)(::benchmark::State & state) \
{ \
BM_contiguous_split(state, chunked_pack); \
} \
BENCHMARK_REGISTER_F(ChunkedPack, name) \
->Args({size, num_columns, num_splits, validity}) \
->Unit(benchmark::kMillisecond) \
->UseManualTime() \
->Iterations(8)
CCSBM_BENCHMARK_DEFINE(6Gb512ColsNoValidity, (int64_t)6 * 1024 * 1024 * 1024, 512, 0, 0);
CCSBM_BENCHMARK_DEFINE(6Gb512ColsValidity, (int64_t)6 * 1024 * 1024 * 1024, 512, 0, 1);
CCSBM_BENCHMARK_DEFINE(6Gb10ColsNoValidity, (int64_t)6 * 1024 * 1024 * 1024, 10, 0, 0);
CCSBM_BENCHMARK_DEFINE(6Gb10ColsValidity, (int64_t)6 * 1024 * 1024 * 1024, 10, 0, 1);
CCSBM_BENCHMARK_DEFINE(4Gb512ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 0, 0);
CCSBM_BENCHMARK_DEFINE(4Gb512ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 0, 1);
CCSBM_BENCHMARK_DEFINE(4Gb10ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 0, 0);
CCSBM_BENCHMARK_DEFINE(4Gb10ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 0, 1);
CCSBM_BENCHMARK_DEFINE(4Gb4ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 4, 0, 1);
CCSBM_BENCHMARK_DEFINE(1Gb512ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 0, 0);
CCSBM_BENCHMARK_DEFINE(1Gb512ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 0, 1);
CCSBM_BENCHMARK_DEFINE(1Gb10ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 0, 0);
CCSBM_BENCHMARK_DEFINE(1Gb10ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 0, 1);
CCSBM_BENCHMARK_DEFINE(1Gb1ColValidity, (int64_t)1 * 1024 * 1024 * 1024, 1, 0, 1);
#define CCSBM_STRINGS_BENCHMARK_DEFINE(name, size, num_columns, num_splits, validity) \
BENCHMARK_DEFINE_F(ChunkedPackStrings, name)(::benchmark::State & state) \
{ \
BM_contiguous_split_strings(state, chunked_pack); \
} \
BENCHMARK_REGISTER_F(ChunkedPackStrings, name) \
->Args({size, num_columns, num_splits, validity}) \
->Unit(benchmark::kMillisecond) \
->UseManualTime() \
->Iterations(8)
CCSBM_STRINGS_BENCHMARK_DEFINE(4Gb512ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 0, 0);
CCSBM_STRINGS_BENCHMARK_DEFINE(4Gb512ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 0, 1);
CCSBM_STRINGS_BENCHMARK_DEFINE(4Gb10ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 0, 0);
CCSBM_STRINGS_BENCHMARK_DEFINE(4Gb10ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 0, 1);
CCSBM_STRINGS_BENCHMARK_DEFINE(4Gb4ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 4, 0, 1);
CCSBM_STRINGS_BENCHMARK_DEFINE(1Gb512ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 0, 0);
CCSBM_STRINGS_BENCHMARK_DEFINE(1Gb512ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 0, 1);
CCSBM_STRINGS_BENCHMARK_DEFINE(1Gb10ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 0, 0);
CCSBM_STRINGS_BENCHMARK_DEFINE(1Gb10ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 0, 1);
CCSBM_STRINGS_BENCHMARK_DEFINE(1Gb1ColValidity, (int64_t)1 * 1024 * 1024 * 1024, 1, 0, 1);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/copying/gather.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/copying.hpp>
#include <cudf/types.hpp>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/reverse.h>
#include <thrust/shuffle.h>
class Gather : public cudf::benchmark {};
template <class TypeParam, bool coalesce>
void BM_gather(benchmark::State& state)
{
cudf::size_type const source_size{static_cast<cudf::size_type>(state.range(0))};
auto const n_cols = static_cast<cudf::size_type>(state.range(1));
// Gather indices
auto gather_map_table =
create_sequence_table({cudf::type_to_id<cudf::size_type>()}, row_count{source_size});
auto gather_map = gather_map_table->get_column(0).mutable_view();
if (coalesce) {
thrust::reverse(
thrust::device, gather_map.begin<cudf::size_type>(), gather_map.end<cudf::size_type>());
} else {
thrust::shuffle(thrust::device,
gather_map.begin<cudf::size_type>(),
gather_map.end<cudf::size_type>(),
thrust::default_random_engine());
}
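  // As in the scatter benchmark, a reversed map yields coalesced reads while a
  // shuffled map produces random, uncoalesced gathers.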
// Every element is valid
auto source_table = create_sequence_table(cycle_dtypes({cudf::type_to_id<TypeParam>()}, n_cols),
row_count{source_size});
for (auto _ : state) {
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
cudf::gather(*source_table, gather_map);
}
state.SetBytesProcessed(state.iterations() * state.range(0) * n_cols * 2 * sizeof(TypeParam));
}
#define GBM_BENCHMARK_DEFINE(name, type, coalesce) \
BENCHMARK_DEFINE_F(Gather, name)(::benchmark::State & state) \
{ \
BM_gather<type, coalesce>(state); \
} \
BENCHMARK_REGISTER_F(Gather, name) \
->RangeMultiplier(2) \
->Ranges({{1 << 10, 1 << 26}, {1, 8}}) \
->UseManualTime();
GBM_BENCHMARK_DEFINE(double_coalesce_x, double, true);
GBM_BENCHMARK_DEFINE(double_coalesce_o, double, false);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/copying/shift.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/copying.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
template <typename T, typename ScalarType = cudf::scalar_type_t<T>>
std::unique_ptr<cudf::scalar> make_scalar(
T value = 0,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
auto s = new ScalarType(value, true, stream, mr);
return std::unique_ptr<cudf::scalar>(s);
}
template <typename T>
struct value_func {
T* data;
cudf::size_type offset;
__device__ T operator()(int idx) { return data[idx - offset]; }
};
struct validity_func {
cudf::size_type size;
cudf::size_type offset;
__device__ bool operator()(int idx)
{
auto source_idx = idx - offset;
return source_idx < 0 || source_idx >= size;
}
};
template <bool use_validity, int shift_factor>
static void BM_shift(benchmark::State& state)
{
cudf::size_type size = state.range(0);
cudf::size_type offset = size * (static_cast<double>(shift_factor) / 100.0);
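  // e.g. shift_factor=10 and size=1000 give offset=100: rows move down by 100
  // positions and the vacated leading rows take the fill scalar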
auto constexpr column_type_id = cudf::type_id::INT32;
using column_type = cudf::id_to_type<column_type_id>;
auto const input_table = create_sequence_table(
{column_type_id}, row_count{size}, use_validity ? std::optional<double>{1.0} : std::nullopt);
cudf::column_view input{input_table->get_column(0)};
auto fill = use_validity ? make_scalar<column_type>() : make_scalar<column_type>(777);
for (auto _ : state) {
cuda_event_timer raii(state, true);
auto output = cudf::shift(input, offset, *fill);
}
auto const elems_read = (size - offset);
auto const bytes_read = elems_read * sizeof(column_type);
// If 'use_validity' is false, the fill value is a number, and the entire column
// (excluding the null bitmask) needs to be written. On the other hand, if 'use_validity'
// is true, only the elements that can be shifted are written, along with the full null bitmask.
auto const elems_written = use_validity ? (size - offset) : size;
auto const bytes_written = elems_written * sizeof(column_type);
auto const null_bytes = use_validity ? 2 * cudf::bitmask_allocation_size_bytes(size) : 0;
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) *
(bytes_written + bytes_read + null_bytes));
}
class Shift : public cudf::benchmark {};
#define SHIFT_BM_BENCHMARK_DEFINE(name, use_validity, shift_factor) \
BENCHMARK_DEFINE_F(Shift, name)(::benchmark::State & state) \
{ \
BM_shift<use_validity, shift_factor>(state); \
} \
BENCHMARK_REGISTER_F(Shift, name) \
->RangeMultiplier(32) \
->Range(1 << 10, 1 << 30) \
->UseManualTime() \
->Unit(benchmark::kMillisecond);
SHIFT_BM_BENCHMARK_DEFINE(shift_zero, false, 0);
SHIFT_BM_BENCHMARK_DEFINE(shift_zero_nullable_out, true, 0);
SHIFT_BM_BENCHMARK_DEFINE(shift_ten_percent, false, 10);
SHIFT_BM_BENCHMARK_DEFINE(shift_ten_percent_nullable_out, true, 10);
SHIFT_BM_BENCHMARK_DEFINE(shift_half, false, 50);
SHIFT_BM_BENCHMARK_DEFINE(shift_half_nullable_out, true, 50);
SHIFT_BM_BENCHMARK_DEFINE(shift_full, false, 100);
SHIFT_BM_BENCHMARK_DEFINE(shift_full_nullable_out, true, 100);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/copying/copy_if_else.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/copying.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/device_buffer.hpp>
class CopyIfElse : public cudf::benchmark {};
template <class TypeParam>
static void BM_copy_if_else(benchmark::State& state, bool nulls)
{
cudf::size_type const n_rows{static_cast<cudf::size_type>(state.range(0))};
auto input_type = cudf::type_to_id<TypeParam>();
auto bool_type = cudf::type_id::BOOL8;
auto const input = create_random_table({input_type, input_type, bool_type}, row_count{n_rows});
if (!nulls) {
input->get_column(2).set_null_mask(rmm::device_buffer{}, 0);
input->get_column(1).set_null_mask(rmm::device_buffer{}, 0);
input->get_column(0).set_null_mask(rmm::device_buffer{}, 0);
}
cudf::column_view decision(input->view().column(2));
cudf::column_view rhs(input->view().column(1));
cudf::column_view lhs(input->view().column(0));
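  // copy_if_else selects lhs[i] where decision[i] is true and rhs[i] otherwise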
for (auto _ : state) {
cuda_event_timer raii(state, true, cudf::get_default_stream());
cudf::copy_if_else(lhs, rhs, decision);
}
auto const bytes_read = n_rows * (sizeof(TypeParam) + sizeof(bool));
auto const bytes_written = n_rows * sizeof(TypeParam);
auto const null_bytes = nulls ? 2 * cudf::bitmask_allocation_size_bytes(n_rows) : 0;
// Use number of bytes read and written.
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) *
(bytes_read + bytes_written + null_bytes));
}
#define COPY_BENCHMARK_DEFINE(name, type, b) \
BENCHMARK_DEFINE_F(CopyIfElse, name) \
(::benchmark::State & st) { BM_copy_if_else<type>(st, b); } \
BENCHMARK_REGISTER_F(CopyIfElse, name) \
->RangeMultiplier(8) \
->Ranges({{1 << 12, 1 << 27}}) \
->UseManualTime() \
->Unit(benchmark::kMillisecond);
COPY_BENCHMARK_DEFINE(int16, int16_t, true)
COPY_BENCHMARK_DEFINE(uint32, uint32_t, true)
COPY_BENCHMARK_DEFINE(float64, double, true)
COPY_BENCHMARK_DEFINE(int16_no_nulls, int16_t, false)
COPY_BENCHMARK_DEFINE(uint32_no_nulls, uint32_t, false)
COPY_BENCHMARK_DEFINE(float64_no_nulls, double, false)
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/search/contains_table.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <cudf/detail/search.hpp>
#include <cudf/lists/list_view.hpp>
#include <cudf/types.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <nvbench/nvbench.cuh>
auto constexpr num_unique_elements = 1000;
template <typename Type>
static void nvbench_contains_table(nvbench::state& state, nvbench::type_list<Type>)
{
auto const size = state.get_int64("table_size");
auto const dtype = cudf::type_to_id<Type>();
double const null_probability = state.get_float64("null_probability");
auto builder = data_profile_builder().null_probability(null_probability);
if (dtype == cudf::type_id::LIST) {
builder.distribution(dtype, distribution_id::UNIFORM, 0, num_unique_elements)
.distribution(cudf::type_id::INT32, distribution_id::UNIFORM, 0, num_unique_elements)
.list_depth(1);
} else {
builder.distribution(dtype, distribution_id::UNIFORM, 0, num_unique_elements);
}
auto const haystack = create_random_table(
{dtype}, table_size_bytes{static_cast<size_t>(size)}, data_profile{builder}, 0);
auto const needles = create_random_table(
{dtype}, table_size_bytes{static_cast<size_t>(size)}, data_profile{builder}, 1);
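  // Different seeds (0 and 1) make haystack and needles distinct samples of the same
  // distribution; detail::contains marks, for each needles row, whether an equal row
  // exists in haystack.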
auto mem_stats_logger = cudf::memory_stats_logger();
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto const stream_view = rmm::cuda_stream_view{launch.get_stream()};
[[maybe_unused]] auto const result =
cudf::detail::contains(haystack->view(),
needles->view(),
cudf::null_equality::EQUAL,
cudf::nan_equality::ALL_EQUAL,
stream_view,
rmm::mr::get_current_device_resource());
});
state.add_buffer_size(
mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage");
}
NVBENCH_BENCH_TYPES(nvbench_contains_table,
NVBENCH_TYPE_AXES(nvbench::type_list<int32_t, cudf::list_view>))
.set_name("contains_table")
.set_type_axes_names({"type"})
.add_float64_axis("null_probability", {0.0, 0.1})
.add_int64_axis("table_size", {10'000, 100'000, 1'000'000, 10'000'000});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/search/contains_scalar.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf/detail/search.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/types.hpp>
#include <nvbench/nvbench.cuh>
namespace {
template <typename Type>
std::unique_ptr<cudf::column> create_column_data(cudf::size_type n_rows, bool has_nulls = false)
{
data_profile profile = data_profile_builder().cardinality(0).distribution(
cudf::type_to_id<Type>(), distribution_id::UNIFORM, 0, 1000);
profile.set_null_probability(has_nulls ? std::optional{0.1} : std::nullopt);
return create_random_column(cudf::type_to_id<Type>(), row_count{n_rows}, profile);
}
} // namespace
static void nvbench_contains_scalar(nvbench::state& state)
{
using Type = int;
auto const has_nulls = static_cast<bool>(state.get_int64("has_nulls"));
auto const size = state.get_int64("data_size");
auto const haystack = create_column_data<Type>(size, has_nulls);
auto const needle = cudf::make_fixed_width_scalar<Type>(size / 2);
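  // detail::contains(column, scalar, stream) returns a single bool indicating
  // whether the scalar value occurs anywhere in the column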
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto const stream_view = rmm::cuda_stream_view{launch.get_stream()};
[[maybe_unused]] auto const result = cudf::detail::contains(*haystack, *needle, stream_view);
});
}
NVBENCH_BENCH(nvbench_contains_scalar)
.set_name("contains_scalar")
.add_int64_power_of_two_axis("data_size", {10, 12, 14, 16, 18, 20, 22, 24, 26})
.add_int64_axis("has_nulls", {0, 1});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/search/search.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/filling.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/search.hpp>
#include <cudf/sorting.hpp>
#include <cudf/types.hpp>
#include <vector>
class Search : public cudf::benchmark {};
void BM_column(benchmark::State& state, bool nulls)
{
auto const column_size{static_cast<cudf::size_type>(state.range(0))};
auto const values_size = column_size;
auto init_data = cudf::make_fixed_width_scalar<float>(static_cast<float>(0));
auto init_value = cudf::make_fixed_width_scalar<float>(static_cast<float>(values_size));
auto step = cudf::make_fixed_width_scalar<float>(static_cast<float>(-1));
auto column = cudf::sequence(column_size, *init_data);
auto values = cudf::sequence(values_size, *init_value, *step);
if (nulls) {
auto [column_null_mask, column_null_count] = create_random_null_mask(column->size(), 0.1, 1);
column->set_null_mask(std::move(column_null_mask), column_null_count);
auto [values_null_mask, values_null_count] = create_random_null_mask(values->size(), 0.1, 2);
values->set_null_mask(std::move(values_null_mask), values_null_count);
}
auto data_table = cudf::sort(cudf::table_view({*column}));
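  // upper_bound performs a vectorized binary search: for each row of the values table
  // it returns the last position at which the row could be inserted into the sorted
  // data_table without breaking the ordering.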
for (auto _ : state) {
cuda_event_timer timer(state, true);
auto col = cudf::upper_bound(data_table->view(),
cudf::table_view({*values}),
{cudf::order::ASCENDING},
{cudf::null_order::BEFORE});
}
}
BENCHMARK_DEFINE_F(Search, Column_AllValid)(::benchmark::State& state) { BM_column(state, false); }
BENCHMARK_DEFINE_F(Search, Column_Nulls)(::benchmark::State& state) { BM_column(state, true); }
BENCHMARK_REGISTER_F(Search, Column_AllValid)
->UseManualTime()
->Unit(benchmark::kMillisecond)
->Arg(100000000);
BENCHMARK_REGISTER_F(Search, Column_Nulls)
->UseManualTime()
->Unit(benchmark::kMillisecond)
->Arg(100000000);
void BM_table(benchmark::State& state)
{
using Type = float;
auto const num_columns{static_cast<cudf::size_type>(state.range(0))};
auto const column_size{static_cast<cudf::size_type>(state.range(1))};
auto const values_size = column_size;
data_profile profile = data_profile_builder().cardinality(0).null_probability(0.1).distribution(
cudf::type_to_id<Type>(), distribution_id::UNIFORM, 0, 100);
auto data_table = create_random_table(
cycle_dtypes({cudf::type_to_id<Type>()}, num_columns), row_count{column_size}, profile);
auto values_table = create_random_table(
cycle_dtypes({cudf::type_to_id<Type>()}, num_columns), row_count{values_size}, profile);
std::vector<cudf::order> orders(num_columns, cudf::order::ASCENDING);
std::vector<cudf::null_order> null_orders(num_columns, cudf::null_order::BEFORE);
auto sorted = cudf::sort(*data_table);
for (auto _ : state) {
cuda_event_timer timer(state, true);
auto col = cudf::lower_bound(sorted->view(), *values_table, orders, null_orders);
}
}
BENCHMARK_DEFINE_F(Search, Table)(::benchmark::State& state) { BM_table(state); }
static void CustomArguments(benchmark::internal::Benchmark* b)
{
for (int num_cols = 1; num_cols <= 10; num_cols *= 2)
for (int col_size = 1000; col_size <= 100000000; col_size *= 10)
b->Args({num_cols, col_size});
}
BENCHMARK_REGISTER_F(Search, Table)
->UseManualTime()
->Unit(benchmark::kMillisecond)
->Apply(CustomArguments);
void BM_contains(benchmark::State& state, bool nulls)
{
auto const column_size{static_cast<cudf::size_type>(state.range(0))};
auto const values_size = column_size;
auto init_data = cudf::make_fixed_width_scalar<float>(static_cast<float>(0));
auto init_value = cudf::make_fixed_width_scalar<float>(static_cast<float>(values_size));
auto step = cudf::make_fixed_width_scalar<float>(static_cast<float>(-1));
auto column = cudf::sequence(column_size, *init_data);
auto values = cudf::sequence(values_size, *init_value, *step);
if (nulls) {
auto [column_null_mask, column_null_count] = create_random_null_mask(column->size(), 0.1, 1);
column->set_null_mask(std::move(column_null_mask), column_null_count);
auto [values_null_mask, values_null_count] = create_random_null_mask(values->size(), 0.1, 2);
values->set_null_mask(std::move(values_null_mask), values_null_count);
}
for (auto _ : state) {
cuda_event_timer timer(state, true);
auto col = cudf::contains(*column, *values);
}
}
BENCHMARK_DEFINE_F(Search, ColumnContains_AllValid)(::benchmark::State& state)
{
BM_contains(state, false);
}
BENCHMARK_DEFINE_F(Search, ColumnContains_Nulls)(::benchmark::State& state)
{
BM_contains(state, true);
}
BENCHMARK_REGISTER_F(Search, ColumnContains_AllValid)
->RangeMultiplier(8)
->Ranges({{1 << 10, 1 << 26}})
->UseManualTime()
->Unit(benchmark::kMillisecond);
BENCHMARK_REGISTER_F(Search, ColumnContains_Nulls)
->RangeMultiplier(8)
->Ranges({{1 << 10, 1 << 26}})
->UseManualTime()
->Unit(benchmark::kMillisecond);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/column/concatenate.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/fixture/templated_benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/concatenate.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <algorithm>
#include <vector>
class Concatenate : public cudf::benchmark {};
template <typename T, bool Nullable>
static void BM_concatenate(benchmark::State& state)
{
cudf::size_type const num_rows = state.range(0);
cudf::size_type const num_cols = state.range(1);
auto input = create_sequence_table(cycle_dtypes({cudf::type_to_id<T>()}, num_cols),
row_count{num_rows},
Nullable ? std::optional<double>{2.0 / 3.0} : std::nullopt);
auto input_columns = input->view();
std::vector<cudf::column_view> column_views(input_columns.begin(), input_columns.end());
CUDF_CHECK_CUDA(0);
for (auto _ : state) {
cuda_event_timer raii(state, true, cudf::get_default_stream());
auto result = cudf::concatenate(column_views);
}
state.SetBytesProcessed(state.iterations() * num_cols * num_rows * sizeof(T));
}
#define CONCAT_BENCHMARK_DEFINE(type, nullable) \
BENCHMARK_DEFINE_F(Concatenate, BM_concatenate##_##nullable_##nullable) \
(::benchmark::State & st) { BM_concatenate<type, nullable>(st); } \
BENCHMARK_REGISTER_F(Concatenate, BM_concatenate##_##nullable_##nullable) \
->RangeMultiplier(8) \
->Ranges({{1 << 6, 1 << 18}, {2, 1024}}) \
->Unit(benchmark::kMillisecond) \
->UseManualTime();
CONCAT_BENCHMARK_DEFINE(int64_t, false)
CONCAT_BENCHMARK_DEFINE(int64_t, true)
template <typename T, bool Nullable>
static void BM_concatenate_tables(benchmark::State& state)
{
cudf::size_type const num_rows = state.range(0);
cudf::size_type const num_cols = state.range(1);
cudf::size_type const num_tables = state.range(2);
std::vector<std::unique_ptr<cudf::table>> tables(num_tables);
std::generate_n(tables.begin(), num_tables, [&]() {
return create_sequence_table(cycle_dtypes({cudf::type_to_id<T>()}, num_cols),
row_count{num_rows},
Nullable ? std::optional<double>{2.0 / 3.0} : std::nullopt);
});
// Generate table views
std::vector<cudf::table_view> table_views(num_tables);
std::transform(tables.begin(), tables.end(), table_views.begin(), [](auto& table) mutable {
return table->view();
});
CUDF_CHECK_CUDA(0);
for (auto _ : state) {
cuda_event_timer raii(state, true, cudf::get_default_stream());
auto result = cudf::concatenate(table_views);
}
state.SetBytesProcessed(state.iterations() * num_cols * num_rows * num_tables * sizeof(T));
}
#define CONCAT_TABLES_BENCHMARK_DEFINE(type, nullable) \
BENCHMARK_DEFINE_F(Concatenate, BM_concatenate_tables##_##nullable_##nullable) \
(::benchmark::State & st) { BM_concatenate_tables<type, nullable>(st); } \
BENCHMARK_REGISTER_F(Concatenate, BM_concatenate_tables##_##nullable_##nullable) \
->RangeMultiplier(8) \
->Ranges({{1 << 8, 1 << 12}, {2, 32}, {2, 128}}) \
->Unit(benchmark::kMillisecond) \
->UseManualTime();
CONCAT_TABLES_BENCHMARK_DEFINE(int64_t, false)
CONCAT_TABLES_BENCHMARK_DEFINE(int64_t, true)
class ConcatenateStrings : public cudf::benchmark {};
template <bool Nullable>
static void BM_concatenate_strings(benchmark::State& state)
{
using column_wrapper = cudf::test::strings_column_wrapper;
auto const num_rows = state.range(0);
auto const num_chars = state.range(1);
auto const num_cols = state.range(2);
std::string str(num_chars, 'a');
// Create owning columns
std::vector<column_wrapper> columns;
columns.reserve(num_cols);
std::generate_n(std::back_inserter(columns), num_cols, [num_rows, c_str = str.c_str()]() {
auto iter = thrust::make_constant_iterator(c_str);
if (Nullable) {
auto count_it = thrust::make_counting_iterator(0);
auto valid_iter =
thrust::make_transform_iterator(count_it, [](auto i) { return i % 3 == 0; });
return column_wrapper(iter, iter + num_rows, valid_iter);
} else {
return column_wrapper(iter, iter + num_rows);
}
});
// Generate column views
std::vector<cudf::column_view> column_views;
column_views.reserve(columns.size());
std::transform(
columns.begin(), columns.end(), std::back_inserter(column_views), [](auto const& col) {
return static_cast<cudf::column_view>(col);
});
CUDF_CHECK_CUDA(0);
for (auto _ : state) {
cuda_event_timer raii(state, true, cudf::get_default_stream());
auto result = cudf::concatenate(column_views);
}
state.SetBytesProcessed(state.iterations() * num_cols * num_rows *
(sizeof(int32_t) + num_chars)); // offset + chars
}
#define CONCAT_STRINGS_BENCHMARK_DEFINE(nullable) \
BENCHMARK_DEFINE_F(Concatenate, BM_concatenate_strings##_##nullable_##nullable) \
(::benchmark::State & st) { BM_concatenate_strings<nullable>(st); } \
BENCHMARK_REGISTER_F(Concatenate, BM_concatenate_strings##_##nullable_##nullable) \
->RangeMultiplier(8) \
->Ranges({{1 << 8, 1 << 14}, {8, 128}, {2, 256}}) \
->Unit(benchmark::kMillisecond) \
->UseManualTime();
CONCAT_STRINGS_BENCHMARK_DEFINE(false)
CONCAT_STRINGS_BENCHMARK_DEFINE(true)
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/merge/merge_structs.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_nested_types.hpp>
#include <cudf/detail/merge.hpp>
#include <cudf/detail/sorting.hpp>
#include <nvbench/nvbench.cuh>
void nvbench_merge_struct(nvbench::state& state)
{
rmm::cuda_stream_view stream;
auto const input1 = create_structs_data(state);
auto const sorted_input1 =
cudf::detail::sort(*input1, {}, {}, stream, rmm::mr::get_current_device_resource());
auto const input2 = create_structs_data(state);
auto const sorted_input2 =
cudf::detail::sort(*input2, {}, {}, stream, rmm::mr::get_current_device_resource());
stream.synchronize();
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
rmm::cuda_stream_view stream_view{launch.get_stream()};
cudf::detail::merge({*sorted_input1, *sorted_input2},
{0},
{cudf::order::ASCENDING},
{},
stream_view,
rmm::mr::get_current_device_resource());
});
}
NVBENCH_BENCH(nvbench_merge_struct)
.set_name("merge_struct")
.add_int64_power_of_two_axis("NumRows", {10, 18, 26})
.add_int64_axis("Depth", {0, 1, 8})
.add_int64_axis("Nulls", {0, 1});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/merge/merge_lists.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_nested_types.hpp>
#include <cudf/detail/merge.hpp>
#include <cudf/detail/sorting.hpp>
#include <nvbench/nvbench.cuh>
void nvbench_merge_list(nvbench::state& state)
{
rmm::cuda_stream_view stream;
auto const input1 = create_lists_data(state);
auto const sorted_input1 =
cudf::detail::sort(*input1, {}, {}, stream, rmm::mr::get_current_device_resource());
auto const input2 = create_lists_data(state);
auto const sorted_input2 =
cudf::detail::sort(*input2, {}, {}, stream, rmm::mr::get_current_device_resource());
stream.synchronize();
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
rmm::cuda_stream_view stream_view{launch.get_stream()};
cudf::detail::merge({*sorted_input1, *sorted_input2},
{0},
{cudf::order::ASCENDING},
{},
stream_view,
rmm::mr::get_current_device_resource());
});
}
NVBENCH_BENCH(nvbench_merge_list)
.set_name("merge_lists")
.add_int64_power_of_two_axis("size_bytes", {10, 18, 24, 28})
.add_int64_axis("depth", {1, 4})
.add_float64_axis("null_frequency", {0, 0.2});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/merge/merge.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/column/column.hpp>
#include <cudf/merge.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <thrust/iterator/constant_iterator.h>
#include <random>
// to enable, run cmake with -DBUILD_BENCHMARKS=ON
// Fixture that enables RMM pool mode
class Merge : public cudf::benchmark {};
using IntColWrap = cudf::test::fixed_width_column_wrapper<int32_t>;
void BM_merge(benchmark::State& state)
{
cudf::size_type const avg_rows = 1 << 19; // 512K rows
int const num_tables = state.range(0);
// Content is irrelevant for the benchmark
auto data_sequence = thrust::make_constant_iterator(0);
// Using 0 seed to ensure consistent pseudo-numbers on each run
std::mt19937 rand_gen(0);
// Gaussian distribution; ~95% of elements fall in the range [0, avg_rows*2] (mean +/- 2 sigma)
std::normal_distribution<> table_size_dist(avg_rows, avg_rows / 2);
// Used to generate a random monotonic sequence for each table key column
std::uniform_int_distribution<> key_dist(0, 10);
std::vector<std::pair<IntColWrap, IntColWrap>> columns;
size_t total_rows = 0;
std::vector<cudf::table_view> tables;
for (int i = 0; i < num_tables; ++i) {
cudf::size_type const rows = std::round(table_size_dist(rand_gen));
// Ensure size in range [0, avg_rows*2]
auto const clamped_rows = std::clamp(rows, 0, avg_rows * 2);
int32_t prev_key = 0;
auto key_sequence = cudf::detail::make_counting_transform_iterator(0, [&](auto row) {
prev_key += key_dist(rand_gen);
return prev_key;
});
columns.emplace_back(
std::pair<IntColWrap, IntColWrap>{IntColWrap(key_sequence, key_sequence + clamped_rows),
IntColWrap(data_sequence, data_sequence + clamped_rows)});
tables.push_back(cudf::table_view{{columns.back().first, columns.back().second}});
total_rows += clamped_rows;
}
std::vector<cudf::size_type> const key_cols{0};
std::vector<cudf::order> const column_order{cudf::order::ASCENDING};
std::vector<cudf::null_order> const null_precedence{};
for (auto _ : state) {
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
auto result = cudf::merge(tables, key_cols, column_order, null_precedence);
}
state.SetBytesProcessed(state.iterations() * 2 * sizeof(int32_t) * total_rows);
}
#define MBM_BENCHMARK_DEFINE(name) \
BENCHMARK_DEFINE_F(Merge, name)(::benchmark::State & state) { BM_merge(state); } \
BENCHMARK_REGISTER_F(Merge, name) \
->Unit(benchmark::kMillisecond) \
->UseManualTime() \
->RangeMultiplier(2) \
->Ranges({{2, 128}});
MBM_BENCHMARK_DEFINE(pow2tables);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/ast/transform.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/transform.hpp>
#include <cudf/types.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <algorithm>
#include <list>
#include <memory>
#include <optional>
#include <vector>
enum class TreeType {
IMBALANCED_LEFT // All operator expressions have a left child operator expression and a right
// child column reference
};
template <typename key_type, TreeType tree_type, bool reuse_columns, bool Nullable>
class AST : public cudf::benchmark {};
template <typename key_type, TreeType tree_type, bool reuse_columns, bool Nullable>
static void BM_ast_transform(benchmark::State& state)
{
auto const table_size{static_cast<cudf::size_type>(state.range(0))};
auto const tree_levels{static_cast<cudf::size_type>(state.range(1))};
// Create table data
auto const n_cols = reuse_columns ? 1 : tree_levels + 1;
auto const source_table =
create_sequence_table(cycle_dtypes({cudf::type_to_id<key_type>()}, n_cols),
row_count{table_size},
Nullable ? std::optional<double>{0.5} : std::nullopt);
auto table = source_table->view();
// Create column references
auto column_refs = std::vector<cudf::ast::column_reference>();
std::transform(thrust::make_counting_iterator(0),
thrust::make_counting_iterator(n_cols),
std::back_inserter(column_refs),
[](auto const& column_id) {
return cudf::ast::column_reference(reuse_columns ? 0 : column_id);
});
// Create expression trees
// Note that a std::list is required here because of its guarantees against reference invalidation
// when items are added or removed. References to items in a std::vector are not safe if the
// vector must re-allocate.
auto expressions = std::list<cudf::ast::operation>();
// Construct tree that chains additions like (((a + b) + c) + d)
auto const op = cudf::ast::ast_operator::ADD;
if (reuse_columns) {
expressions.push_back(cudf::ast::operation(op, column_refs.at(0), column_refs.at(0)));
for (cudf::size_type i = 0; i < tree_levels - 1; i++) {
expressions.push_back(cudf::ast::operation(op, expressions.back(), column_refs.at(0)));
}
} else {
expressions.push_back(cudf::ast::operation(op, column_refs.at(0), column_refs.at(1)));
std::transform(std::next(column_refs.cbegin(), 2),
column_refs.cend(),
std::back_inserter(expressions),
[&](auto const& column_ref) {
return cudf::ast::operation(op, expressions.back(), column_ref);
});
}
auto const& expression_tree_root = expressions.back();
// Execute benchmark
for (auto _ : state) {
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
cudf::compute_column(table, expression_tree_root);
}
// Use the number of bytes read from global memory
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * state.range(0) *
(tree_levels + 1) * sizeof(key_type));
}
static void CustomRanges(benchmark::internal::Benchmark* b)
{
auto row_counts = std::vector<cudf::size_type>{100'000, 1'000'000, 10'000'000, 100'000'000};
auto operation_counts = std::vector<cudf::size_type>{1, 5, 10};
for (auto const& row_count : row_counts) {
for (auto const& operation_count : operation_counts) {
b->Args({row_count, operation_count});
}
}
}
#define AST_TRANSFORM_BENCHMARK_DEFINE(name, key_type, tree_type, reuse_columns, nullable) \
BENCHMARK_TEMPLATE_DEFINE_F(AST, name, key_type, tree_type, reuse_columns, nullable) \
(::benchmark::State & st) \
{ \
BM_ast_transform<key_type, tree_type, reuse_columns, nullable>(st); \
} \
BENCHMARK_REGISTER_F(AST, name) \
->Apply(CustomRanges) \
->Unit(benchmark::kMillisecond) \
->UseManualTime();
AST_TRANSFORM_BENCHMARK_DEFINE(
ast_int32_imbalanced_unique, int32_t, TreeType::IMBALANCED_LEFT, false, false);
AST_TRANSFORM_BENCHMARK_DEFINE(
ast_int32_imbalanced_reuse, int32_t, TreeType::IMBALANCED_LEFT, true, false);
AST_TRANSFORM_BENCHMARK_DEFINE(
ast_double_imbalanced_unique, double, TreeType::IMBALANCED_LEFT, false, false);
AST_TRANSFORM_BENCHMARK_DEFINE(
ast_int32_imbalanced_unique_nulls, int32_t, TreeType::IMBALANCED_LEFT, false, true);
AST_TRANSFORM_BENCHMARK_DEFINE(
ast_int32_imbalanced_reuse_nulls, int32_t, TreeType::IMBALANCED_LEFT, true, true);
AST_TRANSFORM_BENCHMARK_DEFINE(
ast_double_imbalanced_unique_nulls, double, TreeType::IMBALANCED_LEFT, false, true);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/fixture/nvbench_main.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/fixture/nvbench_fixture.hpp>
#define NVBENCH_ENVIRONMENT cudf::nvbench_base_fixture
#include <nvbench/main.cuh>
#include <vector>
// strip off the rmm_mode parameter before passing the
// remaining arguments to nvbench::option_parser
#undef NVBENCH_MAIN_PARSE
#define NVBENCH_MAIN_PARSE(argc, argv) \
nvbench::option_parser parser; \
std::vector<std::string> m_args; \
for (int i = 0; i < argc; ++i) { \
std::string arg = argv[i]; \
if (arg == cudf::detail::rmm_mode_param) { \
i++; /* with the loop's ++i, this skips both the flag and its value */ \
} else { \
m_args.push_back(arg); \
} \
} \
parser.parse(m_args)
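// Illustrative invocation (the binary name here is hypothetical):
//   ./CUDF_BENCH_NVBENCH --rmm_mode pool --devices 0
// The "--rmm_mode pool" pair is consumed by the loop above, so nvbench's
// option parser only sees "--devices 0".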
// this declares/defines the main() function using the definitions above
NVBENCH_MAIN
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/fixture/nvbench_fixture.hpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/utilities/error.hpp>
#include <rmm/mr/device/arena_memory_resource.hpp>
#include <rmm/mr/device/cuda_async_memory_resource.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/managed_memory_resource.hpp>
#include <rmm/mr/device/owning_wrapper.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <string>
namespace cudf {
namespace detail {
static std::string rmm_mode_param{"--rmm_mode"}; ///< RMM mode command-line parameter name
} // namespace detail
/**
* Base fixture for cudf benchmarks using nvbench.
*
* Initializes the default memory resource to use the RMM pool device resource.
*/
struct nvbench_base_fixture {
inline auto make_cuda() { return std::make_shared<rmm::mr::cuda_memory_resource>(); }
inline auto make_pool()
{
return rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(make_cuda());
}
inline auto make_async() { return std::make_shared<rmm::mr::cuda_async_memory_resource>(); }
inline auto make_managed() { return std::make_shared<rmm::mr::managed_memory_resource>(); }
inline auto make_arena()
{
return rmm::mr::make_owning_wrapper<rmm::mr::arena_memory_resource>(make_cuda());
}
inline auto make_managed_pool()
{
return rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(make_managed());
}
inline std::shared_ptr<rmm::mr::device_memory_resource> create_memory_resource(
std::string const& mode)
{
if (mode == "cuda") return make_cuda();
if (mode == "pool") return make_pool();
if (mode == "async") return make_async();
if (mode == "arena") return make_arena();
if (mode == "managed") return make_managed();
if (mode == "managed_pool") return make_managed_pool();
CUDF_FAIL("Unknown rmm_mode parameter: " + mode +
"\nExpecting: cuda, pool, async, arena, managed, or managed_pool");
}
nvbench_base_fixture(int argc, char const* const* argv)
{
for (int i = 1; i < argc - 1; ++i) {
std::string arg = argv[i];
if (arg == detail::rmm_mode_param) {
i++;
rmm_mode = argv[i];
}
}
mr = create_memory_resource(rmm_mode);
rmm::mr::set_current_device_resource(mr.get());
std::cout << "RMM memory resource = " << rmm_mode << "\n";
}
std::shared_ptr<rmm::mr::device_memory_resource> mr;
std::string rmm_mode{"pool"};
};
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/fixture/templated_benchmark_fixture.hpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <benchmark/benchmark.h>
namespace cudf {
/**
* @brief Templated Google Benchmark with fixture
*
* Extends Google benchmarks to support templated Benchmarks with non-templated fixture class.
*
 * The SetUp and TearDown methods are called before and after each templated benchmark
 * function runs. These methods are called automatically by Google Benchmark.
*
* Example:
*
* @code
* template <class T, class U>
* void my_benchmark(::benchmark::State& state) {
* std::vector<T> v1(state.range(0));
* std::vector<U> v2(state.range(0));
* for (auto _ : state) {
* // benchmark stuff
* }
* }
*
* TEMPLATED_BENCHMARK_F(cudf::benchmark, my_benchmark, int, double)->Range(128, 512);
* @endcode
*/
template <class Fixture>
class FunctionTemplateBenchmark : public Fixture {
public:
FunctionTemplateBenchmark(char const* name, ::benchmark::internal::Function* func)
: Fixture(), func_(func)
{
this->SetName(name);
}
virtual void Run(::benchmark::State& st)
{
this->SetUp(st);
this->BenchmarkCase(st);
this->TearDown(st);
}
private:
::benchmark::internal::Function* func_;
protected:
virtual void BenchmarkCase(::benchmark::State& st) { func_(st); }
};
#define TEMPLATED_BENCHMARK_F(BaseClass, n, ...) \
BENCHMARK_PRIVATE_DECLARE(n) = (::benchmark::internal::RegisterBenchmarkInternal( \
new cudf::FunctionTemplateBenchmark<BaseClass>(#BaseClass "/" #n "<" #__VA_ARGS__ ">", \
n<__VA_ARGS__>)))
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/fixture/benchmark_fixture.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <benchmark/benchmark.h>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/owning_wrapper.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <rmm/mr/device/statistics_resource_adaptor.hpp>
namespace cudf {
namespace {
// memory resource factory helpers
inline auto make_cuda() { return std::make_shared<rmm::mr::cuda_memory_resource>(); }
inline auto make_pool_instance()
{
static rmm::mr::cuda_memory_resource cuda_mr;
static auto pool_mr =
std::make_shared<rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource>>(&cuda_mr);
return pool_mr;
}
} // namespace
/**
* @brief Google Benchmark fixture for libcudf benchmarks
*
* libcudf benchmarks should use a fixture derived from this fixture class to
* ensure that the RAPIDS Memory Manager pool mode is used in benchmarks, which
* eliminates memory allocation / deallocation performance overhead from the
* benchmark.
*
* The SetUp and TearDown methods of this fixture initialize RMM into pool mode
* and finalize it, respectively. These methods are called automatically by
* Google Benchmark
*
* Example:
*
* template <class T>
* class my_benchmark : public cudf::benchmark {
* public:
* using TypeParam = T;
* };
*
* Then:
*
* BENCHMARK_TEMPLATE_DEFINE_F(my_benchmark, my_test_name, int)
* (::benchmark::State& state) {
* for (auto _ : state) {
* // benchmark stuff
* }
* }
*
* BENCHMARK_REGISTER_F(my_benchmark, my_test_name)->Range(128, 512);
*/
class benchmark : public ::benchmark::Fixture {
public:
benchmark() : ::benchmark::Fixture()
{
char const* env_iterations = std::getenv("CUDF_BENCHMARK_ITERATIONS");
if (env_iterations != nullptr) { this->Iterations(std::max(0L, atol(env_iterations))); }
}
void SetUp(::benchmark::State const& state) override
{
mr = make_pool_instance();
rmm::mr::set_current_device_resource(mr.get()); // set default resource to pool
}
void TearDown(::benchmark::State const& state) override
{
// reset default resource to the initial resource
rmm::mr::set_current_device_resource(nullptr);
mr.reset();
}
// eliminate partial override warnings (see benchmark/benchmark.h)
void SetUp(::benchmark::State& st) override { SetUp(const_cast<::benchmark::State const&>(st)); }
void TearDown(::benchmark::State& st) override
{
TearDown(const_cast<::benchmark::State const&>(st));
}
std::shared_ptr<rmm::mr::device_memory_resource> mr;
};
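/**
 * @brief Logs peak device memory usage by installing a statistics adaptor around
 * the current RMM device resource (RAII: the original resource is restored on
 * destruction).
 *
 * A minimal usage sketch (BM_example is a hypothetical benchmark body):
 *
 * void BM_example(benchmark::State& state) {
 *   auto const mem_stats_logger = cudf::memory_stats_logger();
 *   for (auto _ : state) {
 *     // benchmark stuff
 *   }
 *   state.counters["peak_memory_usage"] = mem_stats_logger.peak_memory_usage();
 * }
 */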
class memory_stats_logger {
public:
memory_stats_logger()
: existing_mr(rmm::mr::get_current_device_resource()),
statistics_mr(rmm::mr::make_statistics_adaptor(existing_mr))
{
rmm::mr::set_current_device_resource(&statistics_mr);
}
~memory_stats_logger() { rmm::mr::set_current_device_resource(existing_mr); }
[[nodiscard]] size_t peak_memory_usage() const noexcept
{
return statistics_mr.get_bytes_counter().peak;
}
private:
rmm::mr::device_memory_resource* existing_mr;
rmm::mr::statistics_resource_adaptor<rmm::mr::device_memory_resource> statistics_mr;
};
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/common/generate_nested_types.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "generate_input.hpp"
#include <cudf_test/column_wrapper.hpp>
// This error appears in GCC 11.3 and may be a compiler bug or nvbench bug.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include <nvbench/nvbench.cuh>
#pragma GCC diagnostic pop
#include <random>
inline std::unique_ptr<cudf::table> create_lists_data(nvbench::state& state,
cudf::size_type const num_columns = 1,
cudf::size_type const min_val = 0,
cudf::size_type const max_val = 5)
{
size_t const size_bytes(state.get_int64("size_bytes"));
cudf::size_type const depth{static_cast<cudf::size_type>(state.get_int64("depth"))};
auto const null_frequency{state.get_float64("null_frequency")};
data_profile table_profile;
table_profile.set_distribution_params(
cudf::type_id::LIST, distribution_id::UNIFORM, min_val, max_val);
table_profile.set_list_depth(depth);
table_profile.set_null_probability(null_frequency);
return create_random_table(std::vector<cudf::type_id>(num_columns, cudf::type_id::LIST),
table_size_bytes{size_bytes},
table_profile);
}
inline std::unique_ptr<cudf::table> create_structs_data(nvbench::state& state,
cudf::size_type const n_cols = 1)
{
using Type = int;
using column_wrapper = cudf::test::fixed_width_column_wrapper<Type>;
std::default_random_engine generator;
std::uniform_int_distribution<int> distribution(0, 100);
cudf::size_type const n_rows{static_cast<cudf::size_type>(state.get_int64("NumRows"))};
cudf::size_type const depth{static_cast<cudf::size_type>(state.get_int64("Depth"))};
bool const nulls{static_cast<bool>(state.get_int64("Nulls"))};
// Create columns with values in the range [0,100)
std::vector<column_wrapper> columns;
columns.reserve(n_cols);
std::generate_n(std::back_inserter(columns), n_cols, [&]() {
auto const elements = cudf::detail::make_counting_transform_iterator(
0, [&](auto row) { return distribution(generator); });
if (!nulls) return column_wrapper(elements, elements + n_rows);
auto valids =
cudf::detail::make_counting_transform_iterator(0, [](auto i) { return i % 10 != 0; });
return column_wrapper(elements, elements + n_rows, valids);
});
std::vector<std::unique_ptr<cudf::column>> cols;
std::transform(columns.begin(), columns.end(), std::back_inserter(cols), [](column_wrapper& col) {
return col.release();
});
std::vector<std::unique_ptr<cudf::column>> child_cols = std::move(cols);
// Nest the child columns in a struct, then nest that struct column inside another
// struct column up to the desired depth
for (int i = 0; i < depth; i++) {
std::vector<bool> struct_validity;
std::uniform_int_distribution<int> bool_distribution(0, 100 * (i + 1));
std::generate_n(
std::back_inserter(struct_validity), n_rows, [&]() { return bool_distribution(generator); });
cudf::test::structs_column_wrapper struct_col(std::move(child_cols), struct_validity);
child_cols = std::vector<std::unique_ptr<cudf::column>>{};
child_cols.push_back(struct_col.release());
}
// Create table view
return std::make_unique<cudf::table>(std::move(child_cols));
}
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/common/generate_input.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "generate_input.hpp"
#include "random_distribution_factory.cuh"
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/valid_if.cuh>
#include <cudf/filling.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/table/table.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <thrust/functional.h>
#include <thrust/gather.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random/uniform_int_distribution.h>
#include <thrust/random/uniform_real_distribution.h>
#include <thrust/scan.h>
#include <thrust/tabulate.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <random>
#include <utility>
#include <vector>
/**
 * @brief Deterministic pseudo-random engine (thrust::minstd_rand, a linear congruential engine).
*/
auto deterministic_engine(unsigned seed) { return thrust::minstd_rand{seed}; }
/**
* Computes the mean value for a distribution of given type and value bounds.
*/
template <typename T>
T get_distribution_mean(distribution_params<T> const& dist)
{
switch (dist.id) {
case distribution_id::NORMAL:
case distribution_id::UNIFORM: return (dist.lower_bound / 2.) + (dist.upper_bound / 2.);
case distribution_id::GEOMETRIC: {
auto const range_size = dist.lower_bound < dist.upper_bound
? dist.upper_bound - dist.lower_bound
: dist.lower_bound - dist.upper_bound;
auto const p = geometric_dist_p(range_size);
if (dist.lower_bound < dist.upper_bound)
return dist.lower_bound + (1. / p);
else
return dist.lower_bound - (1. / p);
}
default: CUDF_FAIL("Unsupported distribution type.");
}
}
/**
* @brief Computes the average element size in a column, given the data profile.
*
* Random distribution parameters like average string length and maximum list nesting level affect
* the element size of non-fixed-width columns. For lists and structs, `avg_element_size` is called
* recursively to determine the size of nested columns.
*/
size_t avg_element_size(data_profile const& profile, cudf::data_type dtype);
// Utilities to determine the mean size of an element, given the data profile
template <typename T, CUDF_ENABLE_IF(cudf::is_fixed_width<T>())>
size_t non_fixed_width_size(data_profile const& profile)
{
CUDF_FAIL("Should not be called, use `size_of` for this type instead");
}
template <typename T, CUDF_ENABLE_IF(!cudf::is_fixed_width<T>())>
size_t non_fixed_width_size(data_profile const& profile)
{
CUDF_FAIL("not implemented!");
}
template <>
size_t non_fixed_width_size<cudf::string_view>(data_profile const& profile)
{
auto const dist = profile.get_distribution_params<cudf::string_view>().length_params;
return get_distribution_mean(dist);
}
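/**
 * @brief Sum of the first @p n terms of a geometric series with ratio @p p:
 * 1 + p + p^2 + ... + p^(n-1) = (1 - p^n) / (1 - p). The p == 1 case is handled
 * separately since the closed form would divide by zero.
 */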
double geometric_sum(size_t n, double p)
{
if (p == 1) { return n; }
return (1 - std::pow(p, n)) / (1 - p);
}
template <>
size_t non_fixed_width_size<cudf::list_view>(data_profile const& profile)
{
auto const dist_params = profile.get_distribution_params<cudf::list_view>();
auto const single_level_mean = get_distribution_mean(dist_params.length_params);
auto const element_size = avg_element_size(profile, cudf::data_type{dist_params.element_type});
auto const element_count = std::pow(single_level_mean, dist_params.max_depth);
// Each nesting level includes offsets, this is the sum of all levels
// Also include an additional offset per level for the size of the last element
auto const total_offset_count =
geometric_sum(dist_params.max_depth, single_level_mean) + dist_params.max_depth;
return sizeof(cudf::size_type) * total_offset_count + element_size * element_count;
}
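// Worked example (illustrative): max_depth = 2, mean list length = 4, INT32 leaves:
//   element_count      = 4^2 = 16
//   total_offset_count = geometric_sum(2, 4) + 2 = (1 + 4) + 2 = 7
//   average row size  ~= 7 * 4 bytes of offsets + 16 * 4 bytes of elements = 92 bytes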
template <>
size_t non_fixed_width_size<cudf::struct_view>(data_profile const& profile)
{
auto const dist_params = profile.get_distribution_params<cudf::struct_view>();
return std::accumulate(dist_params.leaf_types.cbegin(),
dist_params.leaf_types.cend(),
0ul,
[&](auto& sum, auto type_id) {
return sum + avg_element_size(profile, cudf::data_type{type_id});
});
}
struct non_fixed_width_size_fn {
template <typename T>
size_t operator()(data_profile const& profile)
{
return non_fixed_width_size<T>(profile);
}
};
size_t avg_element_size(data_profile const& profile, cudf::data_type dtype)
{
if (cudf::is_fixed_width(dtype)) { return cudf::size_of(dtype); }
return cudf::type_dispatcher(dtype, non_fixed_width_size_fn{}, profile);
}
/**
* @brief bool generator with given probability [0.0 - 1.0] of returning true.
*/
struct bool_generator {
thrust::minstd_rand engine;
thrust::uniform_real_distribution<float> dist;
double probability_true;
bool_generator(thrust::minstd_rand engine, double probability_true)
: engine(engine), dist{0, 1}, probability_true{probability_true}
{
}
bool_generator(unsigned seed, double probability_true)
: engine(seed), dist{0, 1}, probability_true{probability_true}
{
}
__device__ bool operator()(size_t n)
{
engine.discard(n);
return dist(engine) < probability_true;
}
};
/**
* @brief Functor that computes a random column element with the given data profile.
*
* The implementation is SFINAEd for different type groups. Currently only used for fixed-width
* types.
*/
template <typename T, typename Enable = void>
struct random_value_fn;
/**
 * @brief Creates a random timestamp/duration value
*/
template <typename T>
struct random_value_fn<T, std::enable_if_t<cudf::is_chrono<T>()>> {
distribution_fn<int64_t> seconds_gen;
distribution_fn<int64_t> nanoseconds_gen;
random_value_fn(distribution_params<T> params)
{
using cuda::std::chrono::duration_cast;
std::pair<cudf::duration_s, cudf::duration_s> const range_s = {
duration_cast<cuda::std::chrono::seconds>(typename T::duration{params.lower_bound}),
duration_cast<cuda::std::chrono::seconds>(typename T::duration{params.upper_bound})};
if (range_s.first != range_s.second) {
seconds_gen =
make_distribution<int64_t>(params.id, range_s.first.count(), range_s.second.count());
nanoseconds_gen = make_distribution<int64_t>(distribution_id::UNIFORM, 0l, 1000000000l);
} else {
// Don't need a random seconds generator for sub-second intervals
seconds_gen = [range_s](thrust::minstd_rand&, size_t size) {
rmm::device_uvector<int64_t> result(size, cudf::get_default_stream());
thrust::fill(thrust::device, result.begin(), result.end(), range_s.second.count());
return result;
};
std::pair<cudf::duration_ns, cudf::duration_ns> const range_ns = {
duration_cast<cudf::duration_ns>(typename T::duration{params.lower_bound}),
duration_cast<cudf::duration_ns>(typename T::duration{params.upper_bound})};
nanoseconds_gen = make_distribution<int64_t>(distribution_id::UNIFORM,
std::min(range_ns.first.count(), 0l),
std::max(range_ns.second.count(), 0l));
}
}
rmm::device_uvector<T> operator()(thrust::minstd_rand& engine, unsigned size)
{
auto const sec = seconds_gen(engine, size);
auto const ns = nanoseconds_gen(engine, size);
rmm::device_uvector<T> result(size, cudf::get_default_stream());
thrust::transform(
thrust::device,
sec.begin(),
sec.end(),
ns.begin(),
result.begin(),
[] __device__(int64_t sec_value, int64_t nanoseconds_value) {
auto const timestamp_ns =
cudf::duration_s{sec_value} + cudf::duration_ns{nanoseconds_value};
// Return value in the type's precision
return T(cuda::std::chrono::duration_cast<typename T::duration>(timestamp_ns));
});
return result;
}
};
/**
 * @brief Creates a random fixed_point value.
*/
template <typename T>
struct random_value_fn<T, std::enable_if_t<cudf::is_fixed_point<T>()>> {
using DeviceType = cudf::device_storage_type_t<T>;
DeviceType const lower_bound;
DeviceType const upper_bound;
distribution_fn<DeviceType> dist;
std::optional<numeric::scale_type> scale;
random_value_fn(distribution_params<DeviceType> const& desc)
: lower_bound{desc.lower_bound},
upper_bound{desc.upper_bound},
dist{make_distribution<DeviceType>(desc.id, desc.lower_bound, desc.upper_bound)}
{
}
[[nodiscard]] numeric::scale_type get_scale(thrust::minstd_rand& engine)
{
if (not scale.has_value()) {
constexpr int max_scale = std::numeric_limits<DeviceType>::digits10;
std::uniform_int_distribution<int> scale_dist{-max_scale, max_scale};
std::mt19937 engine_scale(engine());
scale = numeric::scale_type{scale_dist(engine_scale)};
}
return scale.value_or(numeric::scale_type{0});
}
rmm::device_uvector<DeviceType> operator()(thrust::minstd_rand& engine, unsigned size)
{
return dist(engine, size);
}
};
/**
 * @brief Creates a random numeric value with the given distribution.
*/
template <typename T>
struct random_value_fn<T, std::enable_if_t<!std::is_same_v<T, bool> && cudf::is_numeric<T>()>> {
T const lower_bound;
T const upper_bound;
distribution_fn<T> dist;
random_value_fn(distribution_params<T> const& desc)
: lower_bound{desc.lower_bound},
upper_bound{desc.upper_bound},
dist{make_distribution<T>(desc.id, desc.lower_bound, desc.upper_bound)}
{
}
auto operator()(thrust::minstd_rand& engine, unsigned size) { return dist(engine, size); }
};
/**
 * @brief Creates a boolean value with the given probability of returning `true`.
*/
template <typename T>
struct random_value_fn<T, typename std::enable_if_t<std::is_same_v<T, bool>>> {
// Bernoulli distribution
distribution_fn<bool> dist;
random_value_fn(distribution_params<bool> const& desc)
: dist{[valid_prob = desc.probability_true](thrust::minstd_rand& engine,
size_t size) -> rmm::device_uvector<bool> {
rmm::device_uvector<bool> result(size, cudf::get_default_stream());
thrust::tabulate(
thrust::device, result.begin(), result.end(), bool_generator(engine, valid_prob));
return result;
}}
{
}
auto operator()(thrust::minstd_rand& engine, unsigned size) { return dist(engine, size); }
};
auto create_run_length_dist(cudf::size_type avg_run_len)
{
// Distribution with low probability of generating 0-1 even with a low `avg_run_len` value
static constexpr float alpha = 4.f;
return std::gamma_distribution<float>{alpha, avg_run_len / alpha};
}
/**
* @brief Generate indices within range [0 , cardinality) repeating with average run length
* `avg_run_len`
*
* @param avg_run_len Average run length of the generated indices
* @param cardinality Number of unique values in the output vector
* @param num_rows Number of indices to generate
* @param engine Random engine
* @return Generated indices of type `cudf::size_type`
*/
rmm::device_uvector<cudf::size_type> sample_indices_with_run_length(cudf::size_type avg_run_len,
cudf::size_type cardinality,
cudf::size_type num_rows,
thrust::minstd_rand& engine)
{
auto sample_dist = random_value_fn<cudf::size_type>{
distribution_params<cudf::size_type>{distribution_id::UNIFORM, 0, cardinality - 1}};
if (avg_run_len > 1) {
auto avglen_dist =
random_value_fn<int>{distribution_params<int>{distribution_id::UNIFORM, 1, 2 * avg_run_len}};
auto const approx_run_len = num_rows / avg_run_len + 1;
auto run_lens = avglen_dist(engine, approx_run_len);
thrust::inclusive_scan(
thrust::device, run_lens.begin(), run_lens.end(), run_lens.begin(), std::plus<int>{});
auto const samples_indices = sample_dist(engine, approx_run_len + 1);
// This is gather.
auto avg_repeated_sample_indices_iterator = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
[rb = run_lens.begin(),
re = run_lens.end(),
samples_indices = samples_indices.begin()] __device__(cudf::size_type i) {
auto sample_idx = thrust::upper_bound(thrust::seq, rb, re, i) - rb;
return samples_indices[sample_idx];
});
rmm::device_uvector<cudf::size_type> repeated_sample_indices(num_rows,
cudf::get_default_stream());
thrust::copy(thrust::device,
avg_repeated_sample_indices_iterator,
avg_repeated_sample_indices_iterator + num_rows,
repeated_sample_indices.begin());
return repeated_sample_indices;
} else {
// generate n samples.
return sample_dist(engine, num_rows);
}
}
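// Illustrative trace of the avg_run_len > 1 branch: run_lens = {2, 3} scans to
// {2, 5}, so rows 0-1 gather samples_indices[0] and rows 2-4 gather
// samples_indices[1], producing runs of the sampled values.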
/**
* @brief Creates a column with random content of type @ref T.
*
* @param profile Parameters for the random generator
* @param engine Pseudo-random engine
* @param num_rows Size of the output column
*
* @tparam T Data type of the output column
* @return Column filled with random data
*/
template <typename T>
std::unique_ptr<cudf::column> create_random_column(data_profile const& profile,
thrust::minstd_rand& engine,
cudf::size_type num_rows)
{
// Bernoulli distribution
auto valid_dist = random_value_fn<bool>(
distribution_params<bool>{1. - profile.get_null_probability().value_or(0)});
auto value_dist = random_value_fn<T>{profile.get_distribution_params<T>()};
using DeviceType = cudf::device_storage_type_t<T>;
cudf::data_type const dtype = [&]() {
if constexpr (cudf::is_fixed_point<T>())
return cudf::data_type{cudf::type_to_id<T>(), value_dist.get_scale(engine)};
else
return cudf::data_type{cudf::type_to_id<T>()};
}();
// Distribution for picking elements from the array of samples
auto const avg_run_len = profile.get_avg_run_length();
rmm::device_uvector<DeviceType> data(0, cudf::get_default_stream());
rmm::device_uvector<bool> null_mask(0, cudf::get_default_stream());
if (profile.get_cardinality() == 0 and avg_run_len == 1) {
data = value_dist(engine, num_rows);
null_mask = valid_dist(engine, num_rows);
} else {
auto const cardinality = [profile_cardinality = profile.get_cardinality(), num_rows] {
return (profile_cardinality == 0 or profile_cardinality > num_rows) ? num_rows
: profile_cardinality;
}();
rmm::device_uvector<bool> samples_null_mask = valid_dist(engine, cardinality);
rmm::device_uvector<DeviceType> samples = value_dist(engine, cardinality);
// generate n samples and gather.
auto const sample_indices =
sample_indices_with_run_length(avg_run_len, cardinality, num_rows, engine);
data = rmm::device_uvector<DeviceType>(num_rows, cudf::get_default_stream());
null_mask = rmm::device_uvector<bool>(num_rows, cudf::get_default_stream());
thrust::gather(
thrust::device, sample_indices.begin(), sample_indices.end(), samples.begin(), data.begin());
thrust::gather(thrust::device,
sample_indices.begin(),
sample_indices.end(),
samples_null_mask.begin(),
null_mask.begin());
}
auto [result_bitmask, null_count] =
cudf::detail::valid_if(null_mask.begin(),
null_mask.end(),
thrust::identity<bool>{},
cudf::get_default_stream(),
rmm::mr::get_current_device_resource());
return std::make_unique<cudf::column>(
dtype,
num_rows,
data.release(),
profile.get_null_probability().has_value() ? std::move(result_bitmask) : rmm::device_buffer{},
profile.get_null_probability().has_value() ? null_count : 0);
}
struct valid_or_zero {
template <typename T>
__device__ T operator()(thrust::tuple<T, bool> len_valid) const
{
return thrust::get<1>(len_valid) ? thrust::get<0>(len_valid) : T{0};
}
};
struct string_generator {
char* chars;
thrust::minstd_rand engine;
thrust::uniform_int_distribution<unsigned char> char_dist;
string_generator(char* c, thrust::minstd_rand& engine)
: chars(c), engine(engine), char_dist(32, 137)
// ~90% ASCII, ~10% UTF-8.
// ~80% not-space, ~20% space.
// range 32-126 is ASCII; 127-137 will be multi-byte UTF-8
{
}
__device__ void operator()(thrust::tuple<cudf::size_type, cudf::size_type> str_begin_end)
{
auto begin = thrust::get<0>(str_begin_end);
auto end = thrust::get<1>(str_begin_end);
engine.discard(begin);
for (auto i = begin; i < end; ++i) {
auto ch = char_dist(engine);
if (i == end - 1 && ch >= '\x7F') ch = ' '; // last element ASCII only.
if (ch >= '\x7F') // x7F is at the top edge of ASCII
chars[i++] = '\xC4'; // these characters are assigned two bytes
chars[i] = static_cast<char>(ch + (ch >= '\x7F'));
}
}
};
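// Worked example (illustrative): ch = 130 (>= 0x7F) is emitted as the two bytes
// 0xC4 and 0x83 (ch + 1), which is the valid UTF-8 encoding of U+0103.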
/**
 * @brief Creates a UTF-8 string column whose string lengths follow the profile's
 * length distribution.
*/
std::unique_ptr<cudf::column> create_random_utf8_string_column(data_profile const& profile,
thrust::minstd_rand& engine,
cudf::size_type num_rows)
{
auto len_dist =
random_value_fn<uint32_t>{profile.get_distribution_params<cudf::string_view>().length_params};
auto valid_dist = random_value_fn<bool>(
distribution_params<bool>{1. - profile.get_null_probability().value_or(0)});
auto lengths = len_dist(engine, num_rows + 1);
auto null_mask = valid_dist(engine, num_rows + 1);
thrust::transform_if(
thrust::device,
lengths.begin(),
lengths.end(),
null_mask.begin(),
lengths.begin(),
[] __device__(auto) { return 0; },
thrust::logical_not<bool>{});
auto valid_lengths = thrust::make_transform_iterator(
thrust::make_zip_iterator(thrust::make_tuple(lengths.begin(), null_mask.begin())),
valid_or_zero{});
rmm::device_uvector<cudf::size_type> offsets(num_rows + 1, cudf::get_default_stream());
thrust::exclusive_scan(
thrust::device, valid_lengths, valid_lengths + lengths.size(), offsets.begin());
// offsets are ready.
auto chars_length = *thrust::device_pointer_cast(offsets.end() - 1);
rmm::device_uvector<char> chars(chars_length, cudf::get_default_stream());
thrust::for_each_n(thrust::device,
thrust::make_zip_iterator(offsets.begin(), offsets.begin() + 1),
num_rows,
string_generator{chars.data(), engine});
auto [result_bitmask, null_count] =
cudf::detail::valid_if(null_mask.begin(),
null_mask.end() - 1,
thrust::identity<bool>{},
cudf::get_default_stream(),
rmm::mr::get_current_device_resource());
return cudf::make_strings_column(
num_rows,
std::move(offsets),
std::move(chars),
profile.get_null_probability().has_value() ? std::move(result_bitmask) : rmm::device_buffer{},
null_count);
}
/**
* @brief Creates a string column with random content.
*
* @param profile Parameters for the random generator
* @param engine Pseudo-random engine
* @param num_rows Size of the output column
*
* @return Column filled with random strings
*/
template <>
std::unique_ptr<cudf::column> create_random_column<cudf::string_view>(data_profile const& profile,
thrust::minstd_rand& engine,
cudf::size_type num_rows)
{
auto const cardinality = std::min(profile.get_cardinality(), num_rows);
auto const avg_run_len = profile.get_avg_run_length();
auto sample_strings =
create_random_utf8_string_column(profile, engine, cardinality == 0 ? num_rows : cardinality);
if (cardinality == 0) { return sample_strings; }
auto sample_indices = sample_indices_with_run_length(avg_run_len, cardinality, num_rows, engine);
auto str_table = cudf::detail::gather(cudf::table_view{{sample_strings->view()}},
sample_indices,
cudf::out_of_bounds_policy::DONT_CHECK,
cudf::detail::negative_index_policy::NOT_ALLOWED,
cudf::get_default_stream(),
rmm::mr::get_current_device_resource());
return std::move(str_table->release()[0]);
}
template <>
std::unique_ptr<cudf::column> create_random_column<cudf::dictionary32>(data_profile const& profile,
thrust::minstd_rand& engine,
cudf::size_type num_rows)
{
CUDF_FAIL("not implemented yet");
}
/**
* @brief Functor to dispatch create_random_column calls.
*/
struct create_rand_col_fn {
public:
template <typename T>
std::unique_ptr<cudf::column> operator()(data_profile const& profile,
thrust::minstd_rand& engine,
cudf::size_type num_rows)
{
return create_random_column<T>(profile, engine, num_rows);
}
};
/**
* @brief Calculates the number of direct parents needed to generate a struct column hierarchy with
* lowest maximum number of children in any nested column.
*
* Used to generate an "evenly distributed" struct column hierarchy with the given number of leaf
* columns and nesting levels. The column tree is considered evenly distributed if all columns have
* nearly the same number of child columns (difference not larger than one).
*/
int num_direct_parents(int num_lvls, int num_leaf_columns)
{
// Estimated average number of children in the hierarchy.
auto const num_children_avg = std::pow(num_leaf_columns, 1. / num_lvls);
// Minimum number of children columns for any column in the hierarchy
int const num_children_min = std::floor(num_children_avg);
// Maximum number of children columns for any column in the hierarchy
int const num_children_max = num_children_min + 1;
// Minimum number of columns needed so that their number of children does not exceed the maximum
int const min_for_current_nesting = std::ceil((double)num_leaf_columns / num_children_max);
// Minimum number of columns needed so that columns at the higher levels have at least the minimum
// number of children
int const min_for_upper_nesting = std::pow(num_children_min, num_lvls - 1);
// Both conditions need to be satisfied
return std::max(min_for_current_nesting, min_for_upper_nesting);
}
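// Worked example (illustrative): num_lvls = 2, num_leaf_columns = 10:
//   num_children_avg = sqrt(10) ~= 3.16 -> num_children_min = 3, num_children_max = 4
//   min_for_current_nesting = ceil(10 / 4) = 3, min_for_upper_nesting = 3^1 = 3
//   -> 3 direct parents, each adopting 3-4 of the 10 leaf columns.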
template <>
std::unique_ptr<cudf::column> create_random_column<cudf::struct_view>(data_profile const& profile,
thrust::minstd_rand& engine,
cudf::size_type num_rows)
{
auto const dist_params = profile.get_distribution_params<cudf::struct_view>();
// Generate leaf columns
std::vector<std::unique_ptr<cudf::column>> children;
children.reserve(dist_params.leaf_types.size());
std::transform(dist_params.leaf_types.cbegin(),
dist_params.leaf_types.cend(),
std::back_inserter(children),
[&](auto& type_id) {
return cudf::type_dispatcher(
cudf::data_type(type_id), create_rand_col_fn{}, profile, engine, num_rows);
});
auto valid_dist = random_value_fn<bool>(
distribution_params<bool>{1. - profile.get_null_probability().value_or(0)});
// Generate the column bottom-up
for (int lvl = dist_params.max_depth; lvl > 0; --lvl) {
// Generating the next level
std::vector<std::unique_ptr<cudf::column>> parents;
parents.resize(num_direct_parents(lvl, children.size()));
auto current_child = children.begin();
for (auto current_parent = parents.begin(); current_parent != parents.end(); ++current_parent) {
auto [null_mask, null_count] = [&]() {
if (profile.get_null_probability().has_value()) {
auto valids = valid_dist(engine, num_rows);
return cudf::detail::valid_if(valids.begin(),
valids.end(),
thrust::identity<bool>{},
cudf::get_default_stream(),
rmm::mr::get_current_device_resource());
}
return std::pair<rmm::device_buffer, cudf::size_type>{};
}();
// Adopt remaining children as evenly as possible
auto const num_to_adopt = cudf::util::div_rounding_up_unsafe(
std::distance(current_child, children.end()), std::distance(current_parent, parents.end()));
CUDF_EXPECTS(num_to_adopt > 0, "No children columns left to adopt");
std::vector<std::unique_ptr<cudf::column>> children_to_adopt;
children_to_adopt.insert(children_to_adopt.end(),
std::make_move_iterator(current_child),
std::make_move_iterator(current_child + num_to_adopt));
current_child += children_to_adopt.size();
*current_parent = cudf::make_structs_column(
num_rows, std::move(children_to_adopt), null_count, std::move(null_mask));
}
if (lvl == 1) {
CUDF_EXPECTS(parents.size() == 1, "There should be one top-level column");
return std::move(parents.front());
}
children = std::move(parents);
}
CUDF_FAIL("Reached unreachable code in struct column creation");
}
template <typename T>
struct clamp_down : public thrust::unary_function<T, T> {
T max;
clamp_down(T max) : max(max) {}
__host__ __device__ T operator()(T x) const { return min(x, max); }
};
/**
* @brief Creates a list column with random content.
*
* The data profile determines the list length distribution, number of nested level, and the data
* type of the bottom level.
*
* @param profile Parameters for the random generator
* @param engine Pseudo-random engine
* @param num_rows Size of the output column
*
* @return Column filled with random lists
*/
template <>
std::unique_ptr<cudf::column> create_random_column<cudf::list_view>(data_profile const& profile,
thrust::minstd_rand& engine,
cudf::size_type num_rows)
{
auto const dist_params = profile.get_distribution_params<cudf::list_view>();
auto const single_level_mean = get_distribution_mean(dist_params.length_params);
auto const num_elements = num_rows * pow(single_level_mean, dist_params.max_depth);
auto leaf_column = cudf::type_dispatcher(
cudf::data_type(dist_params.element_type), create_rand_col_fn{}, profile, engine, num_elements);
auto len_dist =
random_value_fn<uint32_t>{profile.get_distribution_params<cudf::list_view>().length_params};
auto valid_dist = random_value_fn<bool>(
distribution_params<bool>{1. - profile.get_null_probability().value_or(0)});
// Generate the list column bottom-up
auto list_column = std::move(leaf_column);
for (int lvl = 0; lvl < dist_params.max_depth; ++lvl) {
// Generating the next level - offsets point into the current list column
auto current_child_column = std::move(list_column);
cudf::size_type const num_rows = current_child_column->size() / single_level_mean;
auto offsets = len_dist(engine, num_rows + 1);
auto valids = valid_dist(engine, num_rows);
    // clamp the lengths so the scanned offsets never exceed current_child_column->size()
auto output_offsets = thrust::make_transform_output_iterator(
offsets.begin(), clamp_down{current_child_column->size()});
thrust::exclusive_scan(thrust::device, offsets.begin(), offsets.end(), output_offsets);
thrust::device_pointer_cast(offsets.end())[-1] =
current_child_column->size(); // Always include all elements
auto offsets_column = std::make_unique<cudf::column>(cudf::data_type{cudf::type_id::INT32},
num_rows + 1,
offsets.release(),
rmm::device_buffer{},
0);
auto [null_mask, null_count] = cudf::detail::valid_if(valids.begin(),
valids.end(),
thrust::identity<bool>{},
cudf::get_default_stream(),
rmm::mr::get_current_device_resource());
list_column = cudf::make_lists_column(
num_rows,
std::move(offsets_column),
std::move(current_child_column),
profile.get_null_probability().has_value() ? null_count : 0,
profile.get_null_probability().has_value() ? std::move(null_mask) : rmm::device_buffer{});
}
return list_column; // return the top-level column
}
using columns_vector = std::vector<std::unique_ptr<cudf::column>>;
/**
* @brief Creates a vector of columns with random content.
*
* @param profile Parameters for the random generator
* @param dtype_ids vector of data type ids, one for each output column
* @param engine Pseudo-random engine
* @param num_rows Size of the output columns
*
 * @return Vector of columns filled with random data
*/
columns_vector create_random_columns(data_profile const& profile,
std::vector<cudf::type_id> dtype_ids,
thrust::minstd_rand engine,
cudf::size_type num_rows)
{
columns_vector output_columns;
std::transform(
dtype_ids.begin(), dtype_ids.end(), std::back_inserter(output_columns), [&](auto tid) {
engine.discard(num_rows);
return cudf::type_dispatcher(
cudf::data_type(tid), create_rand_col_fn{}, profile, engine, num_rows);
});
return output_columns;
}
/**
 * @brief Repeats the input data types cyclically to fill a vector of @ref num_cols
* elements.
*/
std::vector<cudf::type_id> cycle_dtypes(std::vector<cudf::type_id> const& dtype_ids,
cudf::size_type num_cols)
{
if (dtype_ids.size() == static_cast<std::size_t>(num_cols)) { return dtype_ids; }
std::vector<cudf::type_id> out_dtypes;
out_dtypes.reserve(num_cols);
for (cudf::size_type col = 0; col < num_cols; ++col)
out_dtypes.push_back(dtype_ids[col % dtype_ids.size()]);
return out_dtypes;
}
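// A minimal sketch of the behavior (illustrative values, not used by the benchmarks):
// cycling two types into five columns repeats the inputs in order.
//   auto const dtypes = cycle_dtypes({cudf::type_id::INT32, cudf::type_id::STRING}, 5);
//   // dtypes == {INT32, STRING, INT32, STRING, INT32}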
/**
* @brief Repeat the given two data types with a given ratio of a:b.
*
* The first dtype will have 'first_num' columns and the second will have 'num_cols - first_num'
* columns.
*/
std::vector<cudf::type_id> mix_dtypes(std::pair<cudf::type_id, cudf::type_id> const& dtype_ids,
cudf::size_type num_cols,
int first_num)
{
std::vector<cudf::type_id> out_dtypes;
out_dtypes.reserve(num_cols);
for (cudf::size_type col = 0; col < first_num; ++col)
out_dtypes.push_back(dtype_ids.first);
for (cudf::size_type col = first_num; col < num_cols; ++col)
out_dtypes.push_back(dtype_ids.second);
return out_dtypes;
}
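// A minimal sketch (illustrative values): two INT32 columns followed by three STRING columns.
//   auto const dtypes = mix_dtypes({cudf::type_id::INT32, cudf::type_id::STRING}, 5, 2);
//   // dtypes == {INT32, INT32, STRING, STRING, STRING}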
std::unique_ptr<cudf::table> create_random_table(std::vector<cudf::type_id> const& dtype_ids,
table_size_bytes table_bytes,
data_profile const& profile,
unsigned seed)
{
size_t const avg_row_bytes =
std::accumulate(dtype_ids.begin(), dtype_ids.end(), 0ul, [&](size_t sum, auto tid) {
return sum + avg_element_size(profile, cudf::data_type(tid));
});
cudf::size_type const num_rows = table_bytes.size / avg_row_bytes;
return create_random_table(dtype_ids, row_count{num_rows}, profile, seed);
}
std::unique_ptr<cudf::table> create_random_table(std::vector<cudf::type_id> const& dtype_ids,
row_count num_rows,
data_profile const& profile,
unsigned seed)
{
auto seed_engine = deterministic_engine(seed);
thrust::uniform_int_distribution<unsigned> seed_dist;
columns_vector output_columns;
std::transform(
dtype_ids.begin(), dtype_ids.end(), std::back_inserter(output_columns), [&](auto tid) mutable {
return create_random_column(tid, num_rows, profile, seed_dist(seed_engine));
});
return std::make_unique<cudf::table>(std::move(output_columns));
}
std::unique_ptr<cudf::column> create_random_column(cudf::type_id dtype_id,
row_count num_rows,
data_profile const& profile,
unsigned seed)
{
auto engine = deterministic_engine(seed);
return cudf::type_dispatcher(
cudf::data_type(dtype_id), create_rand_col_fn{}, profile, engine, num_rows.count);
}
std::unique_ptr<cudf::table> create_sequence_table(std::vector<cudf::type_id> const& dtype_ids,
row_count num_rows,
std::optional<double> null_probability,
unsigned seed)
{
auto seed_engine = deterministic_engine(seed);
thrust::uniform_int_distribution<unsigned> seed_dist;
auto columns = std::vector<std::unique_ptr<cudf::column>>(dtype_ids.size());
std::transform(dtype_ids.begin(), dtype_ids.end(), columns.begin(), [&](auto dtype) mutable {
auto init = cudf::make_default_constructed_scalar(cudf::data_type{dtype});
auto col = cudf::sequence(num_rows.count, *init);
auto [mask, count] =
create_random_null_mask(num_rows.count, null_probability, seed_dist(seed_engine));
col->set_null_mask(std::move(mask), count);
return col;
});
return std::make_unique<cudf::table>(std::move(columns));
}
std::pair<rmm::device_buffer, cudf::size_type> create_random_null_mask(
cudf::size_type size, std::optional<double> null_probability, unsigned seed)
{
if (not null_probability.has_value()) { return {rmm::device_buffer{}, 0}; }
CUDF_EXPECTS(*null_probability >= 0.0 and *null_probability <= 1.0,
"Null probability must be within the range [0.0, 1.0]");
if (*null_probability == 0.0f) {
return {cudf::create_null_mask(size, cudf::mask_state::ALL_VALID), 0};
} else if (*null_probability == 1.0) {
return {cudf::create_null_mask(size, cudf::mask_state::ALL_NULL), size};
} else {
return cudf::detail::valid_if(thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(size),
bool_generator{seed, 1.0 - *null_probability},
cudf::get_default_stream(),
rmm::mr::get_current_device_resource());
}
}
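// A minimal usage sketch (values assumed for illustration): request a mask where roughly a
// quarter of 1000 rows are null; passing std::nullopt instead yields an empty buffer and a zero
// null count.
//   auto [mask, null_count] = create_random_null_mask(1000, 0.25, /*seed=*/42);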
std::vector<cudf::type_id> get_type_or_group(int32_t id)
{
// identity transformation when passing a concrete type_id
if (id < static_cast<int32_t>(cudf::type_id::NUM_TYPE_IDS))
return {static_cast<cudf::type_id>(id)};
  // if the value is larger than type_id::NUM_TYPE_IDS, it's a group id
type_group_id const group_id = static_cast<type_group_id>(id);
using trait_fn = bool (*)(cudf::data_type);
trait_fn is_integral = [](cudf::data_type type) {
return cudf::is_numeric(type) && !cudf::is_floating_point(type);
};
trait_fn is_integral_signed = [](cudf::data_type type) {
return cudf::is_numeric(type) && !cudf::is_floating_point(type) && !cudf::is_unsigned(type);
};
auto fn = [&]() -> trait_fn {
switch (group_id) {
case type_group_id::FLOATING_POINT: return cudf::is_floating_point;
case type_group_id::INTEGRAL: return is_integral;
case type_group_id::INTEGRAL_SIGNED: return is_integral_signed;
case type_group_id::NUMERIC: return cudf::is_numeric;
case type_group_id::TIMESTAMP: return cudf::is_timestamp;
case type_group_id::DURATION: return cudf::is_duration;
case type_group_id::FIXED_POINT: return cudf::is_fixed_point;
case type_group_id::COMPOUND: return cudf::is_compound;
case type_group_id::NESTED: return cudf::is_nested;
default: CUDF_FAIL("Invalid data type group");
}
}();
std::vector<cudf::type_id> types;
for (int type_int = 0; type_int < static_cast<int32_t>(cudf::type_id::NUM_TYPE_IDS); ++type_int) {
auto const type = static_cast<cudf::type_id>(type_int);
if (type != cudf::type_id::EMPTY && fn(cudf::data_type(type))) types.push_back(type);
}
return types;
}
std::vector<cudf::type_id> get_type_or_group(std::vector<int32_t> const& ids)
{
std::vector<cudf::type_id> all_type_ids;
for (auto& id : ids) {
auto const type_ids = get_type_or_group(id);
all_type_ids.insert(std::end(all_type_ids), std::cbegin(type_ids), std::cend(type_ids));
}
return all_type_ids;
}
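// A minimal sketch (illustrative): a concrete type_id maps to itself, while a group id expands
// to every type in the group.
//   auto const just_int32 = get_type_or_group(static_cast<int32_t>(cudf::type_id::INT32));
//   auto const integrals  = get_type_or_group(static_cast<int32_t>(type_group_id::INTEGRAL));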
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/common/random_distribution_factory.cuh
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "generate_input.hpp"
#include <cudf/utilities/default_stream.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/random/normal_distribution.h>
#include <thrust/random/uniform_int_distribution.h>
#include <thrust/tabulate.h>
#include <algorithm>
#include <memory>
#include <type_traits>
/**
 * @brief Real type whose mantissa holds at least as many bits as the integral type.
 *
 * Integral types narrower than the 23-bit float mantissa map to float; wider integral types map
 * to double, so the full range of integer bits can be generated.
* @tparam T integral type
*/
template <typename T>
using integral_to_realType =
std::conditional_t<cuda::std::is_floating_point_v<T>,
T,
std::conditional_t<sizeof(T) * 8 <= 23, float, double>>;
/**
 * @brief Generates a normal distribution between lower_bound and upper_bound.
*/
template <typename T>
auto make_normal_dist(T lower_bound, T upper_bound)
{
using realT = integral_to_realType<T>;
T const mean = lower_bound + (upper_bound - lower_bound) / 2;
T const stddev = (upper_bound - lower_bound) / 6;
return thrust::random::normal_distribution<realT>(mean, stddev);
}
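// A minimal host-side sketch (values assumed for illustration): make_normal_dist(0, 60) yields
// mean 30 and standard deviation 10, so ~99.7% of samples fall within the requested range.
//   thrust::minstd_rand engine{42};
//   auto dist = make_normal_dist(0, 60);
//   auto sample = dist(engine);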
template <typename T, std::enable_if_t<cuda::std::is_integral_v<T>, T>* = nullptr>
auto make_uniform_dist(T range_start, T range_end)
{
return thrust::uniform_int_distribution<T>(range_start, range_end);
}
template <typename T, std::enable_if_t<cudf::is_floating_point<T>()>* = nullptr>
auto make_uniform_dist(T range_start, T range_end)
{
return thrust::uniform_real_distribution<T>(range_start, range_end);
}
template <typename T>
double geometric_dist_p(T range_size)
{
constexpr double percentage_in_range = 0.99;
double const p = 1 - exp(log(1 - percentage_in_range) / range_size);
return p ? p : std::numeric_limits<double>::epsilon();
}
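// Worked example (illustrative): for range_size = 100, p = 1 - exp(log(0.01) / 100) ~= 0.045,
// which places ~99% of the geometric distribution's mass inside the first 100 values.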
/**
* @brief Generates a geometric distribution between lower_bound and upper_bound.
 * This distribution is an approximation generated using a normal distribution.
*
* @tparam T Result type of the number to produce.
*/
template <typename T>
class geometric_distribution : public thrust::random::normal_distribution<integral_to_realType<T>> {
using realType = integral_to_realType<T>;
using super_t = thrust::random::normal_distribution<realType>;
T _lower_bound;
T _upper_bound;
public:
using result_type = T;
__host__ __device__ explicit geometric_distribution(T lower_bound, T upper_bound)
: super_t(0, std::labs(upper_bound - lower_bound) / 4.0),
_lower_bound(lower_bound),
_upper_bound(upper_bound)
{
}
template <typename UniformRandomNumberGenerator>
__host__ __device__ result_type operator()(UniformRandomNumberGenerator& urng)
{
return _lower_bound < _upper_bound ? std::abs(super_t::operator()(urng)) + _lower_bound
: _lower_bound - std::abs(super_t::operator()(urng));
}
};
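// A minimal usage sketch (assumed, not part of the benchmark suite): samples are folded to one
// side of the range, so values cluster near the lower bound when lower_bound < upper_bound.
//   thrust::minstd_rand engine{7};
//   geometric_distribution<int32_t> dist{0, 100};
//   auto sample = dist(engine);  // biased towards 0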
template <typename T, typename Generator>
struct value_generator {
using result_type = T;
value_generator(T lower_bound, T upper_bound, thrust::minstd_rand& engine, Generator gen)
: lower_bound(std::min(lower_bound, upper_bound)),
upper_bound(std::max(lower_bound, upper_bound)),
engine(engine),
dist(gen)
{
}
__device__ T operator()(size_t n)
{
engine.discard(n);
if constexpr (cuda::std::is_integral_v<T> &&
cuda::std::is_floating_point_v<decltype(dist(engine))>) {
return std::clamp(static_cast<T>(std::round(dist(engine))), lower_bound, upper_bound);
} else {
return std::clamp(dist(engine), lower_bound, upper_bound);
}
    // Note: the uniform distribution does not need clamping because its range is already
    // guaranteed to be within bounds.
}
T lower_bound;
T upper_bound;
thrust::minstd_rand engine;
Generator dist;
};
template <typename T>
using distribution_fn = std::function<rmm::device_uvector<T>(thrust::minstd_rand&, size_t)>;
template <
typename T,
std::enable_if_t<cuda::std::is_integral_v<T> or cuda::std::is_floating_point_v<T>, T>* = nullptr>
distribution_fn<T> make_distribution(distribution_id dist_id, T lower_bound, T upper_bound)
{
switch (dist_id) {
case distribution_id::NORMAL:
return [lower_bound, upper_bound, dist = make_normal_dist(lower_bound, upper_bound)](
thrust::minstd_rand& engine, size_t size) -> rmm::device_uvector<T> {
rmm::device_uvector<T> result(size, cudf::get_default_stream());
thrust::tabulate(thrust::device,
result.begin(),
result.end(),
value_generator{lower_bound, upper_bound, engine, dist});
return result;
};
case distribution_id::UNIFORM:
return [lower_bound, upper_bound, dist = make_uniform_dist(lower_bound, upper_bound)](
thrust::minstd_rand& engine, size_t size) -> rmm::device_uvector<T> {
rmm::device_uvector<T> result(size, cudf::get_default_stream());
thrust::tabulate(thrust::device,
result.begin(),
result.end(),
value_generator{lower_bound, upper_bound, engine, dist});
return result;
};
case distribution_id::GEOMETRIC:
      // an approximately exponential distribution from lower_bound to upper_bound.
return [lower_bound, upper_bound, dist = geometric_distribution<T>(lower_bound, upper_bound)](
thrust::minstd_rand& engine, size_t size) -> rmm::device_uvector<T> {
rmm::device_uvector<T> result(size, cudf::get_default_stream());
thrust::tabulate(thrust::device,
result.begin(),
result.end(),
value_generator{lower_bound, upper_bound, engine, dist});
return result;
};
default: CUDF_FAIL("Unsupported probability distribution");
}
}
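// A minimal host-side usage sketch (values assumed for illustration): build a generator for
// normally distributed int32_t values in [0, 100] and fill a device vector.
//   thrust::minstd_rand engine{1};
//   auto dist_fn = make_distribution<int32_t>(distribution_id::NORMAL, 0, 100);
//   rmm::device_uvector<int32_t> values = dist_fn(engine, 1024);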
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/common/generate_input.hpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/table/table.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf/utilities/traits.hpp>
#include <map>
#include <optional>
/**
* @file generate_input.hpp
* @brief Contains declarations of functions that generate columns filled with random data.
*
* Also includes the data profile descriptor classes.
*
 * The create_random_table functions take a data profile, information about the table size, and a
 * seed to deterministically generate a table with the given parameters.
*
* Currently, the data generation is done on the CPU and the data is then copied to the device
* memory.
*/
/**
 * @brief Identifies a group of related columns' logical element types.
*/
enum class type_group_id : int32_t {
INTEGRAL = static_cast<int32_t>(cudf::type_id::NUM_TYPE_IDS),
INTEGRAL_SIGNED,
FLOATING_POINT,
NUMERIC,
TIMESTAMP,
DURATION,
FIXED_POINT,
COMPOUND,
NESTED,
};
/**
* @brief Identifies a probability distribution type.
*/
enum class distribution_id : int8_t {
UNIFORM, ///< Uniform sampling between the given bounds. Provides the best coverage of the
///< overall value range. Real data rarely has this distribution.
NORMAL, ///< Gaussian sampling - most samples are close to the middle of the range. Good for
///< simulating real-world numeric data.
GEOMETRIC, ///< Geometric sampling - highest chance to sample close to the lower bound. Good for
///< simulating real data with asymmetric distribution (unsigned values, timestamps).
};
// Default distribution types for each type
namespace {
template <typename T, std::enable_if_t<cudf::is_chrono<T>()>* = nullptr>
distribution_id default_distribution_id()
{
return distribution_id::GEOMETRIC;
}
template <typename T, std::enable_if_t<!std::is_unsigned_v<T> && cudf::is_numeric<T>()>* = nullptr>
distribution_id default_distribution_id()
{
return distribution_id::NORMAL;
}
template <typename T,
std::enable_if_t<!std::is_same_v<T, bool> && std::is_unsigned_v<T> &&
cudf::is_numeric<T>()>* = nullptr>
distribution_id default_distribution_id()
{
return distribution_id::GEOMETRIC;
}
/**
* @brief Default range for the timestamp types: 1970 - 2020.
*
* The 2020 timestamp is used as a lower bound to bias the geometric distribution to recent
* timestamps.
*/
template <typename T, std::enable_if_t<cudf::is_timestamp<T>()>* = nullptr>
std::pair<int64_t, int64_t> default_range()
{
using cuda::std::chrono::duration_cast;
auto const year = duration_cast<typename T::duration>(cudf::duration_D{365l});
return {50 * year.count(), 0};
}
/**
* @brief Default range for the duration types.
*
* If a geometric distribution is used, it will bias towards short duration values.
*/
template <typename T, std::enable_if_t<cudf::is_duration<T>()>* = nullptr>
std::pair<int64_t, int64_t> default_range()
{
using cuda::std::chrono::duration_cast;
auto const year = duration_cast<typename T::duration>(cudf::duration_D{365l});
return {0, 2 * year.count()};
}
template <typename T, std::enable_if_t<cudf::is_numeric<T>()>* = nullptr>
std::pair<T, T> default_range()
{
// Limits need to be such that `upper - lower` does not overflow
return {std::numeric_limits<T>::lowest() / 2, std::numeric_limits<T>::max() / 2};
}
} // namespace
/**
* @brief Enables partial specializations with SFINAE.
*/
template <typename T, typename Enable = void>
struct distribution_params;
/**
* @brief Numeric values are parameterized with a distribution type and bounds of the same type.
*/
template <typename T>
struct distribution_params<T, std::enable_if_t<!std::is_same_v<T, bool> && cudf::is_numeric<T>()>> {
distribution_id id;
T lower_bound;
T upper_bound;
};
/**
* @brief Booleans are parameterized with the probability of getting `true` value.
*/
template <typename T>
struct distribution_params<T, std::enable_if_t<std::is_same_v<T, bool>>> {
double probability_true;
};
/**
* @brief Timestamps and durations are parameterized with a distribution type and int64_t bounds.
*/
template <typename T>
struct distribution_params<T, std::enable_if_t<cudf::is_chrono<T>()>> {
distribution_id id;
int64_t lower_bound;
int64_t upper_bound;
};
/**
* @brief Strings are parameterized by the distribution of their length, as an integral value.
*/
template <typename T>
struct distribution_params<T, std::enable_if_t<std::is_same_v<T, cudf::string_view>>> {
distribution_params<uint32_t> length_params;
};
/**
* @brief Lists are parameterized by the distribution of their length, maximal nesting level, and
* the element type.
*/
template <typename T>
struct distribution_params<T, std::enable_if_t<std::is_same_v<T, cudf::list_view>>> {
cudf::type_id element_type;
distribution_params<uint32_t> length_params;
cudf::size_type max_depth;
};
/**
* @brief Structs are parameterized by the maximal nesting level, and the leaf column types.
*/
template <typename T>
struct distribution_params<T, std::enable_if_t<std::is_same_v<T, cudf::struct_view>>> {
std::vector<cudf::type_id> leaf_types;
cudf::size_type max_depth;
};
// Present for compilation only. To be implemented once readers/writers support fixed-point types.
template <typename T>
struct distribution_params<T, std::enable_if_t<cudf::is_fixed_point<T>()>> {};
/**
* @brief Returns a vector of types, corresponding to the input type or a type group.
*
 * If the input is a `cudf::type_id` enumerator, the function simply returns a vector containing
 * this type. If the input value corresponds to a `type_group_id` enumerator, the function returns
 * a vector containing all types in the input group.
*
* @param id Integer equal to either a `cudf::type_id` enumerator or a `type_group_id` enumerator.
*/
std::vector<cudf::type_id> get_type_or_group(int32_t id);
/**
* @brief Returns a vector of types, corresponding to the input types or type groups.
*
 * If an element of the input vector is a `cudf::type_id` enumerator, the return value simply
 * includes this type. If an element of the input vector is a `type_group_id` enumerator, the
 * return value includes all types corresponding to the group enumerator.
*
* @param ids Vector of integers equal to either a `cudf::type_id` enumerator or a `type_group_id`
* enumerator.
*/
std::vector<cudf::type_id> get_type_or_group(std::vector<int32_t> const& ids);
/**
* @brief Contains data parameters for all types.
*
* This class exposes APIs to set and get distribution parameters for each supported type.
* Parameters can be set for multiple types with a single call by passing a `type_group_id` instead
* of `cudf::type_id`.
*
* All types have default parameters so it's not necessary to set the parameters before using them.
*/
class data_profile {
std::map<cudf::type_id, distribution_params<uint64_t>> int_params;
std::map<cudf::type_id, distribution_params<double>> float_params;
distribution_params<cudf::string_view> string_dist_desc{{distribution_id::NORMAL, 0, 32}};
distribution_params<cudf::list_view> list_dist_desc{
cudf::type_id::INT32, {distribution_id::GEOMETRIC, 0, 100}, 2};
distribution_params<cudf::struct_view> struct_dist_desc{
{cudf::type_id::INT32, cudf::type_id::FLOAT32, cudf::type_id::STRING}, 2};
std::map<cudf::type_id, distribution_params<__uint128_t>> decimal_params;
double bool_probability_true = 0.5;
std::optional<double> null_probability = 0.01;
cudf::size_type cardinality = 2000;
cudf::size_type avg_run_length = 4;
public:
template <typename T,
std::enable_if_t<!std::is_same_v<T, bool> && cuda::std::is_integral_v<T>, T>* = nullptr>
distribution_params<T> get_distribution_params() const
{
auto it = int_params.find(cudf::type_to_id<T>());
if (it == int_params.end()) {
auto const range = default_range<T>();
return distribution_params<T>{default_distribution_id<T>(), range.first, range.second};
} else {
auto& desc = it->second;
return {desc.id, static_cast<T>(desc.lower_bound), static_cast<T>(desc.upper_bound)};
}
}
template <typename T, std::enable_if_t<std::is_floating_point_v<T>, T>* = nullptr>
distribution_params<T> get_distribution_params() const
{
auto it = float_params.find(cudf::type_to_id<T>());
if (it == float_params.end()) {
auto const range = default_range<T>();
return distribution_params<T>{default_distribution_id<T>(), range.first, range.second};
} else {
auto& desc = it->second;
return {desc.id, static_cast<T>(desc.lower_bound), static_cast<T>(desc.upper_bound)};
}
}
template <typename T, std::enable_if_t<std::is_same_v<T, bool>>* = nullptr>
distribution_params<T> get_distribution_params() const
{
return distribution_params<T>{bool_probability_true};
}
template <typename T, std::enable_if_t<cudf::is_chrono<T>()>* = nullptr>
distribution_params<T> get_distribution_params() const
{
auto it = int_params.find(cudf::type_to_id<T>());
if (it == int_params.end()) {
auto const range = default_range<T>();
return distribution_params<T>{default_distribution_id<T>(), range.first, range.second};
} else {
auto& desc = it->second;
return {
desc.id, static_cast<int64_t>(desc.lower_bound), static_cast<int64_t>(desc.upper_bound)};
}
}
template <typename T, std::enable_if_t<std::is_same_v<T, cudf::string_view>>* = nullptr>
distribution_params<T> get_distribution_params() const
{
return string_dist_desc;
}
template <typename T, std::enable_if_t<std::is_same_v<T, cudf::list_view>>* = nullptr>
distribution_params<T> get_distribution_params() const
{
return list_dist_desc;
}
template <typename T, std::enable_if_t<std::is_same_v<T, cudf::struct_view>>* = nullptr>
distribution_params<T> get_distribution_params() const
{
return struct_dist_desc;
}
template <typename T, std::enable_if_t<cudf::is_fixed_point<T>()>* = nullptr>
distribution_params<typename T::rep> get_distribution_params() const
{
using rep = typename T::rep;
auto it = decimal_params.find(cudf::type_to_id<T>());
if (it == decimal_params.end()) {
auto const range = default_range<rep>();
return distribution_params<rep>{default_distribution_id<rep>(), range.first, range.second};
} else {
auto& desc = it->second;
return {desc.id, static_cast<rep>(desc.lower_bound), static_cast<rep>(desc.upper_bound)};
}
}
auto get_bool_probability_true() const { return bool_probability_true; }
auto get_null_probability() const { return null_probability; };
[[nodiscard]] auto get_cardinality() const { return cardinality; };
[[nodiscard]] auto get_avg_run_length() const { return avg_run_length; };
// Users should pass integral values for bounds when setting the parameters for types that have
  // discrete distributions (integers, strings, lists). Otherwise the call will have no effect.
template <typename T,
typename Type_enum,
std::enable_if_t<cuda::std::is_integral_v<T>, T>* = nullptr>
void set_distribution_params(Type_enum type_or_group,
distribution_id dist,
T lower_bound,
T upper_bound)
{
for (auto tid : get_type_or_group(static_cast<int32_t>(type_or_group))) {
if (tid == cudf::type_id::STRING) {
string_dist_desc.length_params = {
dist, static_cast<uint32_t>(lower_bound), static_cast<uint32_t>(upper_bound)};
} else if (tid == cudf::type_id::LIST) {
list_dist_desc.length_params = {
dist, static_cast<uint32_t>(lower_bound), static_cast<uint32_t>(upper_bound)};
} else {
int_params[tid] = {
dist, static_cast<uint64_t>(lower_bound), static_cast<uint64_t>(upper_bound)};
}
}
}
// Users should pass floating point values for bounds when setting the parameters for types that
  // have continuous distributions (floating point types). Otherwise the call will have no effect.
template <typename T,
typename Type_enum,
std::enable_if_t<std::is_floating_point_v<T>, T>* = nullptr>
void set_distribution_params(Type_enum type_or_group,
distribution_id dist,
T lower_bound,
T upper_bound)
{
for (auto tid : get_type_or_group(static_cast<int32_t>(type_or_group))) {
float_params[tid] = {
dist, static_cast<double>(lower_bound), static_cast<double>(upper_bound)};
}
}
template <typename T, typename Type_enum, std::enable_if_t<cudf::is_chrono<T>(), T>* = nullptr>
void set_distribution_params(Type_enum type_or_group,
distribution_id dist,
typename T::rep lower_bound,
typename T::rep upper_bound)
{
for (auto tid : get_type_or_group(static_cast<int32_t>(type_or_group))) {
int_params[tid] = {
dist, static_cast<uint64_t>(lower_bound), static_cast<uint64_t>(upper_bound)};
}
}
void set_bool_probability_true(double p)
{
CUDF_EXPECTS(p >= 0. and p <= 1., "probability must be in range [0...1]");
bool_probability_true = p;
}
void set_null_probability(std::optional<double> p)
{
CUDF_EXPECTS(p.value_or(0.) >= 0. and p.value_or(0.) <= 1.,
"probability must be in range [0...1]");
null_probability = p;
}
void set_cardinality(cudf::size_type c) { cardinality = c; }
void set_avg_run_length(cudf::size_type avg_rl) { avg_run_length = avg_rl; }
void set_list_depth(cudf::size_type max_depth)
{
CUDF_EXPECTS(max_depth > 0, "List depth must be positive");
list_dist_desc.max_depth = max_depth;
}
void set_list_type(cudf::type_id type) { list_dist_desc.element_type = type; }
void set_struct_depth(cudf::size_type max_depth)
{
CUDF_EXPECTS(max_depth > 0, "Struct depth must be positive");
struct_dist_desc.max_depth = max_depth;
}
void set_struct_types(cudf::host_span<cudf::type_id const> types)
{
CUDF_EXPECTS(
std::none_of(
types.begin(), types.end(), [](auto& type) { return type == cudf::type_id::STRUCT; }),
"Cannot include STRUCT as its own subtype");
struct_dist_desc.leaf_types.assign(types.begin(), types.end());
}
};
/**
* @brief Builder to construct data profiles for the random data generator.
*
* Setters can be chained to set multiple properties in a single expression.
* For example, `data_profile` initialization
* @code{.pseudo}
* data_profile profile;
* profile.set_null_probability(0.0);
* profile.set_cardinality(0);
* profile.set_distribution_params(cudf::type_id::INT32, distribution_id::UNIFORM, 0, 100);
* @endcode
* becomes
* @code{.pseudo}
* data_profile const profile =
* data_profile_builder().cardinality(0).null_probability(0.0).distribution(
* cudf::type_id::INT32, distribution_id::UNIFORM, 0, 100);
* @endcode
* The builder makes it easier to have immutable `data_profile` objects even with the complex
* initialization. The `profile` object in the example above is initialized from
* `data_profile_builder` using an implicit conversion operator.
*
 * The builder API also includes a few additional convenience setters: an overload of
 * `distribution` that takes only the distribution type (not the range), and `no_validity`,
 * a simpler equivalent of `null_probability(std::nullopt)`.
*/
class data_profile_builder {
data_profile profile;
public:
/**
* @brief Sets random distribution type for a given set of data types.
*
* Only the distribution type is set; the distribution will use the default range.
*
* @param type_or_group Type or group ID, depending on whether the new distribution
* applies to a single type or a subset of types
* @param dist Random distribution type
* @tparam T Data type of the distribution range; does not need to match the data type
* @return this for chaining
*/
template <typename T, typename Type_enum>
data_profile_builder& distribution(Type_enum type_or_group, distribution_id dist)
{
auto const range = default_range<T>();
profile.set_distribution_params(type_or_group, dist, range.first, range.second);
return *this;
}
/**
* @brief Sets random distribution type and value range for a given set of data types.
*
* @tparam T Parameters that are forwarded to set_distribution_params
* @return this for chaining
*/
template <class... T>
data_profile_builder& distribution(T&&... t)
{
profile.set_distribution_params(std::forward<T>(t)...);
return *this;
}
/**
   * @brief Sets the probability that a randomly generated boolean element will be `true`.
   *
   * For example, passing `0.9` means that 90% of values in boolean columns will be `true`.
*
* @param p Probability of `true` values, in range [0..1]
* @return this for chaining
*/
data_profile_builder& bool_probability_true(double p)
{
profile.set_bool_probability_true(p);
return *this;
}
/**
* @brief Sets the probability that a randomly generated element will be `null`.
*
* @param p Probability of `null` values, in range [0..1]
* @return this for chaining
*/
data_profile_builder& null_probability(std::optional<double> p)
{
profile.set_null_probability(p);
return *this;
}
/**
* @brief Disables the creation of null mask in the output columns.
*
* @return this for chaining
*/
data_profile_builder& no_validity()
{
profile.set_null_probability(std::nullopt);
return *this;
}
/**
* @brief Sets the maximum number of unique values in each output column.
*
* @param c Maximum number of unique values
* @return this for chaining
*/
data_profile_builder& cardinality(cudf::size_type c)
{
profile.set_cardinality(c);
return *this;
}
/**
* @brief Sets the average length of sequences of equal elements in output columns.
*
* @param avg_rl Average sequence length (run-length)
* @return this for chaining
*/
data_profile_builder& avg_run_length(cudf::size_type avg_rl)
{
profile.set_avg_run_length(avg_rl);
return *this;
}
/**
* @brief Sets the maximum nesting depth of generated list columns.
*
* @param max_depth maximum nesting depth
* @return this for chaining
*/
data_profile_builder& list_depth(cudf::size_type max_depth)
{
profile.set_list_depth(max_depth);
return *this;
}
/**
* @brief Sets the data type of list elements.
*
* @param type data type ID
* @return this for chaining
*/
data_profile_builder& list_type(cudf::type_id type)
{
profile.set_list_type(type);
return *this;
}
/**
* @brief Sets the maximum nesting depth of generated struct columns.
*
* @param max_depth maximum nesting depth
* @return this for chaining
*/
data_profile_builder& struct_depth(cudf::size_type max_depth)
{
profile.set_struct_depth(max_depth);
return *this;
}
/**
* @brief Sets the data types of struct fields.
*
* @param types data type IDs
* @return this for chaining
*/
data_profile_builder& struct_types(cudf::host_span<cudf::type_id const> types)
{
profile.set_struct_types(types);
return *this;
}
/**
   * @brief Moves the data_profile member once it is built.
*/
operator data_profile&&() { return std::move(profile); }
};
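// A further usage sketch (hypothetical values): the convenience setters compose the same way as
// the full `distribution` overload; the explicit template argument selects the default range
// type.
//   data_profile const profile = data_profile_builder().no_validity().distribution<int32_t>(
//     cudf::type_id::INT32, distribution_id::GEOMETRIC);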
/**
* @brief Strongly typed table size in bytes. Used to disambiguate overloads of
* `create_random_table`.
*/
struct table_size_bytes {
size_t size;
};
/**
* @brief Strongly typed row count. Used to disambiguate overloads of `create_random_table`.
*/
struct row_count {
cudf::size_type count;
};
/**
* @brief Deterministically generates a table filled with data with the given parameters.
*
* @param dtype_ids Vector of requested column types
 * @param table_bytes Target size of the output table, in bytes. Some types may not produce
 * columns of the exact size
* @param data_params Optional, set of data parameters describing the data profile for each type
* @param seed Optional, seed for the pseudo-random engine
*/
std::unique_ptr<cudf::table> create_random_table(std::vector<cudf::type_id> const& dtype_ids,
table_size_bytes table_bytes,
data_profile const& data_params = data_profile{},
unsigned seed = 1);
/**
* @brief Deterministically generates a table filled with data with the given parameters.
*
* @param dtype_ids Vector of requested column types
* @param num_rows Number of rows in the output table
* @param data_params Optional, set of data parameters describing the data profile for each type
* @param seed Optional, seed for the pseudo-random engine
*/
std::unique_ptr<cudf::table> create_random_table(std::vector<cudf::type_id> const& dtype_ids,
row_count num_rows,
data_profile const& data_params = data_profile{},
unsigned seed = 1);
/**
* @brief Deterministically generates a column filled with data with the given parameters.
*
* @param dtype_id Requested column type
* @param num_rows Number of rows in the output column
* @param data_params Optional, set of data parameters describing the data profile
* @param seed Optional, seed for the pseudo-random engine
*/
std::unique_ptr<cudf::column> create_random_column(cudf::type_id dtype_id,
row_count num_rows,
data_profile const& data_params = data_profile{},
unsigned seed = 1);
/**
 * @brief Generate sequence columns starting with value 0 in the first row and increasing by 1 in
* subsequent rows.
*
* @param dtype_ids Vector of requested column types
* @param num_rows Number of rows in the output table
* @param null_probability Optional, probability of a null value
 * no value implies no null mask, =0 implies all valid, =1 implies all null
* @param seed Optional, seed for the pseudo-random engine
* @return A table with the sequence columns.
*/
std::unique_ptr<cudf::table> create_sequence_table(
std::vector<cudf::type_id> const& dtype_ids,
row_count num_rows,
std::optional<double> null_probability = std::nullopt,
unsigned seed = 1);
/**
* @brief Repeats the input data types cyclically to fill a vector of @ref num_cols
* elements.
*
* @param dtype_ids Vector of requested column types
* @param num_cols Number of types in the output vector
* @return A vector of type_ids
*/
std::vector<cudf::type_id> cycle_dtypes(std::vector<cudf::type_id> const& dtype_ids,
cudf::size_type num_cols);
/**
* @brief Repeat the given two data types with a given ratio of a:b.
*
* The first dtype will have 'first_num' columns and the second will have 'num_cols - first_num'
* columns.
*
* @param dtype_ids Pair of requested column types
* @param num_cols Total number of columns in the output vector
* @param first_num Total number of columns of type `dtype_ids.first`
* @return A vector of type_ids
*/
std::vector<cudf::type_id> mix_dtypes(std::pair<cudf::type_id, cudf::type_id> const& dtype_ids,
cudf::size_type num_cols,
int first_num);
/**
* @brief Create a random null mask object
*
* @param size number of rows
* @param null_probability probability of a null value
 * no value implies no null mask, =0 implies all valid, =1 implies all null
* @param seed Optional, seed for the pseudo-random engine
* @return null mask device buffer with random null mask data and null count
*/
std::pair<rmm::device_buffer, cudf::size_type> create_random_null_mask(
cudf::size_type size, std::optional<double> null_probability = std::nullopt, unsigned seed = 1);
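// A minimal end-to-end sketch (hypothetical sizes): generate a two-column table of 1,000 rows
// and a standalone STRING column, both with the default data profile and seed.
//   auto tbl = create_random_table({cudf::type_id::INT32, cudf::type_id::FLOAT64}, row_count{1000});
//   auto col = create_random_column(cudf::type_id::STRING, row_count{1000});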
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/type_dispatcher/type_dispatcher.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/filling.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/device_buffer.hpp>
#include <type_traits>
enum DispatchingType { HOST_DISPATCHING, DEVICE_DISPATCHING, NO_DISPATCHING };
enum FunctorType { BANDWIDTH_BOUND, COMPUTE_BOUND };
template <class NotFloat, FunctorType ft, class DisableNotFloat = void>
struct Functor {
static __device__ NotFloat f(NotFloat x) { return x; }
};
template <class Float, FunctorType ft>
struct Functor<Float, ft, std::enable_if_t<std::is_floating_point_v<Float>>> {
static __device__ Float f(Float x)
{
if (ft == BANDWIDTH_BOUND) {
return x + static_cast<Float>(1) - static_cast<Float>(1);
} else {
for (int i = 0; i < 1000; i++) {
x = (x * x + static_cast<Float>(1)) - x * x - static_cast<Float>(1);
}
return x;
}
}
};
constexpr int block_size = 256;
// This is for NO_DISPATCHING
template <FunctorType functor_type, class T>
__global__ void no_dispatching_kernel(T** A, cudf::size_type n_rows, cudf::size_type n_cols)
{
using F = Functor<T, functor_type>;
cudf::size_type index = blockIdx.x * blockDim.x + threadIdx.x;
while (index < n_rows) {
for (int c = 0; c < n_cols; c++) {
A[c][index] = F::f(A[c][index]);
}
index += blockDim.x * gridDim.x;
}
}
// This is for HOST_DISPATCHING
template <FunctorType functor_type, class T>
__global__ void host_dispatching_kernel(cudf::mutable_column_device_view source_column)
{
using F = Functor<T, functor_type>;
T* A = source_column.data<T>();
cudf::size_type index = blockIdx.x * blockDim.x + threadIdx.x;
while (index < source_column.size()) {
A[index] = F::f(A[index]);
index += blockDim.x * gridDim.x;
}
}
template <FunctorType functor_type>
struct ColumnHandle {
template <typename ColumnType, CUDF_ENABLE_IF(cudf::is_rep_layout_compatible<ColumnType>())>
void operator()(cudf::mutable_column_device_view source_column, int work_per_thread)
{
cudf::detail::grid_1d grid_config{source_column.size(), block_size};
int grid_size = grid_config.num_blocks;
// Launch the kernel.
host_dispatching_kernel<functor_type, ColumnType><<<grid_size, block_size>>>(source_column);
}
template <typename ColumnType, CUDF_ENABLE_IF(not cudf::is_rep_layout_compatible<ColumnType>())>
void operator()(cudf::mutable_column_device_view source_column, int work_per_thread)
{
CUDF_FAIL("Invalid type to benchmark.");
}
};
// The following is for DEVICE_DISPATCHING:
// The dispatching is done on the device. The loop iterates over each row
// (across different columns). The type is dispatched each time a column is
// visited, so the total number of dispatches is n_rows * n_cols.
template <FunctorType functor_type>
struct RowHandle {
template <typename T, CUDF_ENABLE_IF(cudf::is_rep_layout_compatible<T>())>
__device__ void operator()(cudf::mutable_column_device_view source, cudf::size_type index)
{
using F = Functor<T, functor_type>;
source.data<T>()[index] = F::f(source.data<T>()[index]);
}
template <typename T, CUDF_ENABLE_IF(not cudf::is_rep_layout_compatible<T>())>
__device__ void operator()(cudf::mutable_column_device_view source, cudf::size_type index)
{
CUDF_UNREACHABLE("Unsupported type.");
}
};
// This is for DEVICE_DISPATCHING
template <FunctorType functor_type>
__global__ void device_dispatching_kernel(cudf::mutable_table_device_view source)
{
cudf::size_type const n_rows = source.num_rows();
cudf::size_type index = threadIdx.x + blockIdx.x * blockDim.x;
while (index < n_rows) {
for (cudf::size_type i = 0; i < source.num_columns(); i++) {
cudf::type_dispatcher(
source.column(i).type(), RowHandle<functor_type>{}, source.column(i), index);
}
index += blockDim.x * gridDim.x;
} // while
}
template <FunctorType functor_type, DispatchingType dispatching_type, class T>
void launch_kernel(cudf::mutable_table_view input, T** d_ptr, int work_per_thread)
{
cudf::size_type const n_rows = input.num_rows();
cudf::size_type const n_cols = input.num_columns();
cudf::detail::grid_1d grid_config{n_rows, block_size};
int grid_size = grid_config.num_blocks;
if (dispatching_type == HOST_DISPATCHING) {
// std::vector<cudf::util::cuda::scoped_stream> v_stream(n_cols);
for (int c = 0; c < n_cols; c++) {
auto d_column = cudf::mutable_column_device_view::create(input.column(c));
cudf::type_dispatcher(
d_column->type(), ColumnHandle<functor_type>{}, *d_column, work_per_thread);
}
} else if (dispatching_type == DEVICE_DISPATCHING) {
auto d_table_view = cudf::mutable_table_device_view::create(input);
auto f = device_dispatching_kernel<functor_type>;
// Launch the kernel
f<<<grid_size, block_size>>>(*d_table_view);
} else if (dispatching_type == NO_DISPATCHING) {
auto f = no_dispatching_kernel<functor_type, T>;
// Launch the kernel
f<<<grid_size, block_size>>>(d_ptr, n_rows, n_cols);
}
}
template <class TypeParam, FunctorType functor_type, DispatchingType dispatching_type>
void type_dispatcher_benchmark(::benchmark::State& state)
{
auto const n_cols = static_cast<cudf::size_type>(state.range(0));
auto const source_size = static_cast<cudf::size_type>(state.range(1));
auto const work_per_thread = static_cast<cudf::size_type>(state.range(2));
auto init = cudf::make_fixed_width_scalar<TypeParam>(static_cast<TypeParam>(0));
std::vector<std::unique_ptr<cudf::column>> source_column_wrappers;
std::vector<cudf::mutable_column_view> source_columns;
for (int i = 0; i < n_cols; ++i) {
source_column_wrappers.push_back(cudf::sequence(source_size, *init));
source_columns.push_back(*source_column_wrappers[i]);
}
cudf::mutable_table_view source_table{source_columns};
// For no dispatching
std::vector<rmm::device_buffer> h_vec(n_cols);
std::vector<TypeParam*> h_vec_p(n_cols);
std::transform(h_vec.begin(), h_vec.end(), h_vec_p.begin(), [source_size](auto& col) {
col.resize(source_size * sizeof(TypeParam), cudf::get_default_stream());
return static_cast<TypeParam*>(col.data());
});
rmm::device_uvector<TypeParam*> d_vec(n_cols, cudf::get_default_stream());
if (dispatching_type == NO_DISPATCHING) {
CUDF_CUDA_TRY(
cudaMemcpy(d_vec.data(), h_vec_p.data(), sizeof(TypeParam*) * n_cols, cudaMemcpyDefault));
}
// Warm up
launch_kernel<functor_type, dispatching_type>(source_table, d_vec.data(), work_per_thread);
CUDF_CUDA_TRY(cudaDeviceSynchronize());
for (auto _ : state) {
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
launch_kernel<functor_type, dispatching_type>(source_table, d_vec.data(), work_per_thread);
}
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * source_size * n_cols * 2 *
sizeof(TypeParam));
}
class TypeDispatcher : public cudf::benchmark {};
#define TBM_BENCHMARK_DEFINE(name, TypeParam, functor_type, dispatching_type) \
BENCHMARK_DEFINE_F(TypeDispatcher, name)(::benchmark::State & state) \
{ \
type_dispatcher_benchmark<TypeParam, functor_type, dispatching_type>(state); \
} \
BENCHMARK_REGISTER_F(TypeDispatcher, name) \
->RangeMultiplier(2) \
->Ranges({{1, 8}, {1 << 10, 1 << 26}, {1, 1}}) \
->UseManualTime();
TBM_BENCHMARK_DEFINE(fp64_bandwidth_host, double, BANDWIDTH_BOUND, HOST_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_bandwidth_device, double, BANDWIDTH_BOUND, DEVICE_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_bandwidth_no, double, BANDWIDTH_BOUND, NO_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_compute_host, double, COMPUTE_BOUND, HOST_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_compute_device, double, COMPUTE_BOUND, DEVICE_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_compute_no, double, COMPUTE_BOUND, NO_DISPATCHING);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/text/minhash.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <nvtext/minhash.hpp>
#include <nvbench/nvbench.cuh>
#include <rmm/device_buffer.hpp>
static void bench_minhash(nvbench::state& state)
{
auto const num_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const row_width = static_cast<cudf::size_type>(state.get_int64("row_width"));
auto const hash_width = static_cast<cudf::size_type>(state.get_int64("hash_width"));
auto const seed_count = static_cast<cudf::size_type>(state.get_int64("seed_count"));
auto const base64 = state.get_int64("hash_type") == 64;
if (static_cast<std::size_t>(num_rows) * static_cast<std::size_t>(row_width) >=
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max())) {
state.skip("Skip benchmarks greater than size_type limit");
}
data_profile const strings_profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, row_width);
auto const strings_table =
create_random_table({cudf::type_id::STRING}, row_count{num_rows}, strings_profile);
cudf::strings_column_view input(strings_table->view().column(0));
data_profile const seeds_profile = data_profile_builder().null_probability(0).distribution(
cudf::type_to_id<cudf::hash_value_type>(), distribution_id::NORMAL, 0, row_width);
auto const seed_type = base64 ? cudf::type_id::UINT64 : cudf::type_id::UINT32;
auto const seeds_table = create_random_table({seed_type}, row_count{seed_count}, seeds_profile);
auto seeds = seeds_table->get_column(0);
seeds.set_null_mask(rmm::device_buffer{}, 0);
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
auto chars_size = input.chars_size();
state.add_global_memory_reads<nvbench::int8_t>(chars_size);
  state.add_global_memory_writes<nvbench::int32_t>(num_rows);  // outputs are hashes
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto result = base64 ? nvtext::minhash64(input, seeds.view(), hash_width)
: nvtext::minhash(input, seeds.view(), hash_width);
});
}
NVBENCH_BENCH(bench_minhash)
.set_name("minhash")
.add_int64_axis("num_rows", {1024, 8192, 16364, 131072})
.add_int64_axis("row_width", {128, 512, 2048})
.add_int64_axis("hash_width", {5, 10})
.add_int64_axis("seed_count", {2, 26})
.add_int64_axis("hash_type", {32, 64});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/text/edit_distance.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <nvtext/edit_distance.hpp>
#include <nvbench/nvbench.cuh>
#include <rmm/device_buffer.hpp>
static void bench_edit_distance(nvbench::state& state)
{
auto const num_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const row_width = static_cast<cudf::size_type>(state.get_int64("row_width"));
if (static_cast<std::size_t>(num_rows) * static_cast<std::size_t>(row_width) >=
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max())) {
state.skip("Skip benchmarks greater than size_type limit");
}
data_profile const strings_profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, row_width);
auto const strings_table = create_random_table(
{cudf::type_id::STRING, cudf::type_id::STRING}, row_count{num_rows}, strings_profile);
cudf::strings_column_view input1(strings_table->view().column(0));
cudf::strings_column_view input2(strings_table->view().column(1));
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
auto chars_size = input1.chars_size() + input2.chars_size();
state.add_global_memory_reads<nvbench::int8_t>(chars_size);
  // output is one integer per row
state.add_global_memory_writes<nvbench::int32_t>(num_rows);
state.exec(nvbench::exec_tag::sync,
[&](nvbench::launch& launch) { auto result = nvtext::edit_distance(input1, input2); });
}
NVBENCH_BENCH(bench_edit_distance)
.set_name("edit_distance")
.add_int64_axis("num_rows", {1024, 4096, 8192, 16364, 32768, 262144})
.add_int64_axis("row_width", {8, 16, 32, 64, 128, 256});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/text/hash_ngrams.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <nvtext/generate_ngrams.hpp>
#include <nvbench/nvbench.cuh>
#include <rmm/device_buffer.hpp>
static void bench_hash_ngrams(nvbench::state& state)
{
auto const num_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const row_width = static_cast<cudf::size_type>(state.get_int64("row_width"));
auto const ngrams = static_cast<cudf::size_type>(state.get_int64("ngrams"));
if (static_cast<std::size_t>(num_rows) * static_cast<std::size_t>(row_width) >=
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max())) {
state.skip("Skip benchmarks greater than size_type limit");
}
data_profile const strings_profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, row_width);
auto const strings_table =
create_random_table({cudf::type_id::STRING}, row_count{num_rows}, strings_profile);
cudf::strings_column_view input(strings_table->view().column(0));
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
auto chars_size = input.chars_size();
state.add_global_memory_reads<nvbench::int8_t>(chars_size);
  // outputs are hashes: approximate total number of hashes
state.add_global_memory_writes<nvbench::int32_t>(num_rows * ngrams);
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto result = nvtext::hash_character_ngrams(input, ngrams);
});
}
NVBENCH_BENCH(bench_hash_ngrams)
.set_name("hash_ngrams")
.add_int64_axis("num_rows", {1024, 4096, 8192, 16364, 32768, 262144})
.add_int64_axis("row_width", {128, 512, 2048})
.add_int64_axis("ngrams", {5, 10});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/text/subword.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <nvtext/subword_tokenize.hpp>
#include <filesystem>
#include <fstream>
#include <iostream>
#include <vector>
static std::string create_hash_vocab_file()
{
std::string dir_template{std::filesystem::temp_directory_path().string()};
if (char const* env_p = std::getenv("WORKSPACE")) dir_template = env_p;
std::string hash_file = dir_template + "/hash_vocab.txt";
  // Create a fake hashed vocab text file for this test.
  // This only works with the words used in the benchmark strings below.
std::vector<std::pair<int, int>> coefficients(23, {65559, 0});
std::ofstream outfile(hash_file, std::ofstream::out);
outfile << "1\n0\n" << coefficients.size() << "\n";
for (auto c : coefficients)
outfile << c.first << " " << c.second << "\n";
std::vector<uint64_t> hash_table(23, 0);
outfile << hash_table.size() << "\n";
hash_table[0] = 3015668L;
hash_table[1] = 6205475701751155871L;
hash_table[5] = 6358029;
hash_table[16] = 451412625363L;
hash_table[20] = 6206321707968235495L;
for (auto h : hash_table)
outfile << h << "\n";
outfile << "100\n101\n102\n\n";
return hash_file;
}
static void BM_subword_tokenizer(benchmark::State& state)
{
auto const nrows = static_cast<cudf::size_type>(state.range(0));
std::vector<char const*> h_strings(nrows, "This is a test ");
cudf::test::strings_column_wrapper strings(h_strings.begin(), h_strings.end());
std::string hash_file = create_hash_vocab_file();
std::vector<uint32_t> offsets{14};
uint32_t max_sequence_length = 64;
uint32_t stride = 48;
uint32_t do_truncate = 0;
uint32_t do_lower = 1;
//
auto vocab = nvtext::load_vocabulary_file(hash_file);
for (auto _ : state) {
cuda_event_timer raii(state, true);
auto result = nvtext::subword_tokenize(cudf::strings_column_view{strings},
*vocab,
max_sequence_length,
stride,
do_lower,
do_truncate);
}
}
class Subword : public cudf::benchmark {};
#define SUBWORD_BM_BENCHMARK_DEFINE(name) \
BENCHMARK_DEFINE_F(Subword, name)(::benchmark::State & state) { BM_subword_tokenizer(state); } \
BENCHMARK_REGISTER_F(Subword, name) \
->RangeMultiplier(2) \
->Range(1 << 10, 1 << 17) \
->UseManualTime() \
->Unit(benchmark::kMillisecond);
SUBWORD_BM_BENCHMARK_DEFINE(BM_subword_tokenizer);
// BENCHMARK_MAIN();
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/text/normalize.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvtext/normalize.hpp>
#include <nvbench/nvbench.cuh>
static void bench_normalize(nvbench::state& state)
{
auto const num_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const row_width = static_cast<cudf::size_type>(state.get_int64("row_width"));
auto const normalize_type = state.get_string("type");
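  // Strings columns index their character data with cudf::size_type, so skip
  // combinations whose total character count could exceed that limit.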
if (static_cast<std::size_t>(num_rows) * static_cast<std::size_t>(row_width) >=
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max())) {
state.skip("Skip benchmarks greater than size_type limit");
}
data_profile const profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, row_width);
auto const column = create_random_column(cudf::type_id::STRING, row_count{num_rows}, profile);
cudf::strings_column_view input(column->view());
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
auto chars_size = input.chars_size();
state.add_global_memory_reads<nvbench::int8_t>(chars_size);
state.add_global_memory_writes<nvbench::int8_t>(chars_size);
if (normalize_type == "spaces") {
state.exec(nvbench::exec_tag::sync,
[&](nvbench::launch& launch) { auto result = nvtext::normalize_spaces(input); });
} else {
bool const to_lower = (normalize_type == "to_lower");
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto result = nvtext::normalize_characters(input, to_lower);
});
}
}
NVBENCH_BENCH(bench_normalize)
.set_name("normalize")
.add_int64_axis("row_width", {32, 64, 128, 256, 512, 1024})
.add_int64_axis("num_rows", {4096, 32768, 262144, 2097152, 16777216})
.add_string_axis("type", {"spaces", "characters", "to_lower"});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/text/tokenize.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvtext/ngrams_tokenize.hpp>
#include <nvtext/tokenize.hpp>
#include <nvbench/nvbench.cuh>
static void bench_tokenize(nvbench::state& state)
{
auto const num_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const row_width = static_cast<cudf::size_type>(state.get_int64("row_width"));
auto const tokenize_type = state.get_string("type");
if (static_cast<std::size_t>(num_rows) * static_cast<std::size_t>(row_width) >=
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max())) {
state.skip("Skip benchmarks greater than size_type limit");
}
data_profile const profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, row_width);
auto const column = create_random_column(cudf::type_id::STRING, row_count{num_rows}, profile);
cudf::strings_column_view input(column->view());
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
auto chars_size = input.chars_size();
state.add_global_memory_reads<nvbench::int8_t>(chars_size);
state.add_global_memory_writes<nvbench::int8_t>(chars_size);
if (tokenize_type == "whitespace") {
state.exec(nvbench::exec_tag::sync,
[&](nvbench::launch& launch) { auto result = nvtext::tokenize(input); });
} else if (tokenize_type == "multi") {
cudf::test::strings_column_wrapper delimiters({" ", "+", "-"});
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto result = nvtext::tokenize(input, cudf::strings_column_view(delimiters));
});
} else if (tokenize_type == "count") {
state.exec(nvbench::exec_tag::sync,
[&](nvbench::launch& launch) { auto result = nvtext::count_tokens(input); });
} else if (tokenize_type == "count_multi") {
cudf::test::strings_column_wrapper delimiters({" ", "+", "-"});
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto result = nvtext::count_tokens(input, cudf::strings_column_view(delimiters));
});
} else if (tokenize_type == "ngrams") {
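    // An empty delimiter selects whitespace tokenization; adjacent token pairs
    // (bigrams) are then joined with the '_' separator.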
auto const delimiter = cudf::string_scalar("");
auto const separator = cudf::string_scalar("_");
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto result = nvtext::ngrams_tokenize(input, 2, delimiter, separator);
});
} else if (tokenize_type == "characters") {
state.exec(nvbench::exec_tag::sync,
[&](nvbench::launch& launch) { auto result = nvtext::character_tokenize(input); });
}
}
NVBENCH_BENCH(bench_tokenize)
.set_name("tokenize")
.add_int64_axis("row_width", {32, 64, 128, 256, 512, 1024})
.add_int64_axis("num_rows", {4096, 32768, 262144, 2097152, 16777216})
.add_string_axis("type", {"whitespace", "multi", "count", "count_multi", "ngrams", "characters"});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/text/vocab.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/reduction.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/char_types/char_types.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvtext/tokenize.hpp>
#include <nvbench/nvbench.cuh>
static void bench_vocab_tokenize(nvbench::state& state)
{
auto const num_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const row_width = static_cast<cudf::size_type>(state.get_int64("row_width"));
if (static_cast<std::size_t>(num_rows) * static_cast<std::size_t>(row_width) >=
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max())) {
state.skip("Skip benchmarks greater than size_type limit");
}
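  // Build the input strings: every non-alphanumeric character in the random
  // text is replaced with a space, so rows become whitespace-delimited tokens.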
auto const column = [num_rows, row_width] {
data_profile const profile = data_profile_builder().no_validity().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, row_width);
auto const col = create_random_column(cudf::type_id::STRING, row_count{num_rows}, profile);
return cudf::strings::filter_characters_of_type(
cudf::strings_column_view(col->view()),
cudf::strings::string_character_types::ALL_TYPES,
cudf::string_scalar(" "),
cudf::strings::string_character_types::ALPHANUM);
}();
cudf::strings_column_view input(column->view());
auto const vocab_col = [] {
data_profile const profile = data_profile_builder().no_validity().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, 15);
auto const col = create_random_column(cudf::type_id::STRING, row_count{100}, profile);
return cudf::strings::filter_characters_of_type(
cudf::strings_column_view(col->view()),
cudf::strings::string_character_types::ALL_TYPES,
cudf::string_scalar(""),
cudf::strings::string_character_types::ALPHANUM);
}();
auto const vocab = nvtext::load_vocabulary(cudf::strings_column_view(vocab_col->view()));
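  // Total number of tokens across all rows (sum of per-row counts); used to
  // estimate the token ids written to global memory.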
auto token_count = [input] {
auto const counts = nvtext::count_tokens(input);
auto const agg = cudf::make_sum_aggregation<cudf::reduce_aggregation>();
auto const count = cudf::reduce(counts->view(), *agg, counts->type());
return static_cast<cudf::scalar_type_t<cudf::size_type>*>(count.get())
->value(cudf::get_default_stream());
}();
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
auto chars_size = input.chars_size() + cudf::strings_column_view(vocab_col->view()).chars_size();
state.add_global_memory_reads<nvbench::int8_t>(chars_size);
state.add_global_memory_writes<nvbench::int32_t>(token_count);
auto const delimiter = cudf::string_scalar("");
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto result = nvtext::tokenize_with_vocabulary(input, *vocab, delimiter);
});
}
NVBENCH_BENCH(bench_vocab_tokenize)
.set_name("vocab_tokenize")
.add_int64_axis("row_width", {32, 64, 128, 256, 512, 1024})
.add_int64_axis("num_rows", {262144, 524288, 1048576, 2097152, 4194304, 16777216});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/text/jaccard.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvtext/jaccard.hpp>
#include <nvbench/nvbench.cuh>
#include <rmm/device_buffer.hpp>
static void bench_jaccard(nvbench::state& state)
{
auto const num_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const row_width = static_cast<cudf::size_type>(state.get_int64("row_width"));
auto const substring_width = static_cast<cudf::size_type>(state.get_int64("substring_width"));
if (static_cast<std::size_t>(num_rows) * static_cast<std::size_t>(row_width) >=
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max())) {
state.skip("Skip benchmarks greater than size_type limit");
}
data_profile const strings_profile =
data_profile_builder()
.distribution(cudf::type_id::STRING, distribution_id::NORMAL, 0, row_width)
.no_validity();
auto const input_table = create_random_table(
{cudf::type_id::STRING, cudf::type_id::STRING}, row_count{num_rows}, strings_profile);
cudf::strings_column_view input1(input_table->view().column(0));
cudf::strings_column_view input2(input_table->view().column(1));
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
auto chars_size = input1.chars_size() + input2.chars_size();
state.add_global_memory_reads<nvbench::int8_t>(chars_size);
state.add_global_memory_writes<nvbench::float32_t>(num_rows);
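  // nvtext::jaccard_index compares the character n-gram sets (width
  // substring_width) of each row pair, producing one float per row.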
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto result = nvtext::jaccard_index(input1, input2, substring_width);
});
}
NVBENCH_BENCH(bench_jaccard)
.set_name("jaccard")
.add_int64_axis("num_rows", {1024, 4096, 8192, 16364, 32768, 262144})
.add_int64_axis("row_width", {128, 512, 2048})
.add_int64_axis("substring_width", {5, 10});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/text/replace.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvtext/replace.hpp>
#include <nvbench/nvbench.cuh>
#include <random>
static void bench_replace(nvbench::state& state)
{
auto const num_rows = static_cast<cudf::size_type>(state.get_int64("num_rows"));
auto const row_width = static_cast<cudf::size_type>(state.get_int64("row_width"));
if (static_cast<std::size_t>(num_rows) * static_cast<std::size_t>(row_width) >=
static_cast<std::size_t>(std::numeric_limits<cudf::size_type>::max())) {
state.skip("Skip benchmarks greater than size_type limit");
}
std::vector<std::string> words{" ", "one ", "two ", "three ", "four ",
"five ", "six ", "sevén ", "eight ", "nine ",
"ten ", "eleven ", "twelve ", "thirteen ", "fourteen ",
"fifteen ", "sixteen ", "seventeen ", "eighteen ", "nineteen "};
std::default_random_engine generator;
std::uniform_int_distribution<int> tokens_dist(0, words.size() - 1);
std::string row; // build a row of random tokens
while (static_cast<cudf::size_type>(row.size()) < row_width)
row += words[tokens_dist(generator)];
std::uniform_int_distribution<int> position_dist(0, 16);
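  // Each row is a pointer into the same host string at a small random offset,
  // producing num_rows distinct-looking rows from one allocation (rows can be
  // up to 16 characters shorter than row_width).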
auto elements = cudf::detail::make_counting_transform_iterator(
0, [&](auto idx) { return row.c_str() + position_dist(generator); });
cudf::test::strings_column_wrapper input(elements, elements + num_rows);
cudf::strings_column_view view(input);
cudf::test::strings_column_wrapper targets({"one", "two", "sevén", "zero"});
cudf::test::strings_column_wrapper replacements({"1", "2", "7", "0"});
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
auto chars_size = view.chars_size();
state.add_global_memory_reads<nvbench::int8_t>(chars_size);
state.add_global_memory_writes<nvbench::int8_t>(chars_size);
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto result = nvtext::replace_tokens(
view, cudf::strings_column_view(targets), cudf::strings_column_view(replacements));
});
}
NVBENCH_BENCH(bench_replace)
.set_name("replace")
.add_int64_axis("row_width", {32, 64, 128, 256, 512, 1024})
.add_int64_axis("num_rows", {4096, 32768, 262144, 2097152, 16777216});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/text/ngrams.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/string/string_bench_args.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <nvtext/generate_ngrams.hpp>
class TextNGrams : public cudf::benchmark {};
enum class ngrams_type { tokens, characters };
static void BM_ngrams(benchmark::State& state, ngrams_type nt)
{
auto const n_rows = static_cast<cudf::size_type>(state.range(0));
auto const max_str_length = static_cast<cudf::size_type>(state.range(1));
data_profile const profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, 0, max_str_length);
auto const column = create_random_column(cudf::type_id::STRING, row_count{n_rows}, profile);
cudf::strings_column_view input(column->view());
auto const separator = cudf::string_scalar("_");
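  // tokens: whitespace-delimited token bigrams joined with '_';
  // characters: character bigrams (generate_character_ngrams defaults to n=2)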
for (auto _ : state) {
cuda_event_timer raii(state, true);
switch (nt) {
case ngrams_type::tokens: nvtext::generate_ngrams(input, 2, separator); break;
case ngrams_type::characters: nvtext::generate_character_ngrams(input); break;
}
}
state.SetBytesProcessed(state.iterations() * input.chars_size());
}
static void generate_bench_args(benchmark::internal::Benchmark* b)
{
int const min_rows = 1 << 12;
int const max_rows = 1 << 24;
int const row_mult = 8;
int const min_rowlen = 5;
int const max_rowlen = 40;
int const len_mult = 2;
generate_string_bench_args(b, min_rows, max_rows, row_mult, min_rowlen, max_rowlen, len_mult);
}
#define NVTEXT_BENCHMARK_DEFINE(name) \
BENCHMARK_DEFINE_F(TextNGrams, name) \
(::benchmark::State & st) { BM_ngrams(st, ngrams_type::name); } \
BENCHMARK_REGISTER_F(TextNGrams, name) \
->Apply(generate_bench_args) \
->UseManualTime() \
->Unit(benchmark::kMillisecond);
NVTEXT_BENCHMARK_DEFINE(tokens)
NVTEXT_BENCHMARK_DEFINE(characters)
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/reduction/minmax.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/reduction.hpp>
#include <cudf/types.hpp>
class Reduction : public cudf::benchmark {};
template <typename type>
void BM_reduction(benchmark::State& state)
{
cudf::size_type const column_size{(cudf::size_type)state.range(0)};
auto const dtype = cudf::type_to_id<type>();
auto const input_column =
create_random_column(dtype, row_count{column_size}, data_profile_builder().no_validity());
for (auto _ : state) {
cuda_event_timer timer(state, true);
auto result = cudf::minmax(*input_column);
}
}
#define concat(a, b, c) a##b##c
#define get_agg(op) concat(cudf::make_, op, _aggregation())
// TYPE, OP
#define RBM_BENCHMARK_DEFINE(name, type, aggregation) \
BENCHMARK_DEFINE_F(Reduction, name)(::benchmark::State & state) { BM_reduction<type>(state); } \
BENCHMARK_REGISTER_F(Reduction, name) \
->UseManualTime() \
->Arg(10000) /* 10k */ \
->Arg(100000) /* 100k */ \
->Arg(1000000) /* 1M */ \
->Arg(10000000) /* 10M */ \
->Arg(100000000); /* 100M */
#define REDUCE_BENCHMARK_DEFINE(type, aggregation) \
RBM_BENCHMARK_DEFINE(concat(type, _, aggregation), type, aggregation)
REDUCE_BENCHMARK_DEFINE(bool, minmax);
REDUCE_BENCHMARK_DEFINE(int8_t, minmax);
REDUCE_BENCHMARK_DEFINE(int32_t, minmax);
using cudf::timestamp_ms;
REDUCE_BENCHMARK_DEFINE(timestamp_ms, minmax);
REDUCE_BENCHMARK_DEFINE(float, minmax);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/reduction/segmented_reduce.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf/aggregation.hpp>
#include <cudf/column/column.hpp>
#include <cudf/filling.hpp>
#include <cudf/reduction.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <nvbench/nvbench.cuh>
#include <memory>
bool constexpr is_boolean_output_agg(cudf::segmented_reduce_aggregation::Kind kind)
{
return kind == cudf::segmented_reduce_aggregation::ALL ||
kind == cudf::segmented_reduce_aggregation::ANY;
}
bool constexpr is_float_output_agg(cudf::segmented_reduce_aggregation::Kind kind)
{
return kind == cudf::segmented_reduce_aggregation::MEAN ||
kind == cudf::segmented_reduce_aggregation::VARIANCE ||
kind == cudf::segmented_reduce_aggregation::STD;
}
template <cudf::segmented_reduce_aggregation::Kind kind>
std::unique_ptr<cudf::segmented_reduce_aggregation> make_reduce_aggregation()
{
switch (kind) {
case cudf::segmented_reduce_aggregation::SUM:
return cudf::make_sum_aggregation<cudf::segmented_reduce_aggregation>();
case cudf::segmented_reduce_aggregation::PRODUCT:
return cudf::make_product_aggregation<cudf::segmented_reduce_aggregation>();
case cudf::segmented_reduce_aggregation::MIN:
return cudf::make_min_aggregation<cudf::segmented_reduce_aggregation>();
case cudf::segmented_reduce_aggregation::MAX:
return cudf::make_max_aggregation<cudf::segmented_reduce_aggregation>();
case cudf::segmented_reduce_aggregation::ALL:
return cudf::make_all_aggregation<cudf::segmented_reduce_aggregation>();
case cudf::segmented_reduce_aggregation::ANY:
return cudf::make_any_aggregation<cudf::segmented_reduce_aggregation>();
case cudf::segmented_reduce_aggregation::SUM_OF_SQUARES:
return cudf::make_sum_of_squares_aggregation<cudf::segmented_reduce_aggregation>();
case cudf::segmented_reduce_aggregation::MEAN:
return cudf::make_mean_aggregation<cudf::segmented_reduce_aggregation>();
case cudf::segmented_reduce_aggregation::VARIANCE:
return cudf::make_variance_aggregation<cudf::segmented_reduce_aggregation>();
case cudf::segmented_reduce_aggregation::STD:
return cudf::make_std_aggregation<cudf::segmented_reduce_aggregation>();
case cudf::segmented_reduce_aggregation::NUNIQUE:
return cudf::make_nunique_aggregation<cudf::segmented_reduce_aggregation>();
default: CUDF_FAIL("Unsupported segmented reduce aggregation in this benchmark");
}
}
template <typename DataType>
std::pair<std::unique_ptr<cudf::column>, std::unique_ptr<cudf::column>> make_test_data(
nvbench::state& state)
{
auto const column_size{cudf::size_type(state.get_int64("column_size"))};
auto const num_segments{cudf::size_type(state.get_int64("num_segments"))};
auto segment_length = column_size / num_segments;
auto const dtype = cudf::type_to_id<DataType>();
data_profile profile = data_profile_builder().cardinality(0).no_validity().distribution(
dtype, distribution_id::UNIFORM, 0, 100);
auto input = create_random_column(dtype, row_count{column_size}, profile);
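  // Fixed-size segment offsets [0, L, 2L, ..., num_segments * L] with
  // L = segment_length; e.g. column_size=10, num_segments=3 yields {0, 3, 6, 9},
  // so remainder rows at the end of the column are excluded from the reduction.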
auto offsets = cudf::sequence(num_segments + 1,
cudf::numeric_scalar<cudf::size_type>(0),
cudf::numeric_scalar<cudf::size_type>(segment_length));
return std::pair(std::move(input), std::move(offsets));
}
template <typename DataType, cudf::aggregation::Kind kind>
void BM_Segmented_Reduction(nvbench::state& state,
nvbench::type_list<DataType, nvbench::enum_type<kind>>)
{
auto const column_size{cudf::size_type(state.get_int64("column_size"))};
auto const num_segments{cudf::size_type(state.get_int64("num_segments"))};
auto [input, offsets] = make_test_data<DataType>(state);
auto agg = make_reduce_aggregation<kind>();
auto const output_type = [] {
if (is_boolean_output_agg(kind)) { return cudf::data_type{cudf::type_id::BOOL8}; }
if (is_float_output_agg(kind)) { return cudf::data_type{cudf::type_id::FLOAT64}; }
if (kind == cudf::segmented_reduce_aggregation::NUNIQUE) {
return cudf::data_type{cudf::type_to_id<cudf::size_type>()};
}
return cudf::data_type{cudf::type_to_id<DataType>()};
}();
state.add_element_count(column_size);
state.add_global_memory_reads<DataType>(column_size);
if (is_boolean_output_agg(kind)) {
state.add_global_memory_writes<nvbench::int8_t>(num_segments); // BOOL8
} else {
state.add_global_memory_writes<DataType>(num_segments);
}
auto const input_view = input->view();
auto const offsets_view = offsets->view();
auto const offset_span = cudf::device_span<cudf::size_type const>{
offsets_view.template data<cudf::size_type>(), static_cast<std::size_t>(offsets_view.size())};
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(
nvbench::exec_tag::sync, [input_view, output_type, offset_span, &agg](nvbench::launch& launch) {
segmented_reduce(input_view, offset_span, *agg, output_type, cudf::null_policy::INCLUDE);
});
}
using Types = nvbench::type_list<bool, int32_t, float, double>;
// Skip benchmarking MAX/ANY since they are covered by MIN/ALL respectively.
// Also VARIANCE includes STD calculation.
using AggKinds = nvbench::enum_type_list<cudf::aggregation::SUM,
cudf::aggregation::PRODUCT,
cudf::aggregation::MIN,
cudf::aggregation::ALL,
cudf::aggregation::MEAN,
cudf::aggregation::VARIANCE,
cudf::aggregation::NUNIQUE>;
NVBENCH_BENCH_TYPES(BM_Segmented_Reduction, NVBENCH_TYPE_AXES(Types, AggKinds))
.set_name("segmented_reduction")
.set_type_axes_names({"DataType", "AggregationKinds"})
.add_int64_axis("column_size", {100'000, 1'000'000, 10'000'000, 100'000'000})
.add_int64_axis("num_segments", {1'000, 10'000, 100'000});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/reduction/rank.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf/detail/scan.hpp>
#include <cudf/filling.hpp>
#include <cudf/lists/list_view.hpp>
#include <nvbench/nvbench.cuh>
template <typename type>
static void nvbench_reduction_scan(nvbench::state& state, nvbench::type_list<type>)
{
auto const dtype = cudf::type_to_id<type>();
double const null_probability = state.get_float64("null_probability");
size_t const size = state.get_int64("data_size");
data_profile const profile = data_profile_builder()
.null_probability(null_probability)
.distribution(dtype, distribution_id::UNIFORM, 0, 5);
auto const table = create_random_table({dtype}, table_size_bytes{size / 2}, profile);
auto const new_tbl = cudf::repeat(table->view(), 2);
cudf::column_view input(new_tbl->view().column(0));
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
rmm::cuda_stream_view stream_view{launch.get_stream()};
auto result = cudf::detail::inclusive_dense_rank_scan(
input, stream_view, rmm::mr::get_current_device_resource());
});
}
using data_type = nvbench::type_list<int32_t, cudf::list_view>;
NVBENCH_BENCH_TYPES(nvbench_reduction_scan, NVBENCH_TYPE_AXES(data_type))
.set_name("rank_scan")
.add_float64_axis("null_probability", {0, 0.1, 0.5, 0.9})
.add_int64_axis("data_size",
{
10000, // 10k
100000, // 100k
1000000, // 1M
10000000, // 10M
100000000, // 100M
});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/reduction/dictionary.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/dictionary/encode.hpp>
#include <cudf/reduction.hpp>
#include <cudf/types.hpp>
#include <cudf/unary.hpp>
class ReductionDictionary : public cudf::benchmark {};
template <typename T>
void BM_reduction_dictionary(benchmark::State& state,
std::unique_ptr<cudf::reduce_aggregation> const& agg)
{
cudf::size_type const column_size{static_cast<cudf::size_type>(state.range(0))};
// int column and encoded dictionary column
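  // Bounds are chosen so the short-circuiting reductions never exit early:
  // ALL draws from [1, 100] (every value truthy) and ANY draws all zeros
  // (every value falsy), forcing a full scan of the column.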
data_profile const profile = data_profile_builder().cardinality(0).no_validity().distribution(
cudf::type_to_id<long>(),
distribution_id::UNIFORM,
(agg->kind == cudf::aggregation::ALL ? 1 : 0),
(agg->kind == cudf::aggregation::ANY ? 0 : 100));
auto int_column = create_random_column(cudf::type_to_id<long>(), row_count{column_size}, profile);
auto number_col = cudf::cast(*int_column, cudf::data_type{cudf::type_to_id<T>()});
auto values = cudf::dictionary::encode(*number_col);
cudf::data_type output_dtype = [&] {
if (agg->kind == cudf::aggregation::ANY || agg->kind == cudf::aggregation::ALL)
return cudf::data_type{cudf::type_id::BOOL8};
if (agg->kind == cudf::aggregation::MEAN) return cudf::data_type{cudf::type_id::FLOAT64};
return cudf::data_type{cudf::type_to_id<T>()};
}();
for (auto _ : state) {
cuda_event_timer timer(state, true);
auto result = cudf::reduce(*values, *agg, output_dtype);
}
}
#define concat(a, b, c) a##b##c
#define get_agg(op) concat(cudf::make_, op, _aggregation<cudf::reduce_aggregation>())
// TYPE, OP
#define RBM_BENCHMARK_DEFINE(name, type, aggregation) \
BENCHMARK_DEFINE_F(ReductionDictionary, name)(::benchmark::State & state) \
{ \
BM_reduction_dictionary<type>(state, get_agg(aggregation)); \
} \
BENCHMARK_REGISTER_F(ReductionDictionary, name) \
->UseManualTime() \
->Arg(10000) /* 10k */ \
->Arg(100000) /* 100k */ \
->Arg(1000000) /* 1M */ \
->Arg(10000000) /* 10M */ \
->Arg(100000000); /* 100M */
#define REDUCE_BENCHMARK_DEFINE(type, aggregation) \
RBM_BENCHMARK_DEFINE(concat(type, _, aggregation), type, aggregation)
REDUCE_BENCHMARK_DEFINE(int32_t, all);
REDUCE_BENCHMARK_DEFINE(float, all);
REDUCE_BENCHMARK_DEFINE(int32_t, any);
REDUCE_BENCHMARK_DEFINE(float, any);
REDUCE_BENCHMARK_DEFINE(int32_t, min);
REDUCE_BENCHMARK_DEFINE(float, min);
REDUCE_BENCHMARK_DEFINE(int32_t, max);
REDUCE_BENCHMARK_DEFINE(float, max);
REDUCE_BENCHMARK_DEFINE(int32_t, mean);
REDUCE_BENCHMARK_DEFINE(float, mean);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/reduction/anyall.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/reduction.hpp>
#include <cudf/types.hpp>
#include <memory>
class Reduction : public cudf::benchmark {};
template <typename type>
void BM_reduction_anyall(benchmark::State& state,
std::unique_ptr<cudf::reduce_aggregation> const& agg)
{
cudf::size_type const column_size{static_cast<cudf::size_type>(state.range(0))};
auto const dtype = cudf::type_to_id<type>();
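  // For ANY every value is zero so the result stays false and the reduction
  // cannot short-circuit; all other aggregations draw from [0, 100].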
data_profile const profile = data_profile_builder().no_validity().distribution(
dtype, distribution_id::UNIFORM, 0, agg->kind == cudf::aggregation::ANY ? 0 : 100);
auto const values = create_random_column(dtype, row_count{column_size}, profile);
cudf::data_type output_dtype{cudf::type_id::BOOL8};
for (auto _ : state) {
cuda_event_timer timer(state, true);
auto result = cudf::reduce(*values, *agg, output_dtype);
}
}
#define concat(a, b, c) a##b##c
#define get_agg(op) concat(cudf::make_, op, _aggregation<cudf::reduce_aggregation>())
// TYPE, OP
#define RBM_BENCHMARK_DEFINE(name, type, aggregation) \
BENCHMARK_DEFINE_F(Reduction, name)(::benchmark::State & state) \
{ \
BM_reduction_anyall<type>(state, get_agg(aggregation)); \
} \
BENCHMARK_REGISTER_F(Reduction, name) \
->UseManualTime() \
->Arg(10000) /* 10k */ \
->Arg(100000) /* 100k */ \
->Arg(1000000) /* 1M */ \
->Arg(10000000) /* 10M */ \
->Arg(100000000); /* 100M */
#define REDUCE_BENCHMARK_DEFINE(type, aggregation) \
RBM_BENCHMARK_DEFINE(concat(type, _, aggregation), type, aggregation)
REDUCE_BENCHMARK_DEFINE(bool, all);
REDUCE_BENCHMARK_DEFINE(int8_t, all);
REDUCE_BENCHMARK_DEFINE(int32_t, all);
REDUCE_BENCHMARK_DEFINE(float, all);
REDUCE_BENCHMARK_DEFINE(bool, any);
REDUCE_BENCHMARK_DEFINE(int8_t, any);
REDUCE_BENCHMARK_DEFINE(int32_t, any);
REDUCE_BENCHMARK_DEFINE(float, any);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/reduction/scan_structs.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/scan.hpp>
#include <nvbench/nvbench.cuh>
static constexpr cudf::size_type num_struct_members = 8;
static constexpr cudf::size_type max_int = 100;
static constexpr cudf::size_type max_str_length = 32;
static void nvbench_structs_scan(nvbench::state& state)
{
auto const null_probability = [&] {
auto const null_prob_val = state.get_float64("null_probability");
return null_prob_val > 0 ? std::optional{null_prob_val} : std::nullopt;
}();
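  // A probability of zero maps to std::nullopt so columns are generated with
  // no null mask at all, rather than with an all-valid mask.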
auto const size = static_cast<cudf::size_type>(state.get_int64("data_size"));
auto const profile = static_cast<data_profile>(
data_profile_builder()
.null_probability(null_probability)
.distribution(cudf::type_id::INT32, distribution_id::UNIFORM, 0, max_int)
.distribution(cudf::type_id::STRING, distribution_id::NORMAL, 0, max_str_length));
auto data_table = create_random_table(
cycle_dtypes({cudf::type_id::INT32, cudf::type_id::STRING}, num_struct_members),
row_count{size},
profile);
auto [null_mask, null_count] = create_random_null_mask(size, null_probability);
auto const input = cudf::make_structs_column(
size, std::move(data_table->release()), null_count, std::move(null_mask));
auto const agg = cudf::make_min_aggregation<cudf::scan_aggregation>();
auto const null_policy = static_cast<cudf::null_policy>(state.get_int64("null_policy"));
auto const stream = cudf::get_default_stream();
state.set_cuda_stream(nvbench::make_cuda_stream_view(stream.value()));
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
auto const result = cudf::detail::scan_inclusive(
*input, *agg, null_policy, stream, rmm::mr::get_current_device_resource());
});
}
NVBENCH_BENCH(nvbench_structs_scan)
.set_name("structs_scan")
.add_float64_axis("null_probability", {0, 0.1, 0.5, 0.9})
.add_int64_axis("null_policy", {0, 1})
.add_int64_axis("data_size",
{
10000, // 10k
100000, // 100k
1000000, // 1M
10000000, // 10M
});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/reduction/scan.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/reduction.hpp>
#include <cudf/table/table.hpp>
#include <cudf/types.hpp>
#include <rmm/device_buffer.hpp>
class ReductionScan : public cudf::benchmark {};
template <typename type>
static void BM_reduction_scan(benchmark::State& state, bool include_nulls)
{
cudf::size_type const n_rows{(cudf::size_type)state.range(0)};
auto const dtype = cudf::type_to_id<type>();
auto const column = create_random_column(dtype, row_count{n_rows});
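  // Release any null mask produced by the generator so the no-null code path
  // is measured when include_nulls is false.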
if (!include_nulls) column->set_null_mask(rmm::device_buffer{}, 0);
for (auto _ : state) {
cuda_event_timer timer(state, true);
auto result = cudf::scan(
*column, *cudf::make_min_aggregation<cudf::scan_aggregation>(), cudf::scan_type::INCLUSIVE);
}
}
#define SCAN_BENCHMARK_DEFINE(name, type, nulls) \
BENCHMARK_DEFINE_F(ReductionScan, name) \
(::benchmark::State & state) { BM_reduction_scan<type>(state, nulls); } \
BENCHMARK_REGISTER_F(ReductionScan, name) \
->UseManualTime() \
->Arg(10000) /* 10k */ \
->Arg(100000) /* 100k */ \
->Arg(1000000) /* 1M */ \
->Arg(10000000) /* 10M */ \
->Arg(100000000); /* 100M */
SCAN_BENCHMARK_DEFINE(int8_no_nulls, int8_t, false);
SCAN_BENCHMARK_DEFINE(int32_no_nulls, int32_t, false);
SCAN_BENCHMARK_DEFINE(uint64_no_nulls, uint64_t, false);
SCAN_BENCHMARK_DEFINE(float_no_nulls, float, false);
SCAN_BENCHMARK_DEFINE(int16_nulls, int16_t, true);
SCAN_BENCHMARK_DEFINE(uint32_nulls, uint32_t, true);
SCAN_BENCHMARK_DEFINE(double_nulls, double, true);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/reduction/reduce.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/reduction.hpp>
#include <cudf/types.hpp>
#include <memory>
class Reduction : public cudf::benchmark {};
template <typename type>
void BM_reduction(benchmark::State& state, std::unique_ptr<cudf::reduce_aggregation> const& agg)
{
cudf::size_type const column_size{(cudf::size_type)state.range(0)};
auto const dtype = cudf::type_to_id<type>();
data_profile const profile =
data_profile_builder().no_validity().distribution(dtype, distribution_id::UNIFORM, 0, 100);
auto const input_column = create_random_column(dtype, row_count{column_size}, profile);
cudf::data_type output_dtype =
(agg->kind == cudf::aggregation::MEAN || agg->kind == cudf::aggregation::VARIANCE ||
agg->kind == cudf::aggregation::STD)
? cudf::data_type{cudf::type_id::FLOAT64}
: input_column->type();
for (auto _ : state) {
cuda_event_timer timer(state, true);
auto result = cudf::reduce(*input_column, *agg, output_dtype);
}
}
#define concat(a, b, c) a##b##c
#define get_agg(op) concat(cudf::make_, op, _aggregation<cudf::reduce_aggregation>())
// TYPE, OP
#define RBM_BENCHMARK_DEFINE(name, type, aggregation) \
BENCHMARK_DEFINE_F(Reduction, name)(::benchmark::State & state) \
{ \
BM_reduction<type>(state, get_agg(aggregation)); \
} \
BENCHMARK_REGISTER_F(Reduction, name) \
->UseManualTime() \
->Arg(10000) /* 10k */ \
->Arg(100000) /* 100k */ \
->Arg(1000000) /* 1M */ \
->Arg(10000000) /* 10M */ \
->Arg(100000000); /* 100M */
#define REDUCE_BENCHMARK_DEFINE(type, aggregation) \
RBM_BENCHMARK_DEFINE(concat(type, _, aggregation), type, aggregation)
#define REDUCE_BENCHMARK_NUMERIC(aggregation) \
REDUCE_BENCHMARK_DEFINE(bool, aggregation); \
REDUCE_BENCHMARK_DEFINE(int8_t, aggregation); \
REDUCE_BENCHMARK_DEFINE(int32_t, aggregation); \
REDUCE_BENCHMARK_DEFINE(int64_t, aggregation); \
REDUCE_BENCHMARK_DEFINE(float, aggregation); \
REDUCE_BENCHMARK_DEFINE(double, aggregation);
REDUCE_BENCHMARK_NUMERIC(sum);
REDUCE_BENCHMARK_DEFINE(int32_t, product);
REDUCE_BENCHMARK_DEFINE(float, product);
REDUCE_BENCHMARK_DEFINE(int64_t, min);
REDUCE_BENCHMARK_DEFINE(double, min);
using cudf::timestamp_ms;
REDUCE_BENCHMARK_DEFINE(timestamp_ms, min);
REDUCE_BENCHMARK_DEFINE(int8_t, mean);
REDUCE_BENCHMARK_DEFINE(float, mean);
REDUCE_BENCHMARK_DEFINE(int32_t, variance);
REDUCE_BENCHMARK_DEFINE(double, variance);
REDUCE_BENCHMARK_DEFINE(int64_t, std);
REDUCE_BENCHMARK_DEFINE(float, std);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/io/fst.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <io/fst/lookup_tables.cuh>
#include <io/utilities/hostdevice_vector.hpp> //TODO find better replacement
#include <tests/io/fst/common.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/strings/repeat_strings.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/iterator/discard_iterator.h>
#include <nvbench/nvbench.cuh>
#include <cstdlib>
namespace {
auto make_test_json_data(nvbench::state& state)
{
auto const string_size{cudf::size_type(state.get_int64("string_size"))};
// Test input
std::string input = R"( {)"
R"("category": "reference",)"
R"("index:" [4,12,42],)"
R"("author": "Nigel Rees",)"
R"("title": "Sayings of the Century",)"
R"("price": 8.95)"
R"(} )"
R"({)"
R"("category": "reference",)"
R"("index:" [4,{},null,{"a":[]}],)"
R"("author": "Nigel Rees",)"
R"("title": "Sayings of the Century",)"
R"("price": 8.95)"
R"(} {} [] [ ])";
auto d_input_scalar = cudf::make_string_scalar(input);
auto& d_string_scalar = static_cast<cudf::string_scalar&>(*d_input_scalar);
cudf::size_type const repeat_times = string_size / input.size();
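  // repeat_string concatenates the snippet repeat_times times, so the returned
  // string's size is string_size rounded down to a multiple of input.size().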
return cudf::strings::repeat_string(d_string_scalar, repeat_times);
}
// Type used to represent the atomic symbol type used within the finite-state machine
using SymbolT = char;
// Type sufficiently large to index symbols within the input and output (may be unsigned)
using SymbolOffsetT = uint32_t;
constexpr std::size_t single_item = 1;
constexpr auto max_translation_table_size = TT_NUM_STATES * NUM_SYMBOL_GROUPS;
} // namespace
void BM_FST_JSON(nvbench::state& state)
{
CUDF_EXPECTS(state.get_int64("string_size") <= std::numeric_limits<cudf::size_type>::max(),
"Benchmarks only support up to size_type's maximum number of items");
auto const string_size{cudf::size_type(state.get_int64("string_size"))};
// Prepare cuda stream for data transfers & kernels
rmm::cuda_stream stream{};
rmm::cuda_stream_view stream_view(stream);
auto input_string = make_test_json_data(state);
auto& d_input = static_cast<cudf::scalar_type_t<std::string>&>(*input_string);
state.add_element_count(d_input.size());
// Prepare input & output buffers
cudf::detail::hostdevice_vector<SymbolT> output_gpu(d_input.size(), stream_view);
cudf::detail::hostdevice_vector<SymbolOffsetT> output_gpu_size(single_item, stream_view);
cudf::detail::hostdevice_vector<SymbolOffsetT> out_indexes_gpu(d_input.size(), stream_view);
// Run algorithm
auto parser = cudf::io::fst::detail::make_fst(
cudf::io::fst::detail::make_symbol_group_lut(pda_sgs),
cudf::io::fst::detail::make_transition_table(pda_state_tt),
cudf::io::fst::detail::make_translation_table<max_translation_table_size>(pda_out_tt),
stream);
state.set_cuda_stream(nvbench::make_cuda_stream_view(stream.value()));
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
// Allocate device-side temporary storage & run algorithm
parser.Transduce(d_input.data(),
static_cast<SymbolOffsetT>(d_input.size()),
output_gpu.device_ptr(),
out_indexes_gpu.device_ptr(),
output_gpu_size.device_ptr(),
start_state,
stream.value());
});
}
void BM_FST_JSON_no_outidx(nvbench::state& state)
{
CUDF_EXPECTS(state.get_int64("string_size") <= std::numeric_limits<cudf::size_type>::max(),
"Benchmarks only support up to size_type's maximum number of items");
auto const string_size{cudf::size_type(state.get_int64("string_size"))};
// Prepare cuda stream for data transfers & kernels
rmm::cuda_stream stream{};
rmm::cuda_stream_view stream_view(stream);
auto input_string = make_test_json_data(state);
auto& d_input = static_cast<cudf::scalar_type_t<std::string>&>(*input_string);
state.add_element_count(d_input.size());
// Prepare input & output buffers
cudf::detail::hostdevice_vector<SymbolT> output_gpu(d_input.size(), stream_view);
cudf::detail::hostdevice_vector<SymbolOffsetT> output_gpu_size(single_item, stream_view);
cudf::detail::hostdevice_vector<SymbolOffsetT> out_indexes_gpu(d_input.size(), stream_view);
// Run algorithm
auto parser = cudf::io::fst::detail::make_fst(
cudf::io::fst::detail::make_symbol_group_lut(pda_sgs),
cudf::io::fst::detail::make_transition_table(pda_state_tt),
cudf::io::fst::detail::make_translation_table<max_translation_table_size>(pda_out_tt),
stream);
state.set_cuda_stream(nvbench::make_cuda_stream_view(stream.value()));
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
// Allocate device-side temporary storage & run algorithm
parser.Transduce(d_input.data(),
static_cast<SymbolOffsetT>(d_input.size()),
output_gpu.device_ptr(),
thrust::make_discard_iterator(),
output_gpu_size.device_ptr(),
start_state,
stream.value());
});
}
void BM_FST_JSON_no_out(nvbench::state& state)
{
CUDF_EXPECTS(state.get_int64("string_size") <= std::numeric_limits<cudf::size_type>::max(),
"Benchmarks only support up to size_type's maximum number of items");
auto const string_size{cudf::size_type(state.get_int64("string_size"))};
// Prepare cuda stream for data transfers & kernels
rmm::cuda_stream stream{};
rmm::cuda_stream_view stream_view(stream);
auto input_string = make_test_json_data(state);
auto& d_input = static_cast<cudf::scalar_type_t<std::string>&>(*input_string);
state.add_element_count(d_input.size());
// Prepare input & output buffers
cudf::detail::hostdevice_vector<SymbolOffsetT> output_gpu_size(single_item, stream_view);
// Run algorithm
auto parser = cudf::io::fst::detail::make_fst(
cudf::io::fst::detail::make_symbol_group_lut(pda_sgs),
cudf::io::fst::detail::make_transition_table(pda_state_tt),
cudf::io::fst::detail::make_translation_table<max_translation_table_size>(pda_out_tt),
stream);
state.set_cuda_stream(nvbench::make_cuda_stream_view(stream.value()));
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
// Allocate device-side temporary storage & run algorithm
parser.Transduce(d_input.data(),
static_cast<SymbolOffsetT>(d_input.size()),
thrust::make_discard_iterator(),
thrust::make_discard_iterator(),
output_gpu_size.device_ptr(),
start_state,
stream.value());
});
}
void BM_FST_JSON_no_str(nvbench::state& state)
{
CUDF_EXPECTS(state.get_int64("string_size") <= std::numeric_limits<cudf::size_type>::max(),
"Benchmarks only support up to size_type's maximum number of items");
auto const string_size{cudf::size_type(state.get_int64("string_size"))};
// Prepare cuda stream for data transfers & kernels
rmm::cuda_stream stream{};
rmm::cuda_stream_view stream_view(stream);
auto input_string = make_test_json_data(state);
auto& d_input = static_cast<cudf::scalar_type_t<std::string>&>(*input_string);
state.add_element_count(d_input.size());
// Prepare input & output buffers
cudf::detail::hostdevice_vector<SymbolOffsetT> output_gpu_size(single_item, stream_view);
cudf::detail::hostdevice_vector<SymbolOffsetT> out_indexes_gpu(d_input.size(), stream_view);
// Run algorithm
auto parser = cudf::io::fst::detail::make_fst(
cudf::io::fst::detail::make_symbol_group_lut(pda_sgs),
cudf::io::fst::detail::make_transition_table(pda_state_tt),
cudf::io::fst::detail::make_translation_table<max_translation_table_size>(pda_out_tt),
stream);
state.set_cuda_stream(nvbench::make_cuda_stream_view(stream.value()));
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
// Allocate device-side temporary storage & run algorithm
parser.Transduce(d_input.data(),
static_cast<SymbolOffsetT>(d_input.size()),
thrust::make_discard_iterator(),
out_indexes_gpu.device_ptr(),
output_gpu_size.device_ptr(),
start_state,
stream.value());
});
}
NVBENCH_BENCH(BM_FST_JSON)
.set_name("FST_JSON")
.add_int64_power_of_two_axis("string_size", nvbench::range(20, 30, 1));
NVBENCH_BENCH(BM_FST_JSON_no_outidx)
.set_name("FST_JSON_no_outidx")
.add_int64_power_of_two_axis("string_size", nvbench::range(20, 30, 1));
NVBENCH_BENCH(BM_FST_JSON_no_out)
.set_name("FST_JSON_no_out")
.add_int64_power_of_two_axis("string_size", nvbench::range(20, 30, 1));
NVBENCH_BENCH(BM_FST_JSON_no_str)
.set_name("FST_JSON_no_str")
.add_int64_power_of_two_axis("string_size", nvbench::range(20, 30, 1));
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/io/cuio_common.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/io/cuio_common.hpp>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/detail/utilities/logger.hpp>
#include <algorithm>
#include <array>
#include <cstdio>
#include <fstream>
#include <memory>
#include <numeric>
#include <string>
#include <unistd.h>
temp_directory const cuio_source_sink_pair::tmpdir{"cudf_gbench"};
std::string random_file_in_dir(std::string const& dir_path)
{
// `mkstemp` modifies the template in place
std::string filename = dir_path + "io.XXXXXX";
  // `mkstemp` also opens the file; close it immediately since only the name is needed
close(mkstemp(const_cast<char*>(filename.data())));
return filename;
}
cuio_source_sink_pair::cuio_source_sink_pair(io_type type)
: type{type},
d_buffer{0, cudf::get_default_stream()},
file_name{random_file_in_dir(tmpdir.path())}
{
}
cudf::io::source_info cuio_source_sink_pair::make_source_info()
{
switch (type) {
case io_type::FILEPATH: return cudf::io::source_info(file_name);
case io_type::HOST_BUFFER: return cudf::io::source_info(h_buffer.data(), h_buffer.size());
case io_type::DEVICE_BUFFER: {
// TODO: make cuio_source_sink_pair stream-friendly and avoid implicit use of the default
// stream
auto const stream = cudf::get_default_stream();
d_buffer.resize(h_buffer.size(), stream);
CUDF_CUDA_TRY(cudaMemcpyAsync(
d_buffer.data(), h_buffer.data(), h_buffer.size(), cudaMemcpyDefault, stream.value()));
return cudf::io::source_info(d_buffer);
}
default: CUDF_FAIL("invalid input type");
}
}
cudf::io::sink_info cuio_source_sink_pair::make_sink_info()
{
switch (type) {
case io_type::VOID: return cudf::io::sink_info(&void_sink);
case io_type::FILEPATH: return cudf::io::sink_info(file_name);
case io_type::HOST_BUFFER: [[fallthrough]];
case io_type::DEVICE_BUFFER: return cudf::io::sink_info(&h_buffer);
default: CUDF_FAIL("invalid output type");
}
}
size_t cuio_source_sink_pair::size()
{
switch (type) {
case io_type::VOID: return void_sink.bytes_written();
case io_type::FILEPATH:
return static_cast<size_t>(
std::ifstream(file_name, std::ifstream::ate | std::ifstream::binary).tellg());
case io_type::HOST_BUFFER: [[fallthrough]];
case io_type::DEVICE_BUFFER: return h_buffer.size();
default: CUDF_FAIL("invalid output type");
}
}
std::vector<cudf::type_id> dtypes_for_column_selection(std::vector<cudf::type_id> const& data_types,
column_selection col_sel)
{
std::vector<cudf::type_id> out_dtypes;
out_dtypes.reserve(2 * data_types.size());
switch (col_sel) {
case column_selection::ALL:
case column_selection::FIRST_HALF:
case column_selection::SECOND_HALF:
std::copy(data_types.begin(), data_types.end(), std::back_inserter(out_dtypes));
std::copy(data_types.begin(), data_types.end(), std::back_inserter(out_dtypes));
break;
case column_selection::ALTERNATE:
for (auto const& type : data_types) {
out_dtypes.push_back(type);
out_dtypes.push_back(type);
}
break;
}
return out_dtypes;
}
std::vector<int> select_column_indexes(int num_cols, column_selection col_sel)
{
std::vector<int> col_idxs(num_cols / 2);
switch (col_sel) {
    case column_selection::ALL:
      col_idxs.resize(num_cols);
      [[fallthrough]];
case column_selection::FIRST_HALF:
case column_selection::SECOND_HALF:
std::iota(std::begin(col_idxs),
std::end(col_idxs),
(col_sel == column_selection::SECOND_HALF) ? num_cols / 2 : 0);
break;
case column_selection::ALTERNATE:
for (size_t i = 0; i < col_idxs.size(); ++i)
col_idxs[i] = 2 * i;
break;
}
return col_idxs;
}
std::vector<std::string> select_column_names(std::vector<std::string> const& col_names,
column_selection col_sel)
{
auto const col_idxs_to_read = select_column_indexes(col_names.size(), col_sel);
std::vector<std::string> col_names_to_read;
std::transform(col_idxs_to_read.begin(),
col_idxs_to_read.end(),
std::back_inserter(col_names_to_read),
[&](auto& idx) { return col_names[idx]; });
return col_names_to_read;
}
std::vector<cudf::size_type> segments_in_chunk(int num_segments, int num_chunks, int chunk_idx)
{
CUDF_EXPECTS(num_segments >= num_chunks,
"Number of chunks cannot be greater than the number of segments in the file");
CUDF_EXPECTS(chunk_idx < num_chunks,
"Chunk index must be smaller than the number of chunks in the file");
auto const segments_in_chunk = cudf::util::div_rounding_up_unsafe(num_segments, num_chunks);
auto const begin_segment = std::min(chunk_idx * segments_in_chunk, num_segments);
auto const end_segment = std::min(begin_segment + segments_in_chunk, num_segments);
std::vector<cudf::size_type> selected_segments(end_segment - begin_segment);
std::iota(selected_segments.begin(), selected_segments.end(), begin_segment);
return selected_segments;
}
// Executes the command and returns stderr output
std::string exec_cmd(std::string_view cmd)
{
// Prevent the output from the command from mixing with the original process' output
std::fflush(nullptr);
// Switch stderr and stdout to only capture stderr
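  // The redirection `3>&2 2>&1 1>&3` swaps stdout and stderr inside the
  // subshell: the pipe read by popen (stdout) then carries the command's
  // stderr, while the command's original stdout ends up in /dev/null.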
auto const redirected_cmd = std::string{"( "}.append(cmd).append(" 3>&2 2>&1 1>&3) 2>/dev/null");
std::unique_ptr<FILE, decltype(&pclose)> pipe(popen(redirected_cmd.c_str(), "r"), pclose);
CUDF_EXPECTS(pipe != nullptr, "popen() failed");
std::array<char, 128> buffer;
std::string error_out;
while (fgets(buffer.data(), buffer.size(), pipe.get()) != nullptr) {
error_out += buffer.data();
}
return error_out;
}
void log_l3_warning_once()
{
static bool is_logged = false;
if (not is_logged) {
CUDF_LOG_WARN(
"Running benchmarks without dropping the L3 cache; results may not reflect file IO "
"throughput");
is_logged = true;
}
}
void try_drop_l3_cache()
{
static bool is_drop_cache_enabled = std::getenv("CUDF_BENCHMARK_DROP_CACHE") != nullptr;
if (not is_drop_cache_enabled) {
log_l3_warning_once();
return;
}
std::array drop_cache_cmds{"/sbin/sysctl vm.drop_caches=3", "sudo /sbin/sysctl vm.drop_caches=3"};
CUDF_EXPECTS(std::any_of(drop_cache_cmds.cbegin(),
drop_cache_cmds.cend(),
[](auto& cmd) { return exec_cmd(cmd).empty(); }),
"Failed to execute the drop cache command");
}
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/io/cuio_common.hpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf_test/file_utilities.hpp>
#include <cudf/io/data_sink.hpp>
#include <cudf/io/datasource.hpp>
#include <cudf/io/types.hpp>
#include <rmm/device_uvector.hpp>
using cudf::io::io_type;
std::string random_file_in_dir(std::string const& dir_path);
/**
* @brief Class to create a coupled `source_info` and `sink_info` of given type.
*/
class cuio_source_sink_pair {
class bytes_written_only_sink : public cudf::io::data_sink {
size_t _bytes_written = 0;
public:
void host_write(void const* data, size_t size) override { _bytes_written += size; }
void flush() override {}
size_t bytes_written() override { return _bytes_written; }
};
public:
cuio_source_sink_pair(io_type type);
~cuio_source_sink_pair()
{
// delete the temporary file
std::remove(file_name.c_str());
}
/**
 * @brief Creates a source info of the set type
*
* The `datasource` created using the returned `source_info` will read data from the same location
* that the result of a @ref `make_sink_info` call writes to.
*
* @return The description of the data source
*/
cudf::io::source_info make_source_info();
/**
 * @brief Creates a sink info of the set type
*
* The `data_sink` created using the returned `sink_info` will write data to the same location
* that the result of a @ref `make_source_info` call reads from.
*
* `io_type::DEVICE_BUFFER` source/sink is an exception where a host buffer sink will be created.
*
* @return The description of the data sink
*/
cudf::io::sink_info make_sink_info();
[[nodiscard]] size_t size();
private:
static temp_directory const tmpdir;
io_type const type;
std::vector<char> h_buffer;
rmm::device_uvector<std::byte> d_buffer;
std::string const file_name;
bytes_written_only_sink void_sink;
};
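// Illustrative usage sketch (not part of the original header): a write/read round trip
// through a coupled source/sink pair, mirroring the pattern used by the CSV benchmarks
// below. `view` is assumed to be a cudf::table_view holding the benchmark data.
//
//   cuio_source_sink_pair source_sink(io_type::HOST_BUFFER);
//   cudf::io::csv_writer_options write_opts =
//     cudf::io::csv_writer_options::builder(source_sink.make_sink_info(), view);
//   cudf::io::write_csv(write_opts);
//   cudf::io::csv_reader_options read_opts =
//     cudf::io::csv_reader_options::builder(source_sink.make_source_info());
//   auto const result = cudf::io::read_csv(read_opts);
//   auto const encoded_size = source_sink.size();  // bytes written by the writer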
/**
* @brief Column selection strategy.
*/
enum class column_selection { ALL, ALTERNATE, FIRST_HALF, SECOND_HALF };
/**
* @brief Row selection strategy.
*
 * Not all strategies are applicable to all readers. For example, the CSV benchmarks
 * exercise BYTE_RANGE, NROWS, and SKIPFOOTER, the ORC benchmarks exercise NROWS and
 * STRIPES, and ROW_GROUPS applies to the Parquet reader.
*/
enum class row_selection { ALL, BYTE_RANGE, NROWS, SKIPFOOTER, STRIPES, ROW_GROUPS };
/**
 * @brief Modify data types such that the total size of the selected columns is a fixed
 * fraction of the total table size.
*
* The data types are multiplied/rearranged such that the columns selected with the given column
* selection enumerator add up to a fixed fraction of the total table size, regardless of the data
* types.
*
* @param ids Array of column type IDs
 * @param col_sel The column selection enumerator
*
* @return The duplicated/rearranged array of type IDs
*/
std::vector<cudf::type_id> dtypes_for_column_selection(std::vector<cudf::type_id> const& ids,
column_selection col_sel);
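// For example (illustrative): with ids = {INT32, FLOAT64},
//   column_selection::ALL/FIRST_HALF/SECOND_HALF yield {INT32, FLOAT64, INT32, FLOAT64}
//   column_selection::ALTERNATE                  yields {INT32, INT32, FLOAT64, FLOAT64}
// so FIRST_HALF, SECOND_HALF, and ALTERNATE each select every requested type exactly once.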
/**
* @brief Selects a subset of columns based on the input enumerator.
*/
std::vector<int> select_column_indexes(int num_cols, column_selection col_sel);
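// For example (illustrative): with num_cols = 8,
//   ALL         -> {0, 1, 2, 3, 4, 5, 6, 7}
//   FIRST_HALF  -> {0, 1, 2, 3}
//   SECOND_HALF -> {4, 5, 6, 7}
//   ALTERNATE   -> {0, 2, 4, 6}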
/**
* @brief Selects a subset of columns from the array of names, based on the input enumerator.
*/
std::vector<std::string> select_column_names(std::vector<std::string> const& col_names,
column_selection col_sel);
/**
* @brief Returns file segments that belong to the given chunk if the file is split into a given
* number of chunks.
*
* The segments could be Parquet row groups or ORC stripes.
*/
std::vector<cudf::size_type> segments_in_chunk(int num_segments, int num_chunks, int chunk);
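// For example (illustrative): splitting 10 segments into 4 chunks assigns
// ceil(10 / 4) = 3 segments per chunk, so chunk 0 reads {0, 1, 2} while the
// last chunk (index 3) reads only {9}.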
/**
* @brief Drops L3 cache if `CUDF_BENCHMARK_DROP_CACHE` environment variable is set.
*
* Has no effect if the environment variable is not set.
 * May require sudo access to run successfully.
*
* @throw cudf::logic_error if the environment variable is set and the command fails
*/
void try_drop_l3_cache();
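// Illustrative shell usage (the benchmark binary name is hypothetical):
//   $ export CUDF_BENCHMARK_DROP_CACHE=1
//   $ ./ORC_READER_NVBENCH   # benchmarks calling try_drop_l3_cache() now drop the cache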
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks
|
rapidsai_public_repos/cudf/cpp/benchmarks/io/nvbench_helpers.hpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/io/cuio_common.hpp>
#include <cudf/io/types.hpp>
#include <cudf/types.hpp>
#include <nvbench/nvbench.cuh>
enum class data_type : int32_t {
INTEGRAL = static_cast<int32_t>(type_group_id::INTEGRAL),
INTEGRAL_SIGNED = static_cast<int32_t>(type_group_id::INTEGRAL_SIGNED),
FLOAT = static_cast<int32_t>(type_group_id::FLOATING_POINT),
DECIMAL = static_cast<int32_t>(type_group_id::FIXED_POINT),
TIMESTAMP = static_cast<int32_t>(type_group_id::TIMESTAMP),
DURATION = static_cast<int32_t>(type_group_id::DURATION),
STRING = static_cast<int32_t>(cudf::type_id::STRING),
LIST = static_cast<int32_t>(cudf::type_id::LIST),
STRUCT = static_cast<int32_t>(cudf::type_id::STRUCT)
};
// NVBENCH_DECLARE_ENUM_TYPE_STRINGS macro must be used from global namespace scope
NVBENCH_DECLARE_ENUM_TYPE_STRINGS(
data_type,
[](data_type value) {
switch (value) {
case data_type::INTEGRAL: return "INTEGRAL";
case data_type::INTEGRAL_SIGNED: return "INTEGRAL_SIGNED";
case data_type::FLOAT: return "FLOAT";
case data_type::DECIMAL: return "DECIMAL";
case data_type::TIMESTAMP: return "TIMESTAMP";
case data_type::DURATION: return "DURATION";
case data_type::STRING: return "STRING";
case data_type::LIST: return "LIST";
case data_type::STRUCT: return "STRUCT";
default: return "Unknown";
}
},
[](auto) { return std::string{}; })
NVBENCH_DECLARE_ENUM_TYPE_STRINGS(
cudf::io::io_type,
[](auto value) {
switch (value) {
case cudf::io::io_type::FILEPATH: return "FILEPATH";
case cudf::io::io_type::HOST_BUFFER: return "HOST_BUFFER";
case cudf::io::io_type::DEVICE_BUFFER: return "DEVICE_BUFFER";
case cudf::io::io_type::VOID: return "VOID";
default: return "Unknown";
}
},
[](auto) { return std::string{}; })
NVBENCH_DECLARE_ENUM_TYPE_STRINGS(
cudf::io::compression_type,
[](auto value) {
switch (value) {
case cudf::io::compression_type::SNAPPY: return "SNAPPY";
case cudf::io::compression_type::NONE: return "NONE";
default: return "Unknown";
}
},
[](auto) { return std::string{}; })
enum class uses_index : bool { YES, NO };
enum class uses_numpy_dtype : bool { YES, NO };
NVBENCH_DECLARE_ENUM_TYPE_STRINGS(
uses_index,
[](auto value) {
switch (value) {
case uses_index::YES: return "YES";
case uses_index::NO: return "NO";
default: return "Unknown";
}
},
[](auto) { return std::string{}; })
NVBENCH_DECLARE_ENUM_TYPE_STRINGS(
uses_numpy_dtype,
[](auto value) {
switch (value) {
case uses_numpy_dtype::YES: return "YES";
case uses_numpy_dtype::NO: return "NO";
default: return "Unknown";
}
},
[](auto) { return std::string{}; })
NVBENCH_DECLARE_ENUM_TYPE_STRINGS(
column_selection,
[](auto value) {
switch (value) {
case column_selection::ALL: return "ALL";
case column_selection::ALTERNATE: return "ALTERNATE";
case column_selection::FIRST_HALF: return "FIRST_HALF";
case column_selection::SECOND_HALF: return "SECOND_HALF";
default: return "Unknown";
}
},
[](auto) { return std::string{}; })
NVBENCH_DECLARE_ENUM_TYPE_STRINGS(
row_selection,
[](auto value) {
switch (value) {
case row_selection::ALL: return "ALL";
case row_selection::BYTE_RANGE: return "BYTE_RANGE";
case row_selection::NROWS: return "NROWS";
case row_selection::SKIPFOOTER: return "SKIPFOOTER";
case row_selection::STRIPES: return "STRIPES";
case row_selection::ROW_GROUPS: return "ROW_GROUPS";
default: return "Unknown";
}
},
[](auto) { return std::string{}; })
NVBENCH_DECLARE_ENUM_TYPE_STRINGS(
cudf::type_id,
[](auto value) {
switch (value) {
case cudf::type_id::EMPTY: return "EMPTY";
case cudf::type_id::TIMESTAMP_NANOSECONDS: return "TIMESTAMP_NANOSECONDS";
default: return "Unknown";
}
},
[](auto) { return std::string{}; })
enum class converts_strings : bool { YES, NO };
enum class uses_pandas_metadata : bool { YES, NO };
NVBENCH_DECLARE_ENUM_TYPE_STRINGS(
converts_strings,
[](auto value) {
switch (value) {
case converts_strings::YES: return "YES";
case converts_strings::NO: return "NO";
default: return "Unknown";
}
},
[](auto) { return std::string{}; })
NVBENCH_DECLARE_ENUM_TYPE_STRINGS(
uses_pandas_metadata,
[](auto value) {
switch (value) {
case uses_pandas_metadata::YES: return "YES";
case uses_pandas_metadata::NO: return "NO";
default: return "Unknown";
}
},
[](auto) { return std::string{}; })
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks/io
|
rapidsai_public_repos/cudf/cpp/benchmarks/io/json/json_writer.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/io/cuio_common.hpp>
#include <benchmarks/io/nvbench_helpers.hpp>
#include <cudf/io/json.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
// Size of the data in the benchmark dataframe; chosen to be low enough to allow benchmarks to
// run on most GPUs, but large enough to allow highest throughput
constexpr size_t data_size = 512 << 20;
constexpr cudf::size_type num_cols = 64;
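// Shared timing harness for the JSON writer benchmarks below: drops the L3 cache before
// each sample, times only the write_json() call, and reports throughput, peak memory
// usage, and the encoded file size.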
void json_write_common(cudf::io::json_writer_options const& write_opts,
cuio_source_sink_pair& source_sink,
size_t const data_size,
nvbench::state& state)
{
auto mem_stats_logger = cudf::memory_stats_logger();
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::sync | nvbench::exec_tag::timer,
[&](nvbench::launch& launch, auto& timer) {
try_drop_l3_cache();
timer.start();
cudf::io::write_json(write_opts);
timer.stop();
});
auto const time = state.get_summary("nv/cold/time/gpu/mean").get_float64("value");
state.add_element_count(static_cast<double>(data_size) / time, "bytes_per_second");
state.add_buffer_size(
mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage");
state.add_buffer_size(source_sink.size(), "encoded_file_size", "encoded_file_size");
}
template <cudf::io::io_type IO>
void BM_json_write_io(nvbench::state& state, nvbench::type_list<nvbench::enum_type<IO>>)
{
auto const d_type = get_type_or_group({static_cast<int32_t>(data_type::INTEGRAL),
static_cast<int32_t>(data_type::FLOAT),
static_cast<int32_t>(data_type::DECIMAL),
static_cast<int32_t>(data_type::TIMESTAMP),
static_cast<int32_t>(data_type::DURATION),
static_cast<int32_t>(data_type::STRING),
static_cast<int32_t>(data_type::LIST),
static_cast<int32_t>(data_type::STRUCT)});
auto const source_type = IO;
auto const tbl = create_random_table(
cycle_dtypes(d_type, num_cols), table_size_bytes{data_size}, data_profile_builder());
auto const view = tbl->view();
cuio_source_sink_pair source_sink(source_type);
cudf::io::json_writer_options write_opts =
cudf::io::json_writer_options::builder(source_sink.make_sink_info(), view)
.na_rep("null")
.rows_per_chunk(view.num_rows() / 10);
json_write_common(write_opts, source_sink, data_size, state);
}
void BM_json_writer_options(nvbench::state& state)
{
auto const source_type = io_type::HOST_BUFFER;
bool const json_lines = state.get_int64("json_lines");
bool const include_nulls = state.get_int64("include_nulls");
auto const rows_per_chunk = state.get_int64("rows_per_chunk");
if ((json_lines or include_nulls) and rows_per_chunk != 1 << 20) {
    state.skip("Skipping redundant rows_per_chunk combinations");
return;
}
auto const d_type = get_type_or_group({static_cast<int32_t>(data_type::INTEGRAL),
static_cast<int32_t>(data_type::FLOAT),
static_cast<int32_t>(data_type::DECIMAL),
static_cast<int32_t>(data_type::TIMESTAMP),
static_cast<int32_t>(data_type::DURATION),
static_cast<int32_t>(data_type::STRING),
static_cast<int32_t>(data_type::LIST),
static_cast<int32_t>(data_type::STRUCT)});
auto const tbl = create_random_table(
cycle_dtypes(d_type, num_cols), table_size_bytes{data_size}, data_profile_builder());
auto const view = tbl->view();
cuio_source_sink_pair source_sink(source_type);
cudf::io::json_writer_options write_opts =
cudf::io::json_writer_options::builder(source_sink.make_sink_info(), view)
.na_rep("null")
.lines(json_lines)
.include_nulls(include_nulls)
.rows_per_chunk(rows_per_chunk);
json_write_common(write_opts, source_sink, data_size, state);
}
using io_list = nvbench::enum_type_list<cudf::io::io_type::FILEPATH,
cudf::io::io_type::HOST_BUFFER,
cudf::io::io_type::DEVICE_BUFFER>;
NVBENCH_BENCH_TYPES(BM_json_write_io, NVBENCH_TYPE_AXES(io_list))
.set_name("json_write_io")
.set_type_axes_names({"io"})
.set_min_samples(4);
NVBENCH_BENCH(BM_json_writer_options)
.set_name("json_write_options")
.set_min_samples(4)
.add_int64_axis("json_lines", {false, true})
.add_int64_axis("include_nulls", {false, true})
.add_int64_power_of_two_axis("rows_per_chunk", {10, 15, 16, 18, 20});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks/io
|
rapidsai_public_repos/cudf/cpp/benchmarks/io/json/nested_json.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <io/json/nested_json.hpp>
#include <tests/io/fst/common.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/strings/repeat_strings.hpp>
#include <cudf/types.hpp>
#include <nvbench/nvbench.cuh>
#include <string>
#include <vector>
namespace {
// pre-generate all the number strings
std::vector<std::string> _num_to_string;
std::string num_to_string(int32_t num) { return _num_to_string.at(num); }
// List of List nested.
std::string generate_list_of_lists(int32_t max_depth, int32_t max_rows, std::string elem)
{
std::string json = "[";
if (max_depth > 1) json += std::string(max_depth - 1, '[');
for (int32_t row = 0; row < max_rows; ++row) {
json += elem;
if (row < max_rows - 1) { json += ", "; }
}
if (max_depth > 1) json += std::string(max_depth - 1, ']');
json += "]";
return json;
}
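// For example (illustrative): generate_list_of_lists(3, 2, "1") yields "[[[1, 1]]]".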
// Struct of Struct nested.
std::string generate_struct_of_structs(int32_t max_depth, int32_t max_rows, std::string elem)
{
if (max_depth <= 0) return "{}";
std::string json;
for (int32_t depth = 0; depth < max_depth / 2; ++depth) {
json += R"({"a)" + num_to_string(depth) + R"(": )";
}
if (max_rows == 0) json += "{}";
for (int32_t row = 0; row < max_rows; ++row) {
json += elem;
if (row < max_rows - 1) {
json += R"(, "a)" + num_to_string(max_depth / 2 - 1) + "_" + num_to_string(row) + R"(": )";
}
}
if (max_depth > 0) json += std::string(max_depth / 2, '}');
return json;
}
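// For example (illustrative): generate_struct_of_structs(4, 2, "1") yields
//   {"a0": {"a1": 1, "a1_0": 1}}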
// Memoize the generated rows so we don't have to regenerate them.
std::map<std::tuple<int, int, int, int>, std::string> _row_cache;
std::string generate_row(
int num_columns, int max_depth, int max_list_size, int max_struct_size, size_t max_bytes)
{
std::string s = "{";
std::vector<std::string> const elems{
R"(1)", R"(-2)", R"(3.4)", R"("5")", R"("abcdefghij")", R"(true)", R"(null)"};
for (int i = 0; i < num_columns; i++) {
s += R"("col)" + num_to_string(i) + R"(": )";
if (auto it = _row_cache.find({i % 2, max_depth - 2, max_struct_size, i % elems.size()});
it != _row_cache.end()) {
s += it->second;
} else {
auto r =
(i % 2 == 0)
? generate_struct_of_structs(max_depth - 2, max_struct_size, elems[i % elems.size()])
: generate_list_of_lists(max_depth - 2, max_struct_size, elems[i % elems.size()]);
_row_cache[{i % 2, max_depth - 2, max_struct_size, i % elems.size()}] = r;
s += r;
}
if (s.length() > max_bytes) break;
if (i < num_columns - 1) s += ", ";
}
s += "}";
return s;
}
std::string generate_json(int num_rows,
int num_columns,
int max_depth,
int max_list_size,
int max_struct_size,
size_t max_json_bytes)
{
// std::to_string is slow, so we pre-generate all number strings we need.
_num_to_string.clear();
auto max_num_str =
std::max(std::max(num_columns, max_depth), std::max(max_list_size, max_struct_size));
for (int i = 0; i < max_num_str; i++)
_num_to_string.emplace_back(std::to_string(i));
_row_cache.clear();
std::string s = "[\n";
s.reserve(max_json_bytes + 1024);
for (int i = 0; i < num_rows; i++) {
s += generate_row(
num_columns, max_depth - 2, max_list_size, max_struct_size, max_json_bytes - s.length());
if (s.length() > max_json_bytes) break;
if (i != num_rows - 1) s += ",\n";
}
s += "\n]";
return s;
}
auto make_test_json_data(cudf::size_type string_size, rmm::cuda_stream_view stream)
{
// Test input
std::string input = R"(
{"a":1,"b":2,"c":[3], "d": {}},
{"a":1,"b":4.0,"c":[], "d": {"year":1882,"author": "Bharathi"}},
{"a":1,"b":6.0,"c":[5, 7], "d": null},
{"a":1,"b":null,"c":null},
{
"a" : 1
},
{"a":1,"b":Infinity,"c":[null], "d": {"year":-600,"author": "Kaniyan"}},
{"a": 1, "b": 8.0, "d": { "author": "Jean-Jacques Rousseau"}},)";
cudf::size_type const repeat_times = string_size / input.size();
auto d_input_scalar = cudf::make_string_scalar(input, stream);
auto& d_string_scalar = static_cast<cudf::string_scalar&>(*d_input_scalar);
auto d_scalar = cudf::strings::repeat_string(d_string_scalar, repeat_times);
  // Patch the first and last characters so the repeated fragment becomes a well-formed JSON
  // array: '[' overwrites the leading newline and ']' overwrites the trailing comma
  auto data = const_cast<char*>(d_scalar->data());
  CUDF_CUDA_TRY(cudaMemsetAsync(data, '[', 1, stream.value()));
  CUDF_CUDA_TRY(cudaMemsetAsync(data + d_scalar->size() - 1, ']', 1, stream.value()));
return d_scalar;
}
} // namespace
void BM_NESTED_JSON(nvbench::state& state)
{
auto const string_size{cudf::size_type(state.get_int64("string_size"))};
auto const default_options = cudf::io::json_reader_options{};
auto input = make_test_json_data(string_size, cudf::get_default_stream());
state.add_element_count(input->size());
// Run algorithm
auto const mem_stats_logger = cudf::memory_stats_logger();
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
// Allocate device-side temporary storage & run algorithm
cudf::io::json::detail::device_parse_nested_json(
cudf::device_span<char const>{input->data(), static_cast<size_t>(input->size())},
default_options,
cudf::get_default_stream(),
rmm::mr::get_current_device_resource());
});
auto const time = state.get_summary("nv/cold/time/gpu/mean").get_float64("value");
state.add_element_count(static_cast<double>(string_size) / time, "bytes_per_second");
state.add_buffer_size(
mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage");
}
NVBENCH_BENCH(BM_NESTED_JSON)
.set_name("nested_json_gpu_parser")
.add_int64_power_of_two_axis("string_size", nvbench::range(20, 30, 1));
void BM_NESTED_JSON_DEPTH(nvbench::state& state)
{
auto const string_size{cudf::size_type(state.get_int64("string_size"))};
auto const depth{cudf::size_type(state.get_int64("depth"))};
auto d_scalar = cudf::string_scalar(
generate_json(100'000'000, 10, depth, 10, 10, string_size), true, cudf::get_default_stream());
auto input = cudf::device_span<char const>(d_scalar.data(), d_scalar.size());
state.add_element_count(input.size());
auto const default_options = cudf::io::json_reader_options{};
// Run algorithm
auto const mem_stats_logger = cudf::memory_stats_logger();
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
// Allocate device-side temporary storage & run algorithm
cudf::io::json::detail::device_parse_nested_json(
input, default_options, cudf::get_default_stream(), rmm::mr::get_current_device_resource());
});
auto const time = state.get_summary("nv/cold/time/gpu/mean").get_float64("value");
state.add_element_count(static_cast<double>(string_size) / time, "bytes_per_second");
state.add_buffer_size(
mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage");
}
NVBENCH_BENCH(BM_NESTED_JSON_DEPTH)
.set_name("nested_json_gpu_parser_depth")
.add_int64_power_of_two_axis("depth", nvbench::range(1, 4, 1))
.add_int64_power_of_two_axis("string_size", nvbench::range(20, 30, 2));
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks/io
|
rapidsai_public_repos/cudf/cpp/benchmarks/io/json/json_reader_input.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/io/cuio_common.hpp>
#include <benchmarks/io/nvbench_helpers.hpp>
#include <cudf/io/json.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
// Size of the data in the benchmark dataframe; chosen to be low enough to allow benchmarks to
// run on most GPUs, but large enough to allow highest throughput
constexpr size_t data_size = 512 << 20;
constexpr cudf::size_type num_cols = 64;
void json_read_common(cuio_source_sink_pair& source_sink, nvbench::state& state)
{
cudf::io::json_reader_options read_opts =
cudf::io::json_reader_options::builder(source_sink.make_source_info());
auto mem_stats_logger = cudf::memory_stats_logger();
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::sync | nvbench::exec_tag::timer,
[&](nvbench::launch& launch, auto& timer) {
try_drop_l3_cache();
timer.start();
cudf::io::read_json(read_opts);
timer.stop();
});
auto const time = state.get_summary("nv/cold/time/gpu/mean").get_float64("value");
state.add_element_count(static_cast<double>(data_size) / time, "bytes_per_second");
state.add_buffer_size(
mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage");
state.add_buffer_size(source_sink.size(), "encoded_file_size", "encoded_file_size");
}
template <cudf::io::io_type IO>
void BM_json_read_io(nvbench::state& state, nvbench::type_list<nvbench::enum_type<IO>>)
{
auto const d_type = get_type_or_group({static_cast<int32_t>(data_type::INTEGRAL),
static_cast<int32_t>(data_type::FLOAT),
static_cast<int32_t>(data_type::DECIMAL),
static_cast<int32_t>(data_type::TIMESTAMP),
static_cast<int32_t>(data_type::DURATION),
static_cast<int32_t>(data_type::STRING),
static_cast<int32_t>(data_type::LIST),
static_cast<int32_t>(data_type::STRUCT)});
auto const source_type = IO;
cuio_source_sink_pair source_sink(source_type);
{
auto const tbl = create_random_table(
cycle_dtypes(d_type, num_cols), table_size_bytes{data_size}, data_profile_builder());
auto const view = tbl->view();
cudf::io::json_writer_options const write_opts =
cudf::io::json_writer_options::builder(source_sink.make_sink_info(), view)
.na_rep("null")
.rows_per_chunk(100'000);
cudf::io::write_json(write_opts);
}
json_read_common(source_sink, state);
}
template <data_type DataType, cudf::io::io_type IO>
void BM_json_read_data_type(
nvbench::state& state, nvbench::type_list<nvbench::enum_type<DataType>, nvbench::enum_type<IO>>)
{
auto const d_type = get_type_or_group(static_cast<int32_t>(DataType));
auto const source_type = IO;
cuio_source_sink_pair source_sink(source_type);
{
auto const tbl = create_random_table(
cycle_dtypes(d_type, num_cols), table_size_bytes{data_size}, data_profile_builder());
auto const view = tbl->view();
cudf::io::json_writer_options const write_opts =
cudf::io::json_writer_options::builder(source_sink.make_sink_info(), view)
.na_rep("null")
.rows_per_chunk(100'000);
cudf::io::write_json(write_opts);
}
json_read_common(source_sink, state);
}
using d_type_list = nvbench::enum_type_list<data_type::INTEGRAL,
data_type::FLOAT,
data_type::DECIMAL,
data_type::TIMESTAMP,
data_type::DURATION,
data_type::STRING,
data_type::LIST,
data_type::STRUCT>;
using io_list = nvbench::enum_type_list<cudf::io::io_type::FILEPATH,
cudf::io::io_type::HOST_BUFFER,
cudf::io::io_type::DEVICE_BUFFER>;
using compression_list =
nvbench::enum_type_list<cudf::io::compression_type::SNAPPY, cudf::io::compression_type::NONE>;
NVBENCH_BENCH_TYPES(BM_json_read_data_type,
NVBENCH_TYPE_AXES(d_type_list,
nvbench::enum_type_list<cudf::io::io_type::DEVICE_BUFFER>))
.set_name("json_read_data_type")
.set_type_axes_names({"data_type", "io"})
.set_min_samples(4);
NVBENCH_BENCH_TYPES(BM_json_read_io, NVBENCH_TYPE_AXES(io_list))
.set_name("json_read_io")
.set_type_axes_names({"io"})
.set_min_samples(4);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks/io
|
rapidsai_public_repos/cudf/cpp/benchmarks/io/csv/csv_reader_options.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/io/cuio_common.hpp>
#include <benchmarks/io/nvbench_helpers.hpp>
#include <cudf/io/csv.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
constexpr size_t data_size = 256 << 20;
template <column_selection ColSelection, row_selection RowSelection>
void BM_csv_read_varying_options(
nvbench::state& state,
nvbench::type_list<nvbench::enum_type<ColSelection>, nvbench::enum_type<RowSelection>>)
{
auto const data_types =
dtypes_for_column_selection(get_type_or_group({static_cast<int32_t>(data_type::INTEGRAL),
static_cast<int32_t>(data_type::FLOAT),
static_cast<int32_t>(data_type::DECIMAL),
static_cast<int32_t>(data_type::TIMESTAMP),
static_cast<int32_t>(data_type::DURATION),
static_cast<int32_t>(data_type::STRING)}),
ColSelection);
auto const cols_to_read = select_column_indexes(data_types.size(), ColSelection);
auto const num_chunks = state.get_int64("num_chunks");
auto const tbl = create_random_table(data_types, table_size_bytes{data_size});
auto const view = tbl->view();
cuio_source_sink_pair source_sink(io_type::HOST_BUFFER);
cudf::io::csv_writer_options options =
cudf::io::csv_writer_options::builder(source_sink.make_sink_info(), view)
.include_header(true)
.line_terminator("\r\n");
cudf::io::write_csv(options);
cudf::io::csv_reader_options read_options =
cudf::io::csv_reader_options::builder(source_sink.make_source_info())
.use_cols_indexes(cols_to_read)
.thousands('\'')
.windowslinetermination(true)
.comment('#')
.prefix("BM_");
size_t const chunk_size = source_sink.size() / num_chunks;
cudf::size_type const chunk_row_cnt = view.num_rows() / num_chunks;
auto const mem_stats_logger = cudf::memory_stats_logger();
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::sync | nvbench::exec_tag::timer,
[&](nvbench::launch& launch, auto& timer) {
try_drop_l3_cache(); // Drop L3 cache for accurate measurement
timer.start();
for (int32_t chunk = 0; chunk < num_chunks; ++chunk) {
// only read the header in the first chunk
read_options.set_header(chunk == 0 ? 0 : -1);
auto const is_last_chunk = chunk == (num_chunks - 1);
switch (RowSelection) {
case row_selection::ALL: break;
case row_selection::BYTE_RANGE:
read_options.set_byte_range_offset(chunk * chunk_size);
read_options.set_byte_range_size(chunk_size);
if (is_last_chunk) read_options.set_byte_range_size(0);
break;
case row_selection::NROWS:
read_options.set_skiprows(chunk * chunk_row_cnt);
read_options.set_nrows(chunk_row_cnt);
if (is_last_chunk) read_options.set_nrows(-1);
break;
case row_selection::SKIPFOOTER:
read_options.set_skiprows(chunk * chunk_row_cnt);
read_options.set_skipfooter(view.num_rows() - (chunk + 1) * chunk_row_cnt);
if (is_last_chunk) read_options.set_skipfooter(0);
break;
default: CUDF_FAIL("Unsupported row selection method");
}
cudf::io::read_csv(read_options);
}
timer.stop();
});
auto const elapsed_time = state.get_summary("nv/cold/time/gpu/mean").get_float64("value");
auto const data_processed = data_size * cols_to_read.size() / view.num_columns();
state.add_element_count(static_cast<double>(data_processed) / elapsed_time, "bytes_per_second");
state.add_buffer_size(
mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage");
state.add_buffer_size(source_sink.size(), "encoded_file_size", "encoded_file_size");
}
using col_selections = nvbench::enum_type_list<column_selection::ALL,
column_selection::ALTERNATE,
column_selection::FIRST_HALF,
column_selection::SECOND_HALF>;
using row_selections = nvbench::
enum_type_list<row_selection::BYTE_RANGE, row_selection::NROWS, row_selection::SKIPFOOTER>;
NVBENCH_BENCH_TYPES(BM_csv_read_varying_options,
NVBENCH_TYPE_AXES(col_selections, nvbench::enum_type_list<row_selection::ALL>))
.set_name("csv_read_column_selection")
.set_type_axes_names({"column_selection", "row_selection"})
.set_min_samples(4)
.add_int64_axis("num_chunks", {1});
NVBENCH_BENCH_TYPES(BM_csv_read_varying_options,
NVBENCH_TYPE_AXES(nvbench::enum_type_list<column_selection::ALL>,
row_selections))
.set_name("csv_read_row_selection")
.set_type_axes_names({"column_selection", "row_selection"})
.set_min_samples(4)
.add_int64_axis("num_chunks", {1, 8});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks/io
|
rapidsai_public_repos/cudf/cpp/benchmarks/io/csv/csv_writer.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/io/cuio_common.hpp>
#include <benchmarks/io/nvbench_helpers.hpp>
#include <cudf/io/csv.hpp>
#include <nvbench/nvbench.cuh>
// Size of the data in the benchmark dataframe; chosen to be low enough to allow benchmarks to
// run on most GPUs, but large enough to allow highest throughput
constexpr size_t data_size = 256 << 20;
constexpr cudf::size_type num_cols = 64;
template <data_type DataType, cudf::io::io_type IO>
void BM_csv_write_dtype_io(nvbench::state& state,
nvbench::type_list<nvbench::enum_type<DataType>, nvbench::enum_type<IO>>)
{
auto const data_types = get_type_or_group(static_cast<int32_t>(DataType));
auto const sink_type = IO;
auto const tbl =
create_random_table(cycle_dtypes(data_types, num_cols), table_size_bytes{data_size});
auto const view = tbl->view();
std::size_t encoded_file_size = 0;
auto const mem_stats_logger = cudf::memory_stats_logger();
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::timer | nvbench::exec_tag::sync,
[&](nvbench::launch& launch, auto& timer) {
cuio_source_sink_pair source_sink(sink_type);
timer.start();
cudf::io::csv_writer_options options =
cudf::io::csv_writer_options::builder(source_sink.make_sink_info(), view);
cudf::io::write_csv(options);
timer.stop();
encoded_file_size = source_sink.size();
});
auto const time = state.get_summary("nv/cold/time/gpu/mean").get_float64("value");
state.add_element_count(static_cast<double>(data_size) / time, "bytes_per_second");
state.add_buffer_size(
mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage");
state.add_buffer_size(encoded_file_size, "encoded_file_size", "encoded_file_size");
}
void BM_csv_write_varying_options(nvbench::state& state)
{
auto const na_per_len = state.get_int64("na_per_len");
auto const rows_per_chunk = state.get_int64("rows_per_chunk");
auto const data_types = get_type_or_group({static_cast<int32_t>(data_type::INTEGRAL),
static_cast<int32_t>(data_type::FLOAT),
static_cast<int32_t>(data_type::DECIMAL),
static_cast<int32_t>(data_type::TIMESTAMP),
static_cast<int32_t>(data_type::DURATION),
static_cast<int32_t>(data_type::STRING)});
auto const tbl = create_random_table(data_types, table_size_bytes{data_size});
auto const view = tbl->view();
std::string const na_per(na_per_len, '#');
std::size_t encoded_file_size = 0;
auto const mem_stats_logger = cudf::memory_stats_logger();
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::timer | nvbench::exec_tag::sync,
[&](nvbench::launch& launch, auto& timer) {
cuio_source_sink_pair source_sink(io_type::HOST_BUFFER);
timer.start();
cudf::io::csv_writer_options options =
cudf::io::csv_writer_options::builder(source_sink.make_sink_info(), view)
.na_rep(na_per)
.rows_per_chunk(rows_per_chunk);
cudf::io::write_csv(options);
timer.stop();
encoded_file_size = source_sink.size();
});
auto const time = state.get_summary("nv/cold/time/gpu/mean").get_float64("value");
state.add_element_count(static_cast<double>(data_size) / time, "bytes_per_second");
state.add_buffer_size(
mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage");
state.add_buffer_size(encoded_file_size, "encoded_file_size", "encoded_file_size");
}
using d_type_list = nvbench::enum_type_list<data_type::INTEGRAL,
data_type::FLOAT,
data_type::DECIMAL,
data_type::TIMESTAMP,
data_type::DURATION,
data_type::STRING>;
using io_list = nvbench::enum_type_list<cudf::io::io_type::FILEPATH,
cudf::io::io_type::HOST_BUFFER,
cudf::io::io_type::VOID>;
NVBENCH_BENCH_TYPES(BM_csv_write_dtype_io, NVBENCH_TYPE_AXES(d_type_list, io_list))
.set_name("csv_write_dtype_io")
.set_type_axes_names({"data_type", "io"})
.set_min_samples(4);
NVBENCH_BENCH(BM_csv_write_varying_options)
.set_name("csv_write_options")
.set_min_samples(4)
.add_int64_axis("na_per_len", {0, 16})
.add_int64_power_of_two_axis("rows_per_chunk", nvbench::range(8, 20, 2));
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks/io
|
rapidsai_public_repos/cudf/cpp/benchmarks/io/csv/csv_reader_input.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/io/cuio_common.hpp>
#include <benchmarks/io/nvbench_helpers.hpp>
#include <cudf/io/csv.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
constexpr size_t data_size = 256 << 20;
constexpr cudf::size_type num_cols = 64;
template <typename DataType>
void csv_read_common(DataType const& data_types,
cudf::io::io_type const& source_type,
nvbench::state& state)
{
auto const tbl =
create_random_table(cycle_dtypes(data_types, num_cols), table_size_bytes{data_size});
auto const view = tbl->view();
cuio_source_sink_pair source_sink(source_type);
cudf::io::csv_writer_options options =
cudf::io::csv_writer_options::builder(source_sink.make_sink_info(), view).include_header(true);
cudf::io::write_csv(options);
cudf::io::csv_reader_options const read_options =
cudf::io::csv_reader_options::builder(source_sink.make_source_info());
auto const mem_stats_logger = cudf::memory_stats_logger(); // init stats logger
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::sync | nvbench::exec_tag::timer,
[&](nvbench::launch& launch, auto& timer) {
try_drop_l3_cache(); // Drop L3 cache for accurate measurement
timer.start();
cudf::io::read_csv(read_options);
timer.stop();
});
auto const time = state.get_summary("nv/cold/time/gpu/mean").get_float64("value");
state.add_element_count(static_cast<double>(data_size) / time, "bytes_per_second");
state.add_buffer_size(
mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage");
state.add_buffer_size(source_sink.size(), "encoded_file_size", "encoded_file_size");
}
template <data_type DataType, cudf::io::io_type IOType>
void BM_csv_read_input(nvbench::state& state,
nvbench::type_list<nvbench::enum_type<DataType>, nvbench::enum_type<IOType>>)
{
auto const d_type = get_type_or_group(static_cast<int32_t>(DataType));
auto const source_type = IOType;
csv_read_common(d_type, source_type, state);
}
template <cudf::io::io_type IOType>
void BM_csv_read_io(nvbench::state& state, nvbench::type_list<nvbench::enum_type<IOType>>)
{
auto const d_type = get_type_or_group({static_cast<int32_t>(data_type::INTEGRAL),
static_cast<int32_t>(data_type::FLOAT),
static_cast<int32_t>(data_type::DECIMAL),
static_cast<int32_t>(data_type::TIMESTAMP),
static_cast<int32_t>(data_type::DURATION),
static_cast<int32_t>(data_type::STRING)});
auto const source_type = IOType;
csv_read_common(d_type, source_type, state);
}
using d_type_list = nvbench::enum_type_list<data_type::INTEGRAL,
data_type::FLOAT,
data_type::DECIMAL,
data_type::TIMESTAMP,
data_type::DURATION,
data_type::STRING>;
using io_list =
nvbench::enum_type_list<cudf::io::io_type::FILEPATH, cudf::io::io_type::HOST_BUFFER>;
NVBENCH_BENCH_TYPES(BM_csv_read_input,
NVBENCH_TYPE_AXES(d_type_list,
nvbench::enum_type_list<cudf::io::io_type::DEVICE_BUFFER>))
.set_name("csv_read_data_type")
.set_type_axes_names({"data_type", "io"})
.set_min_samples(4);
NVBENCH_BENCH_TYPES(BM_csv_read_io, NVBENCH_TYPE_AXES(io_list))
.set_name("csv_read_io")
.set_type_axes_names({"io"})
.set_min_samples(4);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks/io
|
rapidsai_public_repos/cudf/cpp/benchmarks/io/orc/orc_reader_options.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/io/cuio_common.hpp>
#include <benchmarks/io/nvbench_helpers.hpp>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/io/orc.hpp>
#include <cudf/io/orc_metadata.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
// Size of the data in the benchmark dataframe; chosen to be low enough to allow benchmarks to
// run on most GPUs, but large enough to allow highest throughput
constexpr int64_t data_size = 512 << 20;
// The number of separate read calls to use when reading files in multiple chunks
// Each call reads roughly equal amounts of data
constexpr int32_t chunked_read_num_chunks = 4;
std::vector<std::string> get_top_level_col_names(cudf::io::source_info const& source)
{
auto const top_lvl_cols = cudf::io::read_orc_metadata(source).schema().root().children();
std::vector<std::string> col_names;
std::transform(top_lvl_cols.cbegin(),
top_lvl_cols.cend(),
std::back_inserter(col_names),
[](auto const& col_meta) { return col_meta.name(); });
return col_names;
}
template <column_selection ColSelection,
row_selection RowSelection,
uses_index UsesIndex,
uses_numpy_dtype UsesNumpyDType,
cudf::type_id Timestamp>
void BM_orc_read_varying_options(nvbench::state& state,
nvbench::type_list<nvbench::enum_type<ColSelection>,
nvbench::enum_type<RowSelection>,
nvbench::enum_type<UsesIndex>,
nvbench::enum_type<UsesNumpyDType>,
nvbench::enum_type<Timestamp>>)
{
auto const num_chunks = RowSelection == row_selection::ALL ? 1 : chunked_read_num_chunks;
auto const use_index = UsesIndex == uses_index::YES;
auto const use_np_dtypes = UsesNumpyDType == uses_numpy_dtype::YES;
auto const ts_type = cudf::data_type{Timestamp};
// skip_rows is not supported on nested types
auto const data_types =
dtypes_for_column_selection(get_type_or_group({static_cast<int32_t>(data_type::INTEGRAL_SIGNED),
static_cast<int32_t>(data_type::FLOAT),
static_cast<int32_t>(data_type::DECIMAL),
static_cast<int32_t>(data_type::TIMESTAMP),
static_cast<int32_t>(data_type::STRING)}),
ColSelection);
auto const tbl = create_random_table(data_types, table_size_bytes{data_size});
auto const view = tbl->view();
cuio_source_sink_pair source_sink(io_type::HOST_BUFFER);
cudf::io::orc_writer_options options =
cudf::io::orc_writer_options::builder(source_sink.make_sink_info(), view);
cudf::io::write_orc(options);
auto const cols_to_read =
select_column_names(get_top_level_col_names(source_sink.make_source_info()), ColSelection);
cudf::io::orc_reader_options read_options =
cudf::io::orc_reader_options::builder(source_sink.make_source_info())
.columns(cols_to_read)
.use_index(use_index)
.use_np_dtypes(use_np_dtypes)
.timestamp_type(ts_type);
auto const num_stripes =
cudf::io::read_orc_metadata(source_sink.make_source_info()).num_stripes();
auto const chunk_row_cnt = cudf::util::div_rounding_up_unsafe(view.num_rows(), num_chunks);
auto mem_stats_logger = cudf::memory_stats_logger();
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(
nvbench::exec_tag::sync | nvbench::exec_tag::timer, [&](nvbench::launch& launch, auto& timer) {
try_drop_l3_cache();
timer.start();
cudf::size_type rows_read = 0;
for (int32_t chunk = 0; chunk < num_chunks; ++chunk) {
switch (RowSelection) {
case row_selection::ALL: break;
case row_selection::STRIPES:
read_options.set_stripes({segments_in_chunk(num_stripes, num_chunks, chunk)});
break;
case row_selection::NROWS:
read_options.set_skip_rows(chunk * chunk_row_cnt);
read_options.set_num_rows(chunk_row_cnt);
break;
default: CUDF_FAIL("Unsupported row selection method");
}
rows_read += cudf::io::read_orc(read_options).tbl->num_rows();
}
CUDF_EXPECTS(rows_read == view.num_rows(), "Benchmark did not read the entire table");
timer.stop();
});
auto const elapsed_time = state.get_summary("nv/cold/time/gpu/mean").get_float64("value");
auto const data_processed = data_size * cols_to_read.size() / view.num_columns();
state.add_element_count(static_cast<double>(data_processed) / elapsed_time, "bytes_per_second");
state.add_buffer_size(
mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage");
state.add_buffer_size(source_sink.size(), "encoded_file_size", "encoded_file_size");
}
using col_selections = nvbench::enum_type_list<column_selection::ALL,
column_selection::ALTERNATE,
column_selection::FIRST_HALF,
column_selection::SECOND_HALF>;
NVBENCH_BENCH_TYPES(BM_orc_read_varying_options,
NVBENCH_TYPE_AXES(col_selections,
nvbench::enum_type_list<row_selection::ALL>,
nvbench::enum_type_list<uses_index::YES>,
nvbench::enum_type_list<uses_numpy_dtype::YES>,
nvbench::enum_type_list<cudf::type_id::EMPTY>))
.set_name("orc_read_column_selection")
.set_type_axes_names(
{"column_selection", "row_selection", "uses_index", "uses_numpy_dtype", "timestamp_type"})
.set_min_samples(4);
using row_selections =
nvbench::enum_type_list<row_selection::ALL, row_selection::NROWS, row_selection::STRIPES>;
NVBENCH_BENCH_TYPES(BM_orc_read_varying_options,
NVBENCH_TYPE_AXES(nvbench::enum_type_list<column_selection::ALL>,
row_selections,
nvbench::enum_type_list<uses_index::YES>,
nvbench::enum_type_list<uses_numpy_dtype::YES>,
nvbench::enum_type_list<cudf::type_id::EMPTY>))
.set_name("orc_read_row_selection")
.set_type_axes_names(
{"column_selection", "row_selection", "uses_index", "uses_numpy_dtype", "timestamp_type"})
.set_min_samples(4);
NVBENCH_BENCH_TYPES(
BM_orc_read_varying_options,
NVBENCH_TYPE_AXES(
nvbench::enum_type_list<column_selection::ALL>,
nvbench::enum_type_list<row_selection::ALL>,
nvbench::enum_type_list<uses_index::YES, uses_index::NO>,
nvbench::enum_type_list<uses_numpy_dtype::YES, uses_numpy_dtype::NO>,
nvbench::enum_type_list<cudf::type_id::EMPTY, cudf::type_id::TIMESTAMP_NANOSECONDS>))
.set_name("orc_read_misc_options")
.set_type_axes_names(
{"column_selection", "row_selection", "uses_index", "uses_numpy_dtype", "timestamp_type"})
.set_min_samples(4);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks/io
|
rapidsai_public_repos/cudf/cpp/benchmarks/io/orc/orc_writer.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/io/cuio_common.hpp>
#include <benchmarks/io/nvbench_helpers.hpp>
#include <cudf/io/orc.hpp>
#include <cudf/io/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
NVBENCH_DECLARE_ENUM_TYPE_STRINGS(
cudf::io::statistics_freq,
[](auto value) {
switch (value) {
case cudf::io::statistics_freq::STATISTICS_NONE: return "STATISTICS_NONE";
case cudf::io::statistics_freq::STATISTICS_ROWGROUP: return "ORC_STATISTICS_STRIPE";
case cudf::io::statistics_freq::STATISTICS_PAGE: return "ORC_STATISTICS_ROW_GROUP";
default: return "Unknown";
}
},
[](auto) { return std::string{}; })
// Size of the data in the benchmark dataframe; chosen to be low enough to allow benchmarks to
// run on most GPUs, but large enough to allow highest throughput
constexpr int64_t data_size = 512 << 20;
constexpr cudf::size_type num_cols = 64;
template <data_type DataType>
void BM_orc_write_encode(nvbench::state& state, nvbench::type_list<nvbench::enum_type<DataType>>)
{
auto const d_type = get_type_or_group(static_cast<int32_t>(DataType));
cudf::size_type const cardinality = state.get_int64("cardinality");
cudf::size_type const run_length = state.get_int64("run_length");
auto const compression = cudf::io::compression_type::SNAPPY;
auto const sink_type = io_type::VOID;
auto const tbl =
create_random_table(cycle_dtypes(d_type, num_cols),
table_size_bytes{data_size},
data_profile_builder().cardinality(cardinality).avg_run_length(run_length));
auto const view = tbl->view();
std::size_t encoded_file_size = 0;
auto mem_stats_logger = cudf::memory_stats_logger();
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::timer | nvbench::exec_tag::sync,
[&](nvbench::launch& launch, auto& timer) {
cuio_source_sink_pair source_sink(sink_type);
timer.start();
cudf::io::orc_writer_options options =
cudf::io::orc_writer_options::builder(source_sink.make_sink_info(), view)
.compression(compression);
cudf::io::write_orc(options);
timer.stop();
encoded_file_size = source_sink.size();
});
auto const time = state.get_summary("nv/cold/time/gpu/mean").get_float64("value");
state.add_element_count(static_cast<double>(data_size) / time, "bytes_per_second");
state.add_buffer_size(
mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage");
state.add_buffer_size(encoded_file_size, "encoded_file_size", "encoded_file_size");
}
template <cudf::io::io_type IO, cudf::io::compression_type Compression>
void BM_orc_write_io_compression(
nvbench::state& state,
nvbench::type_list<nvbench::enum_type<IO>, nvbench::enum_type<Compression>>)
{
auto const d_type = get_type_or_group({static_cast<int32_t>(data_type::INTEGRAL_SIGNED),
static_cast<int32_t>(data_type::FLOAT),
static_cast<int32_t>(data_type::DECIMAL),
static_cast<int32_t>(data_type::TIMESTAMP),
static_cast<int32_t>(data_type::STRING),
static_cast<int32_t>(data_type::LIST),
static_cast<int32_t>(data_type::STRUCT)});
cudf::size_type const cardinality = state.get_int64("cardinality");
cudf::size_type const run_length = state.get_int64("run_length");
auto const compression = Compression;
auto const sink_type = IO;
auto const tbl =
create_random_table(cycle_dtypes(d_type, num_cols),
table_size_bytes{data_size},
data_profile_builder().cardinality(cardinality).avg_run_length(run_length));
auto const view = tbl->view();
std::size_t encoded_file_size = 0;
auto mem_stats_logger = cudf::memory_stats_logger();
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::timer | nvbench::exec_tag::sync,
[&](nvbench::launch& launch, auto& timer) {
cuio_source_sink_pair source_sink(sink_type);
timer.start();
cudf::io::orc_writer_options options =
cudf::io::orc_writer_options::builder(source_sink.make_sink_info(), view)
.compression(compression);
cudf::io::write_orc(options);
timer.stop();
encoded_file_size = source_sink.size();
});
auto const time = state.get_summary("nv/cold/time/gpu/mean").get_float64("value");
state.add_element_count(static_cast<double>(data_size) / time, "bytes_per_second");
state.add_buffer_size(
mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage");
state.add_buffer_size(encoded_file_size, "encoded_file_size", "encoded_file_size");
}
template <cudf::io::statistics_freq Statistics, cudf::io::compression_type Compression>
void BM_orc_write_statistics(
nvbench::state& state,
nvbench::type_list<nvbench::enum_type<Statistics>, nvbench::enum_type<Compression>>)
{
auto const d_type = get_type_or_group({static_cast<int32_t>(data_type::INTEGRAL_SIGNED),
static_cast<int32_t>(data_type::FLOAT),
static_cast<int32_t>(data_type::DECIMAL),
static_cast<int32_t>(data_type::TIMESTAMP),
static_cast<int32_t>(data_type::STRING),
static_cast<int32_t>(data_type::LIST)});
auto const compression = Compression;
auto const stats_freq = Statistics;
auto const tbl = create_random_table(d_type, table_size_bytes{data_size});
auto const view = tbl->view();
std::size_t encoded_file_size = 0;
auto mem_stats_logger = cudf::memory_stats_logger();
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::timer | nvbench::exec_tag::sync,
[&](nvbench::launch& launch, auto& timer) {
cuio_source_sink_pair source_sink(io_type::FILEPATH);
timer.start();
cudf::io::orc_writer_options const options =
cudf::io::orc_writer_options::builder(source_sink.make_sink_info(), view)
.compression(compression)
.enable_statistics(stats_freq);
cudf::io::write_orc(options);
timer.stop();
encoded_file_size = source_sink.size();
});
auto const time = state.get_summary("nv/cold/time/gpu/mean").get_float64("value");
state.add_element_count(static_cast<double>(data_size) / time, "bytes_per_second");
state.add_buffer_size(
mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage");
state.add_buffer_size(encoded_file_size, "encoded_file_size", "encoded_file_size");
}
using d_type_list = nvbench::enum_type_list<data_type::INTEGRAL_SIGNED,
data_type::FLOAT,
data_type::DECIMAL,
data_type::TIMESTAMP,
data_type::STRING,
data_type::LIST,
data_type::STRUCT>;
using io_list = nvbench::enum_type_list<cudf::io::io_type::FILEPATH,
cudf::io::io_type::HOST_BUFFER,
cudf::io::io_type::VOID>;
using compression_list =
nvbench::enum_type_list<cudf::io::compression_type::SNAPPY, cudf::io::compression_type::NONE>;
using stats_list = nvbench::enum_type_list<cudf::io::STATISTICS_NONE,
cudf::io::ORC_STATISTICS_STRIPE,
cudf::io::ORC_STATISTICS_ROW_GROUP>;
NVBENCH_BENCH_TYPES(BM_orc_write_encode, NVBENCH_TYPE_AXES(d_type_list))
.set_name("orc_write_encode")
.set_type_axes_names({"data_type"})
.set_min_samples(4)
.add_int64_axis("cardinality", {0, 1000})
.add_int64_axis("run_length", {1, 32});
NVBENCH_BENCH_TYPES(BM_orc_write_io_compression, NVBENCH_TYPE_AXES(io_list, compression_list))
.set_name("orc_write_io_compression")
.set_type_axes_names({"io", "compression"})
.set_min_samples(4)
.add_int64_axis("cardinality", {0, 1000})
.add_int64_axis("run_length", {1, 32});
NVBENCH_BENCH_TYPES(BM_orc_write_statistics, NVBENCH_TYPE_AXES(stats_list, compression_list))
.set_name("orc_write_statistics")
.set_type_axes_names({"statistics", "compression"})
.set_min_samples(4);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks/io
|
rapidsai_public_repos/cudf/cpp/benchmarks/io/orc/orc_writer_chunks.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/io/cuio_common.hpp>
#include <benchmarks/io/nvbench_helpers.hpp>
#include <cudf/column/column.hpp>
#include <cudf/io/orc.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <thrust/iterator/transform_iterator.h>
#include <nvbench/nvbench.cuh>
// Size of the data in the benchmark dataframe; chosen to be low enough to allow benchmarks to
// run on most GPUs, but large enough to allow highest throughput
constexpr int64_t data_size = 512 << 20;  // 512 MiB
void nvbench_orc_write(nvbench::state& state)
{
cudf::size_type num_cols = state.get_int64("num_columns");
auto tbl = create_random_table(
cycle_dtypes(get_type_or_group({static_cast<int32_t>(data_type::INTEGRAL_SIGNED),
static_cast<int32_t>(data_type::FLOAT),
static_cast<int32_t>(data_type::DECIMAL),
static_cast<int32_t>(data_type::TIMESTAMP),
static_cast<int32_t>(data_type::STRING),
static_cast<int32_t>(data_type::STRUCT),
static_cast<int32_t>(data_type::LIST)}),
num_cols),
table_size_bytes{data_size});
cudf::table_view view = tbl->view();
auto mem_stats_logger = cudf::memory_stats_logger();
state.add_global_memory_reads<int64_t>(data_size);
state.add_element_count(view.num_columns() * view.num_rows());
size_t encoded_file_size = 0;
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::timer | nvbench::exec_tag::sync,
[&](nvbench::launch& launch, auto& timer) {
cuio_source_sink_pair source_sink(io_type::VOID);
timer.start();
cudf::io::orc_writer_options opts =
cudf::io::orc_writer_options::builder(source_sink.make_sink_info(), view);
cudf::io::write_orc(opts);
timer.stop();
encoded_file_size = source_sink.size();
});
state.add_buffer_size(mem_stats_logger.peak_memory_usage(), "pmu", "Peak Memory Usage");
state.add_buffer_size(encoded_file_size, "efs", "Encoded File Size");
state.add_element_count(view.num_rows(), "Total Rows");
}
void nvbench_orc_chunked_write(nvbench::state& state)
{
cudf::size_type num_cols = state.get_int64("num_columns");
cudf::size_type num_tables = state.get_int64("num_chunks");
std::vector<std::unique_ptr<cudf::table>> tables;
for (cudf::size_type idx = 0; idx < num_tables; idx++) {
tables.push_back(create_random_table(
cycle_dtypes(get_type_or_group({static_cast<int32_t>(data_type::INTEGRAL_SIGNED),
static_cast<int32_t>(data_type::FLOAT),
static_cast<int32_t>(data_type::DECIMAL),
static_cast<int32_t>(data_type::TIMESTAMP),
static_cast<int32_t>(data_type::STRING),
static_cast<int32_t>(data_type::STRUCT),
static_cast<int32_t>(data_type::LIST)}),
num_cols),
table_size_bytes{size_t(data_size / num_tables)}));
}
auto mem_stats_logger = cudf::memory_stats_logger();
auto size_iter = thrust::make_transform_iterator(
tables.begin(), [](auto const& i) { return i->num_columns() * i->num_rows(); });
auto row_count_iter =
thrust::make_transform_iterator(tables.begin(), [](auto const& i) { return i->num_rows(); });
auto total_elements = std::accumulate(size_iter, size_iter + num_tables, 0);
auto total_rows = std::accumulate(row_count_iter, row_count_iter + num_tables, 0);
state.add_global_memory_reads<int64_t>(data_size);
state.add_element_count(total_elements);
size_t encoded_file_size = 0;
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(
nvbench::exec_tag::timer | nvbench::exec_tag::sync, [&](nvbench::launch& launch, auto& timer) {
cuio_source_sink_pair source_sink(io_type::VOID);
timer.start();
cudf::io::chunked_orc_writer_options opts =
cudf::io::chunked_orc_writer_options::builder(source_sink.make_sink_info());
cudf::io::orc_chunked_writer writer(opts);
std::for_each(tables.begin(),
tables.end(),
[&writer](std::unique_ptr<cudf::table> const& tbl) { writer.write(*tbl); });
writer.close();
timer.stop();
encoded_file_size = source_sink.size();
});
state.add_buffer_size(mem_stats_logger.peak_memory_usage(), "pmu", "Peak Memory Usage");
state.add_buffer_size(encoded_file_size, "efs", "Encoded File Size");
state.add_element_count(total_rows, "Total Rows");
}
NVBENCH_BENCH(nvbench_orc_write)
.set_name("orc_write")
.set_min_samples(4)
.add_int64_axis("num_columns", {8, 64});
NVBENCH_BENCH(nvbench_orc_chunked_write)
.set_name("orc_chunked_write")
.set_min_samples(4)
.add_int64_axis("num_columns", {8, 64})
.add_int64_axis("num_chunks", {8, 64});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks/io
|
rapidsai_public_repos/cudf/cpp/benchmarks/io/orc/orc_reader_input.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/io/cuio_common.hpp>
#include <benchmarks/io/nvbench_helpers.hpp>
#include <cudf/io/orc.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
// Size of the data in the benchmark dataframe; chosen to be low enough to allow benchmarks to
// run on most GPUs, but large enough to allow highest throughput
constexpr int64_t data_size = 512 << 20;  // 512 MiB
constexpr cudf::size_type num_cols = 64;
void orc_read_common(cudf::io::orc_writer_options const& opts,
cuio_source_sink_pair& source_sink,
nvbench::state& state)
{
cudf::io::write_orc(opts);
cudf::io::orc_reader_options read_opts =
cudf::io::orc_reader_options::builder(source_sink.make_source_info());
auto mem_stats_logger = cudf::memory_stats_logger(); // init stats logger
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::sync | nvbench::exec_tag::timer,
[&](nvbench::launch& launch, auto& timer) {
try_drop_l3_cache();
timer.start();
cudf::io::read_orc(read_opts);
timer.stop();
});
auto const time = state.get_summary("nv/cold/time/gpu/mean").get_float64("value");
state.add_element_count(static_cast<double>(data_size) / time, "bytes_per_second");
state.add_buffer_size(
mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage");
state.add_buffer_size(source_sink.size(), "encoded_file_size", "encoded_file_size");
}
template <data_type DataType, cudf::io::io_type IOType>
void BM_orc_read_data(nvbench::state& state,
nvbench::type_list<nvbench::enum_type<DataType>, nvbench::enum_type<IOType>>)
{
auto const d_type = get_type_or_group(static_cast<int32_t>(DataType));
cudf::size_type const cardinality = state.get_int64("cardinality");
cudf::size_type const run_length = state.get_int64("run_length");
auto const tbl =
create_random_table(cycle_dtypes(d_type, num_cols),
table_size_bytes{data_size},
data_profile_builder().cardinality(cardinality).avg_run_length(run_length));
auto const view = tbl->view();
cuio_source_sink_pair source_sink(IOType);
cudf::io::orc_writer_options opts =
cudf::io::orc_writer_options::builder(source_sink.make_sink_info(), view);
orc_read_common(opts, source_sink, state);
}
template <cudf::io::io_type IOType, cudf::io::compression_type Compression>
void BM_orc_read_io_compression(
nvbench::state& state,
nvbench::type_list<nvbench::enum_type<IOType>, nvbench::enum_type<Compression>>)
{
auto const d_type = get_type_or_group({static_cast<int32_t>(data_type::INTEGRAL_SIGNED),
static_cast<int32_t>(data_type::FLOAT),
static_cast<int32_t>(data_type::DECIMAL),
static_cast<int32_t>(data_type::TIMESTAMP),
static_cast<int32_t>(data_type::STRING),
static_cast<int32_t>(data_type::LIST),
static_cast<int32_t>(data_type::STRUCT)});
cudf::size_type const cardinality = state.get_int64("cardinality");
cudf::size_type const run_length = state.get_int64("run_length");
auto const tbl =
create_random_table(cycle_dtypes(d_type, num_cols),
table_size_bytes{data_size},
data_profile_builder().cardinality(cardinality).avg_run_length(run_length));
auto const view = tbl->view();
cuio_source_sink_pair source_sink(IOType);
cudf::io::orc_writer_options opts =
cudf::io::orc_writer_options::builder(source_sink.make_sink_info(), view)
.compression(Compression);
orc_read_common(opts, source_sink, state);
}
using d_type_list = nvbench::enum_type_list<data_type::INTEGRAL_SIGNED,
data_type::FLOAT,
data_type::DECIMAL,
data_type::TIMESTAMP,
data_type::STRING,
data_type::LIST,
data_type::STRUCT>;
using io_list = nvbench::enum_type_list<cudf::io::io_type::FILEPATH,
cudf::io::io_type::HOST_BUFFER,
cudf::io::io_type::DEVICE_BUFFER>;
using compression_list =
nvbench::enum_type_list<cudf::io::compression_type::SNAPPY, cudf::io::compression_type::NONE>;
NVBENCH_BENCH_TYPES(BM_orc_read_data,
NVBENCH_TYPE_AXES(d_type_list,
nvbench::enum_type_list<cudf::io::io_type::DEVICE_BUFFER>))
.set_name("orc_read_decode")
.set_type_axes_names({"data_type", "io"})
.set_min_samples(4)
.add_int64_axis("cardinality", {0, 1000})
.add_int64_axis("run_length", {1, 32});
NVBENCH_BENCH_TYPES(BM_orc_read_io_compression, NVBENCH_TYPE_AXES(io_list, compression_list))
.set_name("orc_read_io_compression")
.set_type_axes_names({"io", "compression"})
.set_min_samples(4)
.add_int64_axis("cardinality", {0, 1000})
.add_int64_axis("run_length", {1, 32});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks/io
|
rapidsai_public_repos/cudf/cpp/benchmarks/io/text/multibyte_split.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/io/cuio_common.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf_test/file_utilities.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/utilities/pinned_host_vector.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/io/text/data_chunk_source_factories.hpp>
#include <cudf/io/text/detail/bgzip_utils.hpp>
#include <cudf/io/text/multibyte_split.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/strings/combine.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <thrust/transform.h>
#include <nvbench/nvbench.cuh>
#include <cstdio>
#include <fstream>
#include <memory>
#include <random>
temp_directory const temp_dir("cudf_nvbench");
enum class data_chunk_source_type { device, file, file_datasource, host, host_pinned, file_bgzip };
NVBENCH_DECLARE_ENUM_TYPE_STRINGS(
data_chunk_source_type,
[](auto value) {
switch (value) {
case data_chunk_source_type::device: return "device";
case data_chunk_source_type::file: return "file";
case data_chunk_source_type::file_datasource: return "file_datasource";
case data_chunk_source_type::host: return "host";
case data_chunk_source_type::host_pinned: return "host_pinned";
case data_chunk_source_type::file_bgzip: return "file_bgzip";
default: return "Unknown";
}
},
[](auto) { return std::string{}; })
static cudf::string_scalar create_random_input(int32_t num_chars,
double delim_factor,
double deviation,
std::string delim)
{
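  // Worked example (hypothetical inputs): num_chars = 100, delim_factor = 0.25, delim = "\n"
  // -> 25 single-char delimiters and 75 value chars spread over 25 rows (~3 chars per value)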
auto const num_delims = static_cast<int32_t>((num_chars * delim_factor) / delim.size());
auto const num_delim_chars = num_delims * delim.size();
auto const num_value_chars = num_chars - num_delim_chars;
auto const num_rows = num_delims;
auto const value_size_avg = static_cast<int32_t>(num_value_chars / num_rows);
auto const value_size_min = static_cast<int32_t>(value_size_avg * (1 - deviation));
auto const value_size_max = static_cast<int32_t>(value_size_avg * (1 + deviation));
data_profile const table_profile = data_profile_builder().distribution(
cudf::type_id::STRING, distribution_id::NORMAL, value_size_min, value_size_max);
auto const values =
create_random_column(cudf::type_id::STRING, row_count{num_rows}, table_profile);
auto delim_scalar = cudf::make_string_scalar(delim);
auto delims_column = cudf::make_column_from_scalar(*delim_scalar, num_rows);
auto input_table = cudf::table_view({values->view(), delims_column->view()});
auto input_column = cudf::strings::concatenate(input_table);
// extract the chars from the returned strings column.
auto input_column_contents = input_column->release();
auto chars_column_contents = input_column_contents.children[1]->release();
auto chars_buffer = chars_column_contents.data.release();
// turn the chars in to a string scalar.
return cudf::string_scalar(std::move(*chars_buffer));
}
static void write_bgzip_file(cudf::host_span<char const> host_data, std::ostream& output_stream)
{
  // vary the chunk size a bit, leaving enough headroom to stay under the 16-bit (64 KiB)
  // BGZF block size limit
std::uniform_int_distribution<std::size_t> chunk_size_dist{64000, 65000};
std::default_random_engine rng{};
std::size_t pos = 0;
while (pos < host_data.size()) {
auto const remainder = host_data.size() - pos;
auto const chunk_size = std::min(remainder, chunk_size_dist(rng));
cudf::io::text::detail::bgzip::write_compressed_block(output_stream,
{host_data.data() + pos, chunk_size});
pos += chunk_size;
}
// empty block denotes EOF
cudf::io::text::detail::bgzip::write_uncompressed_block(output_stream, {});
}
template <data_chunk_source_type source_type>
static void bench_multibyte_split(nvbench::state& state,
nvbench::type_list<nvbench::enum_type<source_type>>)
{
auto const delim_size = state.get_int64("delim_size");
auto const delim_percent = state.get_int64("delim_percent");
auto const file_size_approx = state.get_int64("size_approx");
auto const byte_range_percent = state.get_int64("byte_range_percent");
auto const strip_delimiters = bool(state.get_int64("strip_delimiters"));
auto const byte_range_factor = static_cast<double>(byte_range_percent) / 100;
CUDF_EXPECTS(delim_percent >= 1, "delimiter percent must be at least 1");
CUDF_EXPECTS(delim_percent <= 50, "delimiter percent must be at most 50");
CUDF_EXPECTS(byte_range_percent >= 1, "byte range percent must be at least 1");
CUDF_EXPECTS(byte_range_percent <= 100, "byte range percent must be at most 100");
auto delim = std::string(delim_size, '0');
  // the algorithm supports at most 7 repeated characters, so use distinct characters in the
  // delimiter
std::iota(delim.begin(), delim.end(), '1');
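  // e.g. delim_size = 4 yields the delimiter "1234"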
auto const delim_factor = static_cast<double>(delim_percent) / 100;
std::unique_ptr<cudf::io::datasource> datasource;
auto device_input = create_random_input(file_size_approx, delim_factor, 0.05, delim);
auto host_input = std::vector<char>{};
auto host_pinned_input = cudf::detail::pinned_host_vector<char>{};
if (source_type != data_chunk_source_type::device &&
source_type != data_chunk_source_type::host_pinned) {
host_input = cudf::detail::make_std_vector_sync<char>(
{device_input.data(), static_cast<std::size_t>(device_input.size())},
cudf::get_default_stream());
}
if (source_type == data_chunk_source_type::host_pinned) {
host_pinned_input.resize(static_cast<std::size_t>(device_input.size()));
CUDF_CUDA_TRY(cudaMemcpy(
host_pinned_input.data(), device_input.data(), host_pinned_input.size(), cudaMemcpyDefault));
}
auto source = [&] {
switch (source_type) {
case data_chunk_source_type::file:
case data_chunk_source_type::file_datasource: {
auto const temp_file_name = random_file_in_dir(temp_dir.path());
std::ofstream(temp_file_name, std::ofstream::out)
.write(host_input.data(), host_input.size());
if (source_type == data_chunk_source_type::file) {
return cudf::io::text::make_source_from_file(temp_file_name);
} else {
datasource = cudf::io::datasource::create(temp_file_name);
return cudf::io::text::make_source(*datasource);
}
}
case data_chunk_source_type::host: //
return cudf::io::text::make_source(host_input);
case data_chunk_source_type::host_pinned:
return cudf::io::text::make_source(host_pinned_input);
case data_chunk_source_type::device: //
return cudf::io::text::make_source(device_input);
case data_chunk_source_type::file_bgzip: {
auto const temp_file_name = random_file_in_dir(temp_dir.path());
{
std::ofstream output_stream(temp_file_name, std::ofstream::out);
write_bgzip_file(host_input, output_stream);
}
return cudf::io::text::make_source_from_bgzip_file(temp_file_name);
}
default: CUDF_FAIL();
}
}();
auto mem_stats_logger = cudf::memory_stats_logger();
auto const range_size = static_cast<int64_t>(device_input.size() * byte_range_factor);
auto const range_offset = (device_input.size() - range_size) / 2;
cudf::io::text::byte_range_info range{range_offset, range_size};
cudf::io::text::parse_options options{range, strip_delimiters};
std::unique_ptr<cudf::column> output;
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::sync, [&](nvbench::launch& launch) {
try_drop_l3_cache();
output = cudf::io::text::multibyte_split(*source, delim, options);
});
state.add_buffer_size(mem_stats_logger.peak_memory_usage(), "pmu", "Peak Memory Usage");
// TODO adapt to consistent naming scheme once established
state.add_buffer_size(range_size, "efs", "Encoded file size");
}
using source_type_list = nvbench::enum_type_list<data_chunk_source_type::device,
data_chunk_source_type::file,
data_chunk_source_type::file_datasource,
data_chunk_source_type::host,
data_chunk_source_type::host_pinned,
data_chunk_source_type::file_bgzip>;
NVBENCH_BENCH_TYPES(bench_multibyte_split,
NVBENCH_TYPE_AXES(nvbench::enum_type_list<data_chunk_source_type::file>))
.set_name("multibyte_split_delimiters")
.set_min_samples(4)
.add_int64_axis("strip_delimiters", {0, 1})
.add_int64_axis("delim_size", {1, 4, 7})
.add_int64_axis("delim_percent", {1, 25})
.add_int64_power_of_two_axis("size_approx", {15})
.add_int64_axis("byte_range_percent", {50});
NVBENCH_BENCH_TYPES(bench_multibyte_split, NVBENCH_TYPE_AXES(source_type_list))
.set_name("multibyte_split_source")
.set_min_samples(4)
.add_int64_axis("strip_delimiters", {1})
.add_int64_axis("delim_size", {1})
.add_int64_axis("delim_percent", {1})
.add_int64_power_of_two_axis("size_approx", {15, 30})
.add_int64_axis("byte_range_percent", {10, 100});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks/io
|
rapidsai_public_repos/cudf/cpp/benchmarks/io/parquet/parquet_reader_options.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/io/cuio_common.hpp>
#include <benchmarks/io/nvbench_helpers.hpp>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/io/parquet.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
// Size of the data in the benchmark dataframe; chosen to be low enough to allow benchmarks to
// run on most GPUs, but large enough to allow highest throughput
constexpr std::size_t data_size = 512 << 20;  // 512 MiB
// The number of separate read calls to use when reading files in multiple chunks
// Each call reads roughly equal amounts of data
constexpr int32_t chunked_read_num_chunks = 4;
std::vector<std::string> get_top_level_col_names(cudf::io::source_info const& source)
{
auto const top_lvl_cols = cudf::io::read_parquet_metadata(source).schema().root().children();
std::vector<std::string> col_names;
std::transform(top_lvl_cols.cbegin(),
top_lvl_cols.cend(),
std::back_inserter(col_names),
[](auto const& col_meta) { return col_meta.name(); });
return col_names;
}
template <column_selection ColSelection,
row_selection RowSelection,
converts_strings ConvertsStrings,
uses_pandas_metadata UsesPandasMetadata,
cudf::type_id Timestamp>
void BM_parquet_read_options(nvbench::state& state,
nvbench::type_list<nvbench::enum_type<ColSelection>,
nvbench::enum_type<RowSelection>,
nvbench::enum_type<ConvertsStrings>,
nvbench::enum_type<UsesPandasMetadata>,
nvbench::enum_type<Timestamp>>)
{
auto const num_chunks = RowSelection == row_selection::ALL ? 1 : chunked_read_num_chunks;
auto constexpr str_to_categories = ConvertsStrings == converts_strings::YES;
auto constexpr uses_pd_metadata = UsesPandasMetadata == uses_pandas_metadata::YES;
auto const ts_type = cudf::data_type{Timestamp};
auto const data_types =
dtypes_for_column_selection(get_type_or_group({static_cast<int32_t>(data_type::INTEGRAL),
static_cast<int32_t>(data_type::FLOAT),
static_cast<int32_t>(data_type::DECIMAL),
static_cast<int32_t>(data_type::TIMESTAMP),
static_cast<int32_t>(data_type::DURATION),
static_cast<int32_t>(data_type::STRING),
static_cast<int32_t>(data_type::LIST),
static_cast<int32_t>(data_type::STRUCT)}),
ColSelection);
auto const tbl = create_random_table(data_types, table_size_bytes{data_size});
auto const view = tbl->view();
cuio_source_sink_pair source_sink(io_type::HOST_BUFFER);
cudf::io::parquet_writer_options options =
cudf::io::parquet_writer_options::builder(source_sink.make_sink_info(), view);
cudf::io::write_parquet(options);
auto const cols_to_read =
select_column_names(get_top_level_col_names(source_sink.make_source_info()), ColSelection);
cudf::io::parquet_reader_options read_options =
cudf::io::parquet_reader_options::builder(source_sink.make_source_info())
.columns(cols_to_read)
.convert_strings_to_categories(str_to_categories)
.use_pandas_metadata(uses_pd_metadata)
.timestamp_type(ts_type);
  auto const num_row_groups =
    cudf::io::read_parquet_metadata(source_sink.make_source_info()).num_rowgroups();
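  // rows requested per read call when RowSelection == NROWS; the last chunk may be smaller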
auto const chunk_row_cnt = cudf::util::div_rounding_up_unsafe(view.num_rows(), num_chunks);
auto mem_stats_logger = cudf::memory_stats_logger();
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(
nvbench::exec_tag::sync | nvbench::exec_tag::timer, [&](nvbench::launch& launch, auto& timer) {
try_drop_l3_cache();
timer.start();
cudf::size_type rows_read = 0;
for (int32_t chunk = 0; chunk < num_chunks; ++chunk) {
switch (RowSelection) {
case row_selection::ALL: break;
case row_selection::ROW_GROUPS: {
read_options.set_row_groups({segments_in_chunk(num_row_groups, num_chunks, chunk)});
} break;
case row_selection::NROWS:
read_options.set_skip_rows(chunk * chunk_row_cnt);
read_options.set_num_rows(chunk_row_cnt);
break;
default: CUDF_FAIL("Unsupported row selection method");
}
rows_read += cudf::io::read_parquet(read_options).tbl->num_rows();
}
CUDF_EXPECTS(rows_read == view.num_rows(), "Benchmark did not read the entire table");
timer.stop();
});
auto const elapsed_time = state.get_summary("nv/cold/time/gpu/mean").get_float64("value");
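  // scale the data size by the fraction of columns actually read when computing throughput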
auto const data_processed = data_size * cols_to_read.size() / view.num_columns();
state.add_element_count(static_cast<double>(data_processed) / elapsed_time, "bytes_per_second");
state.add_buffer_size(
mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage");
state.add_buffer_size(source_sink.size(), "encoded_file_size", "encoded_file_size");
}
using row_selections =
nvbench::enum_type_list<row_selection::ALL, row_selection::NROWS, row_selection::ROW_GROUPS>;
NVBENCH_BENCH_TYPES(BM_parquet_read_options,
NVBENCH_TYPE_AXES(nvbench::enum_type_list<column_selection::ALL>,
row_selections,
nvbench::enum_type_list<converts_strings::YES>,
nvbench::enum_type_list<uses_pandas_metadata::YES>,
nvbench::enum_type_list<cudf::type_id::EMPTY>))
.set_name("parquet_read_row_selection")
.set_type_axes_names({"column_selection",
"row_selection",
"str_to_categories",
"uses_pandas_metadata",
"timestamp_type"})
.set_min_samples(4);
using col_selections = nvbench::enum_type_list<column_selection::ALL,
column_selection::ALTERNATE,
column_selection::FIRST_HALF,
column_selection::SECOND_HALF>;
NVBENCH_BENCH_TYPES(BM_parquet_read_options,
NVBENCH_TYPE_AXES(col_selections,
nvbench::enum_type_list<row_selection::ALL>,
nvbench::enum_type_list<converts_strings::YES>,
nvbench::enum_type_list<uses_pandas_metadata::YES>,
nvbench::enum_type_list<cudf::type_id::EMPTY>))
.set_name("parquet_read_column_selection")
.set_type_axes_names({"column_selection",
"row_selection",
"str_to_categories",
"uses_pandas_metadata",
"timestamp_type"})
.set_min_samples(4);
NVBENCH_BENCH_TYPES(
BM_parquet_read_options,
NVBENCH_TYPE_AXES(nvbench::enum_type_list<column_selection::ALL>,
nvbench::enum_type_list<row_selection::ALL>,
nvbench::enum_type_list<converts_strings::YES, converts_strings::NO>,
nvbench::enum_type_list<uses_pandas_metadata::YES, uses_pandas_metadata::NO>,
nvbench::enum_type_list<cudf::type_id::EMPTY>))
.set_name("parquet_read_misc_options")
.set_type_axes_names({"column_selection",
"row_selection",
"str_to_categories",
"uses_pandas_metadata",
"timestamp_type"})
.set_min_samples(4);
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks/io
|
rapidsai_public_repos/cudf/cpp/benchmarks/io/parquet/parquet_writer.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/io/cuio_common.hpp>
#include <benchmarks/io/nvbench_helpers.hpp>
#include <cudf/io/parquet.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
NVBENCH_DECLARE_ENUM_TYPE_STRINGS(
cudf::io::statistics_freq,
[](auto value) {
switch (value) {
case cudf::io::statistics_freq::STATISTICS_NONE: return "STATISTICS_NONE";
case cudf::io::statistics_freq::STATISTICS_ROWGROUP: return "STATISTICS_ROWGROUP";
case cudf::io::statistics_freq::STATISTICS_PAGE: return "STATISTICS_PAGE";
case cudf::io::statistics_freq::STATISTICS_COLUMN: return "STATISTICS_COLUMN";
default: return "Unknown";
}
},
[](auto) { return std::string{}; })
// Size of the data in the benchmark dataframe; chosen to be low enough to allow benchmarks to
// run on most GPUs, but large enough to allow highest throughput
constexpr size_t data_size = 512 << 20;  // 512 MiB
constexpr cudf::size_type num_cols = 64;
template <data_type DataType>
void BM_parq_write_encode(nvbench::state& state, nvbench::type_list<nvbench::enum_type<DataType>>)
{
auto const data_types = get_type_or_group(static_cast<int32_t>(DataType));
cudf::size_type const cardinality = state.get_int64("cardinality");
cudf::size_type const run_length = state.get_int64("run_length");
auto const compression = cudf::io::compression_type::SNAPPY;
auto const sink_type = io_type::VOID;
auto const tbl =
create_random_table(cycle_dtypes(data_types, num_cols),
table_size_bytes{data_size},
data_profile_builder().cardinality(cardinality).avg_run_length(run_length));
auto const view = tbl->view();
std::size_t encoded_file_size = 0;
auto const mem_stats_logger = cudf::memory_stats_logger();
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::timer | nvbench::exec_tag::sync,
[&](nvbench::launch& launch, auto& timer) {
cuio_source_sink_pair source_sink(sink_type);
timer.start();
cudf::io::parquet_writer_options opts =
cudf::io::parquet_writer_options::builder(source_sink.make_sink_info(), view)
.compression(compression);
cudf::io::write_parquet(opts);
timer.stop();
encoded_file_size = source_sink.size();
});
auto const time = state.get_summary("nv/cold/time/gpu/mean").get_float64("value");
state.add_element_count(static_cast<double>(data_size) / time, "bytes_per_second");
state.add_buffer_size(
mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage");
state.add_buffer_size(encoded_file_size, "encoded_file_size", "encoded_file_size");
}
template <cudf::io::io_type IO, cudf::io::compression_type Compression>
void BM_parq_write_io_compression(
nvbench::state& state,
nvbench::type_list<nvbench::enum_type<IO>, nvbench::enum_type<Compression>>)
{
auto const data_types = get_type_or_group({static_cast<int32_t>(data_type::INTEGRAL),
static_cast<int32_t>(data_type::FLOAT),
static_cast<int32_t>(data_type::DECIMAL),
static_cast<int32_t>(data_type::TIMESTAMP),
static_cast<int32_t>(data_type::DURATION),
static_cast<int32_t>(data_type::STRING),
static_cast<int32_t>(data_type::LIST),
static_cast<int32_t>(data_type::STRUCT)});
cudf::size_type const cardinality = state.get_int64("cardinality");
cudf::size_type const run_length = state.get_int64("run_length");
auto const compression = Compression;
auto const sink_type = IO;
auto const tbl =
create_random_table(cycle_dtypes(data_types, num_cols),
table_size_bytes{data_size},
data_profile_builder().cardinality(cardinality).avg_run_length(run_length));
auto const view = tbl->view();
std::size_t encoded_file_size = 0;
auto const mem_stats_logger = cudf::memory_stats_logger();
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::timer | nvbench::exec_tag::sync,
[&](nvbench::launch& launch, auto& timer) {
cuio_source_sink_pair source_sink(sink_type);
timer.start();
cudf::io::parquet_writer_options opts =
cudf::io::parquet_writer_options::builder(source_sink.make_sink_info(), view)
.compression(compression);
cudf::io::write_parquet(opts);
timer.stop();
encoded_file_size = source_sink.size();
});
auto const time = state.get_summary("nv/cold/time/gpu/mean").get_float64("value");
state.add_element_count(static_cast<double>(data_size) / time, "bytes_per_second");
state.add_buffer_size(
mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage");
state.add_buffer_size(encoded_file_size, "encoded_file_size", "encoded_file_size");
}
template <cudf::io::statistics_freq Statistics, cudf::io::compression_type Compression>
void BM_parq_write_varying_options(
nvbench::state& state,
nvbench::type_list<nvbench::enum_type<Statistics>, nvbench::enum_type<Compression>>)
{
auto const enable_stats = Statistics;
auto const compression = Compression;
auto const file_path = state.get_string("file_path");
auto const data_types = get_type_or_group({static_cast<int32_t>(data_type::INTEGRAL_SIGNED),
static_cast<int32_t>(data_type::FLOAT),
static_cast<int32_t>(data_type::DECIMAL),
static_cast<int32_t>(data_type::TIMESTAMP),
static_cast<int32_t>(data_type::DURATION),
static_cast<int32_t>(data_type::STRING),
static_cast<int32_t>(data_type::LIST)});
auto const tbl = create_random_table(data_types, table_size_bytes{data_size});
auto const view = tbl->view();
std::size_t encoded_file_size = 0;
auto mem_stats_logger = cudf::memory_stats_logger();
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::timer | nvbench::exec_tag::sync,
[&](nvbench::launch& launch, auto& timer) {
cuio_source_sink_pair source_sink(io_type::FILEPATH);
timer.start();
cudf::io::parquet_writer_options const options =
cudf::io::parquet_writer_options::builder(source_sink.make_sink_info(), view)
.compression(compression)
.stats_level(enable_stats)
.column_chunks_file_paths({file_path});
cudf::io::write_parquet(options);
timer.stop();
encoded_file_size = source_sink.size();
});
auto const time = state.get_summary("nv/cold/time/gpu/mean").get_float64("value");
state.add_element_count(static_cast<double>(data_size) / time, "bytes_per_second");
state.add_buffer_size(
mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage");
state.add_buffer_size(encoded_file_size, "encoded_file_size", "encoded_file_size");
}
using d_type_list = nvbench::enum_type_list<data_type::INTEGRAL,
data_type::FLOAT,
data_type::DECIMAL,
data_type::TIMESTAMP,
data_type::DURATION,
data_type::STRING,
data_type::LIST,
data_type::STRUCT>;
using io_list = nvbench::enum_type_list<cudf::io::io_type::FILEPATH,
cudf::io::io_type::HOST_BUFFER,
cudf::io::io_type::VOID>;
using compression_list =
nvbench::enum_type_list<cudf::io::compression_type::SNAPPY, cudf::io::compression_type::NONE>;
using stats_list = nvbench::enum_type_list<cudf::io::STATISTICS_NONE,
cudf::io::STATISTICS_ROWGROUP,
cudf::io::STATISTICS_COLUMN,
cudf::io::STATISTICS_PAGE>;
NVBENCH_BENCH_TYPES(BM_parq_write_encode, NVBENCH_TYPE_AXES(d_type_list))
.set_name("parquet_write_encode")
.set_type_axes_names({"data_type"})
.set_min_samples(4)
.add_int64_axis("cardinality", {0, 1000})
.add_int64_axis("run_length", {1, 32});
NVBENCH_BENCH_TYPES(BM_parq_write_io_compression, NVBENCH_TYPE_AXES(io_list, compression_list))
.set_name("parquet_write_io_compression")
.set_type_axes_names({"io", "compression"})
.set_min_samples(4)
.add_int64_axis("cardinality", {0, 1000})
.add_int64_axis("run_length", {1, 32});
NVBENCH_BENCH_TYPES(BM_parq_write_varying_options, NVBENCH_TYPE_AXES(stats_list, compression_list))
.set_name("parquet_write_options")
.set_type_axes_names({"statistics", "compression"})
.set_min_samples(4)
.add_string_axis("file_path", {"unused_path.parquet", ""});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks/io
|
rapidsai_public_repos/cudf/cpp/benchmarks/io/parquet/parquet_reader_input.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/io/cuio_common.hpp>
#include <benchmarks/io/nvbench_helpers.hpp>
#include <cudf/io/parquet.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
// Size of the data in the benchmark dataframe; chosen to be low enough to allow benchmarks to
// run on most GPUs, but large enough to allow highest throughput
constexpr size_t data_size = 512 << 20;  // 512 MiB
constexpr cudf::size_type num_cols = 64;
void parquet_read_common(cudf::io::parquet_writer_options const& write_opts,
cuio_source_sink_pair& source_sink,
nvbench::state& state)
{
cudf::io::write_parquet(write_opts);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(source_sink.make_source_info());
auto mem_stats_logger = cudf::memory_stats_logger();
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::sync | nvbench::exec_tag::timer,
[&](nvbench::launch& launch, auto& timer) {
try_drop_l3_cache();
timer.start();
cudf::io::read_parquet(read_opts);
timer.stop();
});
auto const time = state.get_summary("nv/cold/time/gpu/mean").get_float64("value");
state.add_element_count(static_cast<double>(data_size) / time, "bytes_per_second");
state.add_buffer_size(
mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage");
state.add_buffer_size(source_sink.size(), "encoded_file_size", "encoded_file_size");
}
template <data_type DataType, cudf::io::io_type IOType>
void BM_parquet_read_data(
nvbench::state& state,
nvbench::type_list<nvbench::enum_type<DataType>, nvbench::enum_type<IOType>>)
{
auto const d_type = get_type_or_group(static_cast<int32_t>(DataType));
cudf::size_type const cardinality = state.get_int64("cardinality");
cudf::size_type const run_length = state.get_int64("run_length");
auto const compression = cudf::io::compression_type::SNAPPY;
auto const tbl =
create_random_table(cycle_dtypes(d_type, num_cols),
table_size_bytes{data_size},
data_profile_builder().cardinality(cardinality).avg_run_length(run_length));
auto const view = tbl->view();
cuio_source_sink_pair source_sink(IOType);
cudf::io::parquet_writer_options write_opts =
cudf::io::parquet_writer_options::builder(source_sink.make_sink_info(), view)
.compression(compression);
parquet_read_common(write_opts, source_sink, state);
}
template <cudf::io::io_type IOType, cudf::io::compression_type Compression>
void BM_parquet_read_io_compression(
nvbench::state& state,
nvbench::type_list<nvbench::enum_type<IOType>, nvbench::enum_type<Compression>>)
{
auto const d_type = get_type_or_group({static_cast<int32_t>(data_type::INTEGRAL),
static_cast<int32_t>(data_type::FLOAT),
static_cast<int32_t>(data_type::DECIMAL),
static_cast<int32_t>(data_type::TIMESTAMP),
static_cast<int32_t>(data_type::DURATION),
static_cast<int32_t>(data_type::STRING),
static_cast<int32_t>(data_type::LIST),
static_cast<int32_t>(data_type::STRUCT)});
cudf::size_type const cardinality = state.get_int64("cardinality");
cudf::size_type const run_length = state.get_int64("run_length");
auto const compression = Compression;
auto const source_type = IOType;
auto const tbl =
create_random_table(cycle_dtypes(d_type, num_cols),
table_size_bytes{data_size},
data_profile_builder().cardinality(cardinality).avg_run_length(run_length));
auto const view = tbl->view();
cuio_source_sink_pair source_sink(source_type);
cudf::io::parquet_writer_options write_opts =
cudf::io::parquet_writer_options::builder(source_sink.make_sink_info(), view)
.compression(compression);
parquet_read_common(write_opts, source_sink, state);
}
template <cudf::io::io_type IOType>
void BM_parquet_read_io_small_mixed(nvbench::state& state,
nvbench::type_list<nvbench::enum_type<IOType>>)
{
auto const d_type =
std::pair<cudf::type_id, cudf::type_id>{cudf::type_id::STRING, cudf::type_id::INT32};
cudf::size_type const cardinality = state.get_int64("cardinality");
cudf::size_type const run_length = state.get_int64("run_length");
cudf::size_type const num_strings = state.get_int64("num_string_cols");
auto const source_type = IOType;
// want 80 pages total, across 4 columns, so 20 pages per column
cudf::size_type constexpr n_col = 4;
cudf::size_type constexpr page_size_rows = 10'000;
cudf::size_type constexpr num_rows = page_size_rows * (80 / n_col);
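  // -> 200'000 rows total: 20 pages of 10'000 rows for each of the 4 columns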
auto const tbl =
create_random_table(mix_dtypes(d_type, n_col, num_strings),
row_count{num_rows},
data_profile_builder().cardinality(cardinality).avg_run_length(run_length));
auto const view = tbl->view();
cuio_source_sink_pair source_sink(source_type);
cudf::io::parquet_writer_options write_opts =
cudf::io::parquet_writer_options::builder(source_sink.make_sink_info(), view)
      .max_page_size_rows(page_size_rows)
.compression(cudf::io::compression_type::NONE);
parquet_read_common(write_opts, source_sink, state);
}
template <data_type DataType, cudf::io::io_type IOType>
void BM_parquet_read_chunks(
nvbench::state& state,
nvbench::type_list<nvbench::enum_type<DataType>, nvbench::enum_type<IOType>>)
{
auto const d_type = get_type_or_group(static_cast<int32_t>(DataType));
cudf::size_type const cardinality = state.get_int64("cardinality");
cudf::size_type const run_length = state.get_int64("run_length");
cudf::size_type const byte_limit = state.get_int64("byte_limit");
auto const compression = cudf::io::compression_type::SNAPPY;
auto const tbl =
create_random_table(cycle_dtypes(d_type, num_cols),
table_size_bytes{data_size},
data_profile_builder().cardinality(cardinality).avg_run_length(run_length));
auto const view = tbl->view();
cuio_source_sink_pair source_sink(IOType);
cudf::io::parquet_writer_options write_opts =
cudf::io::parquet_writer_options::builder(source_sink.make_sink_info(), view)
.compression(compression);
cudf::io::write_parquet(write_opts);
cudf::io::parquet_reader_options read_opts =
cudf::io::parquet_reader_options::builder(source_sink.make_source_info());
auto mem_stats_logger = cudf::memory_stats_logger();
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::sync | nvbench::exec_tag::timer,
[&](nvbench::launch& launch, auto& timer) {
try_drop_l3_cache();
timer.start();
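                 // a byte_limit of 0 disables the limit, so the file is read in a single chunk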
auto reader = cudf::io::chunked_parquet_reader(byte_limit, read_opts);
do {
[[maybe_unused]] auto const chunk = reader.read_chunk();
} while (reader.has_next());
timer.stop();
});
auto const time = state.get_summary("nv/cold/time/gpu/mean").get_float64("value");
state.add_element_count(static_cast<double>(data_size) / time, "bytes_per_second");
state.add_buffer_size(
mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage");
state.add_buffer_size(source_sink.size(), "encoded_file_size", "encoded_file_size");
}
using d_type_list = nvbench::enum_type_list<data_type::INTEGRAL,
data_type::FLOAT,
data_type::DECIMAL,
data_type::TIMESTAMP,
data_type::DURATION,
data_type::STRING,
data_type::LIST,
data_type::STRUCT>;
using io_list = nvbench::enum_type_list<cudf::io::io_type::FILEPATH,
cudf::io::io_type::HOST_BUFFER,
cudf::io::io_type::DEVICE_BUFFER>;
using compression_list =
nvbench::enum_type_list<cudf::io::compression_type::SNAPPY, cudf::io::compression_type::NONE>;
NVBENCH_BENCH_TYPES(BM_parquet_read_data,
NVBENCH_TYPE_AXES(d_type_list,
nvbench::enum_type_list<cudf::io::io_type::DEVICE_BUFFER>))
.set_name("parquet_read_decode")
.set_type_axes_names({"data_type", "io"})
.set_min_samples(4)
.add_int64_axis("cardinality", {0, 1000})
.add_int64_axis("run_length", {1, 32});
NVBENCH_BENCH_TYPES(BM_parquet_read_io_compression, NVBENCH_TYPE_AXES(io_list, compression_list))
.set_name("parquet_read_io_compression")
.set_type_axes_names({"io", "compression"})
.set_min_samples(4)
.add_int64_axis("cardinality", {0, 1000})
.add_int64_axis("run_length", {1, 32});
NVBENCH_BENCH_TYPES(BM_parquet_read_chunks,
NVBENCH_TYPE_AXES(d_type_list,
nvbench::enum_type_list<cudf::io::io_type::DEVICE_BUFFER>))
.set_name("parquet_read_chunks")
.set_type_axes_names({"data_type", "io"})
.set_min_samples(4)
.add_int64_axis("cardinality", {0, 1000})
.add_int64_axis("run_length", {1, 32})
.add_int64_axis("byte_limit", {0, 500'000});
NVBENCH_BENCH_TYPES(BM_parquet_read_io_small_mixed,
NVBENCH_TYPE_AXES(nvbench::enum_type_list<cudf::io::io_type::FILEPATH>))
.set_name("parquet_read_io_small_mixed")
.set_type_axes_names({"io"})
.set_min_samples(4)
.add_int64_axis("cardinality", {0, 1000})
.add_int64_axis("run_length", {1, 32})
.add_int64_axis("num_string_cols", {1, 2, 3});
| 0 |
rapidsai_public_repos/cudf/cpp/benchmarks/io
|
rapidsai_public_repos/cudf/cpp/benchmarks/io/parquet/parquet_writer_chunks.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/io/cuio_common.hpp>
#include <benchmarks/io/nvbench_helpers.hpp>
#include <cudf/column/column.hpp>
#include <cudf/io/parquet.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <nvbench/nvbench.cuh>
// Size of the data in the benchmark dataframe; chosen to be low enough to allow benchmarks to
// run on most GPUs, but large enough to allow highest throughput
constexpr int64_t data_size = 512 << 20;  // 512 MiB
void PQ_write(nvbench::state& state)
{
cudf::size_type const num_cols = state.get_int64("num_cols");
auto const tbl = create_random_table(cycle_dtypes({cudf::type_id::INT32}, num_cols),
table_size_bytes{data_size});
auto const view = tbl->view();
std::size_t encoded_file_size = 0;
auto const mem_stats_logger = cudf::memory_stats_logger();
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(nvbench::exec_tag::timer | nvbench::exec_tag::sync,
[&](nvbench::launch& launch, auto& timer) {
cuio_source_sink_pair source_sink(io_type::VOID);
timer.start();
cudf::io::parquet_writer_options opts =
cudf::io::parquet_writer_options::builder(source_sink.make_sink_info(), view);
cudf::io::write_parquet(opts);
timer.stop();
encoded_file_size = source_sink.size();
});
auto const time = state.get_summary("nv/cold/time/gpu/mean").get_float64("value");
state.add_element_count(static_cast<double>(data_size) / time, "bytes_per_second");
state.add_buffer_size(
mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage");
state.add_buffer_size(encoded_file_size, "encoded_file_size", "encoded_file_size");
}
void PQ_write_chunked(nvbench::state& state)
{
cudf::size_type const num_cols = state.get_int64("num_cols");
cudf::size_type const num_tables = state.get_int64("num_chunks");
std::vector<std::unique_ptr<cudf::table>> tables;
for (cudf::size_type idx = 0; idx < num_tables; idx++) {
tables.push_back(create_random_table(cycle_dtypes({cudf::type_id::INT32}, num_cols),
table_size_bytes{size_t(data_size / num_tables)}));
}
auto const mem_stats_logger = cudf::memory_stats_logger();
std::size_t encoded_file_size = 0;
state.set_cuda_stream(nvbench::make_cuda_stream_view(cudf::get_default_stream().value()));
state.exec(
nvbench::exec_tag::timer | nvbench::exec_tag::sync, [&](nvbench::launch& launch, auto& timer) {
cuio_source_sink_pair source_sink(io_type::VOID);
timer.start();
cudf::io::chunked_parquet_writer_options opts =
cudf::io::chunked_parquet_writer_options::builder(source_sink.make_sink_info());
cudf::io::parquet_chunked_writer writer(opts);
std::for_each(tables.begin(),
tables.end(),
[&writer](std::unique_ptr<cudf::table> const& tbl) { writer.write(*tbl); });
writer.close();
timer.stop();
encoded_file_size = source_sink.size();
});
auto const time = state.get_summary("nv/cold/time/gpu/mean").get_float64("value");
state.add_element_count(static_cast<double>(data_size) / time, "bytes_per_second");
state.add_buffer_size(
mem_stats_logger.peak_memory_usage(), "peak_memory_usage", "peak_memory_usage");
state.add_buffer_size(encoded_file_size, "encoded_file_size", "encoded_file_size");
}
NVBENCH_BENCH(PQ_write)
.set_name("parquet_write_num_cols")
.set_min_samples(4)
.add_int64_axis("num_cols", {8, 1024});
NVBENCH_BENCH(PQ_write_chunked)
.set_name("parquet_chunked_write")
.set_min_samples(4)
.add_int64_axis("num_cols", {8, 1024})
.add_int64_axis("num_chunks", {8, 64});
| 0 |
rapidsai_public_repos/cudf/cpp
|
rapidsai_public_repos/cudf/cpp/libcudf_kafka/CMakeLists.txt
|
# =============================================================================
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
cmake_minimum_required(VERSION 3.26.4 FATAL_ERROR)
include(../../fetch_rapids.cmake)
include(rapids-cmake)
include(rapids-cpm)
include(rapids-cuda)
include(rapids-export)
include(rapids-find)
project(
CUDF_KAFKA
VERSION 24.02.00
LANGUAGES CXX
)
# Set a default build type if none was specified
rapids_cmake_build_type(Release)
# ##################################################################################################
# * conda environment -----------------------------------------------------------------------------
rapids_cmake_support_conda_env(conda_env MODIFY_PREFIX_PATH)
# ##################################################################################################
# * Build options
option(BUILD_TESTS "Build tests for libcudf_kafka" ON)
message(VERBOSE "CUDF_KAFKA: Build gtests: ${BUILD_TESTS}")
# ##################################################################################################
# * Dependencies
# add third party dependencies using CPM
rapids_cpm_init()
include(cmake/thirdparty/get_cudf.cmake)
include(cmake/thirdparty/get_rdkafka.cmake)
# GTests if enabled
if(BUILD_TESTS)
# GoogleTest
include(../cmake/thirdparty/get_gtest.cmake)
# include CTest module -- automatically calls enable_testing()
include(CTest)
add_subdirectory(tests)
endif()
# ##################################################################################################
# * library target --------------------------------------------------------------------------------
add_library(cudf_kafka SHARED src/kafka_consumer.cpp src/kafka_callback.cpp)
# ##################################################################################################
# * include paths ---------------------------------------------------------------------------------
target_include_directories(
cudf_kafka PUBLIC "$<BUILD_INTERFACE:${CUDF_KAFKA_SOURCE_DIR}/include>"
"$<INSTALL_INTERFACE:include>"
)
# ##################################################################################################
# * library paths ---------------------------------------------------------------------------------
target_link_libraries(cudf_kafka PUBLIC cudf::cudf RDKAFKA::RDKAFKA)
# Add Conda library, and include paths if specified
if(TARGET conda_env)
target_link_libraries(cudf_kafka PRIVATE conda_env)
endif()
set_target_properties(
cudf_kafka
PROPERTIES BUILD_RPATH "\$ORIGIN"
             INSTALL_RPATH "\$ORIGIN"
             # set target compile options
CXX_STANDARD 17
CXX_STANDARD_REQUIRED ON
)
add_library(cudf_kafka::cudf_kafka ALIAS cudf_kafka)
# ##################################################################################################
# * cudf_kafka Install ----------------------------------------------------------------------------
rapids_cmake_install_lib_dir(lib_dir)
install(
TARGETS cudf_kafka
DESTINATION ${lib_dir}
EXPORT cudf_kafka-exports
)
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
rapids_export(
INSTALL cudf_kafka
EXPORT_SET cudf_kafka-exports
GLOBAL_TARGETS cudf_kafka
NAMESPACE cudf_kafka::
)
rapids_export(
BUILD cudf_kafka
EXPORT_SET cudf_kafka-exports
GLOBAL_TARGETS cudf_kafka
NAMESPACE cudf_kafka::
)
| 0 |
rapidsai_public_repos/cudf/cpp/libcudf_kafka/include
|
rapidsai_public_repos/cudf/cpp/libcudf_kafka/include/cudf_kafka/kafka_consumer.hpp
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "kafka_callback.hpp"
#include <cudf/io/datasource.hpp>
#include <librdkafka/rdkafkacpp.h>
#include <algorithm>
#include <chrono>
#include <map>
#include <memory>
#include <string>
namespace cudf {
namespace io {
namespace external {
namespace kafka {
/**
* @brief libcudf datasource for Apache Kafka
*
* @ingroup io_datasources
*/
class kafka_consumer : public cudf::io::datasource {
public:
/**
* @brief Creates an instance of the Kafka consumer object that is in a semi-ready state.
*
   * A consumer in a semi-ready state does not yet have all of the parameters required to
   * consume from the Kafka broker; however, Kafka metadata operations are still possible.
   * This is useful for clients that only need metadata operations, and for cases where topic
   * and partition assignment must be deferred until as late as possible.
* Documentation for librdkafka configurations can be found at
* https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
*
* @param configs key/value pairs of librdkafka configurations that will be
* passed to the librdkafka client
* @param python_callable `python_callable_type` pointer to a Python functools.partial object
* @param callable_wrapper `kafka_oauth_callback_wrapper_type` Cython wrapper that will
* be used to invoke the `python_callable`. This wrapper serves the purpose
* of preventing us from having to link against the Python development library
* in libcudf_kafka.
*/
kafka_consumer(std::map<std::string, std::string> configs,
python_callable_type python_callable,
kafka_oauth_callback_wrapper_type callable_wrapper);
/**
* @brief Instantiate a Kafka consumer object. Documentation for librdkafka configurations can be
* found at https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
*
* @param configs key/value pairs of librdkafka configurations that will be
* passed to the librdkafka client
* @param python_callable `python_callable_type` pointer to a Python functools.partial object
* @param callable_wrapper `kafka_oauth_callback_wrapper_type` Cython wrapper that will
* be used to invoke the `python_callable`. This wrapper serves the purpose
* of preventing us from having to link against the Python development library
* in libcudf_kafka.
* @param topic_name name of the Kafka topic to consume from
* @param partition partition index to consume from between `0` and `TOPIC_NUM_PARTITIONS - 1`
* inclusive
* @param start_offset seek position for the specified TOPPAR (Topic/Partition combo)
* @param end_offset position in the specified TOPPAR to read to
* @param batch_timeout maximum (millisecond) read time allowed. If end_offset is not reached
* before batch_timeout, a smaller subset will be returned
* @param delimiter optional delimiter to insert into the output between kafka messages, Ex: "\n"
*/
kafka_consumer(std::map<std::string, std::string> configs,
python_callable_type python_callable,
kafka_oauth_callback_wrapper_type callable_wrapper,
std::string const& topic_name,
int partition,
int64_t start_offset,
int64_t end_offset,
int batch_timeout,
std::string const& delimiter);
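  /**
   * Example usage (illustrative sketch only; the broker address, topic name, offsets, and
   * timeout below are hypothetical placeholders, not defaults):
   * @code
   *   std::map<std::string, std::string> configs{
   *     {"metadata.broker.list", "localhost:9092"}, {"group.id", "libcudf_example"}};
   *   cudf::io::external::kafka::kafka_consumer consumer(
   *     configs, nullptr, nullptr, "example_topic", 0, 0, 100, 5000, "\n");
   *   auto const raw_messages = consumer.host_read(0, consumer.size());
   *   consumer.close(5000);
   * @endcode
   */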
/**
* @brief Returns a buffer with a subset of data from Kafka Topic
*
* @param[in] offset Bytes from the start
* @param[in] size Bytes to read
*
* @return The data buffer
*/
std::unique_ptr<cudf::io::datasource::buffer> host_read(size_t offset, size_t size) override;
/**
* @brief Returns the size of the data in Kafka buffer
*
* @return size_t The size of the source data in bytes
*/
size_t size() const override;
/**
* @brief Reads a selected range into a preallocated buffer.
*
* @param[in] offset Bytes from the start
* @param[in] size Bytes to read
* @param[in] dst Address of the existing host memory
*
* @return The number of bytes read (can be smaller than size)
*/
size_t host_read(size_t offset, size_t size, uint8_t* dst) override;
/**
* @brief Commits an offset to a specified Kafka Topic/Partition instance
*
* @throws cudf::logic_error on failure to commit the partition offset
*
* @param[in] topic Name of the Kafka topic that the offset should be set for
* @param[in] partition Partition on the specified topic that should be used
* @param[in] offset Offset that should be set for the topic/partition pair
*
*/
void commit_offset(std::string const& topic, int partition, int64_t offset);
/**
* @brief Retrieve the watermark offset values for a topic/partition
*
* @param[in] topic Name of the Kafka topic that the watermark should be retrieved for
* @param[in] partition Partition on the specified topic which should be used
* @param[in] timeout Max milliseconds to wait on a response from the Kafka broker
* @param[in] cached If True uses the last retrieved value from the Kafka broker, if False
* the latest value will be retrieved from the Kafka broker by making a network
* request.
* @return The watermark offset value for the specified topic/partition
*/
std::map<std::string, int64_t> get_watermark_offset(std::string const& topic,
int partition,
int timeout,
bool cached);
/**
* @brief Retrieve the current Kafka client configurations
*
* @return Map<string, string> of key/value pairs of the current client configurations
*/
std::map<std::string, std::string> current_configs();
/**
* @brief Get the latest offset that was successfully committed to the Kafka broker
*
* @param[in] topic Topic name for the topic/partition pair
* @param[in] partition Partition number of the topic/partition pair
*
* @return Latest offset for the specified topic/partition pair
*/
int64_t get_committed_offset(std::string const& topic, int partition);
/**
* @brief Query the Kafka broker for the list of Topic partitions for a Topic. If no topic is
* specified then the partitions for all Topics in the broker will be retrieved.
*
* @param[in] specific_topic The name of the topic for which to retrieve partitions. If empty then
* the partitions for all topics will be retrieved.
*
* @return Map of Kafka topic names with their corresponding list of topic partition values.
*/
std::map<std::string, std::vector<int32_t>> list_topics(std::string specific_topic);
/**
* @brief Close the underlying socket connection to Kafka and clean up system resources
*
* @throws cudf::logic_error on failure to close the connection
* @param timeout Max milliseconds to wait on a response
*/
void close(int timeout);
/**
* @brief Stop all active consumption and remove consumer subscriptions to topic/partition
* instances
*
* @throws cudf::logic_error on failure to unsubscribe from the active partition assignments.
*/
void unsubscribe();
virtual ~kafka_consumer(){};
private:
std::unique_ptr<RdKafka::Conf> kafka_conf; // RDKafka configuration object
std::unique_ptr<RdKafka::KafkaConsumer> consumer;
std::map<std::string, std::string> configs;
python_callable_type python_callable_;
kafka_oauth_callback_wrapper_type callable_wrapper_;
std::string topic_name;
int partition;
int64_t start_offset;
int64_t end_offset;
int batch_timeout;
int default_timeout = 10000; // milliseconds
std::string delimiter;
std::string buffer;
private:
RdKafka::ErrorCode update_consumer_topic_partition_assignment(std::string const& topic,
int partition,
int64_t offset);
/**
* Convenience method for getting "now()" in Kafka's standard format
*/
int64_t now();
void consume_to_buffer();
};
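
// Example (illustrative sketch, not part of the library): constructing a consumer
// for a fixed topic/partition window and draining it through the datasource API.
// The broker address, topic name, and offsets below are assumptions; passing
// nullptr for the Python callable and wrapper skips OAuth token refresh.
//
//   std::map<std::string, std::string> configs{
//     {"bootstrap.servers", "localhost:9092"}, {"group.id", "example-group"}};
//   kafka_consumer consumer(configs,
//                           nullptr /*python_callable*/,
//                           nullptr /*callable_wrapper*/,
//                           "example-topic",
//                           0 /*partition*/,
//                           0 /*start_offset*/,
//                           100 /*end_offset*/,
//                           5000 /*batch_timeout ms*/,
//                           "\n" /*delimiter*/);
//   auto const n_bytes = consumer.size();
//   auto const data    = consumer.host_read(0, n_bytes);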
} // namespace kafka
} // namespace external
} // namespace io
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/libcudf_kafka/include
|
rapidsai_public_repos/cudf/cpp/libcudf_kafka/include/cudf_kafka/kafka_callback.hpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/io/datasource.hpp>
#include <librdkafka/rdkafkacpp.h>
#include <map>
#include <memory>
#include <string>
namespace cudf {
namespace io {
namespace external {
namespace kafka {
/**
* @brief Python Callback function wrapper type used for Kafka OAuth events
*
* The KafkaConsumer calls the `kafka_oauth_callback_wrapper_type` when the existing
 * OAuth token is considered expired by the KafkaConsumer. Typically that
 * means this will be invoked a single time when the KafkaConsumer is created
 * to get the initial token and then intermittently as the token expires.
*
* The callback function signature is:
* `std::map<std::string, std::string> kafka_oauth_callback_wrapper_type(void*)`
*
* The callback function returns a std::map<std::string, std::string>,
 * where the std::map consists of the OAuth token and its
 * Linux epoch expiration time. Generally the token and expiration
 * time are retrieved from an external service by the callback.
* Ex: [token, token_expiration_in_epoch]
*/
using kafka_oauth_callback_wrapper_type = std::map<std::string, std::string> (*)(void*);
using python_callable_type = void*;
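
// Example (illustrative sketch): a function with a conforming wrapper signature.
// In libcudf_kafka the real wrapper is generated by Cython and forwards to the
// Python callable; the static token values below are purely assumptions.
//
//   std::map<std::string, std::string> example_oauth_wrapper(void* /*python_callable*/)
//   {
//     return {{"token", "example-oauth-token"},
//             {"token_expiration_in_epoch", "1700000000000"}};
//   }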
/**
* @brief Callback to retrieve OAuth token from external source. Invoked when
* token refresh is required.
*/
class python_oauth_refresh_callback : public RdKafka::OAuthBearerTokenRefreshCb {
public:
/**
* @brief Construct a new python oauth refresh callback object
*
* @param callback_wrapper Cython wrapper that will
* be used to invoke the `python_callable`. This wrapper serves the purpose
* of preventing us from having to link against the Python development library
* in libcudf_kafka.
* @param python_callable pointer to a Python `functools.partial` object
*/
python_oauth_refresh_callback(kafka_oauth_callback_wrapper_type callback_wrapper,
python_callable_type python_callable);
/**
* @brief Invoke the Python callback function to get the OAuth token and its expiration time
*
   * @param handle pointer to the librdkafka client handle
   * @param oauthbearer_config the `sasl.oauthbearer.config` value supplied by librdkafka
*/
void oauthbearer_token_refresh_cb(RdKafka::Handle* handle, std::string const& oauthbearer_config);
private:
kafka_oauth_callback_wrapper_type callback_wrapper_;
python_callable_type python_callable_;
};
} // namespace kafka
} // namespace external
} // namespace io
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/libcudf_kafka
|
rapidsai_public_repos/cudf/cpp/libcudf_kafka/tests/CMakeLists.txt
|
# =============================================================================
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# ##################################################################################################
# enable testing ################################################################################
# ##################################################################################################
enable_testing()
include(rapids-test)
rapids_test_init()
# This function takes in a test name and test source and handles setting all of the associated
# properties and linking to build the test
function(ConfigureTest test_name)
add_executable(${test_name} ${ARGN})
set_target_properties(
${test_name}
PROPERTIES RUNTIME_OUTPUT_DIRECTORY "$<BUILD_INTERFACE:${CUDF_KAFKA_BINARY_DIR}/gtests>"
INSTALL_RPATH "\$ORIGIN/../../../lib"
)
target_link_libraries(
${test_name} PRIVATE GTest::gmock GTest::gmock_main GTest::gtest_main cudf_kafka
)
rapids_test_add(
NAME ${test_name}
COMMAND ${test_name}
GPUS 1
PERCENT 25
INSTALL_COMPONENT_SET testing
)
endfunction()
# ##################################################################################################
# * Kafka host tests
# ----------------------------------------------------------------------------------
ConfigureTest(KAFKA_HOST_TEST kafka_consumer_tests.cpp)
rapids_test_install_relocatable(INSTALL_COMPONENT_SET testing DESTINATION bin/gtests/libcudf_kafka)
| 0 |
rapidsai_public_repos/cudf/cpp/libcudf_kafka
|
rapidsai_public_repos/cudf/cpp/libcudf_kafka/tests/kafka_consumer_tests.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_kafka/kafka_consumer.hpp>
#include <gtest/gtest.h>
#include <map>
#include <memory>
#include <string>
#include <cudf/io/csv.hpp>
#include <cudf/io/datasource.hpp>
namespace kafka = cudf::io::external::kafka;
struct KafkaDatasourceTest : public ::testing::Test {};
TEST_F(KafkaDatasourceTest, MissingGroupID)
{
// group.id is a required configuration.
std::map<std::string, std::string> kafka_configs;
kafka_configs["bootstrap.servers"] = "localhost:9092";
  kafka::python_callable_type python_callable = nullptr;
  kafka::kafka_oauth_callback_wrapper_type callback_wrapper = nullptr;
EXPECT_THROW(
kafka::kafka_consumer kc(
kafka_configs, python_callable, callback_wrapper, "csv-topic", 0, 0, 3, 5000, "\n"),
cudf::logic_error);
}
TEST_F(KafkaDatasourceTest, InvalidConfigValues)
{
// Give a made up configuration value
std::map<std::string, std::string> kafka_configs;
kafka_configs["completely_made_up_config"] = "wrong";
  kafka::python_callable_type python_callable = nullptr;
  kafka::kafka_oauth_callback_wrapper_type callback_wrapper = nullptr;
EXPECT_THROW(
kafka::kafka_consumer kc(
kafka_configs, python_callable, callback_wrapper, "csv-topic", 0, 0, 3, 5000, "\n"),
cudf::logic_error);
// Give a good config property with a bad value
kafka_configs.clear();
kafka_configs["message.max.bytes"] = "this should be a number not text";
EXPECT_THROW(
kafka::kafka_consumer kc(
kafka_configs, python_callable, callback_wrapper, "csv-topic", 0, 0, 3, 5000, "\n"),
cudf::logic_error);
}
| 0 |
rapidsai_public_repos/cudf/cpp/libcudf_kafka/cmake
|
rapidsai_public_repos/cudf/cpp/libcudf_kafka/cmake/thirdparty/get_cudf.cmake
|
# =============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# This function finds cudf and sets any additional necessary environment variables.
function(find_and_configure_cudf VERSION)
rapids_cmake_parse_version(MAJOR_MINOR ${VERSION} major_minor)
rapids_cpm_find(
cudf ${VERSION}
BUILD_EXPORT_SET cudf_kafka-exports
INSTALL_EXPORT_SET cudf_kafka-exports
CPM_ARGS
GIT_REPOSITORY https://github.com/rapidsai/cudf.git
GIT_TAG branch-${major_minor}
GIT_SHALLOW TRUE SOURCE_SUBDIR cpp
OPTIONS "BUILD_TESTS OFF" "BUILD_BENCHMARKS OFF"
)
# If after loading cudf we now have the CMAKE_CUDA_COMPILER variable we know that we need to
# re-enable the cuda language
if(CMAKE_CUDA_COMPILER)
set(cudf_REQUIRES_CUDA
TRUE
PARENT_SCOPE
)
endif()
endfunction()
set(CUDF_KAFKA_MIN_VERSION
"${CUDF_KAFKA_VERSION_MAJOR}.${CUDF_KAFKA_VERSION_MINOR}.${CUDF_KAFKA_VERSION_PATCH}"
)
find_and_configure_cudf(${CUDF_KAFKA_MIN_VERSION})
if(cudf_REQUIRES_CUDA)
rapids_cuda_init_architectures(CUDF_KAFKA)
# Since we are building cudf as part of ourselves we need to enable the CUDA language in the
# top-most scope
enable_language(CUDA)
# Since CUDF_KAFKA only enables CUDA optionally we need to manually include the file that
# rapids_cuda_init_architectures relies on `project` calling
if(DEFINED CMAKE_PROJECT_CUDF_KAFKA_INCLUDE)
include("${CMAKE_PROJECT_CUDF_KAFKA_INCLUDE}")
endif()
endif()
| 0 |
rapidsai_public_repos/cudf/cpp/libcudf_kafka/cmake
|
rapidsai_public_repos/cudf/cpp/libcudf_kafka/cmake/thirdparty/get_rdkafka.cmake
|
# =============================================================================
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# This function finds rdkafka and sets any additional necessary environment variables.
function(get_RDKafka)
rapids_find_generate_module(
RDKAFKA
HEADER_NAMES rdkafkacpp.h
INCLUDE_SUFFIXES librdkafka
LIBRARY_NAMES rdkafka++
BUILD_EXPORT_SET cudf_kafka-exports
INSTALL_EXPORT_SET cudf_kafka-exports
)
if(DEFINED ENV{RDKAFKA_ROOT})
# Since this is inside a function the modification of CMAKE_PREFIX_PATH won't leak to other
# callers/users
list(APPEND CMAKE_PREFIX_PATH "$ENV{RDKAFKA_ROOT}")
list(APPEND CMAKE_PREFIX_PATH "$ENV{RDKAFKA_ROOT}/build")
endif()
rapids_find_package(
RDKAFKA REQUIRED
BUILD_EXPORT_SET cudf_kafka-exports
INSTALL_EXPORT_SET cudf_kafka-exports
)
endfunction()
get_RDKafka()
| 0 |
rapidsai_public_repos/cudf/cpp/libcudf_kafka
|
rapidsai_public_repos/cudf/cpp/libcudf_kafka/src/kafka_callback.cpp
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_kafka/kafka_callback.hpp>
#include <librdkafka/rdkafkacpp.h>
namespace cudf {
namespace io {
namespace external {
namespace kafka {
python_oauth_refresh_callback::python_oauth_refresh_callback(
kafka_oauth_callback_wrapper_type callback_wrapper, python_callable_type python_callable)
: callback_wrapper_(callback_wrapper), python_callable_(python_callable){};
void python_oauth_refresh_callback::oauthbearer_token_refresh_cb(
RdKafka::Handle* handle, std::string const& oauthbearer_config)
{
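  // Invoke the Python callable through the Cython wrapper; the returned map is
  // expected to carry the "token" and "token_expiration_in_epoch" keys used below.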
std::map<std::string, std::string> resp = callback_wrapper_(python_callable_);
// Build parameters to pass to librdkafka
std::string token = resp["token"];
int64_t token_lifetime_ms = std::stoll(resp["token_expiration_in_epoch"]);
std::list<std::string> extensions; // currently not supported
std::string errstr;
CUDF_EXPECTS(
RdKafka::ErrorCode::ERR_NO_ERROR ==
handle->oauthbearer_set_token(token, token_lifetime_ms, "kafka", extensions, errstr),
"Error occurred while setting the oauthbearer token");
}
} // namespace kafka
} // namespace external
} // namespace io
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/libcudf_kafka
|
rapidsai_public_repos/cudf/cpp/libcudf_kafka/src/kafka_consumer.cpp
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_kafka/kafka_consumer.hpp>
#include <librdkafka/rdkafkacpp.h>
#include <algorithm>
#include <chrono>
#include <cstring>
#include <memory>
namespace cudf {
namespace io {
namespace external {
namespace kafka {
kafka_consumer::kafka_consumer(std::map<std::string, std::string> configs,
python_callable_type python_callable,
kafka_oauth_callback_wrapper_type callable_wrapper)
: configs(configs),
python_callable_(python_callable),
callable_wrapper_(callable_wrapper),
kafka_conf(RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL))
{
for (auto const& key_value : configs) {
std::string error_string;
CUDF_EXPECTS(RdKafka::Conf::ConfResult::CONF_OK ==
kafka_conf->set(key_value.first, key_value.second, error_string),
"Invalid Kafka configuration");
}
if (python_callable_ != nullptr) {
std::string error_string;
python_oauth_refresh_callback cb(callable_wrapper_, python_callable_);
CUDF_EXPECTS(RdKafka::Conf::ConfResult::CONF_OK ==
kafka_conf->set("oauthbearer_token_refresh_cb", &cb, error_string),
"Failed to set Kafka oauth callback");
}
  // Kafka > 0.9 requires group.id in the configuration
std::string conf_val;
CUDF_EXPECTS(RdKafka::Conf::ConfResult::CONF_OK == kafka_conf->get("group.id", conf_val),
"Kafka group.id must be configured");
std::string errstr;
consumer = std::unique_ptr<RdKafka::KafkaConsumer>(
RdKafka::KafkaConsumer::create(kafka_conf.get(), errstr));
}
kafka_consumer::kafka_consumer(std::map<std::string, std::string> configs,
python_callable_type python_callable,
kafka_oauth_callback_wrapper_type callback_wrapper,
std::string const& topic_name,
int partition,
int64_t start_offset,
int64_t end_offset,
int batch_timeout,
std::string const& delimiter)
: configs(configs),
python_callable_(python_callable),
callable_wrapper_(callback_wrapper),
topic_name(topic_name),
partition(partition),
start_offset(start_offset),
end_offset(end_offset),
batch_timeout(batch_timeout),
delimiter(delimiter),
kafka_conf(RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL))
{
for (auto const& key_value : configs) {
std::string error_string;
CUDF_EXPECTS(RdKafka::Conf::ConfResult::CONF_OK ==
kafka_conf->set(key_value.first, key_value.second, error_string),
"Invalid Kafka configuration");
}
if (python_callable_ != nullptr) {
std::string error_string;
python_oauth_refresh_callback cb(callable_wrapper_, python_callable_);
CUDF_EXPECTS(RdKafka::Conf::ConfResult::CONF_OK ==
kafka_conf->set("oauthbearer_token_refresh_cb", &cb, error_string),
"Failed to set Kafka oauth callback");
}
  // Kafka > 0.9 requires group.id in the configuration
std::string conf_val;
CUDF_EXPECTS(RdKafka::Conf::ConfResult::CONF_OK == kafka_conf->get("group.id", conf_val),
"Kafka group.id must be configured");
std::string errstr;
consumer = std::unique_ptr<RdKafka::KafkaConsumer>(
RdKafka::KafkaConsumer::create(kafka_conf.get(), errstr));
// Pre fill the local buffer with messages so the datasource->size() invocation
// will return a valid size.
consume_to_buffer();
}
std::unique_ptr<cudf::io::datasource::buffer> kafka_consumer::host_read(size_t offset, size_t size)
{
  if (offset > buffer.size()) { return nullptr; }
size = std::min(size, buffer.size() - offset);
return std::make_unique<non_owning_buffer>((uint8_t*)buffer.data() + offset, size);
}
size_t kafka_consumer::host_read(size_t offset, size_t size, uint8_t* dst)
{
if (offset > buffer.size()) { return 0; }
auto const read_size = std::min(size, buffer.size() - offset);
  memcpy(dst, buffer.data() + offset, read_size);
return read_size;
}
size_t kafka_consumer::size() const { return buffer.size(); }
/**
* Change the TOPPAR assignment for this consumer instance
*/
RdKafka::ErrorCode kafka_consumer::update_consumer_topic_partition_assignment(
std::string const& topic, int partition, int64_t offset)
{
std::vector<RdKafka::TopicPartition*> topic_partitions;
topic_partitions.push_back(RdKafka::TopicPartition::create(topic, partition, offset));
  // assign() copies the list internally, so free the TopicPartition objects here
  auto const err = consumer->assign(topic_partitions);
  RdKafka::TopicPartition::destroy(topic_partitions);
  return err;
}
void kafka_consumer::consume_to_buffer()
{
update_consumer_topic_partition_assignment(topic_name, partition, start_offset);
int64_t messages_read = 0;
auto end = std::chrono::steady_clock::now() + std::chrono::milliseconds(batch_timeout);
  while (messages_read < end_offset - start_offset && end > std::chrono::steady_clock::now()) {
    // consume() takes a millisecond timeout, so convert the remaining window explicitly
    auto const remaining_ms = std::chrono::duration_cast<std::chrono::milliseconds>(
      end - std::chrono::steady_clock::now());
    std::unique_ptr<RdKafka::Message> msg{
      consumer->consume(static_cast<int>(remaining_ms.count()))};
if (msg->err() == RdKafka::ErrorCode::ERR_NO_ERROR) {
      buffer.append(static_cast<char*>(msg->payload()), msg->len());
buffer.append(delimiter);
messages_read++;
} else if (msg->err() == RdKafka::ErrorCode::ERR__PARTITION_EOF) {
// If there are no more messages return
break;
}
}
}
std::map<std::string, std::string> kafka_consumer::current_configs()
{
std::map<std::string, std::string> configs;
std::list<std::string>* dump = kafka_conf->dump();
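  // RdKafka::Conf::dump() returns a flat list of alternating configuration keys
  // and values, hence the stride-2 iteration below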
for (auto it = dump->begin(); it != dump->end(); std::advance(it, 2))
configs.insert({*it, *std::next(it)});
return configs;
}
int64_t kafka_consumer::get_committed_offset(std::string const& topic, int partition)
{
std::vector<RdKafka::TopicPartition*> toppar_list;
toppar_list.push_back(RdKafka::TopicPartition::create(topic, partition));
// Query Kafka to populate the TopicPartitions with the desired offsets
CUDF_EXPECTS(RdKafka::ERR_NO_ERROR == consumer->committed(toppar_list, default_timeout),
"Failed retrieve Kafka committed offsets");
  int64_t offset = toppar_list[0]->offset();
  RdKafka::TopicPartition::destroy(toppar_list);
  return offset > 0 ? offset : -1001;  // -1001 is librdkafka's RD_KAFKA_OFFSET_INVALID
}
std::map<std::string, std::vector<int32_t>> kafka_consumer::list_topics(std::string specific_topic)
{
auto const metadata = [&]() {
std::string errstr;
auto spec_topic = std::unique_ptr<RdKafka::Topic>(
RdKafka::Topic::create(consumer.get(), specific_topic, nullptr, errstr));
RdKafka::Metadata* md;
CUDF_EXPECTS(
RdKafka::ERR_NO_ERROR ==
consumer->metadata(spec_topic == nullptr, spec_topic.get(), &md, default_timeout),
"Failed to list_topics in Kafka broker");
return std::unique_ptr<RdKafka::Metadata>{md};
}();
std::map<std::string, std::vector<int32_t>> topic_parts;
for (auto const& topic : *(metadata->topics())) {
auto& part_ids = topic_parts[topic->topic()];
auto const& parts = *(topic->partitions());
std::transform(
parts.cbegin(), parts.cend(), std::back_inserter(part_ids), [](auto const& part) {
return part->id();
});
}
return topic_parts;
}
std::map<std::string, int64_t> kafka_consumer::get_watermark_offset(std::string const& topic,
int partition,
int timeout,
bool cached)
{
int64_t low;
int64_t high;
std::map<std::string, int64_t> results;
RdKafka::ErrorCode err;
if (cached) {
err = consumer->get_watermark_offsets(topic, partition, &low, &high);
} else {
err = consumer->query_watermark_offsets(topic, partition, &low, &high, timeout);
}
if (err != RdKafka::ErrorCode::ERR_NO_ERROR) {
if (err == RdKafka::ErrorCode::ERR__PARTITION_EOF) {
results.insert(std::pair<std::string, int64_t>("low", low));
results.insert(std::pair<std::string, int64_t>("high", high));
} else {
CUDF_FAIL("Error retrieving Kafka watermark offset from broker");
}
} else {
results.insert(std::pair<std::string, int64_t>("low", low));
results.insert(std::pair<std::string, int64_t>("high", high));
}
return results;
}
void kafka_consumer::commit_offset(std::string const& topic, int partition, int64_t offset)
{
std::vector<RdKafka::TopicPartition*> partitions_;
RdKafka::TopicPartition* toppar = RdKafka::TopicPartition::create(topic, partition, offset);
CUDF_EXPECTS(toppar != nullptr, "RdKafka failed to create TopicPartition");
toppar->set_offset(offset);
partitions_.push_back(toppar);
  CUDF_EXPECTS(RdKafka::ERR_NO_ERROR == consumer->commitSync(partitions_),
               "Failed to commit consumer offsets");
  RdKafka::TopicPartition::destroy(partitions_);
}
void kafka_consumer::unsubscribe()
{
CUDF_EXPECTS(RdKafka::ErrorCode::ERR_NO_ERROR == consumer.get()->unassign(),
"Failed to unsubscribe from Kafka Consumer");
}
void kafka_consumer::close(int timeout)
{
CUDF_EXPECTS(RdKafka::ERR_NO_ERROR == consumer->close(), "Failed to close Kafka consumer");
consumer.reset(nullptr);
kafka_conf.reset(nullptr);
}
} // namespace kafka
} // namespace external
} // namespace io
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp
|
rapidsai_public_repos/cudf/cpp/cmake/config.json
|
{
"parse": {
"additional_commands": {
"CPMFindPackage": {
"kwargs": {
"NAME": 1,
"GITHUB_REPOSITORY": "?",
"GIT_TAG": "?",
"VERSION": "?",
"GIT_SHALLOW": "?",
"OPTIONS": "*",
"FIND_PACKAGE_ARGUMENTS": "*"
}
},
"ConfigureTest": {
"flags": ["TEST_NAME", "TEST_SRC"],
"kwargs": {
"GPUS": 1,
"PERCENT": 1
}
},
"ConfigureBench": {
"flags": ["BENCH_NAME", "BENCH_SRC"]
}
}
},
"format": {
"line_width": 100,
"tab_size": 2,
"command_case": "unchanged",
"max_lines_hwrap": 1,
"max_pargs_hwrap": 999,
"dangle_parens": true
},
"lint": {
"disabled_codes": ["C0301"],
"function_pattern": "[0-9A-z_]+",
"macro_pattern": "[0-9A-z_]+",
"global_var_pattern": "[A-z][0-9A-z_]+",
"internal_var_pattern": "_[A-z][0-9A-z_]+",
"local_var_pattern": "[A-z][A-z0-9_]+",
"private_var_pattern": "_[0-9A-z_]+",
"public_var_pattern": "[A-z][0-9A-z_]+",
"argument_var_pattern": "[A-z][A-z0-9_]+",
"keyword_pattern": "[A-z][0-9A-z_]+"
}
}
| 0 |
rapidsai_public_repos/cudf/cpp/cmake
|
rapidsai_public_repos/cudf/cpp/cmake/Modules/FindcuFile.cmake
|
# =============================================================================
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
#[=======================================================================[.rst:
FindcuFile
----------
Find cuFile headers and libraries.
Imported Targets
^^^^^^^^^^^^^^^^
``cufile::cuFile``
The cuFile library, if found.
``cufile::cuFileRDMA``
The cuFile RDMA library, if found.
Result Variables
^^^^^^^^^^^^^^^^
This will define the following variables in your project:
``cuFile_FOUND``
true if (the requested version of) cuFile is available.
``cuFile_VERSION``
the version of cuFile.
``cuFile_LIBRARIES``
the libraries to link against to use cuFile.
``cuFileRDMA_LIBRARIES``
the libraries to link against to use cuFile RDMA.
``cuFile_INCLUDE_DIRS``
where to find the cuFile headers.
``cuFile_COMPILE_OPTIONS``
this should be passed to target_compile_options(), if the
target is not used for linking
#]=======================================================================]
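# Example usage (illustrative; assumes this directory is on CMAKE_MODULE_PATH and
# that `my_target` is a target defined by the including project):
#
#   find_package(cuFile)
#   if(cuFile_FOUND)
#     target_link_libraries(my_target PRIVATE cufile::cuFile)
#   endif()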
# use pkg-config to get the directories and then use these values in the FIND_PATH() and
# FIND_LIBRARY() calls
find_package(PkgConfig QUIET)
pkg_check_modules(PKG_cuFile QUIET cuFile)
set(cuFile_COMPILE_OPTIONS ${PKG_cuFile_CFLAGS_OTHER})
set(cuFile_VERSION ${PKG_cuFile_VERSION})
# Find the location of the CUDA Toolkit
find_package(CUDAToolkit QUIET)
find_path(
cuFile_INCLUDE_DIR
NAMES cufile.h
HINTS ${PKG_cuFile_INCLUDE_DIRS} ${CUDAToolkit_INCLUDE_DIRS}
)
find_library(
cuFile_LIBRARY
NAMES cufile
HINTS ${PKG_cuFile_LIBRARY_DIRS} ${CUDAToolkit_LIBRARY_DIR}
)
find_library(
cuFileRDMA_LIBRARY
NAMES cufile_rdma
HINTS ${PKG_cuFile_LIBRARY_DIRS} ${CUDAToolkit_LIBRARY_DIR}
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(
cuFile
FOUND_VAR cuFile_FOUND
REQUIRED_VARS cuFile_LIBRARY cuFileRDMA_LIBRARY cuFile_INCLUDE_DIR
VERSION_VAR cuFile_VERSION
)
if(cuFile_INCLUDE_DIR AND NOT TARGET cufile::cuFile_interface)
add_library(cufile::cuFile_interface INTERFACE IMPORTED GLOBAL)
target_include_directories(
cufile::cuFile_interface INTERFACE "$<BUILD_INTERFACE:${cuFile_INCLUDE_DIR}>"
)
target_compile_options(cufile::cuFile_interface INTERFACE "${cuFile_COMPILE_OPTIONS}")
target_compile_definitions(cufile::cuFile_interface INTERFACE CUFILE_FOUND)
endif()
if(cuFile_FOUND AND NOT TARGET cufile::cuFile)
add_library(cufile::cuFile UNKNOWN IMPORTED GLOBAL)
set_target_properties(
cufile::cuFile
PROPERTIES IMPORTED_LOCATION "${cuFile_LIBRARY}"
INTERFACE_COMPILE_OPTIONS "${cuFile_COMPILE_OPTIONS}"
INTERFACE_INCLUDE_DIRECTORIES "${cuFile_INCLUDE_DIR}"
)
endif()
if(cuFile_FOUND AND NOT TARGET cufile::cuFileRDMA)
add_library(cufile::cuFileRDMA UNKNOWN IMPORTED GLOBAL)
set_target_properties(
cufile::cuFileRDMA
PROPERTIES IMPORTED_LOCATION "${cuFileRDMA_LIBRARY}"
INTERFACE_COMPILE_OPTIONS "${cuFile_COMPILE_OPTIONS}"
INTERFACE_INCLUDE_DIRECTORIES "${cuFile_INCLUDE_DIR}"
)
endif()
mark_as_advanced(cuFile_LIBRARY cuFileRDMA_LIBRARY cuFile_INCLUDE_DIR)
if(cuFile_FOUND)
set(cuFile_LIBRARIES ${cuFile_LIBRARY})
set(cuFileRDMA_LIBRARIES ${cuFileRDMA_LIBRARY})
set(cuFile_INCLUDE_DIRS ${cuFile_INCLUDE_DIR})
endif()
| 0 |
rapidsai_public_repos/cudf/cpp/cmake
|
rapidsai_public_repos/cudf/cpp/cmake/Modules/ConfigureCUDA.cmake
|
# =============================================================================
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
if(CMAKE_COMPILER_IS_GNUCXX)
list(APPEND CUDF_CXX_FLAGS -Wall -Werror -Wno-unknown-pragmas -Wno-error=deprecated-declarations)
endif()
list(APPEND CUDF_CUDA_FLAGS --expt-extended-lambda --expt-relaxed-constexpr)
# set warnings as errors
if(CUDA_WARNINGS_AS_ERRORS)
list(APPEND CUDF_CUDA_FLAGS -Werror=all-warnings)
else()
list(APPEND CUDF_CUDA_FLAGS -Werror=cross-execution-space-call)
endif()
list(APPEND CUDF_CUDA_FLAGS -Xcompiler=-Wall,-Werror,-Wno-error=deprecated-declarations)
if(DISABLE_DEPRECATION_WARNINGS)
list(APPEND CUDF_CXX_FLAGS -Wno-deprecated-declarations)
list(APPEND CUDF_CUDA_FLAGS -Xcompiler=-Wno-deprecated-declarations)
endif()
# make sure we produce smallest binary size
list(APPEND CUDF_CUDA_FLAGS -Xfatbin=-compress-all)
# Option to enable line info in CUDA device compilation to allow introspection when profiling /
# memchecking
if(CUDA_ENABLE_LINEINFO)
list(APPEND CUDF_CUDA_FLAGS -lineinfo)
endif()
# Debug options
if(CMAKE_BUILD_TYPE MATCHES Debug)
message(VERBOSE "CUDF: Building with debugging flags")
list(APPEND CUDF_CUDA_FLAGS -Xcompiler=-rdynamic)
endif()
| 0 |
rapidsai_public_repos/cudf/cpp/cmake
|
rapidsai_public_repos/cudf/cpp/cmake/Modules/JitifyPreprocessKernels.cmake
|
# =============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Create `jitify_preprocess` executable
add_executable(jitify_preprocess "${JITIFY_INCLUDE_DIR}/jitify2_preprocess.cpp")
target_compile_definitions(jitify_preprocess PRIVATE "_FILE_OFFSET_BITS=64")
target_link_libraries(jitify_preprocess CUDA::cudart ${CMAKE_DL_LIBS})
# Take a list of files to JIT-compile and run them through jitify_preprocess.
function(jit_preprocess_files)
cmake_parse_arguments(ARG "" "SOURCE_DIRECTORY" "FILES" ${ARGN})
foreach(inc IN LISTS libcudacxx_raw_includes)
list(APPEND libcudacxx_includes "-I${inc}")
endforeach()
foreach(ARG_FILE ${ARG_FILES})
set(ARG_OUTPUT ${CUDF_GENERATED_INCLUDE_DIR}/include/jit_preprocessed_files/${ARG_FILE}.jit.hpp)
get_filename_component(jit_output_directory "${ARG_OUTPUT}" DIRECTORY)
list(APPEND JIT_PREPROCESSED_FILES "${ARG_OUTPUT}")
# Note: need to pass _FILE_OFFSET_BITS=64 in COMMAND due to a limitation in how conda builds
# glibc
add_custom_command(
OUTPUT ${ARG_OUTPUT}
DEPENDS jitify_preprocess "${ARG_SOURCE_DIRECTORY}/${ARG_FILE}"
WORKING_DIRECTORY ${ARG_SOURCE_DIRECTORY}
VERBATIM
COMMAND ${CMAKE_COMMAND} -E make_directory "${jit_output_directory}"
COMMAND
"${CMAKE_COMMAND}" -E env LD_LIBRARY_PATH=${CUDAToolkit_LIBRARY_DIR}
$<TARGET_FILE:jitify_preprocess> ${ARG_FILE} -o
${CUDF_GENERATED_INCLUDE_DIR}/include/jit_preprocessed_files -i -m -std=c++17
-remove-unused-globals -D_FILE_OFFSET_BITS=64 -D__CUDACC_RTC__ -I${CUDF_SOURCE_DIR}/include
-I${CUDF_SOURCE_DIR}/src ${libcudacxx_includes} -I${CUDAToolkit_INCLUDE_DIRS}
--no-preinclude-workarounds --no-replace-pragma-once
COMMENT "Custom command to JIT-compile files."
)
endforeach()
set(JIT_PREPROCESSED_FILES
"${JIT_PREPROCESSED_FILES}"
PARENT_SCOPE
)
endfunction()
if(NOT (EXISTS "${CUDF_GENERATED_INCLUDE_DIR}/include"))
make_directory("${CUDF_GENERATED_INCLUDE_DIR}/include")
endif()
jit_preprocess_files(
SOURCE_DIRECTORY ${CUDF_SOURCE_DIR}/src FILES binaryop/jit/kernel.cu transform/jit/kernel.cu
rolling/jit/kernel.cu
)
add_custom_target(
jitify_preprocess_run
DEPENDS ${JIT_PREPROCESSED_FILES}
COMMENT "Target representing jitified files."
)
| 0 |
rapidsai_public_repos/cudf/cpp/cmake
|
rapidsai_public_repos/cudf/cpp/cmake/thirdparty/get_thrust.cmake
|
# =============================================================================
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# This function finds thrust and sets any additional necessary environment variables.
function(find_and_configure_thrust)
include(${rapids-cmake-dir}/cpm/thrust.cmake)
include(${rapids-cmake-dir}/cpm/package_override.cmake)
set(cudf_patch_dir "${CMAKE_CURRENT_FUNCTION_LIST_DIR}/patches")
rapids_cpm_package_override("${cudf_patch_dir}/thrust_override.json")
# Make sure we install thrust into the `include/libcudf` subdirectory instead of the default
include(GNUInstallDirs)
set(CMAKE_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}/libcudf")
set(CMAKE_INSTALL_LIBDIR "${CMAKE_INSTALL_INCLUDEDIR}/lib")
# Find or install Thrust with our custom set of patches
rapids_cpm_thrust(
NAMESPACE cudf
BUILD_EXPORT_SET cudf-exports
INSTALL_EXPORT_SET cudf-exports
)
# Store where CMake can find our custom Thrust install
include("${rapids-cmake-dir}/export/find_package_root.cmake")
rapids_export_find_package_root(
INSTALL Thrust [=[${CMAKE_CURRENT_LIST_DIR}/../../../include/libcudf/lib/rapids/cmake/thrust]=]
EXPORT_SET cudf-exports
CONDITION Thrust_SOURCE_DIR
)
endfunction()
find_and_configure_thrust()
| 0 |
rapidsai_public_repos/cudf/cpp/cmake
|
rapidsai_public_repos/cudf/cpp/cmake/thirdparty/get_cucollections.cmake
|
# =============================================================================
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# This function finds cuCollections and performs any additional configuration.
function(find_and_configure_cucollections)
include(${rapids-cmake-dir}/cpm/cuco.cmake)
if(BUILD_SHARED_LIBS)
rapids_cpm_cuco(BUILD_EXPORT_SET cudf-exports)
else()
rapids_cpm_cuco(BUILD_EXPORT_SET cudf-exports INSTALL_EXPORT_SET cudf-exports)
endif()
endfunction()
find_and_configure_cucollections()
| 0 |
rapidsai_public_repos/cudf/cpp/cmake
|
rapidsai_public_repos/cudf/cpp/cmake/thirdparty/get_cufile.cmake
|
# =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# This function finds cuFile and sets any additional necessary environment variables.
function(find_and_configure_cufile)
list(APPEND CMAKE_MODULE_PATH ${CUDF_SOURCE_DIR}/cmake/Modules)
rapids_find_package(cuFile)
if(cuFile_FOUND AND NOT BUILD_SHARED_LIBS)
include("${rapids-cmake-dir}/export/find_package_file.cmake")
rapids_export_find_package_file(
BUILD "${CUDF_SOURCE_DIR}/cmake/Modules/FindcuFile.cmake" EXPORT_SET cudf-exports
)
rapids_export_find_package_file(
INSTALL "${CUDF_SOURCE_DIR}/cmake/Modules/FindcuFile.cmake" EXPORT_SET cudf-exports
)
endif()
endfunction()
find_and_configure_cufile()
| 0 |
rapidsai_public_repos/cudf/cpp/cmake
|
rapidsai_public_repos/cudf/cpp/cmake/thirdparty/get_spdlog.cmake
|
# =============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Use CPM to find or clone spdlog
function(find_and_configure_spdlog)
include(${rapids-cmake-dir}/cpm/spdlog.cmake)
rapids_cpm_spdlog(FMT_OPTION "EXTERNAL_FMT_HO" INSTALL_EXPORT_SET cudf-exports)
rapids_export_package(BUILD spdlog cudf-exports)
if(spdlog_ADDED)
rapids_export(
BUILD spdlog
EXPORT_SET spdlog
GLOBAL_TARGETS spdlog spdlog_header_only
NAMESPACE spdlog::
)
include("${rapids-cmake-dir}/export/find_package_root.cmake")
rapids_export_find_package_root(
BUILD spdlog [=[${CMAKE_CURRENT_LIST_DIR}]=] EXPORT_SET cudf-exports
)
endif()
endfunction()
find_and_configure_spdlog()
| 0 |
rapidsai_public_repos/cudf/cpp/cmake
|
rapidsai_public_repos/cudf/cpp/cmake/thirdparty/get_dlpack.cmake
|
# =============================================================================
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# This function finds dlpack and sets any additional necessary environment variables.
function(find_and_configure_dlpack VERSION)
include(${rapids-cmake-dir}/find/generate_module.cmake)
rapids_find_generate_module(DLPACK HEADER_NAMES dlpack.h)
rapids_cpm_find(
dlpack ${VERSION}
GIT_REPOSITORY https://github.com/dmlc/dlpack.git
GIT_TAG v${VERSION}
GIT_SHALLOW TRUE
DOWNLOAD_ONLY TRUE
OPTIONS "BUILD_MOCK OFF"
)
if(DEFINED dlpack_SOURCE_DIR)
# otherwise find_package(DLPACK) will set this variable
set(DLPACK_INCLUDE_DIR
"${dlpack_SOURCE_DIR}/include"
PARENT_SCOPE
)
endif()
endfunction()
set(CUDF_MIN_VERSION_dlpack 0.5)
find_and_configure_dlpack(${CUDF_MIN_VERSION_dlpack})
| 0 |
rapidsai_public_repos/cudf/cpp/cmake
|
rapidsai_public_repos/cudf/cpp/cmake/thirdparty/get_nvcomp.cmake
|
# =============================================================================
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# This function finds nvcomp and sets any additional necessary environment variables.
function(find_and_configure_nvcomp)
include(${rapids-cmake-dir}/cpm/nvcomp.cmake)
rapids_cpm_nvcomp(
BUILD_EXPORT_SET cudf-exports
INSTALL_EXPORT_SET cudf-exports
USE_PROPRIETARY_BINARY ${CUDF_USE_PROPRIETARY_NVCOMP}
)
# Per-thread default stream
if(TARGET nvcomp AND CUDF_USE_PER_THREAD_DEFAULT_STREAM)
target_compile_definitions(nvcomp PRIVATE CUDA_API_PER_THREAD_DEFAULT_STREAM)
endif()
endfunction()
find_and_configure_nvcomp()
| 0 |
rapidsai_public_repos/cudf/cpp/cmake
|
rapidsai_public_repos/cudf/cpp/cmake/thirdparty/get_kvikio.cmake
|
# =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# This function finds KvikIO
function(find_and_configure_kvikio VERSION)
rapids_cpm_find(
KvikIO ${VERSION}
GLOBAL_TARGETS kvikio::kvikio
CPM_ARGS
GIT_REPOSITORY https://github.com/rapidsai/kvikio.git
GIT_TAG branch-${VERSION}
GIT_SHALLOW TRUE SOURCE_SUBDIR cpp
OPTIONS "KvikIO_BUILD_EXAMPLES OFF"
)
include("${rapids-cmake-dir}/export/find_package_root.cmake")
rapids_export_find_package_root(
BUILD KvikIO "${KvikIO_BINARY_DIR}"
EXPORT_SET cudf-exports
CONDITION KvikIO_BINARY_DIR
)
endfunction()
set(KVIKIO_MIN_VERSION_cudf "${CUDF_VERSION_MAJOR}.${CUDF_VERSION_MINOR}")
find_and_configure_kvikio(${KVIKIO_MIN_VERSION_cudf})
| 0 |
rapidsai_public_repos/cudf/cpp/cmake
|
rapidsai_public_repos/cudf/cpp/cmake/thirdparty/get_libcudacxx.cmake
|
# =============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# This function finds libcudacxx and sets any additional necessary environment variables.
function(find_and_configure_libcudacxx)
# Make sure we install libcudacxx beside our patched version of thrust
include(GNUInstallDirs)
set(CMAKE_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}/libcudf")
set(CMAKE_INSTALL_LIBDIR "${CMAKE_INSTALL_INCLUDEDIR}/lib")
include(${rapids-cmake-dir}/cpm/libcudacxx.cmake)
rapids_cpm_libcudacxx(BUILD_EXPORT_SET cudf-exports INSTALL_EXPORT_SET cudf-exports)
  # Store where CMake can find our custom libcudacxx install
include("${rapids-cmake-dir}/export/find_package_root.cmake")
rapids_export_find_package_root(
INSTALL libcudacxx
[=[${CMAKE_CURRENT_LIST_DIR}/../../../include/libcudf/lib/rapids/cmake/libcudacxx]=]
EXPORT_SET cudf-exports
CONDITION libcudacxx_SOURCE_DIR
)
endfunction()
find_and_configure_libcudacxx()
| 0 |
rapidsai_public_repos/cudf/cpp/cmake
|
rapidsai_public_repos/cudf/cpp/cmake/thirdparty/get_gtest.cmake
|
# =============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# This function finds gtest and sets any additional necessary environment variables.
function(find_and_configure_gtest)
include(${rapids-cmake-dir}/cpm/gtest.cmake)
# Find or install GoogleTest
rapids_cpm_gtest(BUILD_EXPORT_SET cudf-testing-exports INSTALL_EXPORT_SET cudf-testing-exports)
if(GTest_ADDED)
rapids_export(
BUILD GTest
VERSION ${GTest_VERSION}
EXPORT_SET GTestTargets
GLOBAL_TARGETS gtest gmock gtest_main gmock_main
NAMESPACE GTest::
)
include("${rapids-cmake-dir}/export/find_package_root.cmake")
rapids_export_find_package_root(
BUILD GTest [=[${CMAKE_CURRENT_LIST_DIR}]=] EXPORT_SET cudf-testing-exports
)
endif()
endfunction()
find_and_configure_gtest()
| 0 |
rapidsai_public_repos/cudf/cpp/cmake
|
rapidsai_public_repos/cudf/cpp/cmake/thirdparty/get_arrow.cmake
|
# =============================================================================
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Finding arrow is far more complex than it should be, and as a result we violate multiple linting
# rules aiming to limit complexity. Since all our other CMake scripts conform to expectations
# without undue difficulty, disabling those rules for just this function is our best approach for
# now. The spacing between this comment, the cmake-lint directives, and the function docstring is
# necessary to prevent cmake-format from trying to combine the lines.
# cmake-lint: disable=R0912,R0913,R0915
include_guard(GLOBAL)
# Generate a FindArrow module for the case where we need to search for arrow within a
# pip-installed pyarrow.
function(find_libarrow_in_python_wheel PYARROW_VERSION)
string(REPLACE "." ";" PYARROW_VER_COMPONENTS "${PYARROW_VERSION}")
list(GET PYARROW_VER_COMPONENTS 0 PYARROW_SO_VER)
  # The soname for Arrow libraries is constructed from the major version plus "00". Although it
  # may seem otherwise (Arrow almost exclusively releases new major versions, so
  # `${MINOR_VERSION}${PATCH_VERSION}` is almost always equivalent to "00"), the soname is not
  # generated by concatenating the major, minor, and patch versions into a single version number;
  # it is always just `${MAJOR_VERSION}00`. For example, pyarrow 12.x ships `libarrow.so.1200`.
set(PYARROW_LIB "libarrow.so.${PYARROW_SO_VER}00")
find_package(Python REQUIRED)
execute_process(
COMMAND "${Python_EXECUTABLE}" -c "import pyarrow; print(pyarrow.get_library_dirs()[0])"
OUTPUT_VARIABLE CUDF_PYARROW_WHEEL_DIR
OUTPUT_STRIP_TRAILING_WHITESPACE
)
list(APPEND CMAKE_PREFIX_PATH "${CUDF_PYARROW_WHEEL_DIR}")
rapids_find_generate_module(
Arrow NO_CONFIG
VERSION "${PYARROW_VERSION}"
LIBRARY_NAMES "${PYARROW_LIB}"
BUILD_EXPORT_SET cudf-exports
INSTALL_EXPORT_SET cudf-exports
HEADER_NAMES arrow/python/arrow_to_pandas.h
)
find_package(Arrow ${PYARROW_VERSION} MODULE REQUIRED GLOBAL)
add_library(arrow_shared ALIAS Arrow::Arrow)
# When using the libarrow inside a wheel, whether or not libcudf may be built using the new C++11
# ABI is dependent on whether the libarrow inside the wheel was compiled using that ABI because we
# need the arrow library that we bundle in cudf to be ABI-compatible with the one inside pyarrow.
# We determine what options to use by checking the glibc version on the current system, which is
# also how pip determines which manylinux-versioned pyarrow wheel to install. Note that tests will
# not build successfully without also propagating these options to builds of GTest. Similarly,
# benchmarks will not work without updating GBench (and possibly NVBench) builds. We are currently
# ignoring these limitations since we don't anticipate using this feature except for building
# wheels.
  execute_process(
    COMMAND ${CMAKE_C_COMPILER} -print-file-name=libc.so.6
    OUTPUT_VARIABLE GLIBC_EXECUTABLE
    OUTPUT_STRIP_TRAILING_WHITESPACE
  )
  execute_process(
    COMMAND ${GLIBC_EXECUTABLE}
    OUTPUT_VARIABLE GLIBC_OUTPUT
    OUTPUT_STRIP_TRAILING_WHITESPACE
  )
  string(REGEX MATCH "stable release version ([0-9]+\\.[0-9]+)" GLIBC_VERSION ${GLIBC_OUTPUT})
  string(REPLACE "stable release version " "" GLIBC_VERSION ${GLIBC_VERSION})
  string(REPLACE "." ";" GLIBC_VERSION_LIST ${GLIBC_VERSION})
  list(GET GLIBC_VERSION_LIST 1 GLIBC_VERSION_MINOR)
if(GLIBC_VERSION_MINOR LESS 28)
target_compile_options(
Arrow::Arrow INTERFACE "$<$<COMPILE_LANGUAGE:CXX>:-D_GLIBCXX_USE_CXX11_ABI=0>"
"$<$<COMPILE_LANGUAGE:CUDA>:-Xcompiler=-D_GLIBCXX_USE_CXX11_ABI=0>"
)
endif()
rapids_export_package(BUILD Arrow cudf-exports)
rapids_export_package(INSTALL Arrow cudf-exports)
list(POP_BACK CMAKE_PREFIX_PATH)
endfunction()
# This function finds arrow and sets any additional necessary environment variables.
function(find_and_configure_arrow VERSION BUILD_STATIC ENABLE_S3 ENABLE_ORC ENABLE_PYTHON
ENABLE_PARQUET
)
if(USE_LIBARROW_FROM_PYARROW)
# Generate a FindArrow.cmake to find pyarrow's libarrow.so
find_libarrow_in_python_wheel(${VERSION})
set(ARROW_FOUND
TRUE
PARENT_SCOPE
)
set(ARROW_LIBRARIES
arrow_shared
PARENT_SCOPE
)
return()
endif()
if(BUILD_STATIC)
if(TARGET arrow_static)
set(ARROW_FOUND
TRUE
PARENT_SCOPE
)
set(ARROW_LIBRARIES
arrow_static
PARENT_SCOPE
)
return()
endif()
else()
if(TARGET arrow_shared)
set(ARROW_FOUND
TRUE
PARENT_SCOPE
)
set(ARROW_LIBRARIES
arrow_shared
PARENT_SCOPE
)
return()
endif()
endif()
if(NOT ARROW_ARMV8_ARCH)
set(ARROW_ARMV8_ARCH "armv8-a")
endif()
if(NOT ARROW_SIMD_LEVEL)
set(ARROW_SIMD_LEVEL "NONE")
endif()
if(BUILD_STATIC)
set(ARROW_BUILD_STATIC ON)
set(ARROW_BUILD_SHARED OFF)
# Turn off CPM using `find_package` so we always download and make sure we get proper static
# library.
set(CPM_DOWNLOAD_Arrow TRUE)
# By default ARROW will try to search for a static version of OpenSSL which is a bad idea given
# that shared linking is advised for critical components like SSL. If a static build is
# requested, we honor ARROW's default of static linking, but users may consider setting
# ARROW_OPENSSL_USE_SHARED even in static builds.
else()
set(ARROW_BUILD_SHARED ON)
set(ARROW_BUILD_STATIC OFF)
# By default ARROW will try to search for a static version of OpenSSL which is a bad idea given
# that shared linking is advised for critical components like SSL
set(ARROW_OPENSSL_USE_SHARED ON)
endif()
set(ARROW_PYTHON_OPTIONS "")
if(ENABLE_PYTHON)
list(APPEND ARROW_PYTHON_OPTIONS "ARROW_PYTHON ON")
# Arrow's logic to build Boost from source is busted, so we have to get it from the system.
list(APPEND ARROW_PYTHON_OPTIONS "BOOST_SOURCE SYSTEM")
list(APPEND ARROW_PYTHON_OPTIONS "ARROW_DEPENDENCY_SOURCE AUTO")
endif()
set(ARROW_PARQUET_OPTIONS "")
if(ENABLE_PARQUET)
# Arrow's logic to build Boost from source is busted, so we have to get it from the system.
list(APPEND ARROW_PARQUET_OPTIONS "BOOST_SOURCE SYSTEM")
list(APPEND ARROW_PARQUET_OPTIONS "Thrift_SOURCE BUNDLED")
list(APPEND ARROW_PARQUET_OPTIONS "ARROW_DEPENDENCY_SOURCE AUTO")
endif()
rapids_cpm_find(
Arrow ${VERSION}
GLOBAL_TARGETS arrow_shared parquet_shared arrow_acero_shared arrow_dataset_shared arrow_static
parquet_static arrow_acero_static arrow_dataset_static
CPM_ARGS
GIT_REPOSITORY https://github.com/apache/arrow.git
GIT_TAG apache-arrow-${VERSION}
GIT_SHALLOW TRUE SOURCE_SUBDIR cpp
OPTIONS "CMAKE_VERBOSE_MAKEFILE ON"
"ARROW_ACERO ON"
"ARROW_IPC ON"
"ARROW_DATASET ON"
"ARROW_WITH_BACKTRACE ON"
"ARROW_CXXFLAGS -w"
"ARROW_JEMALLOC OFF"
"ARROW_S3 ${ENABLE_S3}"
"ARROW_ORC ${ENABLE_ORC}"
# e.g. needed by blazingsql-io
${ARROW_PARQUET_OPTIONS}
"ARROW_PARQUET ${ENABLE_PARQUET}"
"ARROW_FILESYSTEM ON"
${ARROW_PYTHON_OPTIONS}
# Arrow modifies CMake's GLOBAL RULE_LAUNCH_COMPILE unless this is off
"ARROW_USE_CCACHE OFF"
"ARROW_ARMV8_ARCH ${ARROW_ARMV8_ARCH}"
"ARROW_SIMD_LEVEL ${ARROW_SIMD_LEVEL}"
"ARROW_BUILD_STATIC ${ARROW_BUILD_STATIC}"
"ARROW_BUILD_SHARED ${ARROW_BUILD_SHARED}"
"ARROW_POSITION_INDEPENDENT_CODE ON"
"ARROW_DEPENDENCY_USE_SHARED ${ARROW_BUILD_SHARED}"
"ARROW_BOOST_USE_SHARED ${ARROW_BUILD_SHARED}"
"ARROW_BROTLI_USE_SHARED ${ARROW_BUILD_SHARED}"
"ARROW_GFLAGS_USE_SHARED ${ARROW_BUILD_SHARED}"
"ARROW_GRPC_USE_SHARED ${ARROW_BUILD_SHARED}"
"ARROW_PROTOBUF_USE_SHARED ${ARROW_BUILD_SHARED}"
"ARROW_ZSTD_USE_SHARED ${ARROW_BUILD_SHARED}"
"xsimd_SOURCE AUTO"
)
set(ARROW_FOUND
TRUE
PARENT_SCOPE
)
if(BUILD_STATIC)
set(ARROW_LIBRARIES arrow_static)
else()
set(ARROW_LIBRARIES arrow_shared)
endif()
# Arrow_DIR: set if CPM found Arrow on the system/conda/etc.
if(Arrow_DIR)
# This extra find_package is necessary because rapids_cpm_find does not propagate all the
# variables from find_package that we might need. This is especially problematic when
# rapids_cpm_find builds from source.
find_package(Arrow REQUIRED QUIET)
if(ENABLE_PARQUET)
# Setting Parquet_DIR is conditional because parquet may be installed independently of arrow.
if(NOT Parquet_DIR)
# Set this to enable `find_package(Parquet)`
set(Parquet_DIR "${Arrow_DIR}")
endif()
# Set this to enable `find_package(ArrowDataset)`. This will call find_package(ArrowAcero) for
# us
set(ArrowDataset_DIR "${Arrow_DIR}")
find_package(ArrowDataset REQUIRED QUIET)
endif()
# Arrow_ADDED: set if CPM downloaded Arrow from Github
elseif(Arrow_ADDED)
# Copy these files so we can avoid adding paths in Arrow_BINARY_DIR to
# target_include_directories. That defeats ccache.
file(INSTALL "${Arrow_BINARY_DIR}/src/arrow/util/config.h"
DESTINATION "${Arrow_SOURCE_DIR}/cpp/src/arrow/util"
)
if(ENABLE_PARQUET)
file(INSTALL "${Arrow_BINARY_DIR}/src/parquet/parquet_version.h"
DESTINATION "${Arrow_SOURCE_DIR}/cpp/src/parquet"
)
endif()
# Arrow populates INTERFACE_INCLUDE_DIRECTORIES for the `arrow_static` and `arrow_shared`
# targets in FindArrow, so for static source-builds, we have to do it after-the-fact.
#
# This only works because we know exactly which components we're using. Don't forget to update
# this list if we add more!
#
foreach(ARROW_LIBRARY ${ARROW_LIBRARIES})
target_include_directories(
${ARROW_LIBRARY}
INTERFACE "$<BUILD_INTERFACE:${Arrow_SOURCE_DIR}/cpp/src>"
"$<BUILD_INTERFACE:${Arrow_SOURCE_DIR}/cpp/src/generated>"
"$<BUILD_INTERFACE:${Arrow_SOURCE_DIR}/cpp/thirdparty/hadoop/include>"
"$<BUILD_INTERFACE:${Arrow_SOURCE_DIR}/cpp/thirdparty/flatbuffers/include>"
)
endforeach()
else()
set(ARROW_FOUND
FALSE
PARENT_SCOPE
)
message(FATAL_ERROR "CUDF: Arrow library not found or downloaded.")
endif()
if(Arrow_ADDED)
set(arrow_code_string
[=[
if (TARGET cudf::arrow_shared AND (NOT TARGET arrow_shared))
add_library(arrow_shared ALIAS cudf::arrow_shared)
endif()
if (TARGET cudf::arrow_static AND (NOT TARGET arrow_static))
add_library(arrow_static ALIAS cudf::arrow_static)
endif()
if (NOT TARGET arrow::flatbuffers)
add_library(arrow::flatbuffers INTERFACE IMPORTED)
endif()
if (NOT TARGET arrow::hadoop)
add_library(arrow::hadoop INTERFACE IMPORTED)
endif()
]=]
)
if(ENABLE_PARQUET)
string(
APPEND
arrow_code_string
"
find_package(Boost)
if (NOT TARGET Boost::headers)
add_library(Boost::headers INTERFACE IMPORTED)
endif()
"
)
endif()
if(NOT TARGET xsimd)
string(
APPEND
arrow_code_string
"
if(NOT TARGET arrow::xsimd)
add_library(arrow::xsimd INTERFACE IMPORTED)
target_include_directories(arrow::xsimd INTERFACE \"${Arrow_BINARY_DIR}/xsimd_ep/src/xsimd_ep-install/include\")
endif()
"
)
endif()
rapids_export(
BUILD Arrow
VERSION ${VERSION}
EXPORT_SET arrow_targets
GLOBAL_TARGETS arrow_shared arrow_static
NAMESPACE cudf::
FINAL_CODE_BLOCK arrow_code_string
)
if(ENABLE_PARQUET)
set(arrow_acero_code_string
[=[
if (TARGET cudf::arrow_acero_shared AND (NOT TARGET arrow_acero_shared))
add_library(arrow_acero_shared ALIAS cudf::arrow_acero_shared)
endif()
if (TARGET cudf::arrow_acero_static AND (NOT TARGET arrow_acero_static))
add_library(arrow_acero_static ALIAS cudf::arrow_acero_static)
endif()
]=]
)
rapids_export(
BUILD ArrowAcero
VERSION ${VERSION}
EXPORT_SET arrow_acero_targets
GLOBAL_TARGETS arrow_acero_shared arrow_acero_static
NAMESPACE cudf::
FINAL_CODE_BLOCK arrow_acero_code_string
)
set(arrow_dataset_code_string
[=[
if (TARGET cudf::arrow_dataset_shared AND (NOT TARGET arrow_dataset_shared))
add_library(arrow_dataset_shared ALIAS cudf::arrow_dataset_shared)
endif()
if (TARGET cudf::arrow_dataset_static AND (NOT TARGET arrow_dataset_static))
add_library(arrow_dataset_static ALIAS cudf::arrow_dataset_static)
endif()
]=]
)
rapids_export(
BUILD ArrowDataset
VERSION ${VERSION}
EXPORT_SET arrow_dataset_targets
GLOBAL_TARGETS arrow_dataset_shared arrow_dataset_static
NAMESPACE cudf::
FINAL_CODE_BLOCK arrow_dataset_code_string
)
set(parquet_code_string
[=[
if (TARGET cudf::parquet_shared AND (NOT TARGET parquet_shared))
add_library(parquet_shared ALIAS cudf::parquet_shared)
endif()
if (TARGET cudf::parquet_static AND (NOT TARGET parquet_static))
add_library(parquet_static ALIAS cudf::parquet_static)
endif()
]=]
)
rapids_export(
BUILD Parquet
VERSION ${VERSION}
EXPORT_SET parquet_targets
GLOBAL_TARGETS parquet_shared parquet_static
NAMESPACE cudf::
FINAL_CODE_BLOCK parquet_code_string
)
endif()
endif()
# We generate the arrow-config files when we build Arrow locally, so always do `find_dependency`
rapids_export_package(BUILD Arrow cudf-exports)
rapids_export_package(INSTALL Arrow cudf-exports)
if(ENABLE_PARQUET)
rapids_export_package(BUILD Parquet cudf-exports)
rapids_export_package(BUILD ArrowDataset cudf-exports)
endif()
include("${rapids-cmake-dir}/export/find_package_root.cmake")
rapids_export_find_package_root(
BUILD Arrow [=[${CMAKE_CURRENT_LIST_DIR}]=] EXPORT_SET cudf-exports
)
rapids_export_find_package_root(
BUILD Parquet [=[${CMAKE_CURRENT_LIST_DIR}]=]
EXPORT_SET cudf-exports
CONDITION ENABLE_PARQUET
)
rapids_export_find_package_root(
BUILD ArrowDataset [=[${CMAKE_CURRENT_LIST_DIR}]=]
EXPORT_SET cudf-exports
CONDITION ENABLE_PARQUET
)
set(ARROW_LIBRARIES
"${ARROW_LIBRARIES}"
PARENT_SCOPE
)
endfunction()
if(NOT DEFINED CUDF_VERSION_Arrow)
set(CUDF_VERSION_Arrow
# This version must be kept in sync with the libarrow version pinned for builds in
# dependencies.yaml.
14.0.1
CACHE STRING "The version of Arrow to find (or build)"
)
endif()
find_and_configure_arrow(
${CUDF_VERSION_Arrow} ${CUDF_USE_ARROW_STATIC} ${CUDF_ENABLE_ARROW_S3} ${CUDF_ENABLE_ARROW_ORC}
${CUDF_ENABLE_ARROW_PYTHON} ${CUDF_ENABLE_ARROW_PARQUET}
)
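# A minimal usage sketch (assumption: run against an existing source tree; not original code).
# Because CUDF_VERSION_Arrow is only initialized when undefined, the Arrow version can be
# overridden at configure time, e.g.
#   cmake -DCUDF_VERSION_Arrow=14.0.1 ..
# keeping the value in sync with the libarrow pin in dependencies.yaml as noted above.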
| 0 |
rapidsai_public_repos/cudf/cpp/cmake
|
rapidsai_public_repos/cudf/cpp/cmake/thirdparty/get_jitify.cmake
|
# =============================================================================
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Jitify doesn't have a version :/
# This function finds Jitify and sets any additional necessary environment variables.
function(find_and_configure_jitify)
rapids_cpm_find(
jitify 2.0.0
GIT_REPOSITORY https://github.com/rapidsai/jitify.git
GIT_TAG jitify2
GIT_SHALLOW TRUE
DOWNLOAD_ONLY TRUE
)
set(JITIFY_INCLUDE_DIR
"${jitify_SOURCE_DIR}"
PARENT_SCOPE
)
endfunction()
find_and_configure_jitify()
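# A hedged usage sketch (hypothetical consumer target `cudf`; not original code). Jitify is
# header-only, so the JITIFY_INCLUDE_DIR exported above is typically consumed as an include
# path rather than a link target, e.g.
#   target_include_directories(cudf PRIVATE "${JITIFY_INCLUDE_DIR}")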
| 0 |
rapidsai_public_repos/cudf/cpp/cmake
|
rapidsai_public_repos/cudf/cpp/cmake/thirdparty/get_rmm.cmake
|
# =============================================================================
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# This function finds rmm and sets any additional necessary environment variables.
function(find_and_configure_rmm)
include(${rapids-cmake-dir}/cpm/rmm.cmake)
# Find or install RMM
rapids_cpm_rmm(BUILD_EXPORT_SET cudf-exports INSTALL_EXPORT_SET cudf-exports)
endfunction()
find_and_configure_rmm()
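# A hedged usage sketch (hypothetical consumer target `cudf`; not original code).
# rapids_cpm_rmm() makes the rmm::rmm target available, which consumers link against, e.g.
#   target_link_libraries(cudf PUBLIC rmm::rmm)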
| 0 |
rapidsai_public_repos/cudf/cpp/cmake
|
rapidsai_public_repos/cudf/cpp/cmake/thirdparty/get_fmt.cmake
|
# =============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Use CPM to find or clone fmt
function(find_and_configure_fmt)
include(${rapids-cmake-dir}/cpm/fmt.cmake)
rapids_cpm_fmt(INSTALL_EXPORT_SET cudf-exports BUILD_EXPORT_SET cudf-exports)
endfunction()
find_and_configure_fmt()
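# A hedged usage sketch (hypothetical consumer target `cudf`; not original code). fmt exports
# both fmt::fmt and a header-only variant, either of which can be linked, e.g.
#   target_link_libraries(cudf PRIVATE fmt::fmt-header-only)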
| 0 |
rapidsai_public_repos/cudf/cpp/cmake
|
rapidsai_public_repos/cudf/cpp/cmake/thirdparty/get_nvbench.cmake
|
# =============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# This function finds nvbench and applies any needed patches.
function(find_and_configure_nvbench)
include(${rapids-cmake-dir}/cpm/nvbench.cmake)
include(${rapids-cmake-dir}/cpm/package_override.cmake)
set(cudf_patch_dir "${CMAKE_CURRENT_FUNCTION_LIST_DIR}/patches")
rapids_cpm_package_override("${cudf_patch_dir}/nvbench_override.json")
rapids_cpm_nvbench(BUILD_STATIC)
endfunction()
find_and_configure_nvbench()
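# A hedged usage sketch (hypothetical benchmark target `MY_BENCH`; not original code).
# rapids_cpm_nvbench() exposes the nvbench::nvbench and nvbench::main targets; an executable
# usually links nvbench::main to pick up nvbench's main() entry point, e.g.
#   add_executable(MY_BENCH my_bench.cu)
#   target_link_libraries(MY_BENCH PRIVATE nvbench::main)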
| 0 |
rapidsai_public_repos/cudf/cpp/cmake/thirdparty
|
rapidsai_public_repos/cudf/cpp/cmake/thirdparty/patches/thrust_transform_iter_with_reduce_by_key.diff
|
diff --git a/thrust/iterator/transform_input_output_iterator.h b/thrust/iterator/transform_input_output_iterator.h
index f512a36..a5f725d 100644
--- a/thrust/iterator/transform_input_output_iterator.h
+++ b/thrust/iterator/transform_input_output_iterator.h
@@ -102,6 +102,8 @@ template <typename InputFunction, typename OutputFunction, typename Iterator>
/*! \endcond
*/
+ transform_input_output_iterator() = default;
+
/*! This constructor takes as argument a \c Iterator an \c InputFunction and an
* \c OutputFunction and copies them to a new \p transform_input_output_iterator
*
diff --git a/thrust/iterator/transform_output_iterator.h b/thrust/iterator/transform_output_iterator.h
index 66fb46a..4a68cb5 100644
--- a/thrust/iterator/transform_output_iterator.h
+++ b/thrust/iterator/transform_output_iterator.h
@@ -104,6 +104,8 @@ template <typename UnaryFunction, typename OutputIterator>
/*! \endcond
*/
+ transform_output_iterator() = default;
+
/*! This constructor takes as argument an \c OutputIterator and an \c
* UnaryFunction and copies them to a new \p transform_output_iterator
*
| 0 |
rapidsai_public_repos/cudf/cpp/cmake/thirdparty
|
rapidsai_public_repos/cudf/cpp/cmake/thirdparty/patches/thrust_faster_scan_compile_times.diff
|
diff --git a/dependencies/cub/cub/device/dispatch/dispatch_radix_sort.cuh b/dependencies/cub/cub/device/dispatch/dispatch_radix_sort.cuh
index b188c75f..3f36656f 100644
--- a/dependencies/cub/cub/device/dispatch/dispatch_radix_sort.cuh
+++ b/dependencies/cub/cub/device/dispatch/dispatch_radix_sort.cuh
@@ -736,7 +736,7 @@ struct DeviceRadixSortPolicy
/// SM60 (GP100)
- struct Policy600 : ChainedPolicy<600, Policy600, Policy500>
+ struct Policy600 : ChainedPolicy<600, Policy600, Policy600>
{
enum {
PRIMARY_RADIX_BITS = (sizeof(KeyT) > 1) ? 7 : 5, // 6.9B 32b keys/s (Quadro P100)
diff --git a/dependencies/cub/cub/device/dispatch/dispatch_reduce.cuh b/dependencies/cub/cub/device/dispatch/dispatch_reduce.cuh
index e0470ccb..6a0c2ed6 100644
--- a/dependencies/cub/cub/device/dispatch/dispatch_reduce.cuh
+++ b/dependencies/cub/cub/device/dispatch/dispatch_reduce.cuh
@@ -280,7 +280,7 @@ struct DeviceReducePolicy
};
/// SM60
- struct Policy600 : ChainedPolicy<600, Policy600, Policy350>
+ struct Policy600 : ChainedPolicy<600, Policy600, Policy600>
{
// ReducePolicy (P100: 591 GB/s @ 64M 4B items; 583 GB/s @ 256M 1B items)
typedef AgentReducePolicy<
diff --git a/dependencies/cub/cub/device/dispatch/dispatch_scan.cuh b/dependencies/cub/cub/device/dispatch/dispatch_scan.cuh
index c2d04588..ac2d10e0 100644
--- a/dependencies/cub/cub/device/dispatch/dispatch_scan.cuh
+++ b/dependencies/cub/cub/device/dispatch/dispatch_scan.cuh
@@ -177,7 +177,7 @@ struct DeviceScanPolicy
};
/// SM600
- struct Policy600 : ChainedPolicy<600, Policy600, Policy520>
+ struct Policy600 : ChainedPolicy<600, Policy600, Policy600>
{
typedef AgentScanPolicy<
128, 15, ///< Threads per block, items per thread
| 0 |
rapidsai_public_repos/cudf/cpp/cmake/thirdparty
|
rapidsai_public_repos/cudf/cpp/cmake/thirdparty/patches/thrust_override.json
|
{
"packages" : {
"Thrust" : {
"patches" : [
{
"file" : "Thrust/install_rules.diff",
"issue" : "Thrust 1.X installs incorrect files [https://github.com/NVIDIA/thrust/issues/1790]",
"fixed_in" : "2.0.0"
},
{
"file" : "${current_json_dir}/thrust_transform_iter_with_reduce_by_key.diff",
"issue" : "Support transform_output_iterator as output of reduce by key [https://github.com/NVIDIA/thrust/pull/1805]",
"fixed_in" : "2.1"
},
{
"file" : "${current_json_dir}/thrust_disable_64bit_dispatching.diff",
"issue" : "Remove 64bit dispatching as not needed by libcudf and results in compiling twice as many kernels [https://github.com/rapidsai/cudf/pull/11437]",
"fixed_in" : ""
},
{
"file" : "${current_json_dir}/thrust_faster_sort_compile_times.diff",
"issue" : "Improve Thrust sort compile times by not unrolling loops for inlined comparators [https://github.com/rapidsai/cudf/pull/10577]",
"fixed_in" : ""
},
{
"file" : "${current_json_dir}/thrust_faster_scan_compile_times.diff",
"issue" : "Improve Thrust scan compile times by reducing the number of kernels generated [https://github.com/rapidsai/cudf/pull/8183]",
"fixed_in" : ""
},
{
"file" : "${current_json_dir}/cub_segmented_sort_with_bool_key.diff",
"issue" : "Fix an error in CUB DeviceSegmentedSort when the keys are bool type [https://github.com/NVIDIA/cub/issues/594]",
"fixed_in" : "2.1"
}
]
}
}
}
| 0 |
rapidsai_public_repos/cudf/cpp/cmake/thirdparty
|
rapidsai_public_repos/cudf/cpp/cmake/thirdparty/patches/thrust_faster_sort_compile_times.diff
|
diff --git a/dependencies/cub/cub/block/block_merge_sort.cuh b/dependencies/cub/cub/block/block_merge_sort.cuh
index 4769df36..d86d6342 100644
--- a/dependencies/cub/cub/block/block_merge_sort.cuh
+++ b/dependencies/cub/cub/block/block_merge_sort.cuh
@@ -91,7 +91,7 @@ __device__ __forceinline__ void SerialMerge(KeyT *keys_shared,
KeyT key1 = keys_shared[keys1_beg];
KeyT key2 = keys_shared[keys2_beg];
-#pragma unroll
+#pragma unroll 1
for (int item = 0; item < ITEMS_PER_THREAD; ++item)
{
bool p = (keys2_beg < keys2_end) &&
@@ -383,7 +383,7 @@ public:
//
KeyT max_key = oob_default;
- #pragma unroll
+ #pragma unroll 1
for (int item = 1; item < ITEMS_PER_THREAD; ++item)
{
if (ITEMS_PER_THREAD * linear_tid + item < valid_items)
@@ -407,7 +407,7 @@ public:
// each thread has sorted keys
// merge sort keys in shared memory
//
- #pragma unroll
+ #pragma unroll 1
for (int target_merged_threads_number = 2;
target_merged_threads_number <= NUM_THREADS;
target_merged_threads_number *= 2)
diff --git a/dependencies/cub/cub/thread/thread_sort.cuh b/dependencies/cub/cub/thread/thread_sort.cuh
index 5d486789..b42fb5f0 100644
--- a/dependencies/cub/cub/thread/thread_sort.cuh
+++ b/dependencies/cub/cub/thread/thread_sort.cuh
@@ -83,10 +83,10 @@ StableOddEvenSort(KeyT (&keys)[ITEMS_PER_THREAD],
{
constexpr bool KEYS_ONLY = std::is_same<ValueT, NullType>::value;
- #pragma unroll
+ #pragma unroll 1
for (int i = 0; i < ITEMS_PER_THREAD; ++i)
{
- #pragma unroll
+ #pragma unroll 1
for (int j = 1 & i; j < ITEMS_PER_THREAD - 1; j += 2)
{
if (compare_op(keys[j + 1], keys[j]))
| 0 |
rapidsai_public_repos/cudf/cpp/cmake/thirdparty
|
rapidsai_public_repos/cudf/cpp/cmake/thirdparty/patches/nvbench_global_setup.diff
|
diff --git a/nvbench/main.cuh b/nvbench/main.cuh
index 0ba82d7..cca5273 100644
--- a/nvbench/main.cuh
+++ b/nvbench/main.cuh
@@ -54,6 +54,16 @@
// clang-format on
#endif
+#ifndef NVBENCH_ENVIRONMENT
+namespace nvbench {
+struct no_environment
+{
+ no_environment(int, char const *const *) {}
+};
+}
+#define NVBENCH_ENVIRONMENT nvbench::no_environment
+#endif
+
#define NVBENCH_MAIN_PARSE(argc, argv) \
nvbench::option_parser parser; \
parser.parse(argc, argv)
@@ -77,6 +87,7 @@
printer.set_total_state_count(total_states); \
\
printer.set_completed_state_count(0); \
+ [[maybe_unused]] auto env_state = NVBENCH_ENVIRONMENT(argc, argv); \
for (auto &bench_ptr : benchmarks) \
{ \
bench_ptr->set_printer(printer); \
| 0 |
rapidsai_public_repos/cudf/cpp/cmake/thirdparty
|
rapidsai_public_repos/cudf/cpp/cmake/thirdparty/patches/thrust_disable_64bit_dispatching.diff
|
diff --git a/thrust/system/cuda/detail/dispatch.h b/thrust/system/cuda/detail/dispatch.h
index d0e3f94..76774b0 100644
--- a/thrust/system/cuda/detail/dispatch.h
+++ b/thrust/system/cuda/detail/dispatch.h
@@ -32,9 +32,8 @@
status = call arguments; \
} \
else { \
- auto THRUST_PP_CAT2(count, _fixed) = static_cast<thrust::detail::int64_t>(count); \
- status = call arguments; \
- }
+ throw std::runtime_error("THRUST_INDEX_TYPE_DISPATCH 64-bit count is unsupported in libcudf"); \
+ }
/**
* Dispatch between 32-bit and 64-bit index based versions of the same algorithm
@@ -52,10 +51,8 @@
status = call arguments; \
} \
else { \
- auto THRUST_PP_CAT2(count1, _fixed) = static_cast<thrust::detail::int64_t>(count1); \
- auto THRUST_PP_CAT2(count2, _fixed) = static_cast<thrust::detail::int64_t>(count2); \
- status = call arguments; \
- }
+ throw std::runtime_error("THRUST_DOUBLE_INDEX_TYPE_DISPATCH 64-bit count is unsupported in libcudf"); \
+ }
/**
* Dispatch between 32-bit and 64-bit index based versions of the same algorithm
* implementation. This version allows using different token sequences for callables
| 0 |
rapidsai_public_repos/cudf/cpp/cmake/thirdparty
|
rapidsai_public_repos/cudf/cpp/cmake/thirdparty/patches/nvbench_override.json
|
{
"packages" : {
"nvbench" : {
"patches" : [
{
"file" : "${current_json_dir}/nvbench_global_setup.diff",
"issue" : "Fix add support for global setup to initialize RMM in nvbench [https://github.com/NVIDIA/nvbench/pull/123]",
"fixed_in" : ""
},
{
"file" : "nvbench/nvml_with_static_builds.diff",
"issue" : "Add support for nvml with static nvbench [https://github.com/NVIDIA/nvbench/pull/148]",
"fixed_in" : ""
}
]
}
}
}
| 0 |