repo_id (string, 21-96 chars) | file_path (string, 31-155 chars) | content (string, 1-92.9M chars) | __index_level_0__ (int64) |
---|---|---|---|
rapidsai_public_repos/kvikio/legate
|
rapidsai_public_repos/kvikio/legate/cpp/legate_kvikio.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <utility>
#include "legate_mapping.hpp"
#include "task_opcodes.hpp"
#include <kvikio/file_handle.hpp>
namespace legate_kvikio {
/**
* @brief Functor converting Legate type code to size
*/
struct elem_size_fn {
template <legate::Type::Code DTYPE>
size_t operator()()
{
return sizeof(legate::legate_type_of<DTYPE>);
}
};
/**
* @brief Get the size of a Legate type code
*
* @param code Legate type code
* @return The number of bytes
*/
size_t sizeof_legate_type_code(legate::Type::Code code)
{
return legate::type_dispatch(code, elem_size_fn{});
}
/**
* @brief Get store argument from task context
*
* @tparam IsOutputArgument Whether it is an output or an input argument
* @param context Legate task context.
* @param i The argument index
* @return The i'th store argument
*/
template <bool IsOutputArgument>
legate::Store& get_store_arg(legate::TaskContext& context, int i)
{
if constexpr (IsOutputArgument) { return context.outputs()[i]; }
return context.inputs()[i];
}
/**
* @brief Read or write Legate store to or from disk using KvikIO
*
* @tparam IsReadOperation Whether the operation is a read or a write operation
* @param context Legate task context.
*/
template <bool IsReadOperation>
void read_write_store(legate::TaskContext& context)
{
std::string path = context.scalars()[0].value<std::string>();
legate::Store& store = get_store_arg<IsReadOperation>(context, 0);
auto shape = store.shape<1>();
size_t itemsize = sizeof_legate_type_code(store.code());
if (shape.volume() == 0) { return; }
size_t nbytes = shape.volume() * itemsize;
size_t offset = shape.lo.x * itemsize; // Offset in bytes
std::array<size_t, 1> strides{};
// We know that the accessor is contiguous because we set `policy.exact = true`
// in `Mapper::store_mappings()`.
// TODO: support non-contiguous stores
if constexpr (IsReadOperation) {
kvikio::FileHandle f(path, "r");
auto* data = store.write_accessor<char, 1>().ptr(shape, strides.data());
assert(strides[0] == itemsize);
f.pread(data, nbytes, offset).get();
} else {
kvikio::FileHandle f(path, "r+");
const auto* data = store.read_accessor<char, 1>().ptr(shape, strides.data());
assert(strides[0] == itemsize);
f.pwrite(data, nbytes, offset).get();
}
}
/**
* @brief Write a Legate store to disk using KvikIO
* Task signature:
* - scalars:
* - path: std::string
* - inputs:
* - buffer: 1d store (any dtype)
* NB: the file must exist before this task runs because, in order to support
* access from multiple processes, the task opens the file in "r+" mode.
*/
class WriteTask : public Task<WriteTask, TaskOpCode::OP_WRITE> {
public:
static void cpu_variant(legate::TaskContext& context) { read_write_store<false>(context); }
static void gpu_variant(legate::TaskContext& context) { read_write_store<false>(context); }
};
/**
* @brief Read a Legate store from disk using KvikIO
* Task signature:
* - scalars:
* - path: std::string
* - outputs:
* - buffer: 1d store (any dtype)
*/
class ReadTask : public Task<ReadTask, TaskOpCode::OP_READ> {
public:
static void cpu_variant(legate::TaskContext& context) { read_write_store<true>(context); }
static void gpu_variant(legate::TaskContext& context) { read_write_store<true>(context); }
};
} // namespace legate_kvikio
namespace // unnamed
{
static void __attribute__((constructor)) register_tasks(void)
{
legate_kvikio::WriteTask::register_variants();
legate_kvikio::ReadTask::register_variants();
}
} // namespace
| 0 |
rapidsai_public_repos/kvikio/legate
|
rapidsai_public_repos/kvikio/legate/cpp/tile_io.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <filesystem>
#include <iostream>
#include <stdexcept>
#include <utility>
#include "legate_mapping.hpp"
#include "task_opcodes.hpp"
#include <kvikio/file_handle.hpp>
namespace {
/**
* @brief Get the tile coordinate based on a task index
*
* @param task_index Task index
* @param tile_start The start tile coordinate
* @return Tile coordinate
*/
legate::DomainPoint get_tile_coord(legate::DomainPoint task_index,
legate::Span<const uint64_t>& tile_start)
{
for (uint32_t i = 0; i < task_index.dim; ++i) {
task_index[i] += tile_start[i];
}
return task_index;
}
/**
* @brief Get the file path of a tile
*
* @param dirpath The path to the root directory of the Zarr file
* @param tile_coord The coordinate of the tile
* @param delimiter The delimiter
* @return Path to the file representing the requested tile
*/
std::filesystem::path get_file_path(const std::string& dirpath,
const legate::DomainPoint& tile_coord,
const std::string& delimiter = ".")
{
std::stringstream ss;
for (int32_t idx = 0; idx < tile_coord.dim; ++idx) {
if (idx != 0) { ss << delimiter; }
ss << tile_coord[idx];
}
return std::filesystem::path(dirpath) / ss.str();
}
/**
* @brief Functor for tiled read or write of a Legate store to or from disk using KvikIO
*
* @tparam IsReadOperation Whether the operation is a read or a write operation
* @param context The Legate task context
* @param store The Legate store to read or write
*/
template <bool IsReadOperation>
struct tile_read_write_fn {
template <legate::Type::Code CODE, int32_t DIM>
void operator()(legate::TaskContext& context, legate::Store& store)
{
using DTYPE = legate::legate_type_of<CODE>;
const auto task_index = context.get_task_index();
const std::string path = context.scalars().at(0).value<std::string>();
legate::Span<const uint64_t> tile_shape = context.scalars().at(1).values<uint64_t>();
legate::Span<const uint64_t> tile_start = context.scalars().at(2).values<uint64_t>();
const auto tile_coord = get_tile_coord(task_index, tile_start);
const auto filepath = get_file_path(path, tile_coord);
auto shape = store.shape<DIM>();
auto shape_volume = shape.volume();
if (shape_volume == 0) { return; }
size_t nbytes = shape_volume * sizeof(DTYPE);
// We know that the accessor is contiguous because we set `policy.exact = true`
// in `Mapper::store_mappings()`.
if constexpr (IsReadOperation) {
kvikio::FileHandle f(filepath, "r");
auto* data = store.write_accessor<DTYPE, DIM>().ptr(shape);
f.pread(data, nbytes).get();
} else {
kvikio::FileHandle f(filepath, "w");
const auto* data = store.read_accessor<DTYPE, DIM>().ptr(shape);
f.pwrite(data, nbytes).get();
}
}
};
/**
* @brief Flatten the domain point to a 1D point
*
* @param lo_dp Lower point
* @param hi_dp High point
* @param point_dp The domain point to flatten
* @return The flattened 1D index
*/
template <int32_t DIM>
size_t linearize(const legate::DomainPoint& lo_dp,
const legate::DomainPoint& hi_dp,
const legate::DomainPoint& point_dp)
{
const legate::Point<DIM> lo = lo_dp;
const legate::Point<DIM> hi = hi_dp;
const legate::Point<DIM> point = point_dp - lo_dp;
const legate::Point<DIM> extents = hi - lo + legate::Point<DIM>::ONES();
size_t idx = 0;
for (int32_t dim = 0; dim < DIM; ++dim) {
idx = idx * extents[dim] + point[dim];
}
return idx;
}
/**
* @brief Functor for tiled read of a Legate store by offsets from disk using KvikIO
*
* @param context The Legate task context
* @param store The Legate output store
*/
struct tile_read_by_offsets_fn {
template <legate::Type::Code CODE, int32_t DIM>
void operator()(legate::TaskContext& context, legate::Store& store)
{
using DTYPE = legate::legate_type_of<CODE>;
const auto task_index = context.get_task_index();
const auto launch_domain = context.get_launch_domain();
const std::string path = context.scalars().at(0).value<std::string>();
legate::Span<const uint64_t> offsets = context.scalars().at(1).values<uint64_t>();
legate::Span<const uint64_t> tile_shape = context.scalars().at(2).values<uint64_t>();
// Flatten task index
uint32_t flatten_task_index =
linearize<DIM>(launch_domain.lo(), launch_domain.hi(), task_index);
auto shape = store.shape<DIM>();
auto shape_volume = shape.volume();
if (shape_volume == 0) { return; }
size_t nbytes = shape_volume * sizeof(DTYPE);
// We know that the accessor is contiguous because we set `policy.exact = true`
// in `Mapper::store_mappings()`.
kvikio::FileHandle f(path, "r");
auto* data = store.write_accessor<DTYPE, DIM>().ptr(shape);
f.pread(data, nbytes, offsets[flatten_task_index]).get();
}
};
} // namespace
namespace legate_kvikio {
/**
* @brief Write a tiled Legate store to disk using KvikIO
* Task signature:
* - scalars:
* - path: std::string
* - tile_shape: tuple of int64_t
* - tile_start: tuple of int64_t
* - inputs:
* - buffer: store (any dtype)
*
* NB: the store must be contiguous. To make Legate enforce this,
* set `policy.exact = true` in `Mapper::store_mappings()`.
*
*/
class TileWriteTask : public Task<TileWriteTask, TaskOpCode::OP_TILE_WRITE> {
public:
static void cpu_variant(legate::TaskContext& context)
{
legate::Store& store = context.inputs().at(0);
legate::double_dispatch(store.dim(), store.code(), tile_read_write_fn<false>{}, context, store);
}
static void gpu_variant(legate::TaskContext& context)
{
// Since KvikIO supports both GPU and CPU memory seamlessly, we reuse the CPU variant.
cpu_variant(context);
}
};
/**
* @brief Read a tiled Legate store from disk using KvikIO
* Task signature:
* - scalars:
* - path: std::string
* - tile_shape: tuple of int64_t
* - tile_start: tuple of int64_t
* - outputs:
* - buffer: store (any dtype)
*
* NB: the store must be contiguous. To make Legate enforce this,
* set `policy.exact = true` in `Mapper::store_mappings()`.
*
*/
class TileReadTask : public Task<TileReadTask, TaskOpCode::OP_TILE_READ> {
public:
static void cpu_variant(legate::TaskContext& context)
{
legate::Store& store = context.outputs().at(0);
legate::double_dispatch(store.dim(), store.code(), tile_read_write_fn<true>{}, context, store);
}
static void gpu_variant(legate::TaskContext& context)
{
// Since KvikIO supports both GPU and CPU memory seamlessly, we reuse the CPU variant.
cpu_variant(context);
}
};
/**
* @brief Read a tiled Legate store by offsets from disk using KvikIO
* Task signature:
* - scalars:
* - path: std::string
* - offsets: tuple of int64_t
* - tile_shape: tuple of int64_t
* - outputs:
* - buffer: store (any dtype)
*
* NB: the store must be contiguous. To make Legate enforce this,
* set `policy.exact = true` in `Mapper::store_mappings()`.
*
*/
class TileReadByOffsetsTask
: public Task<TileReadByOffsetsTask, TaskOpCode::OP_TILE_READ_BY_OFFSETS> {
public:
static void cpu_variant(legate::TaskContext& context)
{
legate::Store& store = context.outputs().at(0);
legate::double_dispatch(store.dim(), store.code(), tile_read_by_offsets_fn{}, context, store);
}
static void gpu_variant(legate::TaskContext& context)
{
// Since KvikIO supports both GPU and CPU memory seamlessly, we reuse the CPU variant.
cpu_variant(context);
}
};
} // namespace legate_kvikio
namespace {
void __attribute__((constructor)) register_tasks()
{
legate_kvikio::TileWriteTask::register_variants();
legate_kvikio::TileReadTask::register_variants();
legate_kvikio::TileReadByOffsetsTask::register_variants();
}
} // namespace
| 0 |
rapidsai_public_repos/kvikio/legate
|
rapidsai_public_repos/kvikio/legate/legate_kvikio/cufile.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
from __future__ import annotations
import pathlib
from typing import Any
import legate.core.types as types
from .library_description import TaskOpCode, context
from .utils import get_legate_store
class CuFile:
"""File handle for GPUDirect Storage (GDS)"""
def __init__(self, file: pathlib.Path | str, flags: str = "r"):
"""Open file for GDS IO operations
The file is opened in this constructor immediately and not in a
Legate task. This means that re-opening a file that was created
by a not-yet-executed Legate task requires a blocking fence like
`get_legate_runtime().issue_execution_fence(block=True)`.
Legate-KvikIO doesn't maintain a file descriptor, thus the path to the file
must not change while it is opened by this handle.
Parameters
----------
file: pathlib.Path or str
Path-like object giving the pathname (absolute or relative to the
current working directory) of the file to be opened and registered.
flags: str, optional
"r" -> "open for reading (default)"
"w" -> "open for writing, truncating the file first"
"+" -> "open for updating (reading and writing)"
"""
assert "a" not in flags
self._closed = False
self._filepath = str(file)
self._flags = flags
# We open the file here in order to:
# * trigger exceptions here instead of in the Legate tasks, which
# forces the Python interpreter to exit.
# * create or truncate files opened in "w" mode, which is required
# because `TaskOpCode.WRITE` always opens the file in "r+" mode.
with open(self._filepath, mode=flags):
pass
def close(self) -> None:
"""Deregister the file and close the file"""
self._closed = True
@property
def closed(self) -> bool:
return self._closed
def fileno(self) -> int:
raise RuntimeError("Legate-KvikIO doesn't expose any file descriptor")
def open_flags(self) -> int:
"""Get the flags of the file descriptor (see open(2))"""
raise RuntimeError("Legate-KvikIO doesn't expose any file descriptor")
def __enter__(self) -> CuFile:
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self.close()
def read(self, buf: Any) -> None:
"""Reads specified buffer from the file into device or host memory
Warning: the size of `buf` must not exceed the size of the file.
Parameters
----------
buf: legate-store-like (1-dimensional)
A Legate store or any object implementing `__legate_data_interface__` to
read into.
"""
assert not self._closed
if "r" not in self._flags and "+" not in self._flags:
raise ValueError(f"Cannot read a file opened with flags={self._flags}")
output = get_legate_store(buf)
task = context.create_auto_task(TaskOpCode.READ)
task.add_scalar_arg(self._filepath, types.string)
task.add_output(output)
task.set_side_effect(True)
task.execute()
def write(self, buf: Any) -> None:
"""Writes specified buffer from device or host memory to the file
Hint: if a subsequent operation reads this file, insert a fence in between,
such as `legate.core.get_legate_runtime().issue_execution_fence(block=False)`.
Parameters
----------
buf: legate-store-like (1-dimensional)
A Legate store or any object implementing `__legate_data_interface__`
to write from.
"""
assert not self._closed
if "w" not in self._flags and "+" not in self._flags:
raise ValueError(f"Cannot write to a file opened with flags={self._flags}")
input = get_legate_store(buf)
task = context.create_auto_task(TaskOpCode.WRITE)
task.add_scalar_arg(self._filepath, types.string)
task.add_input(input)
task.set_side_effect(True)
task.execute()
| 0 |
rapidsai_public_repos/kvikio/legate
|
rapidsai_public_repos/kvikio/legate/legate_kvikio/library_description.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import os
from enum import IntEnum
from typing import Any
from legate.core import Library, get_legate_runtime
from legate_kvikio.install_info import header, libpath
class LibraryDescription(Library):
def __init__(self) -> None:
self.shared_object: Any = None
@property
def cffi(self) -> Any:
return self.shared_object
def get_name(self) -> str:
return "legate_kvikio"
def get_shared_library(self) -> str:
return os.path.join(libpath, f"liblegate_kvikio{self.get_library_extension()}")
def get_c_header(self) -> str:
return header
def get_registration_callback(self) -> str:
return "legate_kvikio_perform_registration"
def initialize(self, shared_object: Any) -> None:
self.shared_object = shared_object
def destroy(self) -> None:
pass
description = LibraryDescription()
context = get_legate_runtime().register_library(description)
class TaskOpCode(IntEnum):
WRITE = description.cffi.OP_WRITE
READ = description.cffi.OP_READ
TILE_WRITE = description.cffi.OP_TILE_WRITE
TILE_READ = description.cffi.OP_TILE_READ
TILE_READ_BY_OFFSETS = description.cffi.OP_TILE_READ_BY_OFFSETS
| 0 |
rapidsai_public_repos/kvikio/legate
|
rapidsai_public_repos/kvikio/legate/legate_kvikio/zarr.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
from __future__ import annotations
import math
import pathlib
from typing import Optional, Tuple
import cunumeric
import zarr.core
from .tile import read_tiles, write_tiles
def get_padded_array(zarr_ary: zarr.Array) -> Optional[cunumeric.ndarray]:
"""Get a padded array that has a shape divisible by `zarr_ary.chunks`.
Parameters
----------
zarr_ary
The Zarr array
Return
------
The padded array or None if the shapes are already divisible
"""
if all(s % c == 0 for s, c in zip(zarr_ary.shape, zarr_ary.chunks)):
return None # Already aligned
padded_shape = tuple(
math.ceil(s / c) * c for s, c in zip(zarr_ary.shape, zarr_ary.chunks)
)
return cunumeric.empty(shape=padded_shape, dtype=zarr_ary.dtype)
def write_array(
ary: cunumeric.ndarray,
dirpath: pathlib.Path | str,
chunks: Optional[int | Tuple[int]],
compressor=None,
) -> None:
"""Write a Zarr array to disk using KvikIO
Notes
-----
The array is padded to make its shape divisible by chunks (if not already).
This involves copying the whole array, which can be expensive both in terms of
performance and memory usage.
TODO: minimize the copy needed
Parameters
----------
ary
The cuNumeric array to write.
dirpath
Root directory of the Zarr file.
chunks
The chunk shape of the Zarr array.
compressor
The compressor to use. Only `None` (no compression) is currently supported.
"""
dirpath = pathlib.Path(dirpath)
if compressor is not None:
raise NotImplementedError("compressor isn't supported")
# We use Zarr to write the meta data
zarr_ary = zarr.open_array(
dirpath,
shape=ary.shape,
dtype=ary.dtype,
mode="w",
chunks=chunks,
compressor=compressor,
)
padded_ary = get_padded_array(zarr_ary)
if padded_ary is None:
write_tiles(ary, dirpath=dirpath, tile_shape=zarr_ary.chunks)
else:
padded_ary[tuple(slice(s) for s in zarr_ary.shape)] = ary
write_tiles(padded_ary, dirpath=dirpath, tile_shape=zarr_ary.chunks)
def read_array(dirpath: pathlib.Path | str) -> cunumeric.ndarray:
"""Read a Zarr array from disk using KvikIO
Notes
-----
The returned array might be a view of an underlying array that has been padded in
order to make its shape divisible by the shape of the Zarr chunks on disk.
Parameters
----------
dirpath
Root directory of the tile files.
Return
------
The cuNumeric array read from disk.
"""
dirpath = pathlib.Path(dirpath)
# We use Zarr to read the meta data
zarr_ary = zarr.open_array(dirpath, mode="r")
if zarr_ary.compressor is not None:
raise NotImplementedError("compressor isn't supported")
padded_ary = get_padded_array(zarr_ary)
if padded_ary is None:
ret = cunumeric.empty(shape=zarr_ary.shape, dtype=zarr_ary.dtype)
read_tiles(ret, dirpath=dirpath, tile_shape=zarr_ary.chunks)
else:
read_tiles(padded_ary, dirpath=dirpath, tile_shape=zarr_ary.chunks)
ret = padded_ary[tuple(slice(s) for s in zarr_ary.shape)]
return ret
| 0 |
rapidsai_public_repos/kvikio/legate
|
rapidsai_public_repos/kvikio/legate/legate_kvikio/tile.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
from __future__ import annotations
import pathlib
from typing import Iterable, Optional, Tuple
import cunumeric
import legate.core.types as types
from legate.core import Rect
from .library_description import TaskOpCode, context
from .utils import get_legate_store
def _tile_read_write(
op: TaskOpCode,
ary: cunumeric.ndarray,
dirpath: pathlib.Path | str,
tile_shape: Tuple[int],
tile_start: Optional[Tuple[int]],
) -> None:
"""Implementation of `write_tiles` and `read_tiles`"""
dirpath = pathlib.Path(dirpath)
if tile_start is None:
tile_start = (0,) * len(tile_shape)
if len(ary.shape) != len(tile_shape):
raise ValueError("Tile shape and array shape must have same number of axes")
if any(d % c != 0 for d, c in zip(ary.shape, tile_shape)):
raise ValueError(
f"The array shape {ary.shape} must be "
f"divisible by the tile shape {tile_shape}"
)
# Partition the array into even tiles
store_partition = get_legate_store(ary).partition_by_tiling(tile_shape)
# Use the partition's color shape as the launch shape so there will be
# one task for each tile
launch_shape = store_partition.partition.color_shape
task = context.create_manual_task(
op,
launch_domain=Rect(launch_shape),
)
if op == TaskOpCode.TILE_WRITE:
task.add_input(store_partition)
elif op == TaskOpCode.TILE_READ:
task.add_output(store_partition)
else:
raise ValueError(f"Unknown op: {op}")
task.add_scalar_arg(str(dirpath), types.string)
task.add_scalar_arg(tile_shape, (types.uint64,))
task.add_scalar_arg(tile_start, (types.uint64,))
task.execute()
def write_tiles(
ary: cunumeric.ndarray,
dirpath: pathlib.Path | str,
tile_shape: Tuple[int],
tile_start: Optional[Tuple[int]] = None,
) -> None:
"""Write an array as multiple tiles to disk using KvikIO
The array shape must be divisible by the tile shape.
Parameters
----------
ary
The cuNumeric array to write.
dirpath
Root directory of the tile files.
tile_shape
The shape of each tile.
tile_start
The start coordinate of the tiles
"""
_tile_read_write(TaskOpCode.TILE_WRITE, ary, dirpath, tile_shape, tile_start)
def read_tiles(
ary: cunumeric.ndarray,
dirpath: pathlib.Path | str,
tile_shape: Tuple[int],
tile_start: Optional[Tuple[int]] = None,
) -> None:
"""Read multiple tiles from disk into an array using KvikIO
The array shape must be divisible by the tile shape.
Parameters
----------
ary
The cuNumeric array to read into.
dirpath
Root directory of the tile files.
tile_shape
The shape of each tile.
tile_start
The start coordinate of the tiles
"""
_tile_read_write(TaskOpCode.TILE_READ, ary, dirpath, tile_shape, tile_start)
def read_tiles_by_offsets(
ary: cunumeric.ndarray,
filepath: Iterable[pathlib.Path | str],
offsets: Tuple[int],
tile_shape: Tuple[int],
) -> None:
"""Read multiple tiles from a single file into an array using KvikIO
The array shape must be divisible by the tile shape.
# TODO: support a filepath per offset/size
Parameters
----------
ary
The cuNumeric array to read into.
filepath
Filepath to the file.
offsets
The offset of each tile in the file (in bytes).
tile_shape
The shape of each tile.
"""
if len(ary.shape) != len(tile_shape):
raise ValueError("Tile shape and array shape must have same number of axes")
if any(d % c != 0 for d, c in zip(ary.shape, tile_shape)):
raise ValueError(
f"The array shape {ary.shape} must be "
f"divisible by the tile shape {tile_shape}"
)
# Partition the array into even tiles
store_partition = get_legate_store(ary).partition_by_tiling(tile_shape)
# Use the partition's color shape as the launch shape so there will be
# one task for each tile
launch_shape = Rect(store_partition.partition.color_shape)
launch_vol = launch_shape.get_volume()
if launch_vol != len(offsets):
raise ValueError(
f"Number of offsets ({len(offsets)}) must match the number "
f"of tiles of `ary` ({launch_vol})"
)
task = context.create_manual_task(
TaskOpCode.TILE_READ_BY_OFFSETS,
launch_domain=launch_shape,
)
task.add_output(store_partition)
task.add_scalar_arg(str(filepath), types.string)
task.add_scalar_arg(offsets, (types.uint64,))
task.add_scalar_arg(tile_shape, (types.uint64,))
task.execute()
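# Example (illustrative sketch, not part of this module): write an 8x8 array
# as 4x4 tiles and read it back. The `/tmp/tiles` directory is hypothetical and
# must already exist; the fence mirrors the pattern in `examples/zarr_io.py`.
#
#   import cunumeric as num
#   import legate.core
#   from legate_kvikio.tile import read_tiles, write_tiles
#
#   a = num.arange(64).reshape(8, 8)
#   write_tiles(a, dirpath="/tmp/tiles", tile_shape=(4, 4))
#   legate.core.get_legate_runtime().issue_execution_fence(block=True)
#   b = num.empty_like(a)
#   read_tiles(b, dirpath="/tmp/tiles", tile_shape=(4, 4))
#   assert (a == b).all()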
| 0 |
rapidsai_public_repos/kvikio/legate
|
rapidsai_public_repos/kvikio/legate/legate_kvikio/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
from .cufile import CuFile # noqa: F401
__version__ = "23.12.00"
| 0 |
rapidsai_public_repos/kvikio/legate
|
rapidsai_public_repos/kvikio/legate/legate_kvikio/kerchunk.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
from __future__ import annotations
import itertools
import math
import pathlib
import cunumeric
import fsspec
import zarr.core
import zarr.hierarchy
from kerchunk.hdf import SingleHdf5ToZarr
from .tile import read_tiles_by_offsets
from .zarr import get_padded_array
def hdf5_read(filepath: pathlib.Path | str, dataset_name: str) -> cunumeric.ndarray:
"""Read an HDF5 array from disk using Kerchunk and KvikIO
We use Kerchunk's `SingleHdf5ToZarr` to find the data chunks embedded
in the hdf5 file. If it fails for any reason, this function fails as well.
Notes
-----
The returned array might be a view of an underlying array that has been padded in
order to make its shape divisible by the shape of the Zarr chunks on disk.
Parameters
----------
filepath
File path to the hdf5 file.
Return
------
The cuNumeric array read from disk.
"""
filepath = pathlib.Path(filepath)
# TODO: look for already generated kerchunk annotations
annotations = SingleHdf5ToZarr(filepath, inline_threshold=0).translate()
# Load annotations
zarr_group = zarr.open(fsspec.get_mapper("reference://", fo=annotations))
zarr_ary: zarr.Array = zarr_group[dataset_name]
if zarr_ary.compressor is not None:
raise NotImplementedError("compressor isn't supported")
# Extract offset and bytes for each chunk
refs = annotations["refs"]
offsets = []
tile_nbytes = math.prod(zarr_ary.chunks) * zarr_ary.itemsize
for chunk_coord in itertools.product(
*(range(math.ceil(s / c)) for s, c in zip(zarr_ary.shape, zarr_ary.chunks))
):
key = zarr_ary._chunk_key(chunk_coord)
_, offset, nbytes = refs[key]
offsets.append(offset)
assert tile_nbytes == nbytes
padded_ary = get_padded_array(zarr_ary)
if padded_ary is None:
ret = cunumeric.empty(shape=zarr_ary.shape, dtype=zarr_ary.dtype)
read_tiles_by_offsets(
ret,
filepath=filepath,
offsets=tuple(offsets),
tile_shape=zarr_ary.chunks,
)
else:
read_tiles_by_offsets(
padded_ary,
filepath=filepath,
offsets=tuple(offsets),
tile_shape=zarr_ary.chunks,
)
ret = padded_ary[tuple(slice(s) for s in zarr_ary.shape)]
return ret
| 0 |
rapidsai_public_repos/kvikio/legate
|
rapidsai_public_repos/kvikio/legate/legate_kvikio/utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
from typing import Any
from legate.core import Store
def get_legate_store(buf: Any) -> Store:
"""Extract a Legate store from an object
Supports any object that implements the Legate data interface
(`__legate_data_interface__`).
Parameters
----------
buf: legate-store-like
Object implementing the Legate data interface
Returns
-------
Store
The extracted Legate store
"""
if isinstance(buf, Store):
return buf
data = buf.__legate_data_interface__["data"]
field = next(iter(data))
array = data[field]
_, store = array.stores()
return store
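# Example (illustrative sketch): a cuNumeric array implements
# `__legate_data_interface__`, so it can be passed to `get_legate_store` directly:
#
#   import cunumeric as num
#   from legate_kvikio.utils import get_legate_store
#
#   store = get_legate_store(num.arange(10))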
| 0 |
rapidsai_public_repos/kvikio/legate
|
rapidsai_public_repos/kvikio/legate/examples/zarr_io.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import cunumeric as num
import legate.core
import legate_kvikio.zarr
def zarr_io(dirname):
a = num.arange(10000).reshape(100, 100)
# Write array to a Zarr file by chunks of 10x10.
legate_kvikio.zarr.write_array(a, dirname, chunks=(10, 10))
# Block until done writing.
legate.core.get_legate_runtime().issue_execution_fence(block=True)
# Read array from a Zarr file.
b = legate_kvikio.zarr.read_array(dirname)
# They should be equal
assert (a == b).all()
if __name__ == "__main__":
zarr_io("/tmp/legate-kvikio-zarr-io")
| 0 |
rapidsai_public_repos/kvikio/legate
|
rapidsai_public_repos/kvikio/legate/examples/basic_io.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import cunumeric as num
import legate_kvikio as kvikio
from legate.core import get_legate_runtime
def main(path):
a = num.arange(1_000_000)
f = kvikio.CuFile(path, "w+")
f.write(a)
# In order to make sure the file has been written before the following
# read executes, we insert a fence between the write and read.
# Note that this call isn't blocking.
get_legate_runtime().issue_execution_fence(block=False)
b = num.empty_like(a)
f.read(b)
f.close()
# In order to make sure the file has been written before re-opening
# it for reading, we block the execution.
get_legate_runtime().issue_execution_fence(block=True)
c = num.empty_like(a)
with kvikio.CuFile(path, "r") as f:
f.read(c)
# They should all be identical
assert all(a == b)
assert all(a == c)
print("sum: ", c.sum())
if __name__ == "__main__":
main("/tmp/legate-kvikio-hello-world-file")
| 0 |
rapidsai_public_repos/kvikio/legate
|
rapidsai_public_repos/kvikio/legate/examples/hdf5_io.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import h5py
import numpy as np
import legate_kvikio.kerchunk
def hdf5_io(filename):
a = np.arange(10000).reshape((100, 100))
# Write array using h5py
with h5py.File(filename, "w") as f:
f.create_dataset("mydataset", chunks=(10, 10), data=a)
# Read hdf5 file using legate+kerchunk
b = legate_kvikio.kerchunk.hdf5_read(filename, dataset_name="mydataset")
# They should be equal
assert (a == b).all()
if __name__ == "__main__":
hdf5_io("/tmp/legate-kvikio-io.hdf5")
| 0 |
rapidsai_public_repos/kvikio
|
rapidsai_public_repos/kvikio/notebooks/nvcomp_batch_codec.ipynb
|
import json
import numcodecs
import numpy as np
import zarr
from IPython.display import display
np.set_printoptions(precision=4, suppress=True)
NVCOMP_CODEC_ID = "nvcomp_batch"
# Currently supported algorithms.
LZ4_ALGO = "LZ4"
GDEFLATE_ALGO = "Gdeflate"
SNAPPY_ALGO = "snappy"
ZSTD_ALGO = "zstd"
codec = numcodecs.registry.get_codec(dict(id=NVCOMP_CODEC_ID, algorithm=LZ4_ALGO))
# To pass algorithm-specific options, use options parameter:
# codec = numcodecs.registry.get_codec(dict(id=NVCOMP_CODEC_ID, algo=LZ4_ALGO, options={"data_type": 1}))
display(codec)
shape = (100, 100)
chunks = (10, 10)
np.random.seed(1)
x = zarr.array(np.random.randn(*shape).astype(np.float32), chunks=chunks, compressor=codec)
display(x[:])
display(x.info)
# Use simple dictionary store, see zarr documentation for other options.
zarr_store = {}
zarr.save_array(zarr_store, x, compressor=codec)
# Check stored metadata.
meta_info = json.loads(zarr_store[".zarray"])
display(meta_info)
y = zarr.open_array(zarr_store)
display(y.info)
# Test the roundtrip.
np.testing.assert_equal(y[:], x[:])
# Get default (CPU) implementation of LZ4 codec.
cpu_codec = numcodecs.registry.get_codec({"id": "lz4"})
x = zarr.array(np.random.randn(*shape).astype(np.float32), chunks=chunks, compressor=cpu_codec)
# Define a simple, dictionary-based store. In real scenarios this can be a filesystem or some other persistent store.
store = {}
zarr.save_array(store, x, compressor=cpu_codec)
# Check that the data was written by the expected codec.
meta = json.loads(store[".zarray"])
display(meta)
assert meta["compressor"]["id"] == "lz4"
# Change codec to GPU/nvCOMP-based.
meta["compressor"] = {"id": NVCOMP_CODEC_ID, "algorithm": LZ4_ALGO}
store[".zarray"] = json.dumps(meta).encode()
y = zarr.open_array(store, compressor=codec)
display(x.info)
display(y.info)
np.testing.assert_equal(x[:], y[:])
| 0 |
rapidsai_public_repos/kvikio
|
rapidsai_public_repos/kvikio/notebooks/nvcomp_vs_zarr_lz4.ipynb
|
import cupy as cp
import numpy as np
import pandas as pd
import time
import zarr
import kvikio.nvcomp
# conda install -c conda-forge zarr
HOST_LZ4_MAX = 2013929216 # 2113929216
sizes = list(map(lambda x: HOST_LZ4_MAX//(2**x), np.arange(20)))
print(sizes)
input_size = []
cascaded_size = []
cascaded_temp_size = []
cascaded_round_trip_time = []
lz4_gpu_size = []
lz4_gpu_temp_size = []
lz4_gpu_round_trip_time = []
bitcomp_gpu_size = []
bitcomp_gpu_temp_size = []
bitcomp_gpu_round_trip_time = []
lz4_size = []
lz4_round_trip_time = []
text = open('kjv10.txt').read()
bib = np.frombuffer(bytes(text, 'utf-8'), dtype=np.int8)
data_buffer = np.tile(bib, 500)
# One of the three below keys, this will set the arrangement of test data for a full run of the notebook.
TARGET = "Ascending"
DTYPE = cp.int32
data = {
"Ascending": np.arange(0, HOST_LZ4_MAX, dtype=np.int32),
"Random": np.random.randint(0, 100, HOST_LZ4_MAX, dtype=np.int32),
"Text": data_buffer
}
def get_host_data(offset, dtype):
exemplar = np.array([1], dtype=dtype)
print(offset)
print(exemplar.itemsize)
print(data[TARGET].itemsize)
index = offset // data[TARGET].itemsize
index = index - (index % exemplar.itemsize)
print(index)
return data[TARGET][0:index].view(dtype)
input_size = []
cascaded_size = []
cascaded_temp_size = []
cascaded_round_trip_time = []
lz4_gpu_size = []
lz4_gpu_temp_size = []
lz4_gpu_round_trip_time = []
lz4_size = []
lz4_round_trip_time = []
for size in sizes:
data_host = get_host_data(size, DTYPE)
data_gpu = cp.array(data_host)
"""Cascaded GPU"""
t_gpu = time.time()
compressor = kvikio.nvcomp.CascadedManager(dtype=data_gpu.dtype)
compressed = compressor.compress(data_gpu)
output_size = compressed.nbytes
decompressed = compressor.decompress(compressed)
decompressed_size = decompressed.size * decompressed.itemsize
input_size.append(data_gpu.size * data_gpu.itemsize)
cascaded_round_trip_time.append(time.time() - t_gpu)
cascaded_size.append(output_size)
print('-----')
print('Input size: ', data_gpu.size * data_gpu.itemsize)
print('Cascaded GPU compressor output size: ', output_size)
print('Cascaded GPU decompressor output size: ', decompressed_size)
print('Cascaded GPU compress/decompress round trip time: ',time.time() - t_gpu)
del compressor
"""LZ4 Host"""
lz4 = zarr.LZ4()
t_host = time.time()
host_compressed = lz4.encode(data_gpu.get())
del data_gpu
print(len(host_compressed))
host_compressed = host_compressed[:2113929216]
host_decompressed = lz4.decode(host_compressed)
print('Lz4 zarr time: ', time.time() - t_host)
print('Lz4 compressed size: ', len(host_compressed))
lz4_size.append(len(host_compressed))
lz4_round_trip_time.append(time.time() - t_host)
lz4_gpu_size = []
lz4_gpu_temp_size = []
lz4_gpu_round_trip_time = []
for size in sizes:
data_host = get_host_data(size, DTYPE)
data_gpu = cp.array(data_host)
"""LZ4 GPU"""
data_gpu = cp.array(data_host)
t_gpu = time.time()
compressor = kvikio.nvcomp.LZ4Manager(dtype=data_gpu.dtype)
compressed = compressor.compress(data_gpu)
output_size = compressed.nbytes
decompressed = compressor.decompress(compressed)
decompressed_size = decompressed.size * decompressed.itemsize
lz4_gpu_round_trip_time.append(time.time() - t_gpu)
lz4_gpu_size.append(output_size)
print('lz4 GPU compressor output size: ', output_size)
print('lz4 GPU decompressor output size: ', decompressed_size)
print('lz4 GPU compress/decompress round trip time: ',time.time() - t_gpu)
# zarr lz4 max buffer size is 264241152 int64s
# zarr lz4 max buffer size is 2113929216 bytes
# cascaded max buffer size is 2147483640 bytes
# cascaded max buffer size is 268435456 int64s
print(input_size)
print(cascaded_size)
print(cascaded_temp_size)
print(cascaded_round_trip_time)
print(lz4_gpu_size)
print(lz4_gpu_temp_size)
print(lz4_gpu_round_trip_time)
print(lz4_size)
print(lz4_round_trip_time)
df = pd.DataFrame({
'Input Size (Bytes)': input_size,
'cascaded_size': cascaded_size,
'cascaded_round_trip_time': cascaded_round_trip_time,
'lz4_gpu_size': lz4_gpu_size,
'lz4_gpu_round_trip_time': lz4_gpu_round_trip_time,
'lz4_size': lz4_size,
'lz4_round_trip_time': lz4_round_trip_time
})
### You'll need the following to display the upcoming plots. ###
# !conda install -c conda-forge plotly
# !npm install require
df['Cascaded Compression Ratio'] = df['Input Size (Bytes)'] / df['cascaded_size']
df['Lz4 Gpu Compression Ratio'] = df['Input Size (Bytes)'] / df['lz4_gpu_size']
df['Lz4 Host Compression Ratio'] = df['Input Size (Bytes)'] / df['lz4_size']
df['Cascaded Speedup'] = df['lz4_round_trip_time'] / df['cascaded_round_trip_time']
df['Lz4 Gpu Speedup'] = df['lz4_round_trip_time'] / df['lz4_gpu_round_trip_time']
print(df.columns)
import plotly.express as px
title = 'Gpu Acceleration over Zarr Lz4 - ' + TARGET + " " + str(DTYPE)
subtitle = 'Includes host->gpu copy time'
fig = px.line(df, x='Input Size (Bytes)',
y=['Cascaded Speedup', 'Lz4 Gpu Speedup'],
labels={'value': 'Multiple Faster'},
title=title)
fig.update_xaxes(type='category')
fig.show()
import plotly.express as px
title = 'Compression - ' + TARGET + " " + str(DTYPE)
fig = px.line(df, x='Input Size (Bytes)',
y=[
'Lz4 Gpu Compression Ratio',
'Cascaded Compression Ratio',
'Lz4 Host Compression Ratio'
],
labels={'value': 'Compression Factor'},
title=title)
fig.update_xaxes(type='category')
fig.show()
| 0 |
rapidsai_public_repos/kvikio
|
rapidsai_public_repos/kvikio/notebooks/zarr.ipynb
|
"""
In this tutorial, we will show how to use KvikIO to read and write GPU memory directly to/from Zarr files.
"""
import json
import shutil
import numpy
import cupy
import zarr
import kvikio
import kvikio.zarr
from kvikio.nvcomp_codec import NvCompBatchCodec
from numcodecs import LZ4
# Let's create a new Zarr array using KvikIO's GDS store and LZ4 compression
z = zarr.array(
cupy.arange(10),
chunks=2,
store=kvikio.zarr.GDSStore("my-zarr-file.zarr"),
meta_array=cupy.empty(()),
compressor=NvCompBatchCodec("lz4"),
overwrite=True,
)
z, z.compressor, z.store
# And because we set the `meta_array` argument, reading the Zarr array returns a CuPy array
type(z[:])
z[1:9]
z[:] + 42
z = zarr.open_array(kvikio.zarr.GDSStore("my-zarr-file.zarr"))
type(z[:]), type(z.compressor), z[:]
z = zarr.open_array("my-zarr-file.zarr")
type(z[:]), type(z.compressor), z[:]
# Read the Zarr metadata and replace the compressor with a CPU implementation of LZ4
store = zarr.DirectoryStore("my-zarr-file.zarr") # We could also have used kvikio.zarr.GDSStore
meta = json.loads(store[".zarray"])
meta["compressor"] = LZ4().get_config()
store[".zarray"] = json.dumps(meta).encode() # NB: this changes the Zarr metadata on disk
# And then open the file as usually
z = zarr.open_array(store)
type(z[:]), type(z.compressor), z[:]
import numcodecs
# Let's create a new Zarr array using the default compression.
z = zarr.array(
numpy.arange(10),
chunks=2,
store="my-zarr-file.zarr",
overwrite=True,
# The default (CPU) implementation of LZ4 codec.
compressor=numcodecs.registry.get_codec({"id": "lz4"})
)
z, z.compressor, z.store
# Read the Zarr metadata and replace the compressor with a GPU implementation of LZ4
store = kvikio.zarr.GDSStore("my-zarr-file.zarr") # We could also have used zarr.DirectoryStore
meta = json.loads(store[".zarray"])
meta["compressor"] = NvCompBatchCodec("lz4").get_config()
store[".zarray"] = json.dumps(meta).encode() # NB: this changes the Zarr metadata on disk
# And then open the file as usually
z = zarr.open_array(store, meta_array=cupy.empty(()))
type(z[:]), type(z.compressor), z[:]
# Clean up
shutil.rmtree("my-zarr-file.zarr", ignore_errors=True)
| 0 |
rapidsai_public_repos/kvikio
|
rapidsai_public_repos/kvikio/.devcontainer/README.md
|
# KvikIO Development Containers
This directory contains [devcontainer configurations](https://containers.dev/implementors/json_reference/) for using VSCode to [develop in a container](https://code.visualstudio.com/docs/devcontainers/containers) via the `Remote Containers` [extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) or [GitHub Codespaces](https://github.com/codespaces).
This container is a turnkey development environment for building and testing the KvikIO C++ and Python libraries.
## Table of Contents
* [Prerequisites](#prerequisites)
* [Host bind mounts](#host-bind-mounts)
* [Launch a Dev Container](#launch-a-dev-container)
## Prerequisites
* [VSCode](https://code.visualstudio.com/download)
* [VSCode Remote Containers extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers)
## Host bind mounts
By default, the following directories are bind-mounted into the devcontainer:
* `${repo}:/home/coder/kvikio`
* `${repo}/../.aws:/home/coder/.aws`
* `${repo}/../.local:/home/coder/.local`
* `${repo}/../.cache:/home/coder/.cache`
* `${repo}/../.conda:/home/coder/.conda`
* `${repo}/../.config:/home/coder/.config`
This ensures caches, configurations, dependencies, and your commits are persisted on the host across container runs.
## Launch a Dev Container
To launch a devcontainer from VSCode, open the KvikIO repo and select the "Reopen in Container" button in the bottom right:<br/><img src="https://user-images.githubusercontent.com/178183/221771999-97ab29d5-e718-4e5f-b32f-2cdd51bba25c.png"/>
Alternatively, open the VSCode command palette (typically `cmd/ctrl + shift + P`) and run the "Rebuild and Reopen in Container" command.
| 0 |
rapidsai_public_repos/kvikio
|
rapidsai_public_repos/kvikio/.devcontainer/Dockerfile
|
# syntax=docker/dockerfile:1.5
ARG BASE
ARG PYTHON_PACKAGE_MANAGER=conda
FROM ${BASE} as pip-base
ENV DEFAULT_VIRTUAL_ENV=rapids
FROM ${BASE} as conda-base
ENV DEFAULT_CONDA_ENV=rapids
FROM ${PYTHON_PACKAGE_MANAGER}-base
ARG CUDA
ENV CUDAARCHS="RAPIDS"
ENV CUDA_VERSION="${CUDA_VERSION:-${CUDA}}"
ARG PYTHON_PACKAGE_MANAGER
ENV PYTHON_PACKAGE_MANAGER="${PYTHON_PACKAGE_MANAGER}"
ENV PYTHONSAFEPATH="1"
ENV PYTHONUNBUFFERED="1"
ENV PYTHONDONTWRITEBYTECODE="1"
ENV SCCACHE_REGION="us-east-2"
ENV SCCACHE_BUCKET="rapids-sccache-devs"
ENV VAULT_HOST="https://vault.ops.k8s.rapids.ai"
ENV HISTFILE="/home/coder/.cache/._bash_history"
| 0 |
rapidsai_public_repos/kvikio/.devcontainer
|
rapidsai_public_repos/kvikio/.devcontainer/cuda11.8-pip/devcontainer.json
|
{
"build": {
"context": "${localWorkspaceFolder}/.devcontainer",
"dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile",
"args": {
"CUDA": "11.8",
"PYTHON_PACKAGE_MANAGER": "pip",
"BASE": "rapidsai/devcontainers:23.12-cpp-llvm16-cuda11.8-ubuntu22.04"
}
},
"hostRequirements": {"gpu": "optional"},
"features": {
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:23.12": {}
},
"overrideFeatureInstallOrder": [
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils"
],
"initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config/pip,local/share/${localWorkspaceFolderBasename}-cuda11.8-venvs}"],
"postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"],
"workspaceFolder": "/home/coder",
"workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/kvikio,type=bind,consistency=consistent",
"mounts": [
"source=${localWorkspaceFolder}/../.aws,target=/home/coder/.aws,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.local/share/${localWorkspaceFolderBasename}-cuda11.8-venvs,target=/home/coder/.local/share/venvs,type=bind,consistency=consistent"
],
"customizations": {
"vscode": {
"extensions": [
"ms-python.flake8",
"nvidia.nsight-vscode-edition"
]
}
}
}
| 0 |
rapidsai_public_repos/kvikio/.devcontainer
|
rapidsai_public_repos/kvikio/.devcontainer/cuda12.0-pip/devcontainer.json
|
{
"build": {
"context": "${localWorkspaceFolder}/.devcontainer",
"dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile",
"args": {
"CUDA": "12.0",
"PYTHON_PACKAGE_MANAGER": "pip",
"BASE": "rapidsai/devcontainers:23.12-cpp-llvm16-cuda12.0-ubuntu22.04"
}
},
"hostRequirements": {"gpu": "optional"},
"features": {
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:23.12": {}
},
"overrideFeatureInstallOrder": [
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils"
],
"initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config/pip,local/share/${localWorkspaceFolderBasename}-cuda12.0-venvs}"],
"postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"],
"workspaceFolder": "/home/coder",
"workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/kvikio,type=bind,consistency=consistent",
"mounts": [
"source=${localWorkspaceFolder}/../.aws,target=/home/coder/.aws,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.local/share/${localWorkspaceFolderBasename}-cuda12.0-venvs,target=/home/coder/.local/share/venvs,type=bind,consistency=consistent"
],
"customizations": {
"vscode": {
"extensions": [
"ms-python.flake8",
"nvidia.nsight-vscode-edition"
]
}
}
}
| 0 |
rapidsai_public_repos/kvikio/.devcontainer
|
rapidsai_public_repos/kvikio/.devcontainer/cuda12.0-conda/devcontainer.json
|
{
"build": {
"context": "${localWorkspaceFolder}/.devcontainer",
"dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile",
"args": {
"CUDA": "12.0",
"PYTHON_PACKAGE_MANAGER": "conda",
"BASE": "rapidsai/devcontainers:23.12-cpp-mambaforge-ubuntu22.04"
}
},
"hostRequirements": {"gpu": "optional"},
"features": {
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:23.12": {}
},
"overrideFeatureInstallOrder": [
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils"
],
"initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config,conda/pkgs,conda/${localWorkspaceFolderBasename}-cuda12.0-envs}"],
"postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"],
"workspaceFolder": "/home/coder",
"workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/kvikio,type=bind,consistency=consistent",
"mounts": [
"source=${localWorkspaceFolder}/../.aws,target=/home/coder/.aws,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.conda/pkgs,target=/home/coder/.conda/pkgs,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.conda/${localWorkspaceFolderBasename}-cuda12.0-envs,target=/home/coder/.conda/envs,type=bind,consistency=consistent"
],
"customizations": {
"vscode": {
"extensions": [
"ms-python.flake8",
"nvidia.nsight-vscode-edition"
]
}
}
}
| 0 |
rapidsai_public_repos/kvikio/.devcontainer
|
rapidsai_public_repos/kvikio/.devcontainer/cuda11.8-conda/devcontainer.json
|
{
"build": {
"context": "${localWorkspaceFolder}/.devcontainer",
"dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile",
"args": {
"CUDA": "11.8",
"PYTHON_PACKAGE_MANAGER": "conda",
"BASE": "rapidsai/devcontainers:23.12-cpp-llvm16-cuda11.8-mambaforge-ubuntu22.04"
}
},
"hostRequirements": {"gpu": "optional"},
"features": {
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:23.12": {}
},
"overrideFeatureInstallOrder": [
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils"
],
"initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config,conda/pkgs,conda/${localWorkspaceFolderBasename}-cuda11.8-envs}"],
"postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"],
"workspaceFolder": "/home/coder",
"workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/kvikio,type=bind,consistency=consistent",
"mounts": [
"source=${localWorkspaceFolder}/../.aws,target=/home/coder/.aws,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.conda/pkgs,target=/home/coder/.conda/pkgs,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.conda/${localWorkspaceFolderBasename}-cuda11.8-envs,target=/home/coder/.conda/envs,type=bind,consistency=consistent"
],
"customizations": {
"vscode": {
"extensions": [
"ms-python.flake8",
"nvidia.nsight-vscode-edition"
]
}
}
}
| 0 |
rapidsai_public_repos/kvikio
|
rapidsai_public_repos/kvikio/docs/make.bat
|
@ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=source
set BUILDDIR=build
if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.https://www.sphinx-doc.org/
exit /b 1
)
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
:end
popd
| 0 |
rapidsai_public_repos/kvikio
|
rapidsai_public_repos/kvikio/docs/Makefile
|
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?= "-W"
SPHINXBUILD ?= sphinx-build
SOURCEDIR = source
BUILDDIR = build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
| 0 |
rapidsai_public_repos/kvikio/docs
|
rapidsai_public_repos/kvikio/docs/source/runtime_settings.rst
|
Runtime Settings
================
Compatibility Mode ``KVIKIO_COMPAT_MODE``
-----------------------------------------
When KvikIO is running in compatibility mode, it doesn't load ``libcufile.so``. Instead, reads and writes are done using POSIX. Note that this is not the same as cuFile's compatibility mode: cuFile can run in compatibility mode while KvikIO does not.
Set the environment variable ``KVIKIO_COMPAT_MODE`` to enable/disable compatibility mode. By default, compatibility mode is enabled:
* when ``libcufile.so`` cannot be found.
* when running in Windows Subsystem for Linux (WSL).
* when ``/run/udev`` isn't readable, which typically happens when running inside a Docker container not launched with ``--volume /run/udev:/run/udev:ro``.
Thread Pool ``KVIKIO_NTHREADS``
-------------------------------
KvikIO can use multiple threads for IO automatically. Set the environment variable ``KVIKIO_NTHREADS`` to the number of threads in the thread pool. If not set, the default value is 1.
Task Size ``KVIKIO_TASK_SIZE``
------------------------------
KvikIO splits parallel IO operations into multiple tasks. Set the environment variable ``KVIKIO_TASK_SIZE`` to the maximum task size (in bytes). If not set, the default value is 4194304 (4 MiB).
GDS Threshold ``KVIKIO_GDS_THRESHOLD``
--------------------------------------
In order to improve the performance of small IO requests, ``.pread()`` and ``.pwrite()`` implement a shortcut that circumvents the thread pool and uses the POSIX backend directly. Set the environment variable ``KVIKIO_GDS_THRESHOLD`` to the minimum size (in bytes) at which GDS is used. If not set, the default value is 1048576 (1 MiB).
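Example
-------
The settings take effect when KvikIO is first loaded, so the environment variables must be set before importing it. The following is a minimal sketch; the values are illustrative only, and the boolean-style string accepted by ``KVIKIO_COMPAT_MODE`` is an assumption:

.. code-block:: python

    import os

    # Must be set before the first import of kvikio to take effect.
    os.environ["KVIKIO_COMPAT_MODE"] = "off"  # assumed boolean-style value
    os.environ["KVIKIO_NTHREADS"] = "8"
    os.environ["KVIKIO_TASK_SIZE"] = str(4 * 1024 * 1024)  # 4 MiB
    os.environ["KVIKIO_GDS_THRESHOLD"] = str(1024 * 1024)  # 1 MiB

    import kvikio  # noqa: E402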
| 0 |
rapidsai_public_repos/kvikio/docs
|
rapidsai_public_repos/kvikio/docs/source/quickstart.rst
|
Quickstart
==========
KvikIO can be used in place of Python's built-in `open() <https://docs.python.org/3/library/functions.html#open>`_ function with the caveat that a file is always opened in binary (``"b"``) mode.
In order to open a file, use KvikIO's file handle :py:class:`kvikio.cufile.CuFile`.
.. code-block:: python
import cupy
import kvikio
a = cupy.arange(100)
f = kvikio.CuFile("test-file", "w")
# Write whole array to file
f.write(a)
f.close()
b = cupy.empty_like(a)
f = kvikio.CuFile("test-file", "r")
# Read whole array from file
f.read(b)
assert all(a == b)
# Use context manager
c = cupy.empty_like(a)
with kvikio.CuFile("test-file", "r") as f:
f.read(c)
assert all(a == c)
# Non-blocking read
d = cupy.empty_like(a)
with kvikio.CuFile("test-file", "r") as f:
future1 = f.pread(d[:50])
future2 = f.pread(d[50:], file_offset=d[:50].nbytes)
future1.get() # Wait for first read
future2.get() # Wait for second read
assert all(a == d)
| 0 |
rapidsai_public_repos/kvikio/docs
|
rapidsai_public_repos/kvikio/docs/source/install.rst
|
Installation
============
KvikIO can be installed using Conda/Mamba or from source.
Conda/Mamba
-----------
We strongly recommend using `mamba <https://github.com/mamba-org/mamba>`_ in place of conda, which we will do throughout the documentation.
Install the **stable release** from the ``rapidsai`` channel like:
.. code-block::
# Install in existing environment
mamba install -c rapidsai -c conda-forge kvikio
# Create new environment (CUDA 11.8)
mamba create -n kvikio-env -c rapidsai -c conda-forge python=3.10 cuda-version=11.8 kvikio
# Create new environment (CUDA 12.0)
mamba create -n kvikio-env -c rapidsai -c conda-forge python=3.10 cuda-version=12.0 kvikio
Install the **nightly release** from the ``rapidsai-nightly`` channel like:
.. code-block::
# Install in existing environment
mamba install -c rapidsai-nightly -c conda-forge kvikio
# Create new environment (CUDA 11.8)
mamba create -n kvikio-env -c rapidsai-nightly -c conda-forge python=3.10 cuda-version=11.8 kvikio
# Create new environment (CUDA 12.0)
mamba create -n kvikio-env -c rapidsai-nightly -c conda-forge python=3.10 cuda-version=12.0 kvikio
.. note::
If the nightly install doesn't work, set ``channel_priority: flexible`` in your ``.condarc``.
Build from source
-----------------
In order to set up a development environment, run:
.. code-block::
# CUDA 11.8
mamba env create --name kvikio-dev --file conda/environments/all_cuda-118_arch-x86_64.yaml
# CUDA 12.0
mamba env create --name kvikio-dev --file conda/environments/all_cuda-120_arch-x86_64.yaml
To build and install the extension run:
.. code-block::
./build.sh kvikio
One might have to set ``CUDA_HOME`` to the path of the CUDA installation.
In order to test the installation, run the following:
.. code-block::
pytest tests/
And to test performance, run the following:
.. code-block::
python benchmarks/single-node-io.py
| 0 |
rapidsai_public_repos/kvikio/docs
|
rapidsai_public_repos/kvikio/docs/source/zarr.rst
|
Zarr
====
`Zarr <https://github.com/zarr-developers/zarr-specs>`_ is a binary file format for chunked, compressed, N-dimensional arrays. It is used throughout the PyData ecosystem and especially for climate and biological science applications.
`Zarr-Python <https://zarr.readthedocs.io/en/stable/>`_ is the official Python package for reading and writing Zarr arrays. Its main feature is a NumPy-like array that translates array operations into file IO seamlessly.
KvikIO provides a GPU backend to Zarr-Python that enables `GPUDirect Storage (GDS) <https://developer.nvidia.com/blog/gpudirect-storage/>`_ seamlessly.
The following is an example of how to use the convenience function :py:meth:`kvikio.zarr.open_cupy_array`
to create a new Zarr array and how to open an existing Zarr array.
.. literalinclude:: ../../python/examples/zarr_cupy_nvcomp.py
:language: python
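Zarr-Python can also use KvikIO's GDS store and nvCOMP-based compressor directly. The following is a minimal sketch mirroring the ``zarr.ipynb`` notebook; the file name ``my-zarr-file.zarr`` is just an example:

.. code-block:: python

    import cupy
    import zarr

    import kvikio.zarr
    from kvikio.nvcomp_codec import NvCompBatchCodec

    # Create a Zarr array backed by KvikIO's GDS store with GPU LZ4 compression.
    z = zarr.array(
        cupy.arange(10),
        chunks=2,
        store=kvikio.zarr.GDSStore("my-zarr-file.zarr"),
        meta_array=cupy.empty(()),
        compressor=NvCompBatchCodec("lz4"),
        overwrite=True,
    )

    # Because `meta_array` is a CuPy array, reads return CuPy arrays.
    assert isinstance(z[:], cupy.ndarray)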
| 0 |
rapidsai_public_repos/kvikio/docs
|
rapidsai_public_repos/kvikio/docs/source/conf.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
#
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = "kvikio"
copyright = "2023, NVIDIA"
author = "NVIDIA"
# The short X.Y version.
version = '23.12'
# The full version, including alpha/beta/rc tags
release = '23.12.00'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.extlinks",
"numpydoc",
"sphinx_click",
"sphinx_rtd_theme",
]
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "kvikiodoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "kvikio.tex", "kvikio Documentation", "NVIDIA", "manual")
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "kvikio", "kvikio Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"kvikio",
"kvikio Documentation",
author,
"kvikio",
"One line description of project.",
"Miscellaneous",
)
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
def setup(app):
app.add_css_file("https://docs.rapids.ai/assets/css/custom.css")
app.add_js_file(
"https://docs.rapids.ai/assets/js/custom.js", loading_method="defer"
)
| 0 |
rapidsai_public_repos/kvikio/docs
|
rapidsai_public_repos/kvikio/docs/source/api.rst
|
API
===
CuFile
------
.. currentmodule:: kvikio.cufile
.. autoclass:: CuFile
:members:
.. autoclass:: IOFuture
:members:
Zarr
----
.. currentmodule:: kvikio.zarr
.. autoclass:: GDSStore
:members:
Defaults
--------
.. currentmodule:: kvikio.defaults
.. autofunction:: compat_mode
.. autofunction:: compat_mode_reset
.. autofunction:: get_num_threads
.. autofunction:: num_threads_reset
| 0 |
rapidsai_public_repos/kvikio/docs
|
rapidsai_public_repos/kvikio/docs/source/index.rst
|
Welcome to KvikIO's Python documentation!
=========================================
KvikIO is a Python and C++ library for high-performance file IO. It provides C++ and Python
bindings to `cuFile <https://docs.nvidia.com/gpudirect-storage/api-reference-guide/index.html>`_,
which enables `GPUDirect Storage <https://developer.nvidia.com/blog/gpudirect-storage/>`_ (GDS).
KvikIO also works efficiently when GDS isn't available and can read/write both host and device data seamlessly.
KvikIO is a part of the `RAPIDS <https://rapids.ai/>`_ suite of open-source software libraries for GPU-accelerated data science.
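A minimal sketch of the host/device round trip (the file name here is illustrative; see the :doc:`quickstart` for the full API):

.. code-block:: python

    import cupy
    import kvikio

    a = cupy.arange(100)
    with kvikio.CuFile("test-file", "w") as f:
        f.write(a)  # write device memory directly to the file
    b = cupy.empty_like(a)
    with kvikio.CuFile("test-file", "r") as f:
        f.read(b)  # read the file back into device memory
    assert all(a == b)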
.. note::
This is the documentation for the Python library. For the C++ documentation, see under `libkvikio <https://docs.rapids.ai/api/libkvikio/nightly/>`_.
Contents
--------
.. toctree::
:maxdepth: 1
:caption: Getting Started
install
quickstart
zarr
runtime_settings
api
genindex
| 0 |
rapidsai_public_repos/kvikio
|
rapidsai_public_repos/kvikio/ci/test_python.sh
|
#!/bin/bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
set -euo pipefail
. /opt/conda/etc/profile.d/conda.sh
rapids-logger "Generate Python testing dependencies"
rapids-dependency-file-generator \
--output conda \
--file_key test_python \
--matrix "cuda=${RAPIDS_CUDA_VERSION%.*};arch=$(arch);py=${RAPIDS_PY_VERSION}" | tee env.yaml
rapids-mamba-retry env create --force -f env.yaml -n test
# Temporarily allow unbound variables for conda activation.
set +u
conda activate test
set -u
rapids-logger "Downloading artifacts from previous jobs"
CPP_CHANNEL=$(rapids-download-conda-from-s3 cpp)
PYTHON_CHANNEL=$(rapids-download-conda-from-s3 python)
RAPIDS_TESTS_DIR=${RAPIDS_TESTS_DIR:-"${PWD}/test-results"}
RAPIDS_COVERAGE_DIR=${RAPIDS_COVERAGE_DIR:-"${PWD}/coverage-results"}
mkdir -p "${RAPIDS_TESTS_DIR}" "${RAPIDS_COVERAGE_DIR}"
rapids-print-env
rapids-mamba-retry install \
--channel "${CPP_CHANNEL}" \
--channel "${PYTHON_CHANNEL}" \
libkvikio kvikio
rapids-logger "Check GPU usage"
nvidia-smi
EXITCODE=0
trap "EXITCODE=1" ERR
set +e
rapids-logger "pytest kvikio"
pushd python/
pytest \
--cache-clear \
--junitxml="${RAPIDS_TESTS_DIR}/junit-kvikio.xml" \
--verbose \
--cov-config=.coveragerc \
--cov=kvikio \
--cov-report=xml:"${RAPIDS_COVERAGE_DIR}/kvikio-coverage.xml" \
--cov-report=term \
tests
rapids-logger "Test script exiting with value: $EXITCODE"
exit ${EXITCODE}
| 0 |
rapidsai_public_repos/kvikio
|
rapidsai_public_repos/kvikio/ci/test_cpp.sh
|
#!/bin/bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
set -euo pipefail
. /opt/conda/etc/profile.d/conda.sh
rapids-logger "Generate C++ testing dependencies"
rapids-dependency-file-generator \
--output conda \
--file_key test_cpp \
--matrix "cuda=${RAPIDS_CUDA_VERSION%.*};arch=$(arch)" | tee env.yaml
rapids-mamba-retry env create --force -f env.yaml -n test
# Temporarily allow unbound variables for conda activation.
set +u
conda activate test
set -u
CPP_CHANNEL=$(rapids-download-conda-from-s3 cpp)
RAPIDS_TESTS_DIR=${RAPIDS_TESTS_DIR:-"${PWD}/test-results"}/
mkdir -p "${RAPIDS_TESTS_DIR}"
SUITEERROR=0
rapids-print-env
rapids-mamba-retry install \
--channel "${CPP_CHANNEL}" \
libkvikio libkvikio-tests
rapids-logger "Check GPU usage"
nvidia-smi
EXITCODE=0
trap "EXITCODE=1" ERR
set +e
# Run BASIC_IO_TEST
"$CONDA_PREFIX"/bin/tests/libkvikio/BASIC_IO_TEST
rapids-logger "Test script exiting with value: $EXITCODE"
exit ${EXITCODE}
| 0 |
rapidsai_public_repos/kvikio
|
rapidsai_public_repos/kvikio/ci/build_python.sh
|
#!/bin/bash
# Copyright (c) 2023, NVIDIA CORPORATION.
set -euo pipefail
source rapids-env-update
export CMAKE_GENERATOR=Ninja
rapids-print-env
rapids-logger "Begin py build"
CPP_CHANNEL=$(rapids-download-conda-from-s3 cpp)
rapids-conda-retry mambabuild \
--channel "${CPP_CHANNEL}" \
conda/recipes/kvikio
rapids-upload-conda-to-s3 python
| 0 |
rapidsai_public_repos/kvikio
|
rapidsai_public_repos/kvikio/ci/check_style.sh
|
#!/bin/bash
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
set -euo pipefail
rapids-logger "Create checks conda environment"
. /opt/conda/etc/profile.d/conda.sh
rapids-dependency-file-generator \
--output conda \
--file_key checks \
--matrix "cuda=${RAPIDS_CUDA_VERSION%.*};arch=$(arch);py=${RAPIDS_PY_VERSION}" | tee env.yaml
rapids-mamba-retry env create --force -f env.yaml -n checks
conda activate checks
FORMAT_FILE_URL=https://raw.githubusercontent.com/rapidsai/rapids-cmake/branch-23.04/cmake-format-rapids-cmake.json
export RAPIDS_CMAKE_FORMAT_FILE=/tmp/rapids_cmake_ci/cmake-formats-rapids-cmake.json
mkdir -p $(dirname ${RAPIDS_CMAKE_FORMAT_FILE})
wget -O ${RAPIDS_CMAKE_FORMAT_FILE} ${FORMAT_FILE_URL}
# Run pre-commit checks
pre-commit run --hook-stage manual --all-files --show-diff-on-failure
| 0 |
rapidsai_public_repos/kvikio
|
rapidsai_public_repos/kvikio/ci/build_cpp.sh
|
#!/bin/bash
# Copyright (c) 2023, NVIDIA CORPORATION.
set -euo pipefail
source rapids-env-update
export CMAKE_GENERATOR=Ninja
rapids-print-env
rapids-logger "Begin cpp build"
rapids-conda-retry mambabuild conda/recipes/libkvikio
rapids-upload-conda-to-s3 cpp
| 0 |
rapidsai_public_repos/kvikio
|
rapidsai_public_repos/kvikio/ci/build_docs.sh
|
#!/bin/bash
# Copyright (c) 2023, NVIDIA CORPORATION.
set -euo pipefail
rapids-logger "Create test conda environment"
. /opt/conda/etc/profile.d/conda.sh
rapids-dependency-file-generator \
--output conda \
--file_key docs \
--matrix "cuda=${RAPIDS_CUDA_VERSION%.*};arch=$(arch);py=${RAPIDS_PY_VERSION}" | tee env.yaml
rapids-mamba-retry env create --force -f env.yaml -n docs
conda activate docs
rapids-print-env
rapids-logger "Downloading artifacts from previous jobs"
CPP_CHANNEL=$(rapids-download-conda-from-s3 cpp)
PYTHON_CHANNEL=$(rapids-download-conda-from-s3 python)
rapids-mamba-retry install \
--channel "${CPP_CHANNEL}" \
--channel "${PYTHON_CHANNEL}" \
kvikio libkvikio
export RAPIDS_VERSION_NUMBER="23.12"
export RAPIDS_DOCS_DIR="$(mktemp -d)"
rapids-logger "Build CPP docs"
pushd cpp/doxygen
doxygen Doxyfile
mkdir -p "${RAPIDS_DOCS_DIR}/libkvikio/html"
mv html/* "${RAPIDS_DOCS_DIR}/libkvikio/html"
popd
rapids-logger "Build Python docs"
pushd docs
sphinx-build -b dirhtml source _html -W
sphinx-build -b text source _text -W
mkdir -p "${RAPIDS_DOCS_DIR}/kvikio/"{html,txt}
mv _html/* "${RAPIDS_DOCS_DIR}/kvikio/html"
mv _text/* "${RAPIDS_DOCS_DIR}/kvikio/txt"
popd
rapids-upload-docs
| 0 |
rapidsai_public_repos/kvikio/ci
|
rapidsai_public_repos/kvikio/ci/release/update-version.sh
|
#!/bin/bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
##########################
# KvikIO Version Updater #
##########################
## Usage
# bash update-version.sh <new_version>
# Format is YY.MM.PP - no leading 'v' or trailing 'a'
NEXT_FULL_TAG=$1
# Get current version
CURRENT_TAG=$(git tag --merged HEAD | grep -xE '^v.*' | sort --version-sort | tail -n 1 | tr -d 'v')
CURRENT_MAJOR=$(echo $CURRENT_TAG | awk '{split($0, a, "."); print a[1]}')
CURRENT_MINOR=$(echo $CURRENT_TAG | awk '{split($0, a, "."); print a[2]}')
CURRENT_PATCH=$(echo $CURRENT_TAG | awk '{split($0, a, "."); print a[3]}')
CURRENT_SHORT_TAG=${CURRENT_MAJOR}.${CURRENT_MINOR}
#Get <major>.<minor> for next version
NEXT_MAJOR=$(echo $NEXT_FULL_TAG | awk '{split($0, a, "."); print a[1]}')
NEXT_MINOR=$(echo $NEXT_FULL_TAG | awk '{split($0, a, "."); print a[2]}')
NEXT_SHORT_TAG=${NEXT_MAJOR}.${NEXT_MINOR}
NEXT_UCX_PY_VERSION="$(curl -sL https://version.gpuci.io/rapids/${NEXT_SHORT_TAG}).*"
# Need to distutils-normalize the original version
NEXT_SHORT_TAG_PEP440=$(python -c "from setuptools.extern import packaging; print(packaging.version.Version('${NEXT_SHORT_TAG}'))")
echo "Preparing release $CURRENT_TAG => $NEXT_FULL_TAG"
# In-place sed replace; workaround for Linux and Mac
function sed_runner() {
sed -i.bak ''"$1"'' $2 && rm -f ${2}.bak
}
# cpp update
sed_runner "/project(/,/)/s/VERSION.*/VERSION ${NEXT_FULL_TAG}/" cpp/CMakeLists.txt
# Python CMakeLists updates
sed_runner 's/set(kvikio_version.*)/set(kvikio_version '${NEXT_FULL_TAG}')/g' python/CMakeLists.txt
# Python __init__.py updates
sed_runner "s/__version__ = .*/__version__ = \"${NEXT_FULL_TAG}\"/g" python/kvikio/__init__.py
sed_runner "s/__version__ = .*/__version__ = \"${NEXT_FULL_TAG}\"/g" legate/legate_kvikio/__init__.py
# Python pyproject.toml updates
sed_runner "s/^version = .*/version = \"${NEXT_FULL_TAG}\"/g" python/pyproject.toml
sed_runner "s/^version = .*/version = \"${NEXT_FULL_TAG}\"/g" legate/pyproject.toml
# rapids-cmake version
sed_runner 's/'"branch-.*\/RAPIDS.cmake"'/'"branch-${NEXT_SHORT_TAG}\/RAPIDS.cmake"'/g' cpp/cmake/fetch_rapids.cmake
# cmake-format rapids-cmake definitions
sed_runner 's/'"branch-.*\/cmake-format-rapids-cmake.json"'/'"branch-${NEXT_SHORT_TAG}\/cmake-format-rapids-cmake.json"'/g' ci/checks/style.sh
# doxyfile update
sed_runner 's/PROJECT_NUMBER = .*/PROJECT_NUMBER = '${NEXT_FULL_TAG}'/g' cpp/doxygen/Doxyfile
# sphinx docs update
sed_runner 's/version = .*/version = '"'${NEXT_SHORT_TAG}'"'/g' docs/source/conf.py
sed_runner 's/release = .*/release = '"'${NEXT_FULL_TAG}'"'/g' docs/source/conf.py
DEPENDENCIES=(
cudf
)
for DEP in "${DEPENDENCIES[@]}"; do
for FILE in dependencies.yaml conda/environments/*.yaml; do
sed_runner "/-.* ${DEP}==/ s/==.*/==${NEXT_SHORT_TAG_PEP440}.*/g" ${FILE}
done
done
# CI files
for FILE in .github/workflows/*.yaml; do
sed_runner "/shared-workflows/ s/@.*/@branch-${NEXT_SHORT_TAG}/g" "${FILE}"
done
sed_runner "s/RAPIDS_VERSION_NUMBER=\".*/RAPIDS_VERSION_NUMBER=\"${NEXT_SHORT_TAG}\"/g" ci/build_docs.sh
# .devcontainer files
find .devcontainer/ -type f -name devcontainer.json -print0 | while IFS= read -r -d '' filename; do
sed_runner "s@rapidsai/devcontainers:[0-9.]*@rapidsai/devcontainers:${NEXT_SHORT_TAG}@g" "${filename}"
sed_runner "s@rapidsai/devcontainers/features/rapids-build-utils:[0-9.]*@rapidsai/devcontainers/features/rapids-build-utils:${NEXT_SHORT_TAG_PEP440}@" "${filename}"
done
| 0 |
rapidsai_public_repos/kvikio/ci
|
rapidsai_public_repos/kvikio/ci/checks/copyright.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import datetime
import os
import re
import sys
import git
FilesToCheck = [
re.compile(r"[.](cmake|cpp|cu|cuh|h|hpp|sh|pxd|py|pyx)$"),
re.compile(r"CMakeLists[.]txt$"),
re.compile(r"setup[.]cfg$"),
re.compile(r"meta[.]yaml$"),
]
ExemptFiles = [
re.compile(r"get_cpm[.]cmake"),
]
# this will break starting at year 10000, which is probably OK :)
CheckSimple = re.compile(
r"Copyright *(?:\(c\))? *(\d{4}),? *NVIDIA C(?:ORPORATION|orporation)"
)
CheckDouble = re.compile(
r"Copyright *(?:\(c\))? *(\d{4})-(\d{4}),? *NVIDIA C(?:ORPORATION|orporation)" # noqa: E501
)
def checkThisFile(f):
if isinstance(f, git.Diff):
if f.deleted_file or f.b_blob.size == 0:
return False
f = f.b_path
elif not os.path.exists(f) or os.stat(f).st_size == 0:
# This check covers things like symlinks which point to files that DNE
return False
for exempt in ExemptFiles:
if exempt.search(f):
return False
for checker in FilesToCheck:
if checker.search(f):
return True
return False
def modifiedFiles():
"""Get a set of all modified files, as Diff objects.
The files returned have been modified in git since the merge base of HEAD
and the upstream of the target branch. We return the Diff objects so that
we can read only the staged changes.
"""
repo = git.Repo()
# Use the environment variable TARGET_BRANCH or RAPIDS_BASE_BRANCH (defined in CI) if possible
target_branch = os.environ.get("TARGET_BRANCH", os.environ.get("RAPIDS_BASE_BRANCH"))
if target_branch is None:
# Fall back to the closest branch if not on CI
target_branch = repo.git.describe(
all=True, tags=True, match="branch-*", abbrev=0
).lstrip("heads/")
upstream_target_branch = None
if target_branch in repo.heads:
# Use the tracking branch of the local reference if it exists. This
# returns None if no tracking branch is set.
upstream_target_branch = repo.heads[target_branch].tracking_branch()
if upstream_target_branch is None:
# Fall back to the remote with the newest target_branch. This code
# path is used on CI because the only local branch reference is
# current-pr-branch, and thus target_branch is not in repo.heads.
# This also happens if no tracking branch is defined for the local
# target_branch. We use the remote with the latest commit if
# multiple remotes are defined.
candidate_branches = [
remote.refs[target_branch] for remote in repo.remotes
if target_branch in remote.refs
]
if len(candidate_branches) > 0:
upstream_target_branch = sorted(
candidate_branches,
key=lambda branch: branch.commit.committed_datetime,
)[-1]
else:
# If no remotes are defined, try to use the local version of the
# target_branch. If this fails, the repo configuration must be very
# strange and we can fix this script on a case-by-case basis.
upstream_target_branch = repo.heads[target_branch]
merge_base = repo.merge_base("HEAD", upstream_target_branch.commit)[0]
diff = merge_base.diff()
changed_files = {f for f in diff if f.b_path is not None}
return changed_files
def getCopyrightYears(line):
res = CheckSimple.search(line)
if res:
return int(res.group(1)), int(res.group(1))
res = CheckDouble.search(line)
if res:
return int(res.group(1)), int(res.group(2))
return None, None
def replaceCurrentYear(line, start, end):
# first turn a simple regex into double (if applicable). then update years
res = CheckSimple.sub(r"Copyright (c) \1-\1, NVIDIA CORPORATION", line)
res = CheckDouble.sub(
rf"Copyright (c) {start:04d}-{end:04d}, NVIDIA CORPORATION",
res,
)
return res
def checkCopyright(f, update_current_year):
"""Checks for copyright headers and their years."""
errs = []
thisYear = datetime.datetime.now().year
lineNum = 0
crFound = False
yearMatched = False
if isinstance(f, git.Diff):
path = f.b_path
lines = f.b_blob.data_stream.read().decode().splitlines(keepends=True)
else:
path = f
with open(f, encoding="utf-8") as fp:
lines = fp.readlines()
for line in lines:
lineNum += 1
start, end = getCopyrightYears(line)
if start is None:
continue
crFound = True
if start > end:
e = [
path,
lineNum,
"First year after second year in the copyright "
"header (manual fix required)",
None,
]
errs.append(e)
elif thisYear < start or thisYear > end:
e = [
path,
lineNum,
"Current year not included in the copyright header",
None,
]
if thisYear < start:
e[-1] = replaceCurrentYear(line, thisYear, end)
if thisYear > end:
e[-1] = replaceCurrentYear(line, start, thisYear)
errs.append(e)
else:
yearMatched = True
# copyright header itself not found
if not crFound:
e = [
path,
0,
"Copyright header missing or formatted incorrectly "
"(manual fix required)",
None,
]
errs.append(e)
# even if the year matches a copyright header, make the check pass
if yearMatched:
errs = []
if update_current_year:
errs_update = [x for x in errs if x[-1] is not None]
if len(errs_update) > 0:
lines_changed = ", ".join(str(x[1]) for x in errs_update)
print(f"File: {path}. Changing line(s) {lines_changed}")
for _, lineNum, __, replacement in errs_update:
lines[lineNum - 1] = replacement
with open(path, "w", encoding="utf-8") as out_file:
out_file.writelines(lines)
return errs
def getAllFilesUnderDir(root, pathFilter=None):
retList = []
for dirpath, dirnames, filenames in os.walk(root):
for fn in filenames:
filePath = os.path.join(dirpath, fn)
if pathFilter(filePath):
retList.append(filePath)
return retList
def checkCopyright_main():
"""
Checks for copyright headers in all the modified files. In case of local
repo, this script will just look for uncommitted files and in case of CI
it compares between branches "$PR_TARGET_BRANCH" and "current-pr-branch"
"""
retVal = 0
argparser = argparse.ArgumentParser(
"Checks for a consistent copyright header in git's modified files"
)
argparser.add_argument(
"--update-current-year",
dest="update_current_year",
action="store_true",
required=False,
help="If set, "
"update the current year if a header is already "
"present and well formatted.",
)
argparser.add_argument(
"--git-modified-only",
dest="git_modified_only",
action="store_true",
required=False,
help="If set, "
"only files seen as modified by git will be "
"processed.",
)
args, dirs = argparser.parse_known_args()
if args.git_modified_only:
files = [f for f in modifiedFiles() if checkThisFile(f)]
else:
files = []
for d in [os.path.abspath(d) for d in dirs]:
if not os.path.isdir(d):
raise ValueError(f"{d} is not a directory.")
files += getAllFilesUnderDir(d, pathFilter=checkThisFile)
errors = []
for f in files:
errors += checkCopyright(f, args.update_current_year)
if len(errors) > 0:
if any(e[-1] is None for e in errors):
print("Copyright headers incomplete in some of the files!")
for e in errors:
print(" %s:%d Issue: %s" % (e[0], e[1], e[2]))
print("")
n_fixable = sum(1 for e in errors if e[-1] is not None)
path_parts = os.path.abspath(__file__).split(os.sep)
file_from_repo = os.sep.join(path_parts[path_parts.index("ci") :])
if n_fixable > 0 and not args.update_current_year:
print(
f"You can run `python {file_from_repo} --git-modified-only "
"--update-current-year` and stage the results in git to "
f"fix {n_fixable} of these errors.\n"
)
retVal = 1
return retVal
if __name__ == "__main__":
sys.exit(checkCopyright_main())
| 0 |
rapidsai_public_repos/kvikio/ci
|
rapidsai_public_repos/kvikio/ci/checks/style.sh
|
#!/bin/bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#######################
# kvikio Style Tester #
#######################
PATH=/conda/bin:$PATH
# Activate common conda env
. /opt/conda/etc/profile.d/conda.sh
conda activate rapids
FORMAT_FILE_URL=https://raw.githubusercontent.com/rapidsai/rapids-cmake/branch-23.12/cmake-format-rapids-cmake.json
export RAPIDS_CMAKE_FORMAT_FILE=/tmp/rapids_cmake_ci/cmake-formats-rapids-cmake.json
mkdir -p $(dirname ${RAPIDS_CMAKE_FORMAT_FILE})
wget -O ${RAPIDS_CMAKE_FORMAT_FILE} ${FORMAT_FILE_URL}
# Run pre-commit checks
pre-commit run --hook-stage manual --all-files --show-diff-on-failure
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/distributed-join/Dockerfile.cudf_source
|
FROM nvidia/cuda:11.0-devel-ubuntu20.04
ARG DEBIAN_FRONTEND=noninteractive
ENV CUDA_ROOT=/usr/local/cuda
WORKDIR /
RUN apt-get update -y && apt-get install -y build-essential wget git vim libpciaccess-dev pciutils
# Install conda
ADD https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh /miniconda.sh
RUN sh /miniconda.sh -b -p /conda && /conda/bin/conda update -n base conda
ENV PATH=${PATH}:/conda/bin
# Enables "source activate conda"
SHELL ["/bin/bash", "-c"]
# Setup cuDF
RUN git clone https://github.com/rapidsai/cudf.git /cudf \
&& cd /cudf \
&& git checkout branch-0.19 \
&& git submodule update --init --remote --recursive \
&& conda env create --name cudf_dev --file conda/environments/cudf_dev_cuda11.0.yml \
&& source activate cudf_dev \
&& conda install -c rapidsai -c nvidia -c conda-forge -y openmpi cmake \
&& mkdir -p cpp/build \
&& cd cpp/build \
&& cmake .. -DCMAKE_INSTALL_PREFIX=${CONDA_PREFIX} -DCMAKE_CUDA_ARCHITECTURES="70;80" \
&& make -j install \
&& conda clean -a -y
ENV CUDF_ROOT=/conda/envs/cudf_dev
ENV LD_LIBRARY_PATH=${CUDA_ROOT}/lib64:${CUDF_ROOT}/lib:${LD_LIBRARY_PATH}
ENV PATH=${PATH}:${CUDF_ROOT}/bin
# Setup Mellanox OFED
RUN apt-get install -y --no-install-recommends \
ca-certificates \
gnupg \
wget
RUN wget -qO - https://www.mellanox.com/downloads/ofed/RPM-GPG-KEY-Mellanox | apt-key add - && \
mkdir -p /etc/apt/sources.list.d && wget -q -nc --no-check-certificate -P /etc/apt/sources.list.d https://linux.mellanox.com/public/repo/mlnx_ofed/5.2-1.0.4.0/ubuntu20.04/mellanox_mlnx_ofed.list && \
apt-get update -y && \
apt-get install -y --no-install-recommends \
ibverbs-providers \
ibverbs-utils \
libibmad-dev \
libibmad5 \
libibumad-dev \
libibumad3 \
libibverbs-dev \
libibverbs1 \
librdmacm-dev \
librdmacm1
# Setup UCX
ADD https://github.com/openucx/ucx/releases/download/v1.9.0/ucx-1.9.0.tar.gz .
RUN apt-get install -y numactl libnuma-dev file pkg-config binutils binutils-dev \
&& tar -zxf ucx-1.9.0.tar.gz && cd ucx-1.9.0 \
&& ./contrib/configure-release --enable-mt --with-cuda=/usr/local/cuda --with-rdmacm --with-verbs \
&& make -j \
&& make install \
&& cd / && rm -rf ucx-1.9.0 && rm ucx-1.9.0.tar.gz
ENV UCX_ROOT=/usr
# Setup nvcomp
RUN git clone https://github.com/NVIDIA/nvcomp && cd nvcomp && git checkout branch-2.0 && mkdir -p build && cd build \
&& ${CUDF_ROOT}/bin/cmake .. && make -j
ENV NVCOMP_ROOT=/nvcomp/build
ENV LD_LIBRARY_PATH=${NVCOMP_ROOT}/lib:${LD_LIBRARY_PATH}
# Setup NCCL
RUN git clone https://github.com/NVIDIA/nccl && cd nccl \
&& make -j src.build NVCC_GENCODE="-gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_80,code=compute_80"
ENV NCCL_ROOT=/nccl/build
ENV LD_LIBRARY_PATH=${NCCL_ROOT}/lib:${LD_LIBRARY_PATH}
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/distributed-join/CMakeLists.txt
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
cmake_minimum_required(VERSION 3.18)
project(distributed LANGUAGES CUDA CXX)
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CUDA_STANDARD 14)
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/")
find_package(CUDAToolkit REQUIRED)
find_package(CUDF REQUIRED)
find_package(RMM REQUIRED)
find_package(UCX REQUIRED)
find_package(NCCL REQUIRED)
find_package(MPI REQUIRED)
find_package(NVCOMP REQUIRED)
set(GPU_ARCHS ${GPU_ARCHS} "70;80")
add_compile_options("$<$<COMPILE_LANGUAGE:CXX>:-Wall>")
add_compile_options("$<$<COMPILE_LANGUAGE:CXX>:-Werror>")
add_compile_options("$<$<COMPILE_LANGUAGE:CUDA>:--compiler-options=-Wall>")
add_compile_options("$<$<COMPILE_LANGUAGE:CUDA>:--compiler-options=-Werror>")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -g")
set(CMAKE_CUDA_FLAGS_DEBUG "${CMAKE_CUDA_FLAGS_DEBUG} -g")
add_subdirectory(src lib)
add_subdirectory(benchmark bin/benchmark)
add_subdirectory(test bin/test)
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/distributed-join/run-clang-format.py
|
#!/usr/bin/python3
import glob
import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--path", default="clang-format", help="Path to clang-format executable")
args = parser.parse_args()
extensions = ["cu", "cpp", "h", "cuh", "hpp"]
file_paths = []
for extension in extensions:
file_paths.extend(glob.glob('**/*.{}'.format(extension), recursive=True))
for file_path in file_paths:
subprocess.run([args.path, "-i", "-style=file", file_path])
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/distributed-join/README.md
|
# Distributed Join Project
## Overview
This proof-of-concept repo implements the distributed repartitioned join algorithm. The algorithm consists of three steps:
1. Hash partition: reorder input tables into partitions based on the hash values of the key columns.
2. All-to-all communication: send each partition to the corresponding MPI rank so that rows with the same hash values end up in the same rank.
3. Local join: each MPI rank performs local join independently.
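The sketch below illustrates these three steps; mpi4py and pandas are used here only as stand-ins for the CUDA/UCX/NCCL implementation in this repo:

```python
# Minimal, illustrative sketch of the repartitioned join (not the code used in this repo).
# mpi4py and pandas stand in for the GPU implementation. Run with e.g. `mpirun -n 4 python sketch.py`.
from mpi4py import MPI
import pandas as pd

comm = MPI.COMM_WORLD
nranks = comm.Get_size()

def distributed_join(left: pd.DataFrame, right: pd.DataFrame, key: str) -> pd.DataFrame:
    def hash_partition(df: pd.DataFrame) -> list:
        # 1. Hash partition: bucket rows by the hash of the key column.
        buckets = pd.util.hash_pandas_object(df[key], index=False) % nranks
        return [df[buckets == r] for r in range(nranks)]

    # 2. All-to-all: partition r goes to rank r, so rows with equal keys end up on the same rank.
    local_left = pd.concat(comm.alltoall(hash_partition(left)))
    local_right = pd.concat(comm.alltoall(hash_partition(right)))

    # 3. Local join: each rank joins its shard independently.
    return local_left.merge(local_right, on=key)
```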
For more information about the algorithm used and optimizations, please refer to [the ADMS'21 paper](http://www.adms-conf.org/2021-camera-ready/gao_adms21.pdf) and [the presentation](http://www.adms-conf.org/2021-camera-ready/gao_presentation.pdf).
For a production-quality distributed join implementation, check out [cuDF's Dask integration](https://rapids.ai/dask.html).
The following plot shows the weak-scaling performance when joining the `l_orderkey` column from the lineitem table with the `o_orderkey` and `o_orderpriority` columns from the orders table, on the TPC-H dataset at SF100k.

## Compilation
This project depends on CUDA, UCX, NCCL, MPI, cuDF 0.19 and nvcomp 2.0.
To compile, make sure the variables `CUDA_ROOT`, `CUDF_ROOT`, `MPI_ROOT`, `UCX_ROOT`, `NCCL_ROOT` and `NVCOMP_ROOT` point to the installation paths of CUDA, cuDF, MPI, UCX, NCCL and nvcomp, respectively.
[The wiki page](https://github.com/rapidsai/distributed-join/wiki/How-to-compile-and-run-the-code) contains step-by-step instructions for setting up the environment.
To compile, run
```bash
mkdir build && cd build
cmake ..
make -j
```
## Running
To run on systems not needing InfiniBand (e.g. a single-node DGX-2):
```bash
UCX_MEMTYPE_CACHE=n UCX_TLS=sm,cuda_copy,cuda_ipc mpirun -n 16 --cpus-per-rank 3 bin/benchmark/distributed_join
```
On systems needing InfiniBand communication (e.g. single or multi-node DGX-1Vs):
* GPU-NIC affinity is critical on systems with multiple GPUs and NICs; please refer to [this page from QUDA](https://github.com/lattice/quda/wiki/Multi-GPU-Support#maximizing-gdr-performance) for more detailed information. You can also modify the run script included in the benchmark folder.
* Depending on whether you're running with `srun` or `mpirun`, update `run_sample.sh` to set `lrank` to `$SLURM_LOCALID` or `$OMPI_COMM_WORLD_LOCAL_RANK`, respectively.
Example run on a single DGX-1V (all 8 GPUs):
```bash
$ mpirun -n 8 --bind-to none --mca btl ^openib,smcuda benchmark/run_sample.sh
rank 0 gpu list 0,1,2,3,4,5,6,7 cpu bind 1-4 ndev mlx5_0:1
rank 1 gpu list 0,1,2,3,4,5,6,7 cpu bind 5-8 ndev mlx5_0:1
rank 2 gpu list 0,1,2,3,4,5,6,7 cpu bind 10-13 ndev mlx5_1:1
rank 3 gpu list 0,1,2,3,4,5,6,7 cpu bind 15-18 ndev mlx5_1:1
rank 4 gpu list 0,1,2,3,4,5,6,7 cpu bind 21-24 ndev mlx5_2:1
rank 6 gpu list 0,1,2,3,4,5,6,7 cpu bind 30-33 ndev mlx5_3:1
rank 7 gpu list 0,1,2,3,4,5,6,7 cpu bind 35-38 ndev mlx5_3:1
rank 5 gpu list 0,1,2,3,4,5,6,7 cpu bind 25-28 ndev mlx5_2:1
Device count: 8
Rank 4 select 4/8 GPU
Device count: 8
Rank 5 select 5/8 GPU
Device count: 8
Rank 3 select 3/8 GPU
Device count: 8
Rank 7 select 7/8 GPU
Device count: 8
Rank 0 select 0/8 GPU
Device count: 8
Rank 1 select 1/8 GPU
Device count: 8
Rank 2 select 2/8 GPU
Device count: 8
Rank 6 select 6/8 GPU
========== Parameters ==========
Key type: int64_t
Payload type: int64_t
Number of rows in the build table: 800 million
Number of rows in the probe table: 800 million
Selectivity: 0.3
Keys in build table are unique: true
Over-decomposition factor: 1
Communicator: UCX
Registration method: preregistered
Compression: false
================================
Elapsed time (s) 0.392133
```
For the arguments accepted by each benchmark, please refer to the source files in the `benchmark` folder.
## Code formatting
This repo uses `clang-format` for code formatting. To format the code, make sure `clang-format` is installed and run
```bash
./run-clang-format.py -p <path to clang-format>
```
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/distributed-join/Dockerfile
|
FROM nvidia/cuda:11.0-devel-ubuntu20.04
ARG DEBIAN_FRONTEND=noninteractive
ENV CUDA_ROOT=/usr/local/cuda
WORKDIR /
RUN apt-get update -y && apt-get install -y build-essential wget git vim libpciaccess-dev pciutils
# Install conda
ADD https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh /miniconda.sh
RUN sh /miniconda.sh -b -p /conda && /conda/bin/conda update -n base conda
ENV PATH=${PATH}:/conda/bin
# Enables "source activate conda"
SHELL ["/bin/bash", "-c"]
# Setup cuDF
RUN conda create --name cudf_release \
&& source activate cudf_release \
&& conda install -c rapidsai -c nvidia -c conda-forge -y \
cudf=0.19 \
python=3.8 \
cudatoolkit=11.0 \
openmpi \
cmake \
&& conda clean -a -y
ENV CUDF_ROOT=/conda/envs/cudf_release
ENV LD_LIBRARY_PATH=${CUDA_ROOT}/lib64:${CUDF_ROOT}/lib:${LD_LIBRARY_PATH}
ENV PATH=${PATH}:${CUDF_ROOT}/bin
# Setup Mellanox OFED
RUN apt-get install -y --no-install-recommends \
ca-certificates \
gnupg \
wget
RUN wget -qO - https://www.mellanox.com/downloads/ofed/RPM-GPG-KEY-Mellanox | apt-key add - && \
mkdir -p /etc/apt/sources.list.d && wget -q -nc --no-check-certificate -P /etc/apt/sources.list.d https://linux.mellanox.com/public/repo/mlnx_ofed/5.2-1.0.4.0/ubuntu20.04/mellanox_mlnx_ofed.list && \
apt-get update -y && \
apt-get install -y --no-install-recommends \
ibverbs-providers \
ibverbs-utils \
libibmad-dev \
libibmad5 \
libibumad-dev \
libibumad3 \
libibverbs-dev \
libibverbs1 \
librdmacm-dev \
librdmacm1
# Setup UCX
ADD https://github.com/openucx/ucx/releases/download/v1.9.0/ucx-1.9.0.tar.gz .
RUN apt-get install -y numactl libnuma-dev file pkg-config binutils binutils-dev \
&& tar -zxf ucx-1.9.0.tar.gz && cd ucx-1.9.0 \
&& ./contrib/configure-release --enable-mt --with-cuda=/usr/local/cuda --with-rdmacm --with-verbs \
&& make -j \
&& make install \
&& cd / && rm -rf ucx-1.9.0 && rm ucx-1.9.0.tar.gz
ENV UCX_ROOT=/usr
# Setup nvcomp
RUN git clone https://github.com/NVIDIA/nvcomp && cd nvcomp && git checkout branch-2.0 && mkdir -p build && cd build \
&& ${CUDF_ROOT}/bin/cmake .. && make -j
ENV NVCOMP_ROOT=/nvcomp/build
ENV LD_LIBRARY_PATH=${NVCOMP_ROOT}/lib:${LD_LIBRARY_PATH}
# Setup NCCL
RUN git clone https://github.com/NVIDIA/nccl && cd nccl \
&& make -j src.build NVCC_GENCODE="-gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_80,code=compute_80"
ENV NCCL_ROOT=/nccl/build
ENV LD_LIBRARY_PATH=${NCCL_ROOT}/lib:${LD_LIBRARY_PATH}
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/distributed-join/LICENSE
|
Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/distributed-join/.clang-format
|
---
# This file is copied from cuDF repo:
# https://github.com/rapidsai/cudf/blob/branch-0.17/cpp/.clang-format
# Refer to the following link for the explanation of each params:
# http://releases.llvm.org/8.0.0/tools/clang/docs/ClangFormatStyleOptions.html
Language: Cpp
# BasedOnStyle: Google
AccessModifierOffset: -1
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: true
AlignConsecutiveDeclarations: false
AlignEscapedNewlines: Left
AlignOperands: true
AlignTrailingComments: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: true
AllowShortFunctionsOnASingleLine: All
AllowShortIfStatementsOnASingleLine: true
AllowShortLoopsOnASingleLine: true
# This is deprecated
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: Yes
BinPackArguments: false
BinPackParameters: false
BraceWrapping:
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: false
AfterNamespace: false
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
AfterExternBlock: false
BeforeCatch: false
BeforeElse: false
IndentBraces: false
# disabling the below splits, else, they'll just add to the vertical length of source files!
SplitEmptyFunction: false
SplitEmptyRecord: false
SplitEmptyNamespace: false
BreakBeforeBinaryOperators: None
BreakBeforeBraces: WebKit
BreakBeforeInheritanceComma: false
BreakInheritanceList: BeforeColon
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakConstructorInitializers: BeforeColon
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: true
ColumnLimit: 100
CommentPragmas: '^ IWYU pragma:'
CompactNamespaces: false
ConstructorInitializerAllOnOneLineOrOnePerLine: true
# Kept the below 2 to be the same as `IndentWidth` to keep everything uniform
ConstructorInitializerIndentWidth: 2
ContinuationIndentWidth: 2
Cpp11BracedListStyle: true
DerivePointerAlignment: true
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros:
- foreach
- Q_FOREACH
- BOOST_FOREACH
IncludeBlocks: Preserve
IncludeCategories:
- Regex: '^<ext/.*\.h>'
Priority: 2
- Regex: '^<.*\.h>'
Priority: 1
- Regex: '^<.*'
Priority: 2
- Regex: '.*'
Priority: 3
IncludeIsMainRegex: '([-_](test|unittest))?$'
IndentCaseLabels: true
IndentPPDirectives: None
IndentWidth: 2
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: false
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBinPackProtocolList: Never
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakAssignment: 2
PenaltyBreakBeforeFirstCallParameter: 1
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyBreakTemplateDeclaration: 10
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Left
RawStringFormats:
- Language: Cpp
Delimiters:
- cc
- CC
- cpp
- Cpp
- CPP
- 'c++'
- 'C++'
CanonicalDelimiter: ''
- Language: TextProto
Delimiters:
- pb
- PB
- proto
- PROTO
EnclosingFunctions:
- EqualsProto
- EquivToProto
- PARSE_PARTIAL_TEXT_PROTO
- PARSE_TEST_PROTO
- PARSE_TEXT_PROTO
- ParseTextOrDie
- ParseTextProtoOrDie
CanonicalDelimiter: ''
BasedOnStyle: google
# Enabling comment reflow causes doxygen comments to be messed up in their formats!
ReflowComments: true
SortIncludes: true
SortUsingDeclarations: true
SpaceAfterCStyleCast: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeCpp11BracedList: false
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatements
SpaceBeforeRangeBasedForLoopColon: true
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 2
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Cpp11
StatementMacros:
- Q_UNUSED
- QT_REQUIRE_VERSION
# Be consistent with indent-width, even for people who use tab for indentation!
TabWidth: 2
UseTab: Never
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/generate_dataset/nvtx_helper.cuh
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef NVTX_HELPER_CUH
#define NVTX_HELPER_CUH
#ifdef USE_NVTX
#include <nvToolsExt.h>
static const uint32_t colors[] = {
0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x00ff0000, 0x00ffffff};
static const int num_colors = sizeof(colors) / sizeof(uint32_t);
#define PUSH_RANGE(name, cid) \
{ \
int color_id = cid; \
color_id = color_id % num_colors; \
nvtxEventAttributes_t eventAttrib = {0}; \
eventAttrib.version = NVTX_VERSION; \
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \
eventAttrib.colorType = NVTX_COLOR_ARGB; \
eventAttrib.color = colors[color_id]; \
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \
eventAttrib.message.ascii = name; \
nvtxRangePushEx(&eventAttrib); \
}
#define POP_RANGE nvtxRangePop();
#else
#define PUSH_RANGE(name, cid)
#define POP_RANGE
#endif
#endif // NVTX_HELPER_CUH
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/generate_dataset/generate_dataset.cuh
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../src/error.hpp"
#include "nvtx_helper.cuh"
#include <rmm/exec_policy.hpp>
#include <thrust/distance.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/sequence.h>
#include <curand.h>
#include <curand_kernel.h>
#include <cassert>
/* redefine atomic compare and swap with signed type */
__device__ __inline__ int64_t atomicCAS(int64_t* address, int64_t compare, int64_t val)
{
return (int64_t)atomicCAS(
(unsigned long long int*)address, (unsigned long long int)compare, (unsigned long long int)val);
}
__global__ static void init_curand(curandState* state, const int nstates)
{
int ithread = threadIdx.x + blockIdx.x * blockDim.x;
if (ithread < nstates) { curand_init(1234ULL, ithread, 0, state + ithread); }
}
template <typename key_type, typename size_type>
__global__ static void init_build_tbl(key_type* const build_tbl,
const size_type build_tbl_size,
const key_type rand_max,
const bool uniq_build_tbl_keys,
key_type* const lottery,
const size_type lottery_size,
curandState* state,
const int num_states)
{
static_assert(std::is_signed<key_type>::value, "key_type needs to be signed for lottery to work");
const int start_idx = blockIdx.x * blockDim.x + threadIdx.x;
const key_type stride = blockDim.x * gridDim.x;
assert(start_idx < num_states);
curandState localState = state[start_idx];
for (size_type idx = start_idx; idx < build_tbl_size; idx += stride) {
const double x = curand_uniform_double(&localState);
if (uniq_build_tbl_keys) {
// If the build table keys need to be unique, go through lottery array from lottery_idx until
// finding a key which has not been used (-1). Mark the key as been used by atomically setting
// the spot to -1.
size_type lottery_idx = x * lottery_size;
key_type lottery_val = -1;
while (-1 == lottery_val) {
lottery_val = lottery[lottery_idx];
if (-1 != lottery_val) { lottery_val = atomicCAS(lottery + lottery_idx, lottery_val, -1); }
lottery_idx = (lottery_idx + 1) % lottery_size;
}
build_tbl[idx] = lottery_val;
} else {
build_tbl[idx] = x * rand_max;
}
}
state[start_idx] = localState;
}
template <typename key_type, typename size_type>
__global__ void init_probe_tbl(key_type* const probe_tbl,
const size_type probe_tbl_size,
const key_type* const build_tbl,
const size_type build_tbl_size,
const key_type* const lottery,
const size_type lottery_size,
const double selectivity,
curandState* state,
const int num_states)
{
const int start_idx = blockIdx.x * blockDim.x + threadIdx.x;
const size_type stride = blockDim.x * gridDim.x;
assert(start_idx < num_states);
curandState localState = state[start_idx];
for (size_type idx = start_idx; idx < probe_tbl_size; idx += stride) {
key_type val;
double x = curand_uniform_double(&localState);
if (x <= selectivity) {
// x <= selectivity means this key in the probe table should be present in the build table, so
// we pick a key from build_tbl
x = curand_uniform_double(&localState);
size_type build_tbl_idx = x * build_tbl_size;
if (build_tbl_idx >= build_tbl_size) { build_tbl_idx = build_tbl_size - 1; }
val = build_tbl[build_tbl_idx];
} else {
// This key in the probe table should not be present in the build table, so we pick a key from
// lottery.
x = curand_uniform_double(&localState);
size_type lottery_idx = x * lottery_size;
val = lottery[lottery_idx];
}
probe_tbl[idx] = val;
}
state[start_idx] = localState;
}
/**
* generate_input_tables generates random integer input tables for database benchmarks.
*
* generate_input_tables generates two random integer input tables for database benchmarks,
* mainly designed to benchmark join operations. The template parameters key_type and size_type
* need to be built-in integer types (e.g. short, int, long long) and key_type needs to be signed,
* as the lottery used internally relies on being able to use negative values to mark drawn
* numbers. The tables need to be preallocated in a memory region accessible by the GPU
* (e.g. device memory, zero-copy memory or unified memory). Each value in the build table
* will be from [0,rand_max], and if uniq_build_tbl_keys is true it is ensured that each value
* will be unique in the build table. Each value in the probe table will also be in the build
* table with a probability of selectivity, and a random number from
* [0,rand_max] \setminus \{build_tbl\} otherwise.
*
* @param[out] build_tbl The build table to generate. Usually the smaller table used to
* "build" the hash table in a hash based join implementation.
* @param[in] build_tbl_size number of keys in the build table
* @param[out] probe_tbl The probe table to generate. Usually the larger table used to
* probe into the hash table created from the build table.
* @param[in] probe_tbl_size number of keys in the probe table
* @param[in] selectivity probability with which an element of the probe table is
* present in the build table.
* @param[in] rand_max maximum random number to generate. I.e. random numbers are
* integers from [0,rand_max].
* @param[in] uniq_build_tbl_keys if each key in the build table should appear exactly once.
*/
template <typename key_type, typename size_type>
void generate_input_tables(key_type* const build_tbl,
const size_type build_tbl_size,
key_type* const probe_tbl,
const size_type probe_tbl_size,
const double selectivity,
const key_type rand_max,
const bool uniq_build_tbl_keys)
{
// With large values of rand_max, a lot of temporary storage is needed for the lottery. A more
// memory-efficient implementation, at the expense of applying the selectivity less accurately,
// would be to partition the random numbers into two intervals, let one table draw random numbers
// from only one interval, and let the other table draw from that same interval with probability
// selectivity and from the other interval otherwise.
static_assert(std::is_signed<key_type>::value, "key_type needs to be signed for lottery to work");
const int block_size = 128;
// Maximize exposed parallelism while minimizing storage for curand state
int num_blocks_init_build_tbl{-1};
CUDA_RT_CALL(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&num_blocks_init_build_tbl, init_build_tbl<key_type, size_type>, block_size, 0));
int num_blocks_init_probe_tbl{-1};
CUDA_RT_CALL(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&num_blocks_init_probe_tbl, init_probe_tbl<key_type, size_type>, block_size, 0));
int dev_id{-1};
CUDA_RT_CALL(cudaGetDevice(&dev_id));
int num_sms{-1};
CUDA_RT_CALL(cudaDeviceGetAttribute(&num_sms, cudaDevAttrMultiProcessorCount, dev_id));
const int num_states =
num_sms * std::max(num_blocks_init_build_tbl, num_blocks_init_probe_tbl) * block_size;
rmm::device_vector<curandState> devStates(num_states);
init_curand<<<(num_states - 1) / block_size + 1, block_size>>>(devStates.data().get(),
num_states);
CUDA_RT_CALL(cudaGetLastError());
CUDA_RT_CALL(cudaDeviceSynchronize());
rmm::device_vector<key_type> build_tbl_sorted(build_tbl_size);
size_type lottery_size =
rand_max < std::numeric_limits<key_type>::max() - 1 ? rand_max + 1 : rand_max;
rmm::device_vector<key_type> lottery(lottery_size);
if (uniq_build_tbl_keys) { thrust::sequence(thrust::device, lottery.begin(), lottery.end(), 0); }
init_build_tbl<key_type, size_type>
<<<num_sms * num_blocks_init_build_tbl, block_size>>>(build_tbl,
build_tbl_size,
rand_max,
uniq_build_tbl_keys,
lottery.data().get(),
lottery_size,
devStates.data().get(),
num_states);
CUDA_RT_CALL(cudaGetLastError());
CUDA_RT_CALL(cudaDeviceSynchronize());
CUDA_RT_CALL(cudaMemcpy(build_tbl_sorted.data().get(),
build_tbl,
build_tbl_size * sizeof(key_type),
cudaMemcpyDeviceToDevice));
thrust::sort(rmm::exec_policy(), build_tbl_sorted.begin(), build_tbl_sorted.end());
// Exclude keys used in build table from lottery
thrust::counting_iterator<key_type> first_lottery_elem(0);
thrust::counting_iterator<key_type> last_lottery_elem = first_lottery_elem + lottery_size;
key_type* lottery_end = thrust::set_difference(thrust::device,
first_lottery_elem,
last_lottery_elem,
build_tbl_sorted.begin(),
build_tbl_sorted.end(),
lottery.data().get());
lottery_size = thrust::distance(lottery.data().get(), lottery_end);
init_probe_tbl<key_type, size_type>
<<<num_sms * num_blocks_init_build_tbl, block_size>>>(probe_tbl,
probe_tbl_size,
build_tbl,
build_tbl_size,
lottery.data().get(),
lottery_size,
selectivity,
devStates.data().get(),
num_states);
CUDA_RT_CALL(cudaGetLastError());
CUDA_RT_CALL(cudaDeviceSynchronize());
}
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/benchmark/CMakeLists.txt
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
include(BuildHelpers)
file(GLOB SOURCES *.cu *.cpp)
build_executables(SOURCES)
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/benchmark/gpubdb_shuffle_on.cpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
This benchmark tests shuffle performance on the GPU-BDB "web_clickstreams" table.
Parameters:
--data-folder Folder containing the Parquet files of the GPU-BDB "web_clickstreams" table.
--nfiles-per-rank Number of Parquet files read for input on each rank.
--compression If specified, compress data before all-to-all communication.
*/
#include "../src/all_to_all_comm.hpp"
#include "../src/communicator.hpp"
#include "../src/error.hpp"
#include "../src/registered_memory_resource.hpp"
#include "../src/setup.hpp"
#include "../src/shuffle_on.hpp"
#include "utility.hpp"
#include <cudf/concatenate.hpp>
#include <cudf/io/parquet.hpp>
#include <cudf/stream_compaction.hpp>
#include <cudf/table/table.hpp>
#include <mpi.h>
#include <cuda_runtime.h>
#include <dirent.h>
#include <algorithm>
#include <cstdlib>
#include <cstring>
#include <iomanip>
#include <memory>
#include <sstream>
using cudf::table;
using cudf::table_view;
static std::string data_folderpath = "";
static int nfiles_per_rank = 1;
static bool compression = false;
void parse_command_line_arguments(int argc, char *argv[])
{
for (int iarg = 0; iarg < argc; iarg++) {
if (!strcmp(argv[iarg], "--data-folder")) { data_folderpath = argv[iarg + 1]; }
if (!strcmp(argv[iarg], "--nfiles-per-rank")) { nfiles_per_rank = atoi(argv[iarg + 1]); }
if (!strcmp(argv[iarg], "--compression")) { compression = true; }
}
}
void report_configuration()
{
MPI_CALL(MPI_Barrier(MPI_COMM_WORLD));
int mpi_rank;
MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank));
if (mpi_rank != 0) return;
std::cout << "========== Parameters ==========" << std::endl;
std::cout << std::boolalpha;
std::cout << "Data folder: " << data_folderpath << std::endl;
std::cout << "Files per rank: " << nfiles_per_rank << std::endl;
std::cout << "Compression: " << compression << std::endl;
std::cout << "================================" << std::endl;
}
/**
* Get a vector of sorted parquet file names in folder specified by *folderpath*.
*
* In this function, only the root rank will query the filesystem, and the result file names are
* broadcasted to all worker ranks. Therefore, this function needs to be called collectively by all
* ranks in MPI_COMM_WORLD.
*
* @param[in] folderpath Path to the folder to be queried.
* @param[out] num_input_files Number of Parquet files in *folderpath*.
* @param[out] file_names Parquet file names in *folderpath*.
*/
void get_parquet_file_names(const char *folderpath,
int &num_input_files,
std::vector<std::string> &file_names)
{
int mpi_rank;
MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank));
num_input_files = 0;
file_names.clear();
// Query parquet file names in *folderpath* on the root rank and store in *file_names*
if (mpi_rank == 0) {
DIR *data_folder = opendir(folderpath);
if (!data_folder) {
std::cerr << "Cannot open directory\n";
exit(EXIT_FAILURE);
}
try {
struct dirent *next_entry;
while ((next_entry = readdir(data_folder)) != NULL) {
std::string file_name = next_entry->d_name;
if (file_name.find(".parquet") != std::string::npos) {
// if the current file name contains ".parquet"
file_names.push_back(file_name);
num_input_files++;
}
}
} catch (...) {
closedir(data_folder);
throw;
}
closedir(data_folder);
}
// Broadcast the file names from the root rank to all worker ranks
MPI_CALL(MPI_Bcast(&num_input_files, 1, MPI_INT, 0, MPI_COMM_WORLD));
constexpr int max_file_name_length = 100;
char file_name_bcast[max_file_name_length];
for (int ifile = 0; ifile < num_input_files; ifile++) {
if (mpi_rank == 0) {
strncpy(file_name_bcast, file_names[ifile].c_str(), max_file_name_length);
}
// Each file name is broadcasted to *file_name_bcast*, and then each worker rank adds it to
// *file_names*.
MPI_CALL(MPI_Bcast(file_name_bcast, max_file_name_length, MPI_CHAR, 0, MPI_COMM_WORLD));
if (mpi_rank != 0) { file_names.emplace_back(file_name_bcast); }
}
std::sort(file_names.begin(), file_names.end());
}
int main(int argc, char *argv[])
{
MPI_CALL(MPI_Init(&argc, &argv));
set_cuda_device();
// Parse command line arguments
parse_command_line_arguments(argc, argv);
report_configuration();
// Initialize communicator and memory pool
Communicator *communicator{nullptr};
registered_memory_resource *registered_mr{nullptr};
rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> *pool_mr{nullptr};
setup_memory_pool_and_communicator(
communicator, registered_mr, pool_mr, "UCX", "preregistered", 0);
void *preallocated_pinned_buffer;
CUDA_RT_CALL(
cudaMallocHost(&preallocated_pinned_buffer, communicator->mpi_size * sizeof(size_t)));
// Get a vector of parquet file names in data_folder
int num_input_files;
std::vector<std::string> file_names;
get_parquet_file_names(data_folderpath.c_str(), num_input_files, file_names);
// Read parquet files
std::vector<std::unique_ptr<table>> input_tables;
for (int ifile = 0; ifile < nfiles_per_rank; ifile++) {
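    // Round-robin file assignment: rank r reads files r, r + mpi_size, r + 2 * mpi_size, ...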
int file_index = ifile * communicator->mpi_size + communicator->mpi_rank;
if (file_index >= num_input_files) break;
std::string filepath = data_folderpath + "/" + file_names[file_index];
cudf::io::parquet_reader_options cuio_options =
cudf::io::parquet_reader_options::builder(cudf::io::source_info(filepath));
cuio_options.set_columns(
{"wcs_user_sk", "wcs_item_sk", "wcs_click_date_sk", "wcs_click_time_sk"});
input_tables.push_back(cudf::io::read_parquet(cuio_options).tbl);
}
// Combine input tables into a single table
std::vector<table_view> input_table_views;
input_table_views.reserve(input_tables.size());
for (auto const &input_table : input_tables) { input_table_views.push_back(input_table->view()); }
std::unique_ptr<table> combined_input = cudf::concatenate(input_table_views);
input_tables.clear();
// Remove rows with NULL value in "wcs_user_sk" and "wcs_item_sk"
std::unique_ptr<table> combined_input_filtered =
cudf::drop_nulls(combined_input->view(), {0, 1}, 2);
combined_input.reset();
std::cout << "Rank " << communicator->mpi_rank << " input table has "
<< combined_input_filtered->view().num_rows() << " rows." << std::endl;
// Calculate input sizes
int64_t input_size_irank = calculate_table_size(combined_input_filtered->view());
int64_t input_size_total;
MPI_CALL(
MPI_Allreduce(&input_size_irank, &input_size_total, 1, MPI_INT64_T, MPI_SUM, MPI_COMM_WORLD));
// Warmup
warmup_all_to_all(communicator);
if (compression) { warmup_nvcomp(); }
std::vector<ColumnCompressionOptions> compression_options =
generate_compression_options_distributed(combined_input_filtered->view(), compression);
if (communicator->mpi_rank == 0) { print_compression_options(compression_options); }
// Benchmark shuffle_on
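  // shuffle_on hash-partitions the filtered table on column 0 (wcs_user_sk) and redistributes the
  // resulting partitions across all ranks through an all-to-all exchange.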
CUDA_RT_CALL(cudaDeviceSynchronize());
MPI_Barrier(MPI_COMM_WORLD);
double start = MPI_Wtime();
std::unique_ptr<table> shuffle_result = shuffle_on(combined_input_filtered->view(),
{0},
communicator,
compression_options,
cudf::hash_id::HASH_MURMUR3,
true,
preallocated_pinned_buffer);
MPI_Barrier(MPI_COMM_WORLD);
double stop = MPI_Wtime();
if (communicator->mpi_rank == 0) {
double elapsed_time = stop - start;
std::cout << "Elasped time (s): " << elapsed_time << std::endl;
std::cout << "Throughput (GB/s): " << input_size_total / 1e9 / elapsed_time << std::endl;
}
// Cleanup
combined_input_filtered.reset();
shuffle_result.reset();
CUDA_RT_CALL(cudaFreeHost(preallocated_pinned_buffer));
destroy_memory_pool_and_communicator(
communicator, registered_mr, pool_mr, "UCX", "preregistered");
MPI_CALL(MPI_Finalize());
return 0;
}
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/benchmark/distributed_join.cu
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
This benchmark runs distributed join on random keys. Both the left and the right tables contain two
columns. The key column consists of random integers and the payload column consists of row ids.
Parameters:
**--key-type {int32_t,int64_t}**
Data type for the key columns. Default: `int64_t`.
**--payload-type {int32_t,int64_t}**
Data type for the payload columns. Default: `int64_t`.
**--build-table-nrows [INTEGER]**
Number of rows in the build table per GPU. Default: `100'000'000`.
**--probe-table-nrows [INTEGER]**
Number of rows in the probe table per GPU. Default: `100'000'000`.
**--selectivity [FLOAT]**
The probability (in range 0.0 - 1.0) that each probe table row has a match in the build table.
Default: `0.3`.
**--duplicate-build-keys**
If specified, key columns of the build table are allowed to have duplicates.
**--over-decomposition-factor [INTEGER]**
Partition the input tables into (over-decomposition factor) * (number of GPUs) buckets, which
enables computation-communication overlap. This argument must be an integer >= 1. A higher number
means smaller batches. `1` means no overlap. Default: `1`.
**--communicator [STR]**
This option can be either "UCX" or "NCCL", which controls what communicator to use. Default: `UCX`.
**--registration-method [STR]**
If the UCX communicator is selected, this option can be either "none", "preregistered" or "buffer",
to control how registration is performed for GPUDirect RDMA.
- "none": No preregistration.
- "preregistered": The whole RMM memory pool will be preregistered.
- "buffer": Preregister a set of communication buffers. The communication in distributed join will
go through these buffers.
*/
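/*
Example invocation (a hypothetical sketch; adapt the rank count, process binding, and UCX
environment to your cluster, e.g. via benchmark/run_sample.sh):
  mpirun -n 8 benchmark/distributed_join \
    --key-type int64_t --payload-type int64_t \
    --build-table-nrows 100000000 --probe-table-nrows 100000000 \
    --communicator UCX --registration-method preregistered \
    --over-decomposition-factor 2 --report-timing
*/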
#include "../src/communicator.hpp"
#include "../src/compression.hpp"
#include "../src/distributed_join.hpp"
#include "../src/error.hpp"
#include "../src/generate_table.cuh"
#include "../src/registered_memory_resource.hpp"
#include "../src/setup.hpp"
#include <cudf/table/table.hpp>
#include <cudf/types.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <cuda_profiler_api.h>
#include <mpi.h>
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
static std::string key_type = "int64_t";
static std::string payload_type = "int64_t";
static cudf::size_type BUILD_TABLE_NROWS_EACH_RANK = 100'000'000;
static cudf::size_type PROBE_TABLE_NROWS_EACH_RANK = 100'000'000;
static double SELECTIVITY = 0.3;
static bool IS_BUILD_TABLE_KEY_UNIQUE = true;
static int OVER_DECOMPOSITION_FACTOR = 1;
static std::string COMMUNICATOR_NAME = "UCX";
static std::string REGISTRATION_METHOD = "preregistered";
static int64_t COMMUNICATOR_BUFFER_SIZE = 1'600'000'000LL;
static bool COMPRESSION = false;
static int NVLINK_DOMAIN_SIZE = 1;
static bool REPORT_TIMING = false;
void parse_command_line_arguments(int argc, char *argv[])
{
for (int iarg = 0; iarg < argc; iarg++) {
if (!strcmp(argv[iarg], "--key-type")) { key_type = argv[iarg + 1]; }
if (!strcmp(argv[iarg], "--payload-type")) { payload_type = argv[iarg + 1]; }
if (!strcmp(argv[iarg], "--build-table-nrows")) {
BUILD_TABLE_NROWS_EACH_RANK = atoi(argv[iarg + 1]);
}
if (!strcmp(argv[iarg], "--probe-table-nrows")) {
PROBE_TABLE_NROWS_EACH_RANK = atoi(argv[iarg + 1]);
}
if (!strcmp(argv[iarg], "--selectivity")) { SELECTIVITY = atof(argv[iarg + 1]); }
if (!strcmp(argv[iarg], "--duplicate-build-keys")) { IS_BUILD_TABLE_KEY_UNIQUE = false; }
if (!strcmp(argv[iarg], "--over-decomposition-factor")) {
OVER_DECOMPOSITION_FACTOR = atoi(argv[iarg + 1]);
}
if (!strcmp(argv[iarg], "--communicator")) { COMMUNICATOR_NAME = argv[iarg + 1]; }
if (!strcmp(argv[iarg], "--compression")) { COMPRESSION = true; }
if (!strcmp(argv[iarg], "--registration-method")) { REGISTRATION_METHOD = argv[iarg + 1]; }
if (!strcmp(argv[iarg], "--nvlink-domain-size")) { NVLINK_DOMAIN_SIZE = atoi(argv[iarg + 1]); }
if (!strcmp(argv[iarg], "--report-timing")) { REPORT_TIMING = true; }
}
}
void report_configuration()
{
MPI_CALL(MPI_Barrier(MPI_COMM_WORLD));
int mpi_rank;
int mpi_size;
MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank));
MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &mpi_size));
if (mpi_rank != 0) return;
std::cout << "========== Parameters ==========" << std::endl;
std::cout << std::boolalpha;
std::cout << "Key type: " << key_type << std::endl;
std::cout << "Payload type: " << payload_type << std::endl;
std::cout << "Number of rows in the build table: "
<< static_cast<uint64_t>(BUILD_TABLE_NROWS_EACH_RANK) * mpi_size / 1e6 << " million"
<< std::endl;
std::cout << "Number of rows in the probe table: "
<< static_cast<uint64_t>(PROBE_TABLE_NROWS_EACH_RANK) * mpi_size / 1e6 << " million"
<< std::endl;
std::cout << "Selectivity: " << SELECTIVITY << std::endl;
std::cout << "Keys in build table are unique: " << IS_BUILD_TABLE_KEY_UNIQUE << std::endl;
std::cout << "Over-decomposition factor: " << OVER_DECOMPOSITION_FACTOR << std::endl;
std::cout << "Communicator: " << COMMUNICATOR_NAME << std::endl;
if (COMMUNICATOR_NAME == "UCX")
std::cout << "Registration method: " << REGISTRATION_METHOD << std::endl;
std::cout << "Compression: " << COMPRESSION << std::endl;
std::cout << "NVLink domain size: " << NVLINK_DOMAIN_SIZE << std::endl;
std::cout << "================================" << std::endl;
}
int main(int argc, char *argv[])
{
MPI_CALL(MPI_Init(&argc, &argv));
set_cuda_device();
/* Parse command line arguments */
parse_command_line_arguments(argc, argv);
report_configuration();
cudf::size_type RAND_MAX_VAL =
std::max(BUILD_TABLE_NROWS_EACH_RANK, PROBE_TABLE_NROWS_EACH_RANK) * 2;
/* Initialize communicator and memory pool */
int mpi_rank;
int mpi_size;
MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank));
MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &mpi_size));
Communicator *communicator{nullptr};
  // `registered_mr` holds a reference to the registered memory resource, or *nullptr* if a
  // registered memory resource is not used.
registered_memory_resource *registered_mr{nullptr};
  // pool_mr needs to live on the heap because, for registered memory resources, the memory pool
  // needs to be deallocated before UCX cleanup, which is achieved by explicitly calling the
  // destructor of `pool_mr`.
rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> *pool_mr{nullptr};
setup_memory_pool_and_communicator(communicator,
registered_mr,
pool_mr,
COMMUNICATOR_NAME,
REGISTRATION_METHOD,
COMMUNICATOR_BUFFER_SIZE);
void *preallocated_pinned_buffer;
CUDA_RT_CALL(cudaMallocHost(&preallocated_pinned_buffer, mpi_size * sizeof(size_t)));
/* Warmup nvcomp */
if (COMPRESSION) { warmup_nvcomp(); }
/* Generate build table and probe table on each rank */
std::unique_ptr<cudf::table> left;
std::unique_ptr<cudf::table> right;
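// The following helper macros dispatch the runtime key/payload type strings to the templated
// generate_tables_distributed<KEY_T, PAYLOAD_T> call.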
#define generate_tables(KEY_T, PAYLOAD_T) \
{ \
std::tie(left, right) = \
generate_tables_distributed<KEY_T, PAYLOAD_T>(BUILD_TABLE_NROWS_EACH_RANK, \
PROBE_TABLE_NROWS_EACH_RANK, \
SELECTIVITY, \
RAND_MAX_VAL, \
IS_BUILD_TABLE_KEY_UNIQUE, \
communicator); \
}
#define generate_tables_key_type(KEY_T) \
{ \
if (payload_type == "int64_t") { \
generate_tables(KEY_T, int64_t) \
} else if (payload_type == "int32_t") { \
generate_tables(KEY_T, int32_t) \
} else { \
throw std::runtime_error("Unknown payload type"); \
} \
}
if (key_type == "int64_t") {
generate_tables_key_type(int64_t)
} else if (key_type == "int32_t") {
generate_tables_key_type(int32_t)
} else {
throw std::runtime_error("Unknown key type");
}
/* Generate compression options */
std::vector<ColumnCompressionOptions> left_compression_options =
generate_compression_options_distributed(left->view(), COMPRESSION);
std::vector<ColumnCompressionOptions> right_compression_options =
generate_compression_options_distributed(right->view(), COMPRESSION);
/* Distributed join */
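  // With OVER_DECOMPOSITION_FACTOR > 1, the join is processed in multiple batches per rank so that
  // communication of one batch can overlap with computation on another (see the parameter notes above).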
CUDA_RT_CALL(cudaDeviceSynchronize());
MPI_Barrier(MPI_COMM_WORLD);
cudaProfilerStart();
double start = MPI_Wtime();
std::unique_ptr<cudf::table> join_result = distributed_inner_join(left->view(),
right->view(),
{0},
{0},
communicator,
left_compression_options,
right_compression_options,
OVER_DECOMPOSITION_FACTOR,
REPORT_TIMING,
preallocated_pinned_buffer,
NVLINK_DOMAIN_SIZE);
MPI_Barrier(MPI_COMM_WORLD);
double stop = MPI_Wtime();
cudaProfilerStop();
if (mpi_rank == 0) { std::cout << "Elasped time (s) " << stop - start << std::endl; }
/* Cleanup */
left.reset();
right.reset();
join_result.reset();
CUDA_RT_CALL(cudaFreeHost(preallocated_pinned_buffer));
CUDA_RT_CALL(cudaDeviceSynchronize());
destroy_memory_pool_and_communicator(
communicator, registered_mr, pool_mr, COMMUNICATOR_NAME, REGISTRATION_METHOD);
MPI_CALL(MPI_Finalize());
return 0;
}
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/benchmark/all_to_all.cpp
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../src/communicator.hpp"
#include "../src/error.hpp"
#include "../src/setup.hpp"
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <cuda_profiler_api.h>
#include <cuda_runtime.h>
#include <mpi.h>
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>
static int REPEAT = 4;
static std::string COMMUNICATOR_NAME = "UCX";
static std::string REGISTRATION_METHOD = "preregistered";
static int64_t COMMUNICATOR_BUFFER_SIZE = 25'000'000LL;
static constexpr int64_t WARMUP_BUFFER_SIZE = 4'000'000LL;
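// Message sizes to benchmark: each entry is (roughly) the total number of bytes a rank sends per
// all-to-all iteration, split evenly across the other ranks (size / mpi_size bytes per peer).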
static const std::vector<int64_t> SIZES{1'000'000LL,
2'000'000LL,
4'000'000LL,
8'000'000LL,
16'000'000LL,
32'000'000LL,
64'000'000LL,
128'000'000LL,
256'000'000LL,
512'000'000LL,
1024'000'000LL,
2048'000'000LL,
4096'000'000LL};
void parse_command_line_arguments(int argc, char *argv[])
{
for (int iarg = 0; iarg < argc; iarg++) {
if (!strcmp(argv[iarg], "--repeat")) { REPEAT = atoi(argv[iarg + 1]); }
if (!strcmp(argv[iarg], "--communicator")) { COMMUNICATOR_NAME = argv[iarg + 1]; }
if (!strcmp(argv[iarg], "--registration-method")) { REGISTRATION_METHOD = argv[iarg + 1]; }
if (!strcmp(argv[iarg], "--buffer-size")) { COMMUNICATOR_BUFFER_SIZE = atol(argv[iarg + 1]); }
}
}
void report_configuration()
{
MPI_CALL(MPI_Barrier(MPI_COMM_WORLD));
int mpi_rank;
MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank));
if (mpi_rank != 0) return;
std::cout << "========== Parameters ==========" << std::endl;
std::cout << std::boolalpha;
std::cout << "Communicator: " << COMMUNICATOR_NAME << std::endl;
if (COMMUNICATOR_NAME == "UCX") {
std::cout << "Registration method: " << REGISTRATION_METHOD << std::endl;
if (REGISTRATION_METHOD == "buffer")
std::cout << "Communicator buffer size: " << COMMUNICATOR_BUFFER_SIZE << std::endl;
}
std::cout << "Repeat: " << REPEAT << std::endl;
std::cout << "================================" << std::endl;
}
void run_all_to_all(int64_t size,
Communicator *communicator,
rmm::mr::device_memory_resource *mr,
bool print_result = true)
{
int mpi_rank;
int mpi_size;
MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank));
MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &mpi_size));
/* Allocate send/recv buffers */
std::vector<void *> send_buffer(mpi_size, nullptr);
std::vector<void *> recv_buffer(mpi_size, nullptr);
for (int irank = 0; irank < mpi_size; irank++) {
if (irank == mpi_rank) continue;
send_buffer[irank] = mr->allocate(size / mpi_size, rmm::cuda_stream_default);
recv_buffer[irank] = mr->allocate(size / mpi_size, rmm::cuda_stream_default);
}
CUDA_RT_CALL(cudaStreamSynchronize(0));
/* Communication */
MPI_Barrier(MPI_COMM_WORLD);
cudaProfilerStart();
double start = MPI_Wtime();
for (int run = 0; run < REPEAT; run++) {
communicator->start();
for (int irank = 0; irank < mpi_size; irank++) {
if (irank != mpi_rank) communicator->send(send_buffer[irank], size / mpi_size, 1, irank);
}
for (int irank = 0; irank < mpi_size; irank++) {
if (irank != mpi_rank) communicator->recv(recv_buffer[irank], size / mpi_size, 1, irank);
}
communicator->stop();
}
double stop = MPI_Wtime();
cudaProfilerStop();
MPI_Barrier(MPI_COMM_WORLD);
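  // Per-GPU bandwidth: each rank exchanges size / mpi_size bytes with each of the other
  // (mpi_size - 1) ranks, REPEAT times, divided by the measured elapsed time.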
if (mpi_rank == 0 && print_result) {
std::cout << "Size (MB): " << size / 1e6 << ", "
<< "Elasped time (s): " << stop - start << ", "
<< "Bandwidth per GPU (GB/s): "
<< (double)size / mpi_size * (mpi_size - 1) * REPEAT / (stop - start) / 1e9
<< std::endl;
}
/* Deallocate send/recv buffers */
  for (int irank = 0; irank < mpi_size; irank++) {
    if (irank == mpi_rank) continue;
    mr->deallocate(send_buffer[irank], size / mpi_size, rmm::cuda_stream_default);
    mr->deallocate(recv_buffer[irank], size / mpi_size, rmm::cuda_stream_default);
  }
CUDA_RT_CALL(cudaStreamSynchronize(0));
}
int main(int argc, char *argv[])
{
MPI_CALL(MPI_Init(&argc, &argv));
set_cuda_device();
int mpi_rank;
int mpi_size;
MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank));
MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &mpi_size));
/* Parse command line arguments */
parse_command_line_arguments(argc, argv);
report_configuration();
/* Initialize communicator and memory pool */
Communicator *communicator{nullptr};
registered_memory_resource *registered_mr{nullptr};
rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> *pool_mr{nullptr};
setup_memory_pool_and_communicator(communicator,
registered_mr,
pool_mr,
COMMUNICATOR_NAME,
REGISTRATION_METHOD,
COMMUNICATOR_BUFFER_SIZE);
/* Warmup */
run_all_to_all(WARMUP_BUFFER_SIZE, communicator, pool_mr, false);
/* Benchmark */
for (const int64_t &size : SIZES) run_all_to_all(size, communicator, pool_mr, true);
/* Cleanup */
destroy_memory_pool_and_communicator(
communicator, registered_mr, pool_mr, COMMUNICATOR_NAME, REGISTRATION_METHOD);
MPI_CALL(MPI_Finalize());
return 0;
}
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/benchmark/run_sample.sh
|
#!/bin/bash
#lrank=$SLURM_LOCALID
lrank=$OMPI_COMM_WORLD_LOCAL_RANK
export CUDA_DEVICE_MAX_CONNECTIONS=1
# APP="build/bin/benchmark/all_to_all --repeat 1"
APP="build/bin/benchmark/distributed_join"
# this is the list of GPUs we have
GPUS=(0 1 2 3 4 5 6 7)
# This is the list of NICs we should use for each GPU
# e.g., associate GPU0,1 with MLX0, GPU2,3 with MLX1, GPU4,5 with MLX2 and GPU6,7 with MLX3
NICS=(mlx5_0 mlx5_0 mlx5_1 mlx5_1 mlx5_2 mlx5_2 mlx5_3 mlx5_3)
# This is the list of CPU cores we should use for each GPU
# e.g., 2x20 core CPUs split into 4 threads per process with correct NUMA assignment
CPUS=(1-4 5-8 10-13 15-18 21-24 25-28 30-33 35-38)
# this is the order we want the GPUs to be assigned in (e.g. for NVLink connectivity)
REORDER=(0 1 2 3 4 5 6 7)
# now, given the REORDER array, we set CUDA_VISIBLE_DEVICES, NIC_REORDER and CPU_REORDER for this mapping
export CUDA_VISIBLE_DEVICES="${GPUS[${REORDER[0]}]},${GPUS[${REORDER[1]}]},${GPUS[${REORDER[2]}]},${GPUS[${REORDER[3]}]},${GPUS[${REORDER[4]}]},${GPUS[${REORDER[5]}]},${GPUS[${REORDER[6]}]},${GPUS[${REORDER[7]}]}"
NIC_REORDER=(${NICS[${REORDER[0]}]} ${NICS[${REORDER[1]}]} ${NICS[${REORDER[2]}]} ${NICS[${REORDER[3]}]} ${NICS[${REORDER[4]}]} ${NICS[${REORDER[5]}]} ${NICS[${REORDER[6]}]} ${NICS[${REORDER[7]}]})
CPU_REORDER=(${CPUS[${REORDER[0]}]} ${CPUS[${REORDER[1]}]} ${CPUS[${REORDER[2]}]} ${CPUS[${REORDER[3]}]} ${CPUS[${REORDER[4]}]} ${CPUS[${REORDER[5]}]} ${CPUS[${REORDER[6]}]} ${CPUS[${REORDER[7]}]})
export UCX_NET_DEVICES=${NIC_REORDER[lrank]}:1
export UCX_MEMTYPE_CACHE=n
export UCX_TLS=rc,cuda_copy,cuda_ipc
export UCX_WARN_UNUSED_ENV_VARS=n
#export UCX_IB_GPU_DIRECT_RDMA=no
#export UCX_IB_REG_METHODS=rcache
#export UCX_RNDV_THRESH=8192
export UCX_RNDV_SCHEME=put_zcopy
echo "rank" $lrank "gpu list" $CUDA_VISIBLE_DEVICES "cpu bind" ${CPU_REORDER[$lrank]} "ndev" $UCX_NET_DEVICES
numactl --physcpubind=${CPU_REORDER[$lrank]} $APP
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/benchmark/utility.hpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../src/compression.hpp"
#include <nvcomp/cascaded.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cstdint>
#include <vector>
/**
 * Calculate the table size in bytes.
 *
 * Note: This function only supports tables with fixed-width and string columns.
*/
inline int64_t calculate_table_size(cudf::table_view input_table)
{
int64_t table_size = 0;
  for (auto &current_column : input_table) {
cudf::data_type dtype = current_column.type();
if (cudf::is_fixed_width(dtype)) {
table_size += (cudf::size_of(dtype) * current_column.size());
} else {
assert(dtype.id() == cudf::type_id::STRING);
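      // For string columns, child(1) is the character data; its size() gives the number of bytes
      // of string payload (offsets and null mask are not counted).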
table_size += current_column.child(1).size();
}
}
return table_size;
}
inline void print_compression_options(std::vector<ColumnCompressionOptions> &compression_options)
{
for (size_t icol = 0; icol < compression_options.size(); icol++) {
nvcompCascadedFormatOpts format = compression_options[icol].cascaded_format;
std::cout << "Column " << icol << " RLE=" << format.num_RLEs << ", Delta=" << format.num_deltas
<< ", Bitpack=" << format.use_bp << std::endl;
}
}
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/benchmark/tpch.cpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
This benchmark expects split TPC-H lineitem and orders tables in parquet format. The
lineitem tables must be named "lineitem00.parquet", "lineitem01.parquet", etc. The orders tables must
be named "orders00.parquet", "orders01.parquet", etc. Each rank will read its corresponding split
files. For example, rank 0 will read "lineitem00.parquet" and "orders00.parquet"; rank 2 will read
"lineitem02.parquet" and "orders02.parquet".
To get the split parquet files, we can
1. Use `tpch-dbgen` to generate TPC-H tables with desired scale factor. See
https://github.com/electrum/tpch-dbgen
2. Split the generated tables, e.g.
split -C <size-of-each-split-file> --numeric-suffixes lineitem.tbl lineitem
3. Convert the split tables to parquet format, e.g.
python scripts/tpch_to_parquet.py <path-to-folder-with-split-files>
Parameters:
--data-folder The folder containing the split parquet files.
--orders Comma-separated list of columns for the orders table. Must contain the join key O_ORDERKEY.
--lineitem Comma-separated list of columns for the lineitem table. Must contain the join key L_ORDERKEY.
--compression If specified, compress data before all-to-all communication.
Example:
UCX_MEMTYPE_CACHE=n UCX_TLS=sm,cuda_copy,cuda_ipc mpirun -n 4 --cpus-per-rank 2 benchmark/tpch
--data-folder <path-to-data-folder> --orders O_ORDERKEY --lineitem L_ORDERKEY,L_SHIPDATE,L_SUPPKEY
--compression
*/
#include "../src/all_to_all_comm.hpp"
#include "../src/compression.hpp"
#include "../src/distributed_join.hpp"
#include "../src/setup.hpp"
#include "utility.hpp"
#include <cudf/column/column_view.hpp>
#include <cudf/io/parquet.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <mpi.h>
#include <cuda_runtime.h>
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <iomanip>
#include <iterator>
#include <sstream>
#include <string>
#include <vector>
static std::string data_folderpath;
static std::vector<std::string> orders_columns;
static std::vector<std::string> lineitem_columns;
static bool compression = false;
std::vector<std::string> split(char *str)
{
std::vector<std::string> split_result;
char *ptr = strtok(str, ",");
while (ptr != NULL) {
split_result.emplace_back(ptr);
ptr = strtok(NULL, ",");
}
return split_result;
}
void parse_command_line_arguments(int argc, char *argv[])
{
for (int iarg = 0; iarg < argc; iarg++) {
if (!strcmp(argv[iarg], "--data-folder")) { data_folderpath = argv[iarg + 1]; }
if (!strcmp(argv[iarg], "--orders")) { orders_columns = split(argv[iarg + 1]); }
if (!strcmp(argv[iarg], "--lineitem")) { lineitem_columns = split(argv[iarg + 1]); }
if (!strcmp(argv[iarg], "--compression")) { compression = true; }
}
}
void report_configuration()
{
MPI_CALL(MPI_Barrier(MPI_COMM_WORLD));
int mpi_rank;
int mpi_size;
MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank));
MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &mpi_size));
if (mpi_rank != 0) return;
std::cout << "========== Parameters ==========" << std::endl;
std::cout << std::boolalpha;
std::cout << "Data folder: " << data_folderpath << std::endl;
std::cout << "Lineitem columns: ";
std::copy(lineitem_columns.begin(),
lineitem_columns.end(),
std::ostream_iterator<std::string>(std::cout, " "));
std::cout << std::endl;
std::cout << "Orders columns: ";
std::copy(orders_columns.begin(),
orders_columns.end(),
std::ostream_iterator<std::string>(std::cout, " "));
std::cout << std::endl;
std::cout << "Compression: " << compression << std::endl;
std::cout << "================================" << std::endl;
}
int main(int argc, char *argv[])
{
MPI_CALL(MPI_Init(&argc, &argv));
set_cuda_device();
parse_command_line_arguments(argc, argv);
report_configuration();
// Initialize communicator and memory pool
Communicator *communicator{nullptr};
registered_memory_resource *registered_mr{nullptr};
rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> *pool_mr{nullptr};
setup_memory_pool_and_communicator(
communicator, registered_mr, pool_mr, "UCX", "preregistered", 0);
void *preallocated_pinned_buffer;
CUDA_RT_CALL(
cudaMallocHost(&preallocated_pinned_buffer, communicator->mpi_size * sizeof(size_t)));
// Read input tables
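  // Each rank reads the split files that match its zero-padded rank index, e.g. rank 3 reads
  // orders03.parquet and lineitem03.parquet.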
std::stringstream index_string_stream;
index_string_stream << std::setw(2) << std::setfill('0') << communicator->mpi_rank;
std::string index_string = index_string_stream.str();
std::string orders_filepath = data_folderpath + "/orders" + index_string + ".parquet";
std::string lineitem_filepath = data_folderpath + "/lineitem" + index_string + ".parquet";
cudf::io::parquet_reader_options orders_options =
cudf::io::parquet_reader_options::builder(cudf::io::source_info(orders_filepath));
orders_options.set_columns(orders_columns);
auto orders_table = cudf::io::read_parquet(orders_options);
cudf::io::parquet_reader_options lineitem_options =
cudf::io::parquet_reader_options::builder(cudf::io::source_info(lineitem_filepath));
lineitem_options.set_columns(lineitem_columns);
auto lineitem_table = cudf::io::read_parquet(lineitem_options);
/*
// Print the data types used for the lineitem table for debugging
for (cudf::size_type icol = 0; icol < lineitem_table.tbl->view().num_columns(); icol++) {
std::cout << (int32_t)lineitem_table.tbl->view().column(icol).type().id() << " ";
}
std::cout << std::endl;
*/
// Calculate input sizes
int64_t input_size_irank = 0;
int64_t input_size_total;
input_size_irank += calculate_table_size(orders_table.tbl->view());
input_size_irank += calculate_table_size(lineitem_table.tbl->view());
MPI_CALL(
MPI_Allreduce(&input_size_irank, &input_size_total, 1, MPI_INT64_T, MPI_SUM, MPI_COMM_WORLD));
// Warmup all-to-all
warmup_all_to_all(communicator);
// Warmup nvcomp
if (compression) { warmup_nvcomp(); }
// Generate compression options
std::vector<ColumnCompressionOptions> orders_compression_options =
generate_compression_options_distributed(orders_table.tbl->view(), compression);
std::vector<ColumnCompressionOptions> lineitem_compression_options =
generate_compression_options_distributed(lineitem_table.tbl->view(), compression);
if (communicator->mpi_rank == 0) {
std::cout << "Orders table compression options: " << std::endl;
print_compression_options(orders_compression_options);
std::cout << "Lineitem table compression options: " << std::endl;
print_compression_options(lineitem_compression_options);
}
// Perform distributed join
CUDA_RT_CALL(cudaDeviceSynchronize());
MPI_Barrier(MPI_COMM_WORLD);
double start = MPI_Wtime();
auto join_result = distributed_inner_join(orders_table.tbl->view(),
lineitem_table.tbl->view(),
{0},
{0},
communicator,
orders_compression_options,
lineitem_compression_options,
1,
true,
preallocated_pinned_buffer);
MPI_Barrier(MPI_COMM_WORLD);
double stop = MPI_Wtime();
if (communicator->mpi_rank == 0) {
double elapsed_time = stop - start;
std::cout << "Average size per rank (GB): " << input_size_total / communicator->mpi_size / 1e9
<< std::endl;
std::cout << "Elasped time (s): " << elapsed_time << std::endl;
std::cout << "Throughput (GB/s): " << input_size_total / 1e9 / elapsed_time << std::endl;
}
// Cleanup
join_result.reset();
lineitem_table.tbl.reset();
orders_table.tbl.reset();
CUDA_RT_CALL(cudaFreeHost(preallocated_pinned_buffer));
destroy_memory_pool_and_communicator(
communicator, registered_mr, pool_mr, "UCX", "preregistered");
MPI_CALL(MPI_Finalize());
return 0;
}
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/scripts/tpch_to_parquet.py
|
"""
Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
import os.path
import pyarrow
import pyarrow.csv
import pyarrow.parquet
from typing import List, Dict
col_names: Dict[str, List[str]] = {
"lineitem": [
"L_ORDERKEY", "L_PARTKEY", "L_SUPPKEY", "L_LINENUMBER", "L_QUANTITY",
"L_EXTENDEDPRICE", "L_DISCOUNT", "L_TAX", "L_RETURNFLAG", "L_LINESTATUS",
"L_SHIPDATE", "L_COMMITDATE", "L_RECEIPTDATE", "L_SHIPINSTRUCT", "L_SHIPMODE",
"L_COMMENT", "PLACEHOLDER"],
"orders": [
"O_ORDERKEY", "O_CUSTKEY", "O_ORDERSTATUS", "O_TOTALPRICE", "O_ORDERDATE",
"O_ORDERPRIORITY", "O_CLERK", "O_SHIPPRIORITY", "O_COMMENT", "PLACEHOLDER"]
}
col_types: Dict[str, Dict[str, pyarrow.lib.DataType]] = {
"lineitem": {
"L_ORDERKEY": pyarrow.int64(),
"L_PARTKEY": pyarrow.int64(),
"L_SUPPKEY": pyarrow.int64(),
"L_LINENUMBER": pyarrow.int32(),
# Note: Decimal type with precision is not supported in cuDF at the moment
# See: https://github.com/rapidsai/cudf/issues/6656
# "L_QUANTITY": pyarrow.decimal128(12, 2),
# "L_EXTENDEDPRICE": pyarrow.decimal128(12, 2),
# "L_DISCOUNT": pyarrow.decimal128(12, 2),
# "L_TAX": pyarrow.decimal128(12, 2),
"L_RETURNFLAG": pyarrow.string(),
"L_LINESTATUS": pyarrow.string(),
# TODO: From TPC-H specification, it should be possible to represent dates as 32-bit
# integers, but pyarrow currently does not support that, with the following error
# pyarrow.lib.ArrowNotImplementedError: CSV conversion to date32[day] is not supported
# "L_SHIPDATE": pyarrow.date32(),
# "L_COMMITDATE": pyarrow.date32(),
# "L_RECEIPTDATE": pyarrow.date32(),
"L_SHIPINSTRUCT": pyarrow.string(),
"L_SHIPMODE": pyarrow.string(),
"L_COMMENT": pyarrow.string()
},
"orders": {
"O_ORDERKEY": pyarrow.int64(),
"O_CUSTKEY": pyarrow.int64(),
"O_ORDERSTATUS": pyarrow.string(),
# "O_TOTALPRICE": pyarrow.decimal128(12, 2),
# "O_ORDERDATE": pyarrow.date32(),
"O_ORDERPRIORITY": pyarrow.string(),
"O_CLERK": pyarrow.string(),
"O_SHIPPRIORITY": pyarrow.int32(),
"O_COMMENT": pyarrow.string()
}
}
def tpch_to_parquet(path: str, prefix: str) -> None:
# TODO: Process each input file in chunks.
# Currently, this function loads each input file into memory before writing to disk in Parquet
    # format. This requires enough memory to hold the entire table. To get around this requirement,
# we could process each input file in small chunks. However, when I implemented it, I got the
# following error:
# TypeError: Cannot convert pyarrow.lib.Int64Array to pyarrow.lib.RecordBatch
# when converting a chunk to a pyarrow table.
input_paths: List[str] = []
for filename in os.listdir(path):
input_path = os.path.join(path, filename)
if filename.startswith(prefix) and os.path.isfile(input_path) \
and not filename.endswith(".parquet"):
input_paths.append(input_path)
for input_path in input_paths:
input_table = pyarrow.csv.read_csv(
input_path,
read_options=pyarrow.csv.ReadOptions(
use_threads=True,
column_names=col_names[prefix],
autogenerate_column_names=False),
parse_options=pyarrow.csv.ParseOptions(delimiter="|"),
convert_options=pyarrow.csv.ConvertOptions(
include_columns=col_names[prefix][:-1],
column_types=col_types[prefix]))
        parquet_writer = pyarrow.parquet.ParquetWriter(
            input_path + ".parquet", input_table.schema, compression="snappy")
        parquet_writer.write_table(input_table)
        parquet_writer.close()
def main():
argument_parser = argparse.ArgumentParser()
argument_parser.add_argument(
"path", type=str, help="path to the directory containing generated TPC-H tables")
arguments = argument_parser.parse_args()
path: str = arguments.path
tpch_to_parquet(path, "lineitem")
tpch_to_parquet(path, "orders")
if __name__ == '__main__':
main()
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/doc/tpch_perf.svg
|
[doc/tpch_perf.svg: matplotlib-generated SVG plot of distributed TPC-H join performance (throughput vs. number of GPUs). X-axis "#GPUs" with ticks at 32, 64, 128, 256, 512, 1024; Y-axis "Throughput [GB/s]" with ticks at 512, 1024, 2048, 4096, 8192, 16384. The remaining SVG markup consists only of rendered glyph and path data.]
Q 51.5625 17.328125 51.5625 20.75
Q 51.5625 24.75 49.515625 27.703125
Q 47.46875 30.671875 43.828125 31.859375
Q 40.1875 33.0625 33.34375 33.0625
L 16.796875 33.0625
z
" id="ArialMT-66"/>
<path d="M 0 -1.21875
L 20.75 72.796875
L 27.78125 72.796875
L 7.078125 -1.21875
z
" id="ArialMT-47"/>
<path d="M 21.296875 -19.875
L 1.90625 -19.875
L 1.90625 -12.59375
L 12.5 -12.59375
L 12.5 64.3125
L 1.90625 64.3125
L 1.90625 71.578125
L 21.296875 71.578125
z
" id="ArialMT-93"/>
</defs>
<use xlink:href="#ArialMT-84"/>
<use x="61.083984" xlink:href="#ArialMT-104"/>
<use x="116.699219" xlink:href="#ArialMT-114"/>
<use x="150" xlink:href="#ArialMT-111"/>
<use x="205.615234" xlink:href="#ArialMT-117"/>
<use x="261.230469" xlink:href="#ArialMT-103"/>
<use x="316.845703" xlink:href="#ArialMT-104"/>
<use x="372.460938" xlink:href="#ArialMT-112"/>
<use x="428.076172" xlink:href="#ArialMT-117"/>
<use x="483.691406" xlink:href="#ArialMT-116"/>
<use x="511.474609" xlink:href="#ArialMT-32"/>
<use x="539.257812" xlink:href="#ArialMT-91"/>
<use x="567.041016" xlink:href="#ArialMT-71"/>
<use x="644.824219" xlink:href="#ArialMT-66"/>
<use x="711.523438" xlink:href="#ArialMT-47"/>
<use x="739.306641" xlink:href="#ArialMT-115"/>
<use x="789.306641" xlink:href="#ArialMT-93"/>
</g>
</g>
</g>
<g id="patch_3">
<path d="M 72 352.44
L 72 47.52
" style="fill:none;"/>
</g>
<g id="patch_4">
<path d="M 518.4 352.44
L 518.4 47.52
" style="fill:none;"/>
</g>
<g id="patch_5">
<path d="M 72 352.44
L 518.4 352.44
" style="fill:none;"/>
</g>
<g id="patch_6">
<path d="M 72 47.52
L 518.4 47.52
" style="fill:none;"/>
</g>
</g>
<g id="axes_2">
<g id="matplotlib.axis_3">
<g id="ytick_7">
<g id="line2d_40">
<path clip-path="url(#p7bc5c21ee2)" d="M 72 335.751041
L 518.4 335.751041
" style="fill:none;stroke:#ffffff;stroke-linecap:round;"/>
</g>
<g id="line2d_41"/>
<g id="text_15">
<!-- 64 -->
<g style="fill:#262626;" transform="translate(525.4 339.329948)scale(0.1 -0.1)">
<use xlink:href="#ArialMT-54"/>
<use x="55.615234" xlink:href="#ArialMT-52"/>
</g>
</g>
</g>
<g id="ytick_8">
<g id="line2d_42">
<path clip-path="url(#p7bc5c21ee2)" d="M 72 278.104833
L 518.4 278.104833
" style="fill:none;stroke:#ffffff;stroke-linecap:round;"/>
</g>
<g id="line2d_43"/>
<g id="text_16">
<!-- 128 -->
<g style="fill:#262626;" transform="translate(525.4 281.683739)scale(0.1 -0.1)">
<use xlink:href="#ArialMT-49"/>
<use x="55.615234" xlink:href="#ArialMT-50"/>
<use x="111.230469" xlink:href="#ArialMT-56"/>
</g>
</g>
</g>
<g id="ytick_9">
<g id="line2d_44">
<path clip-path="url(#p7bc5c21ee2)" d="M 72 220.458625
L 518.4 220.458625
" style="fill:none;stroke:#ffffff;stroke-linecap:round;"/>
</g>
<g id="line2d_45"/>
<g id="text_17">
<!-- 256 -->
<g style="fill:#262626;" transform="translate(525.4 224.037531)scale(0.1 -0.1)">
<use xlink:href="#ArialMT-50"/>
<use x="55.615234" xlink:href="#ArialMT-53"/>
<use x="111.230469" xlink:href="#ArialMT-54"/>
</g>
</g>
</g>
<g id="ytick_10">
<g id="line2d_46">
<path clip-path="url(#p7bc5c21ee2)" d="M 72 162.812417
L 518.4 162.812417
" style="fill:none;stroke:#ffffff;stroke-linecap:round;"/>
</g>
<g id="line2d_47"/>
<g id="text_18">
<!-- 512 -->
<g style="fill:#262626;" transform="translate(525.4 166.391323)scale(0.1 -0.1)">
<use xlink:href="#ArialMT-53"/>
<use x="55.615234" xlink:href="#ArialMT-49"/>
<use x="111.230469" xlink:href="#ArialMT-50"/>
</g>
</g>
</g>
<g id="ytick_11">
<g id="line2d_48">
<path clip-path="url(#p7bc5c21ee2)" d="M 72 105.166208
L 518.4 105.166208
" style="fill:none;stroke:#ffffff;stroke-linecap:round;"/>
</g>
<g id="line2d_49"/>
<g id="text_19">
<!-- 1024 -->
<g style="fill:#262626;" transform="translate(525.4 108.745115)scale(0.1 -0.1)">
<use xlink:href="#ArialMT-49"/>
<use x="55.615234" xlink:href="#ArialMT-48"/>
<use x="111.230469" xlink:href="#ArialMT-50"/>
<use x="166.845703" xlink:href="#ArialMT-52"/>
</g>
</g>
</g>
<g id="ytick_12">
<g id="line2d_50"/>
</g>
<g id="ytick_13">
<g id="line2d_51"/>
</g>
<g id="ytick_14">
<g id="line2d_52"/>
</g>
<g id="ytick_15">
<g id="line2d_53"/>
</g>
<g id="ytick_16">
<g id="line2d_54"/>
</g>
<g id="ytick_17">
<g id="line2d_55"/>
</g>
<g id="ytick_18">
<g id="line2d_56"/>
</g>
<g id="ytick_19">
<g id="line2d_57"/>
</g>
<g id="ytick_20">
<g id="line2d_58"/>
</g>
<g id="ytick_21">
<g id="line2d_59"/>
</g>
<g id="ytick_22">
<g id="line2d_60"/>
</g>
<g id="ytick_23">
<g id="line2d_61"/>
</g>
<g id="ytick_24">
<g id="line2d_62"/>
</g>
<g id="text_20">
<!-- Throughput [billion tuples/s] -->
<g style="fill:#262626;" transform="translate(559.651406 267.237266)rotate(-90)scale(0.11 -0.11)">
<defs>
<path d="M 14.703125 0
L 6.546875 0
L 6.546875 71.578125
L 15.328125 71.578125
L 15.328125 46.046875
Q 20.90625 53.03125 29.546875 53.03125
Q 34.328125 53.03125 38.59375 51.09375
Q 42.875 49.171875 45.625 45.671875
Q 48.390625 42.1875 49.953125 37.25
Q 51.515625 32.328125 51.515625 26.703125
Q 51.515625 13.375 44.921875 6.09375
Q 38.328125 -1.171875 29.109375 -1.171875
Q 19.921875 -1.171875 14.703125 6.5
z
M 14.59375 26.3125
Q 14.59375 17 17.140625 12.84375
Q 21.296875 6.0625 28.375 6.0625
Q 34.125 6.0625 38.328125 11.0625
Q 42.53125 16.0625 42.53125 25.984375
Q 42.53125 36.140625 38.5 40.96875
Q 34.46875 45.796875 28.765625 45.796875
Q 23 45.796875 18.796875 40.796875
Q 14.59375 35.796875 14.59375 26.3125
z
" id="ArialMT-98"/>
<path d="M 6.640625 61.46875
L 6.640625 71.578125
L 15.4375 71.578125
L 15.4375 61.46875
z
M 6.640625 0
L 6.640625 51.859375
L 15.4375 51.859375
L 15.4375 0
z
" id="ArialMT-105"/>
<path d="M 6.390625 0
L 6.390625 71.578125
L 15.1875 71.578125
L 15.1875 0
z
" id="ArialMT-108"/>
<path d="M 6.59375 0
L 6.59375 51.859375
L 14.5 51.859375
L 14.5 44.484375
Q 20.21875 53.03125 31 53.03125
Q 35.6875 53.03125 39.625 51.34375
Q 43.5625 49.65625 45.515625 46.921875
Q 47.46875 44.1875 48.25 40.4375
Q 48.734375 37.984375 48.734375 31.890625
L 48.734375 0
L 39.9375 0
L 39.9375 31.546875
Q 39.9375 36.921875 38.90625 39.578125
Q 37.890625 42.234375 35.28125 43.8125
Q 32.671875 45.40625 29.15625 45.40625
Q 23.53125 45.40625 19.453125 41.84375
Q 15.375 38.28125 15.375 28.328125
L 15.375 0
z
" id="ArialMT-110"/>
<path d="M 42.09375 16.703125
L 51.171875 15.578125
Q 49.03125 7.625 43.21875 3.21875
Q 37.40625 -1.171875 28.375 -1.171875
Q 17 -1.171875 10.328125 5.828125
Q 3.65625 12.84375 3.65625 25.484375
Q 3.65625 38.578125 10.390625 45.796875
Q 17.140625 53.03125 27.875 53.03125
Q 38.28125 53.03125 44.875 45.953125
Q 51.46875 38.875 51.46875 26.03125
Q 51.46875 25.25 51.421875 23.6875
L 12.75 23.6875
Q 13.234375 15.140625 17.578125 10.59375
Q 21.921875 6.0625 28.421875 6.0625
Q 33.25 6.0625 36.671875 8.59375
Q 40.09375 11.140625 42.09375 16.703125
z
M 13.234375 30.90625
L 42.1875 30.90625
Q 41.609375 37.453125 38.875 40.71875
Q 34.671875 45.796875 27.984375 45.796875
Q 21.921875 45.796875 17.796875 41.75
Q 13.671875 37.703125 13.234375 30.90625
z
" id="ArialMT-101"/>
</defs>
<use xlink:href="#ArialMT-84"/>
<use x="61.083984" xlink:href="#ArialMT-104"/>
<use x="116.699219" xlink:href="#ArialMT-114"/>
<use x="150" xlink:href="#ArialMT-111"/>
<use x="205.615234" xlink:href="#ArialMT-117"/>
<use x="261.230469" xlink:href="#ArialMT-103"/>
<use x="316.845703" xlink:href="#ArialMT-104"/>
<use x="372.460938" xlink:href="#ArialMT-112"/>
<use x="428.076172" xlink:href="#ArialMT-117"/>
<use x="483.691406" xlink:href="#ArialMT-116"/>
<use x="511.474609" xlink:href="#ArialMT-32"/>
<use x="539.257812" xlink:href="#ArialMT-91"/>
<use x="567.041016" xlink:href="#ArialMT-98"/>
<use x="622.65625" xlink:href="#ArialMT-105"/>
<use x="644.873047" xlink:href="#ArialMT-108"/>
<use x="667.089844" xlink:href="#ArialMT-108"/>
<use x="689.306641" xlink:href="#ArialMT-105"/>
<use x="711.523438" xlink:href="#ArialMT-111"/>
<use x="767.138672" xlink:href="#ArialMT-110"/>
<use x="822.753906" xlink:href="#ArialMT-32"/>
<use x="850.537109" xlink:href="#ArialMT-116"/>
<use x="878.320312" xlink:href="#ArialMT-117"/>
<use x="933.935547" xlink:href="#ArialMT-112"/>
<use x="989.550781" xlink:href="#ArialMT-108"/>
<use x="1011.767578" xlink:href="#ArialMT-101"/>
<use x="1067.382812" xlink:href="#ArialMT-115"/>
<use x="1117.382812" xlink:href="#ArialMT-47"/>
<use x="1145.166016" xlink:href="#ArialMT-115"/>
<use x="1195.166016" xlink:href="#ArialMT-93"/>
</g>
</g>
</g>
<g id="line2d_63">
<path clip-path="url(#p7bc5c21ee2)" d="M 84.828074 336.088519
L 169.221578 289.363596
L 253.615083 243.134453
L 338.008587 196.455067
L 422.402091 156.648423
L 506.795596 115.817809
" style="fill:none;stroke:#4c72b0;stroke-linecap:round;stroke-width:1.75;"/>
<defs>
<path d="M 0 3.5
C 0.928211 3.5 1.81853 3.131218 2.474874 2.474874
C 3.131218 1.81853 3.5 0.928211 3.5 0
C 3.5 -0.928211 3.131218 -1.81853 2.474874 -2.474874
C 1.81853 -3.131218 0.928211 -3.5 0 -3.5
C -0.928211 -3.5 -1.81853 -3.131218 -2.474874 -2.474874
C -3.131218 -1.81853 -3.5 -0.928211 -3.5 0
C -3.5 0.928211 -3.131218 1.81853 -2.474874 2.474874
C -1.81853 3.131218 -0.928211 3.5 0 3.5
z
" id="meaaf8c9a96"/>
</defs>
<g clip-path="url(#p7bc5c21ee2)">
<use style="fill:#4c72b0;" x="84.828074" xlink:href="#meaaf8c9a96" y="336.088519"/>
<use style="fill:#4c72b0;" x="169.221578" xlink:href="#meaaf8c9a96" y="289.363596"/>
<use style="fill:#4c72b0;" x="253.615083" xlink:href="#meaaf8c9a96" y="243.134453"/>
<use style="fill:#4c72b0;" x="338.008587" xlink:href="#meaaf8c9a96" y="196.455067"/>
<use style="fill:#4c72b0;" x="422.402091" xlink:href="#meaaf8c9a96" y="156.648423"/>
<use style="fill:#4c72b0;" x="506.795596" xlink:href="#meaaf8c9a96" y="115.817809"/>
</g>
</g>
<g id="line2d_64">
<path clip-path="url(#p7bc5c21ee2)" d="M 84.828074 314.225774
L 169.221578 261.574731
L 253.615083 204.34304
L 338.008587 154.103027
L 422.402091 102.510128
L 506.795596 58.600061
" style="fill:none;stroke:#55a868;stroke-linecap:round;stroke-width:1.75;"/>
<defs>
<path d="M -3.5 3.5
L 3.5 3.5
L 3.5 -3.5
L -3.5 -3.5
z
" id="m4acf839c5c"/>
</defs>
<g clip-path="url(#p7bc5c21ee2)">
<use style="fill:#55a868;" x="84.828074" xlink:href="#m4acf839c5c" y="314.225774"/>
<use style="fill:#55a868;" x="169.221578" xlink:href="#m4acf839c5c" y="261.574731"/>
<use style="fill:#55a868;" x="253.615083" xlink:href="#m4acf839c5c" y="204.34304"/>
<use style="fill:#55a868;" x="338.008587" xlink:href="#m4acf839c5c" y="154.103027"/>
<use style="fill:#55a868;" x="422.402091" xlink:href="#m4acf839c5c" y="102.510128"/>
<use style="fill:#55a868;" x="506.795596" xlink:href="#m4acf839c5c" y="58.600061"/>
</g>
</g>
<g id="patch_7">
<path d="M 72 352.44
L 72 47.52
" style="fill:none;"/>
</g>
<g id="patch_8">
<path d="M 518.4 352.44
L 518.4 47.52
" style="fill:none;"/>
</g>
<g id="patch_9">
<path d="M 72 352.44
L 518.4 352.44
" style="fill:none;"/>
</g>
<g id="patch_10">
<path d="M 72 47.52
L 518.4 47.52
" style="fill:none;"/>
</g>
<g id="text_21">
<!-- Weak-scaling performance on TPC-H dataset -->
<g style="fill:#262626;" transform="translate(174.368437 41.52)scale(0.12 -0.12)">
<defs>
<path d="M 20.21875 0
L 1.21875 71.578125
L 10.9375 71.578125
L 21.828125 24.65625
Q 23.578125 17.28125 24.859375 10.015625
Q 27.59375 21.484375 28.078125 23.25
L 41.703125 71.578125
L 53.125 71.578125
L 63.375 35.359375
Q 67.234375 21.875 68.953125 10.015625
Q 70.3125 16.796875 72.515625 25.59375
L 83.734375 71.578125
L 93.265625 71.578125
L 73.640625 0
L 64.5 0
L 49.421875 54.546875
Q 47.515625 61.375 47.171875 62.9375
Q 46.046875 58.015625 45.0625 54.546875
L 29.890625 0
z
" id="ArialMT-87"/>
<path d="M 40.4375 6.390625
Q 35.546875 2.25 31.03125 0.53125
Q 26.515625 -1.171875 21.34375 -1.171875
Q 12.796875 -1.171875 8.203125 3
Q 3.609375 7.171875 3.609375 13.671875
Q 3.609375 17.484375 5.34375 20.625
Q 7.078125 23.78125 9.890625 25.6875
Q 12.703125 27.59375 16.21875 28.5625
Q 18.796875 29.25 24.03125 29.890625
Q 34.671875 31.15625 39.703125 32.90625
Q 39.75 34.71875 39.75 35.203125
Q 39.75 40.578125 37.25 42.78125
Q 33.890625 45.75 27.25 45.75
Q 21.046875 45.75 18.09375 43.578125
Q 15.140625 41.40625 13.71875 35.890625
L 5.125 37.0625
Q 6.296875 42.578125 8.984375 45.96875
Q 11.671875 49.359375 16.75 51.1875
Q 21.828125 53.03125 28.515625 53.03125
Q 35.15625 53.03125 39.296875 51.46875
Q 43.453125 49.90625 45.40625 47.53125
Q 47.359375 45.171875 48.140625 41.546875
Q 48.578125 39.3125 48.578125 33.453125
L 48.578125 21.734375
Q 48.578125 9.46875 49.140625 6.21875
Q 49.703125 2.984375 51.375 0
L 42.1875 0
Q 40.828125 2.734375 40.4375 6.390625
z
M 39.703125 26.03125
Q 34.90625 24.078125 25.34375 22.703125
Q 19.921875 21.921875 17.671875 20.9375
Q 15.4375 19.96875 14.203125 18.09375
Q 12.984375 16.21875 12.984375 13.921875
Q 12.984375 10.40625 15.640625 8.0625
Q 18.3125 5.71875 23.4375 5.71875
Q 28.515625 5.71875 32.46875 7.9375
Q 36.421875 10.15625 38.28125 14.015625
Q 39.703125 17 39.703125 22.796875
z
" id="ArialMT-97"/>
<path d="M 6.640625 0
L 6.640625 71.578125
L 15.4375 71.578125
L 15.4375 30.765625
L 36.234375 51.859375
L 47.609375 51.859375
L 27.78125 32.625
L 49.609375 0
L 38.765625 0
L 21.625 26.515625
L 15.4375 20.5625
L 15.4375 0
z
" id="ArialMT-107"/>
<path d="M 3.171875 21.484375
L 3.171875 30.328125
L 30.171875 30.328125
L 30.171875 21.484375
z
" id="ArialMT-45"/>
<path d="M 40.4375 19
L 49.078125 17.875
Q 47.65625 8.9375 41.8125 3.875
Q 35.984375 -1.171875 27.484375 -1.171875
Q 16.84375 -1.171875 10.375 5.78125
Q 3.90625 12.75 3.90625 25.734375
Q 3.90625 34.125 6.6875 40.421875
Q 9.46875 46.734375 15.15625 49.875
Q 20.84375 53.03125 27.546875 53.03125
Q 35.984375 53.03125 41.359375 48.75
Q 46.734375 44.484375 48.25 36.625
L 39.703125 35.296875
Q 38.484375 40.53125 35.375 43.15625
Q 32.28125 45.796875 27.875 45.796875
Q 21.234375 45.796875 17.078125 41.03125
Q 12.9375 36.28125 12.9375 25.984375
Q 12.9375 15.53125 16.9375 10.796875
Q 20.953125 6.0625 27.390625 6.0625
Q 32.5625 6.0625 36.03125 9.234375
Q 39.5 12.40625 40.4375 19
z
" id="ArialMT-99"/>
<path d="M 8.6875 0
L 8.6875 45.015625
L 0.921875 45.015625
L 0.921875 51.859375
L 8.6875 51.859375
L 8.6875 57.375
Q 8.6875 62.59375 9.625 65.140625
Q 10.890625 68.5625 14.078125 70.671875
Q 17.28125 72.796875 23.046875 72.796875
Q 26.765625 72.796875 31.25 71.921875
L 29.9375 64.265625
Q 27.203125 64.75 24.75 64.75
Q 20.75 64.75 19.09375 63.03125
Q 17.4375 61.328125 17.4375 56.640625
L 17.4375 51.859375
L 27.546875 51.859375
L 27.546875 45.015625
L 17.4375 45.015625
L 17.4375 0
z
" id="ArialMT-102"/>
<path d="M 6.59375 0
L 6.59375 51.859375
L 14.453125 51.859375
L 14.453125 44.578125
Q 16.890625 48.390625 20.9375 50.703125
Q 25 53.03125 30.171875 53.03125
Q 35.9375 53.03125 39.625 50.640625
Q 43.3125 48.25 44.828125 43.953125
Q 50.984375 53.03125 60.84375 53.03125
Q 68.5625 53.03125 72.703125 48.75
Q 76.859375 44.484375 76.859375 35.59375
L 76.859375 0
L 68.109375 0
L 68.109375 32.671875
Q 68.109375 37.9375 67.25 40.25
Q 66.40625 42.578125 64.15625 43.984375
Q 61.921875 45.40625 58.890625 45.40625
Q 53.421875 45.40625 49.796875 41.765625
Q 46.1875 38.140625 46.1875 30.125
L 46.1875 0
L 37.40625 0
L 37.40625 33.6875
Q 37.40625 39.546875 35.25 42.46875
Q 33.109375 45.40625 28.21875 45.40625
Q 24.515625 45.40625 21.359375 43.453125
Q 18.21875 41.5 16.796875 37.734375
Q 15.375 33.984375 15.375 26.90625
L 15.375 0
z
" id="ArialMT-109"/>
<path d="M 58.796875 25.09375
L 68.265625 22.703125
Q 65.28125 11.03125 57.546875 4.90625
Q 49.8125 -1.21875 38.625 -1.21875
Q 27.046875 -1.21875 19.796875 3.484375
Q 12.546875 8.203125 8.765625 17.140625
Q 4.984375 26.078125 4.984375 36.328125
Q 4.984375 47.515625 9.25 55.828125
Q 13.53125 64.15625 21.40625 68.46875
Q 29.296875 72.796875 38.765625 72.796875
Q 49.515625 72.796875 56.828125 67.328125
Q 64.15625 61.859375 67.046875 51.953125
L 57.71875 49.75
Q 55.21875 57.5625 50.484375 61.125
Q 45.75 64.703125 38.578125 64.703125
Q 30.328125 64.703125 24.78125 60.734375
Q 19.234375 56.78125 16.984375 50.109375
Q 14.75 43.453125 14.75 36.375
Q 14.75 27.25 17.40625 20.4375
Q 20.0625 13.625 25.671875 10.25
Q 31.296875 6.890625 37.84375 6.890625
Q 45.796875 6.890625 51.3125 11.46875
Q 56.84375 16.0625 58.796875 25.09375
z
" id="ArialMT-67"/>
<path d="M 8.015625 0
L 8.015625 71.578125
L 17.484375 71.578125
L 17.484375 42.1875
L 54.6875 42.1875
L 54.6875 71.578125
L 64.15625 71.578125
L 64.15625 0
L 54.6875 0
L 54.6875 33.734375
L 17.484375 33.734375
L 17.484375 0
z
" id="ArialMT-72"/>
<path d="M 40.234375 0
L 40.234375 6.546875
Q 35.296875 -1.171875 25.734375 -1.171875
Q 19.53125 -1.171875 14.328125 2.25
Q 9.125 5.671875 6.265625 11.796875
Q 3.421875 17.921875 3.421875 25.875
Q 3.421875 33.640625 6 39.96875
Q 8.59375 46.296875 13.765625 49.65625
Q 18.953125 53.03125 25.34375 53.03125
Q 30.03125 53.03125 33.6875 51.046875
Q 37.359375 49.078125 39.65625 45.90625
L 39.65625 71.578125
L 48.390625 71.578125
L 48.390625 0
z
M 12.453125 25.875
Q 12.453125 15.921875 16.640625 10.984375
Q 20.84375 6.0625 26.5625 6.0625
Q 32.328125 6.0625 36.34375 10.765625
Q 40.375 15.484375 40.375 25.140625
Q 40.375 35.796875 36.265625 40.765625
Q 32.171875 45.75 26.171875 45.75
Q 20.3125 45.75 16.375 40.96875
Q 12.453125 36.1875 12.453125 25.875
z
" id="ArialMT-100"/>
</defs>
<use xlink:href="#ArialMT-87"/>
<use x="92.634766" xlink:href="#ArialMT-101"/>
<use x="148.25" xlink:href="#ArialMT-97"/>
<use x="203.865234" xlink:href="#ArialMT-107"/>
<use x="253.865234" xlink:href="#ArialMT-45"/>
<use x="287.166016" xlink:href="#ArialMT-115"/>
<use x="337.166016" xlink:href="#ArialMT-99"/>
<use x="387.166016" xlink:href="#ArialMT-97"/>
<use x="442.78125" xlink:href="#ArialMT-108"/>
<use x="464.998047" xlink:href="#ArialMT-105"/>
<use x="487.214844" xlink:href="#ArialMT-110"/>
<use x="542.830078" xlink:href="#ArialMT-103"/>
<use x="598.445312" xlink:href="#ArialMT-32"/>
<use x="626.228516" xlink:href="#ArialMT-112"/>
<use x="681.84375" xlink:href="#ArialMT-101"/>
<use x="737.458984" xlink:href="#ArialMT-114"/>
<use x="770.759766" xlink:href="#ArialMT-102"/>
<use x="798.542969" xlink:href="#ArialMT-111"/>
<use x="854.158203" xlink:href="#ArialMT-114"/>
<use x="887.458984" xlink:href="#ArialMT-109"/>
<use x="970.759766" xlink:href="#ArialMT-97"/>
<use x="1026.375" xlink:href="#ArialMT-110"/>
<use x="1081.990234" xlink:href="#ArialMT-99"/>
<use x="1131.990234" xlink:href="#ArialMT-101"/>
<use x="1187.605469" xlink:href="#ArialMT-32"/>
<use x="1215.388672" xlink:href="#ArialMT-111"/>
<use x="1271.003906" xlink:href="#ArialMT-110"/>
<use x="1326.619141" xlink:href="#ArialMT-32"/>
<use x="1352.652344" xlink:href="#ArialMT-84"/>
<use x="1413.736328" xlink:href="#ArialMT-80"/>
<use x="1480.435547" xlink:href="#ArialMT-67"/>
<use x="1552.652344" xlink:href="#ArialMT-45"/>
<use x="1585.953125" xlink:href="#ArialMT-72"/>
<use x="1658.169922" xlink:href="#ArialMT-32"/>
<use x="1685.953125" xlink:href="#ArialMT-100"/>
<use x="1741.568359" xlink:href="#ArialMT-97"/>
<use x="1797.183594" xlink:href="#ArialMT-116"/>
<use x="1824.966797" xlink:href="#ArialMT-97"/>
<use x="1880.582031" xlink:href="#ArialMT-115"/>
<use x="1930.582031" xlink:href="#ArialMT-101"/>
<use x="1986.197266" xlink:href="#ArialMT-116"/>
</g>
</g>
<g id="legend_1">
<g id="line2d_65">
<path d="M 81 60.299688
L 101 60.299688
" style="fill:none;stroke:#4c72b0;stroke-linecap:round;stroke-width:1.75;"/>
</g>
<g id="line2d_66">
<g>
<use style="fill:#4c72b0;" x="91" xlink:href="#meaaf8c9a96" y="60.299688"/>
</g>
</g>
<g id="text_22">
<!-- w/o compression -->
<g style="fill:#262626;" transform="translate(109 63.799688)scale(0.1 -0.1)">
<defs>
<path d="M 16.15625 0
L 0.296875 51.859375
L 9.375 51.859375
L 17.625 21.921875
L 20.703125 10.796875
Q 20.90625 11.625 23.390625 21.484375
L 31.640625 51.859375
L 40.671875 51.859375
L 48.4375 21.78125
L 51.03125 11.859375
L 54 21.875
L 62.890625 51.859375
L 71.4375 51.859375
L 55.21875 0
L 46.09375 0
L 37.84375 31.0625
L 35.84375 39.890625
L 25.34375 0
z
" id="ArialMT-119"/>
</defs>
<use xlink:href="#ArialMT-119"/>
<use x="72.216797" xlink:href="#ArialMT-47"/>
<use x="100" xlink:href="#ArialMT-111"/>
<use x="155.615234" xlink:href="#ArialMT-32"/>
<use x="183.398438" xlink:href="#ArialMT-99"/>
<use x="233.398438" xlink:href="#ArialMT-111"/>
<use x="289.013672" xlink:href="#ArialMT-109"/>
<use x="372.314453" xlink:href="#ArialMT-112"/>
<use x="427.929688" xlink:href="#ArialMT-114"/>
<use x="461.230469" xlink:href="#ArialMT-101"/>
<use x="516.845703" xlink:href="#ArialMT-115"/>
<use x="566.845703" xlink:href="#ArialMT-115"/>
<use x="616.845703" xlink:href="#ArialMT-105"/>
<use x="639.0625" xlink:href="#ArialMT-111"/>
<use x="694.677734" xlink:href="#ArialMT-110"/>
</g>
</g>
<g id="line2d_67">
<path d="M 81 74.566875
L 101 74.566875
" style="fill:none;stroke:#55a868;stroke-linecap:round;stroke-width:1.75;"/>
</g>
<g id="line2d_68">
<g>
<use style="fill:#55a868;" x="91" xlink:href="#m4acf839c5c" y="74.566875"/>
</g>
</g>
<g id="text_23">
<!-- w/ compression -->
<g style="fill:#262626;" transform="translate(109 78.066875)scale(0.1 -0.1)">
<use xlink:href="#ArialMT-119"/>
<use x="72.216797" xlink:href="#ArialMT-47"/>
<use x="100" xlink:href="#ArialMT-32"/>
<use x="127.783203" xlink:href="#ArialMT-99"/>
<use x="177.783203" xlink:href="#ArialMT-111"/>
<use x="233.398438" xlink:href="#ArialMT-109"/>
<use x="316.699219" xlink:href="#ArialMT-112"/>
<use x="372.314453" xlink:href="#ArialMT-114"/>
<use x="405.615234" xlink:href="#ArialMT-101"/>
<use x="461.230469" xlink:href="#ArialMT-115"/>
<use x="511.230469" xlink:href="#ArialMT-115"/>
<use x="561.230469" xlink:href="#ArialMT-105"/>
<use x="583.447266" xlink:href="#ArialMT-111"/>
<use x="639.0625" xlink:href="#ArialMT-110"/>
</g>
</g>
</g>
</g>
</g>
<defs>
<clipPath id="p7bc5c21ee2">
<rect height="304.92" width="446.4" x="72" y="47.52"/>
</clipPath>
</defs>
</svg>
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/cmake/FindUCX.cmake
|
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
find_path(UCX_INCLUDE_DIR NAMES ucp/api/ucp.h)
find_library(UCS_LIBRARY NAMES ucs)
find_library(UCT_LIBRARY NAMES uct)
find_library(UCP_LIBRARY NAMES ucp)
include(${CMAKE_ROOT}/Modules/FindPackageHandleStandardArgs.cmake)
find_package_handle_standard_args(UCX DEFAULT_MSG UCX_INCLUDE_DIR UCS_LIBRARY UCT_LIBRARY UCP_LIBRARY)
set(UCX_INCLUDE_DIRS ${UCX_INCLUDE_DIR})
set(UCX_LIBRARIES ${UCS_LIBRARY} ${UCT_LIBRARY} ${UCP_LIBRARY})
if (UCX_FOUND)
    mark_as_advanced(UCX_INCLUDE_DIR UCS_LIBRARY UCT_LIBRARY UCP_LIBRARY)
endif ()
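# Usage sketch (not part of this module): a consumer project would typically add this
# directory to CMAKE_MODULE_PATH and link against the variables defined above. The
# target name below is hypothetical.
#
#   list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake")
#   find_package(UCX REQUIRED)
#   target_include_directories(my_target PRIVATE ${UCX_INCLUDE_DIRS})
#   target_link_libraries(my_target PRIVATE ${UCX_LIBRARIES})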
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/cmake/BuildHelpers.cmake
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
function(include_and_link_dependencies target_name)
set_property(TARGET ${target_name} PROPERTY CUDA_ARCHITECTURES ${GPU_ARCHS})
target_include_directories(${target_name} PUBLIC "${CMAKE_SOURCE_DIR}/src")
target_include_directories(${target_name} PUBLIC "${CUDAToolkit_INCLUDE_DIRS}")
target_include_directories(${target_name} PUBLIC "${NCCL_INCLUDE_DIRS}")
target_include_directories(${target_name} PUBLIC "${UCX_INCLUDE_DIRS}")
target_include_directories(${target_name} PUBLIC "${NVCOMP_INCLUDE_DIR}")
target_include_directories(${target_name} PUBLIC "${MPI_CXX_INCLUDE_DIRS}")
target_include_directories(${target_name} PUBLIC "${CUDF_INCLUDE_DIRS}")
target_include_directories(${target_name} PUBLIC "${RMM_INCLUDE_DIRS}")
target_link_libraries(${target_name} PUBLIC ${NCCL_LIBRARIES})
target_link_libraries(${target_name} PUBLIC ${UCX_LIBRARIES})
target_link_libraries(${target_name} PUBLIC ${NVCOMP_LIBRARIES})
target_link_libraries(${target_name} PUBLIC MPI::MPI_CXX)
target_link_libraries(${target_name} PUBLIC ${CUDF_LIBRARIES})
target_link_libraries(${target_name} PUBLIC CUDA::cudart)
target_compile_options(${target_name} PUBLIC $<$<COMPILE_LANGUAGE:CUDA>:--expt-extended-lambda>)
target_compile_options(${target_name} PUBLIC $<$<COMPILE_LANGUAGE:CUDA>:--default-stream per-thread>)
endfunction()
function(build_executables sources)
foreach(source IN LISTS ${sources})
get_filename_component(target_name ${source} NAME_WLE)
add_executable(${target_name} ${source})
include_and_link_dependencies(${target_name})
target_link_libraries(${target_name} PUBLIC distributed)
endforeach()
endfunction()
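# Usage sketch (not part of this file): `build_executables` expects the *name* of a list
# variable holding source files, since it dereferences it via `IN LISTS`. The source file
# names below are hypothetical.
#
#   set(BENCHMARK_SOURCES benchmark/distributed_join.cu benchmark/all_to_all.cu)
#   build_executables(BENCHMARK_SOURCES)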
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/cmake/FindNCCL.cmake
|
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
find_path(NCCL_INCLUDE_DIR NAMES nccl.h)
find_library(NCCL_LIBRARIES NAMES nccl)
include(${CMAKE_ROOT}/Modules/FindPackageHandleStandardArgs.cmake)
find_package_handle_standard_args(NCCL DEFAULT_MSG NCCL_LIBRARIES NCCL_INCLUDE_DIR)
if (NCCL_FOUND)
mark_as_advanced(NCCL_INCLUDE_DIR NCCL_LIBRARIES)
endif ()
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/cmake/FindCUDF.cmake
|
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
find_path(CUDF_INCLUDE_DIR NAMES cudf/join.hpp)
find_library(CUDF_LIBRARIES NAMES cudf REQUIRED)
include(${CMAKE_ROOT}/Modules/FindPackageHandleStandardArgs.cmake)
find_package_handle_standard_args(CUDF DEFAULT_MSG CUDF_LIBRARIES CUDF_INCLUDE_DIR)
if(CUDF_FOUND)
mark_as_advanced(CUDF_INCLUDE_DIR CUDF_LIBRARIES)
set(CUDF_INCLUDE_DIRS ${CUDF_INCLUDE_DIR} ${CUDF_INCLUDE_DIR}/libcudf/libcudacxx)
endif()
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/cmake/FindNVCOMP.cmake
|
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
find_path(NVCOMP_INCLUDE_DIR NAMES nvcomp.hpp)
find_library(NVCOMP_LIBRARIES NAMES nvcomp)
include(${CMAKE_ROOT}/Modules/FindPackageHandleStandardArgs.cmake)
find_package_handle_standard_args(NVCOMP DEFAULT_MSG NVCOMP_LIBRARIES NVCOMP_INCLUDE_DIR)
if (NVCOMP_FOUND)
mark_as_advanced(NVCOMP_INCLUDE_DIR NVCOMP_LIBRARIES)
endif ()
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/cmake/FindRMM.cmake
|
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
find_path(RMM_INCLUDE_DIR NAMES rmm/device_buffer.hpp)
include(${CMAKE_ROOT}/Modules/FindPackageHandleStandardArgs.cmake)
find_package_handle_standard_args(RMM DEFAULT_MSG RMM_INCLUDE_DIR)
if (RMM_FOUND)
mark_as_advanced(RMM_INCLUDE_DIR)
set(RMM_INCLUDE_DIRS ${RMM_INCLUDE_DIR})
endif ()
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/ci/install-dependencies.sh
|
#!/bin/bash
set -exo pipefail
export DEBIAN_FRONTEND=noninteractive
apt-get update -y && apt-get install -y build-essential wget cmake git vim
wget -O /miniconda.sh https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh
sh /miniconda.sh -b -p /conda && /conda/bin/conda update -y -n base conda
export PATH=${PATH}:/conda/bin
cd /root
conda create -y --name join
source activate join
conda install -y -c rapidsai -c nvidia -c conda-forge cudf=0.19 python=3.8 cudatoolkit=11.0
conda install -y -c rapidsai -c nvidia -c conda-forge ucx ucx-proc=*=gpu nccl openmpi
conda install -y -c conda-forge cmake
cd /root
git clone https://github.com/NVIDIA/nvcomp && cd nvcomp
mkdir -p build && cd build && cmake ..
make -j
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/src/shuffle_on.cpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "shuffle_on.hpp"
#include "all_to_all_comm.hpp"
#include "communicator.hpp"
#include "compression.hpp"
#include "error.hpp"
#include <cudf/partitioning.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <mpi.h>
#include <memory>
#include <vector>
using cudf::table;
using std::vector;
std::unique_ptr<cudf::table> shuffle_on(cudf::table_view const& input,
std::vector<cudf::size_type> const& on_columns,
CommunicationGroup comm_group,
Communicator* communicator,
std::vector<ColumnCompressionOptions> compression_options,
cudf::hash_id hash_function,
uint32_t hash_seed,
bool report_timing,
void* preallocated_pinned_buffer)
{
int mpi_rank = communicator->mpi_rank;
int comm_group_size = comm_group.size();
double start_time = 0.0;
double stop_time = 0.0;
/* Hash partition */
std::unique_ptr<table> hashed_input;
vector<cudf::size_type> offsets;
if (report_timing) { start_time = MPI_Wtime(); }
std::tie(hashed_input, offsets) =
cudf::hash_partition(input, on_columns, comm_group_size, hash_function, hash_seed);
CUDA_RT_CALL(cudaStreamSynchronize(0));
offsets.push_back(hashed_input->num_rows());
if (report_timing) {
stop_time = MPI_Wtime();
std::cout << "Rank " << mpi_rank << ": Hash partition in shuffle takes "
<< (stop_time - start_time) * 1e3 << "ms" << std::endl;
}
/* All_to_all communication */
if (report_timing) { start_time = MPI_Wtime(); }
AllToAllCommunicator all_to_all_communicator(
hashed_input->view(), offsets, comm_group, communicator, compression_options, true);
std::unique_ptr<table> shuffled = all_to_all_communicator.allocate_communicated_table();
all_to_all_communicator.launch_communication(
shuffled->mutable_view(), report_timing, preallocated_pinned_buffer);
if (report_timing) {
stop_time = MPI_Wtime();
std::cout << "Rank " << mpi_rank << ": All-to-all communication in shuffle takes "
<< (stop_time - start_time) * 1e3 << "ms" << std::endl;
}
return shuffled;
}
std::unique_ptr<cudf::table> shuffle_on(cudf::table_view const& input,
std::vector<cudf::size_type> const& on_columns,
Communicator* communicator,
std::vector<ColumnCompressionOptions> compression_options,
cudf::hash_id hash_function,
uint32_t hash_seed,
bool report_timing,
void* preallocated_pinned_buffer)
{
return shuffle_on(input,
on_columns,
CommunicationGroup(communicator->mpi_size, 1),
communicator,
compression_options,
hash_function,
hash_seed,
report_timing,
preallocated_pinned_buffer);
}
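// Usage sketch (illustrative only): shuffle a distributed table so that rows whose
// column 0 hashes to the same value end up on the same rank. Construction of the
// per-column compression options is omitted here (see compression.hpp).
//
//   auto shuffled = shuffle_on(local_table->view(), {0}, communicator,
//                              compression_options, cudf::hash_id::HASH_MURMUR3,
//                              /*hash_seed=*/0, /*report_timing=*/false,
//                              /*preallocated_pinned_buffer=*/nullptr);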
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/src/distribute_table.hpp
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "communicator.hpp"
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <memory>
/**
* Distribute a table from root rank to worker ranks.
*
* This function needs to be called collectively by all ranks in MPI_COMM_WORLD.
*
* @param[in] global_table The global table. Only significant on root rank.
* @param[in] communicator An instance of `Communicator` used for communication.
*
* @returns The local table on each rank.
*/
std::unique_ptr<cudf::table> distribute_table(cudf::table_view global_table,
Communicator *communicator);
/**
* Merge tables from all worker ranks to the root rank.
*
* This function needs to be called collectively by all ranks in MPI_COMM_WORLD.
*
 * @param[in] table The table on each rank to be sent to the root rank. Significant on all ranks.
* @param[in] communicator An instance of `Communicator` used for communication.
*
* @return Merged table on the root rank. `nullptr` on all other ranks.
*/
std::unique_ptr<cudf::table> collect_tables(cudf::table_view table, Communicator *communicator);
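// Usage sketch (illustrative only; assumes MPI and `communicator` are already set up,
// and `full_table` is populated on the root rank):
//
//   std::unique_ptr<cudf::table> local = distribute_table(full_table, communicator);
//   /* ... operate on local->view() on every rank ... */
//   std::unique_ptr<cudf::table> merged = collect_tables(local->view(), communicator);
//   // `merged` is a valid table only on the root rank and nullptr elsewhere.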
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/src/communicator.hpp
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <mpi.h>
#include <nccl.h>
#include <ucp/api/ucp.h>
#include <cuda_runtime.h>
#include <cstdint>
#include <queue>
#include <vector>
#define comm_handle_t void *
class Communicator {
// Note: There is no guarantee that communicators will be thread-safe.
public:
/**
   * Initialize the communicator. This method should be called by all ranks in MPI_COMM_WORLD. Also,
   * each rank should call this method at most once. In other words, multiple communicators are not
   * supported.
*/
virtual void initialize() = 0;
/**
* Define the start point of a collective communication.
*
* Note: nested start/stop pairs are not supported and will lead to undefined behavior.
*/
virtual void start() = 0;
/**
   * Block until all communication issued since the last *start* call has completed.
*/
virtual void stop() = 0;
/**
* Send data to a remote rank.
*
* @param[in] buf Data buffer to send to remote rank
* @param[in] count Number of elements to send
* @param[in] element_size Size of each element
* @param[in] dest Destination rank
*/
virtual void send(const void *buf, int64_t count, int element_size, int dest) = 0;
/**
* Receive data from a remote rank.
*
* @param[in] buf Receive buffer to place received data into
* @param[in] count Number of elements to receive
* @param[in] element_size Size of each element
* @param[in] source Source rank
*/
virtual void recv(void *buf, int64_t count, int element_size, int source) = 0;
/**
* Close the endpoints, free up used communication resources, and stop the communication runtime.
*/
virtual void finalize() = 0;
/**
* Whether the distributed join implementation using this communicator should group by batch or
* group by column.
*/
virtual bool group_by_batch() = 0;
virtual ~Communicator() = default;
int mpi_rank;
int mpi_size;
int current_device;
};
class MPILikeCommunicator : public Communicator {
// *MPILikeCommunicator* is an abstract class which implements the behavior of start/stop pairs
// for communication libraries like MPI or UCX.
// Note: For all tag send/recv operations, -1 is reserved and should not be used as a user tag.
// TODO: Enforce this assumption by runtime checking.
public:
virtual void initialize();
virtual void start();
virtual void stop();
virtual void send(const void *buf, int64_t count, int element_size, int dest);
/**
* Send data to a remote rank asynchronously.
*
* @param[in] buf Data buffer to send to remote rank
* @param[in] count Number of elements to send
* @param[in] element_size Size of each element
* @param[in] dest Destination rank
* @param[in] tag Message tag
*
* @returns Communication handle for waiting. See 'wait' and 'waitall'.
*/
virtual comm_handle_t send(
const void *buf, int64_t count, int element_size, int dest, int tag) = 0;
virtual void recv(void *buf, int64_t count, int element_size, int source);
/**
* Receive data from a remote rank asynchronously. Use this version if the receive size is known.
*
* @param[in] buf Receive buffer to place received data into
* @param[in] count Number of elements to receive
* @param[in] element_size Size of each element
* @param[in] source Source rank
* @param[in] tag Message tag
*
* @returns Communication handle for waiting. See 'wait' and 'waitall'.
*/
virtual comm_handle_t recv(void *buf, int64_t count, int element_size, int source, int tag) = 0;
/**
* Receive data from a remote rank asynchronously. Use this version if the receive size is
* unknown.
*
* @param[out] buf Allocated receive buffer. It is the caller's responsibility to free
* this buffer.
* @param[out] count Number of elements received
* @param[in] element_size The size of each element
* @param[in] source Source rank
* @param[in] tag Message tag
*
* @returns Communication handle for waiting. See 'wait' and 'waitall'.
*/
virtual comm_handle_t recv(void **buf, int64_t *count, int element_size, int source, int tag) = 0;
/**
   * The host process will block until the specified communication has completed.
*
* @param[in] request Communication handle to wait for
*/
virtual void wait(comm_handle_t request) = 0;
/**
* The host process will block until all communications specified are completed.
*
* @param[in] requests A vector of communication handles to wait for
*/
virtual void waitall(std::vector<comm_handle_t> requests) = 0;
/**
* The host process will block until all communications specified are completed.
*
* @param[in] begin Iterator points to the first handle to wait for
* @param[in] end Iterator points to the next handle after the last
*/
virtual void waitall(std::vector<comm_handle_t>::const_iterator begin,
std::vector<comm_handle_t>::const_iterator end) = 0;
virtual ~MPILikeCommunicator() = default;
// used for keeping track of the pending requests since the last *start* call
std::vector<comm_handle_t> pending_requests;
// tag to use for group calls
static constexpr int reserved_tag{-1};
};
class UCXCommunicator : public MPILikeCommunicator {
public:
virtual void initialize();
using MPILikeCommunicator::send;
virtual comm_handle_t send(const void *buf, int64_t count, int element_size, int dest, int tag);
using MPILikeCommunicator::recv;
virtual comm_handle_t recv(void *buf, int64_t count, int element_size, int source, int tag);
virtual comm_handle_t recv(void **buf, int64_t *count, int element_size, int source, int tag);
/**
* Register a buffer through UCX.
*
* @param[in] buf Buffer to be registered.
* @param[in] size Size in byte to register.
* @param[out] memory_handle Memory handle to the registered buffer.
*/
virtual void register_buffer(void *buf, size_t size, ucp_mem_h *memory_handle);
/**
* Deregister a buffer through UCX.
*
* @param[in] memory_handle Memory handle of which the associated buffer will be deregistered.
*/
virtual void deregister_buffer(ucp_mem_h memory_handle);
virtual void wait(comm_handle_t request);
virtual void waitall(std::vector<comm_handle_t> requests);
virtual void waitall(std::vector<comm_handle_t>::const_iterator begin,
std::vector<comm_handle_t>::const_iterator end);
virtual void finalize();
virtual bool group_by_batch() { return true; }
ucp_context_h ucp_context;
ucp_worker_h ucp_worker;
ucp_address_t *ucp_worker_address;
size_t ucp_worker_address_len;
std::vector<ucp_ep_h> ucp_endpoints;
private:
virtual void initialize_ucx();
virtual void create_endpoints();
};
class UCXBufferCommunicator : public UCXCommunicator {
public:
virtual void initialize();
/**
* Allocate communication buffers and put them into the 'buffer_cache' queue.
*
* @param[in] ncaches Total number of communication buffers.
* @param[in] cache_size Size of each communication buffer.
*/
virtual void setup_cache(int64_t ncaches, int64_t cache_size);
using UCXCommunicator::send;
virtual comm_handle_t send(const void *buf, int64_t count, int element_size, int dest, int tag);
using UCXCommunicator::recv;
virtual comm_handle_t recv(void *buf, int64_t count, int element_size, int source, int tag);
virtual comm_handle_t recv(void **buf, int64_t *count, int element_size, int source, int tag);
virtual void wait(comm_handle_t request);
virtual void waitall(std::vector<comm_handle_t> requests);
virtual void waitall(std::vector<comm_handle_t>::const_iterator begin,
std::vector<comm_handle_t>::const_iterator end);
virtual void finalize();
// It is possible to use different tags to distinguish between different messages destined to the
// same remote GPU, but grouping by batch means more communication buffers are needed, which makes
// each communication buffer smaller, resulting in a performance degradation.
virtual bool group_by_batch() { return false; }
enum CommInfoTypes { SEND, RECV };
struct CommInfo {
CommInfoTypes types;
bool completed; // used only for the handle returned to the user to signal completion
UCXBufferCommunicator *comm; // pointer to the Communicator object
CommInfo *orig_info; // pointer to the handle returned to the user
void *comm_buffer; // communication buffer
bool custom_allocated; // indicate whether this object is allocated by UCX or communicator
};
struct SendInfo {
CommInfoTypes types;
bool completed;
UCXBufferCommunicator *comm;
CommInfo *orig_info;
void *comm_buffer;
bool custom_allocated;
const void *send_buffer; // user buffer
    int64_t *count;         // pointer to the total number of elements that need to be sent
int element_size; // the size of each element
int dest; // destination rank
int user_tag; // tag specified by the user
int ibatch; // current batch number
};
struct RecvInfo {
CommInfoTypes types;
bool completed;
UCXBufferCommunicator *comm;
CommInfo *orig_info;
void *comm_buffer;
bool custom_allocated;
void **recv_buffer; // pointer to the receive buffer
    int64_t *count;         // pointer to the total number of elements that need to be received
int element_size; // the size of each element
int source; // source rank
int user_tag; // tag specified by the user
int ibatch; // current batch number
};
int64_t comm_buffer_size;
std::queue<void *> buffer_cache;
cudaStream_t copy_stream; // stream used for copying between user buffer and communication buffer
private:
virtual void initialize_ucx();
bool wait_send(SendInfo *info);
bool wait_recv(RecvInfo *info);
comm_handle_t recv_helper(void **buf, int64_t *count, int element_size, int source, int tag);
void *cache_start_addr;
ucp_mem_h cache_mem_handle;
};
/**
 * Helper function for constructing a *UCXCommunicator*. This function needs to be called after
 * MPI has been initialized and the CUDA device has been selected.
*
* @param[in] use_buffer_communicator If this argument is set to *true*, a buffered communicator
* is constructed. Otherwise, a normal UCX communicator is
* constructed.
* @param[in] num_comm_buffers Number of communication buffers for buffered communicator.
 *                                        This argument is ignored if *use_buffer_communicator* is
 *                                        false.
* @param[in] comm_buffer_size The size of each communication buffer for buffered
 *                                        communicator. This argument is ignored if
 *                                        *use_buffer_communicator* is false.
*
* @returns Constructed communicator. It is the caller's responsibility
* to free this communicator using *delete*.
*/
UCXCommunicator *initialize_ucx_communicator(bool use_buffer_communicator,
int num_comm_buffers,
int64_t comm_buffer_size);
class NCCLCommunicator : public Communicator {
public:
virtual void initialize();
virtual void start();
virtual void stop();
virtual void send(const void *buf, int64_t count, int element_size, int dest);
virtual void recv(void *buf, int64_t count, int element_size, int source);
virtual void finalize();
  // NCCL does not support sending multiple messages to the same remote GPU within `start` and
  // `stop` as of V2.7. This feature is planned for V2.8.
virtual bool group_by_batch() { return false; }
// Stream created/destroyed by the communicator object that is used for communication-related
// kernels/copies
cudaStream_t comm_stream;
ncclComm_t nccl_comm;
std::vector<void *> comm_buffers; // used for 128-bit alignment
// used for keeping track of size allocated in comm_buffers
std::vector<std::size_t> comm_buffer_sizes;
std::vector<void *> recv_buffers;
std::vector<std::size_t> recv_buffer_idx;
};
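// Usage sketch (illustrative only; assumes MPI has been initialized and the CUDA device
// selected; the buffer pointers, counts, and ranks below are placeholders):
//
//   UCXCommunicator* comm = initialize_ucx_communicator(false, 0, 0);
//   comm->start();
//   if (comm->mpi_rank == 0) { comm->send(d_send, nelem, sizeof(int), /*dest=*/1); }
//   if (comm->mpi_rank == 1) { comm->recv(d_recv, nelem, sizeof(int), /*source=*/0); }
//   comm->stop();  // blocks until the communication above has completed
//   comm->finalize();
//   delete comm;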
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/src/setup.cpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "setup.hpp"
#include "communicator.hpp"
#include "error.hpp"
#include "registered_memory_resource.hpp"
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <mpi.h>
#include <cuda_runtime.h>
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <string>
void set_cuda_device()
{
int mpi_rank;
MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank));
int device_count;
CUDA_RT_CALL(cudaGetDeviceCount(&device_count));
std::cout << "Device count: " << device_count << std::endl;
int current_device = mpi_rank % device_count;
CUDA_RT_CALL(cudaSetDevice(current_device));
std::cout << "Rank " << mpi_rank << " select " << current_device << "/" << device_count << " GPU"
<< std::endl;
}
void setup_memory_pool_and_communicator(
Communicator *&communicator,
registered_memory_resource *®istered_mr,
rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> *&pool_mr,
std::string communicator_name,
std::string registration_method,
int64_t communicator_buffer_size)
{
int mpi_size;
MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &mpi_size));
registered_mr = nullptr;
  // Calculate the memory pool size: use roughly 90% (256/284) of the currently free device memory
size_t free_memory, total_memory;
CUDA_RT_CALL(cudaMemGetInfo(&free_memory, &total_memory));
const size_t pool_size = free_memory / 284 * 256;
if (communicator_name == "NCCL") {
communicator = new NCCLCommunicator;
communicator->initialize();
pool_mr = new rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource>(
rmm::mr::get_current_device_resource(), pool_size, pool_size);
rmm::mr::set_current_device_resource(pool_mr);
} else if (communicator_name == "UCX") {
if (registration_method == "buffer") {
// For UCX with buffer communicator, a memory pool is first constructed so that the
// communication buffers will be allocated in memory pool.
pool_mr = new rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource>(
rmm::mr::get_current_device_resource(), pool_size, pool_size);
rmm::mr::set_current_device_resource(pool_mr);
// *2 because buffers are needed for both sends and receives
const int num_comm_buffers = 2 * mpi_size;
communicator = initialize_ucx_communicator(
true, num_comm_buffers, communicator_buffer_size / num_comm_buffers - 100'000LL);
} else if (registration_method == "preregistered") {
// For UCX with preregistered memory pool, a communicator is first constructed so that
// `registered_memory_resource` can use the communicator for buffer registrations.
UCXCommunicator *ucx_communicator = initialize_ucx_communicator(false, 0, 0);
communicator = ucx_communicator;
registered_mr = new registered_memory_resource(ucx_communicator);
pool_mr = new rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource>(
registered_mr, pool_size, pool_size);
rmm::mr::set_current_device_resource(pool_mr);
} else if (registration_method == "none") {
communicator = initialize_ucx_communicator(false, 0, 0);
pool_mr = new rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource>(
rmm::mr::get_current_device_resource(), pool_size, pool_size);
rmm::mr::set_current_device_resource(pool_mr);
} else {
throw std::runtime_error("Unknown registration method");
}
} else {
throw std::runtime_error("Unknown communicator name");
}
}
void destroy_memory_pool_and_communicator(
Communicator *communicator,
registered_memory_resource *registered_mr,
rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> *pool_mr,
std::string communicator_name,
std::string registration_method)
{
if (communicator_name == "UCX" && registration_method == "buffer") {
    // When finalizing the buffer communicator, communication buffers need to be deallocated, so
    // `finalize` needs to be called before the memory pool is deleted.
communicator->finalize();
delete pool_mr;
delete registered_mr;
} else {
    // For the registered memory resource, the memory pool needs to be deleted before the
    // communicator is finalized, so that all buffers can be deregistered through UCX.
    // In every other scenario, the order of deleting the memory pool and finalizing the
    // communicator does not matter, and we simply use this path.
delete pool_mr;
delete registered_mr;
communicator->finalize();
}
delete communicator;
}
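// Typical call sequence (a sketch; the communicator name, registration method, and buffer
// size below are example values, not the only supported ones):
//
//   set_cuda_device();
//   Communicator* communicator = nullptr;
//   registered_memory_resource* registered_mr = nullptr;
//   rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource>* pool_mr = nullptr;
//   setup_memory_pool_and_communicator(
//     communicator, registered_mr, pool_mr, "UCX", "preregistered", 800'000'000LL);
//   /* ... run distributed operations ... */
//   destroy_memory_pool_and_communicator(
//     communicator, registered_mr, pool_mr, "UCX", "preregistered");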
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/src/CMakeLists.txt
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
file(GLOB CUDA_SOURCES *.cu)
file(GLOB CPP_SOURCES *.cpp)
add_library(distributed ${CUDA_SOURCES} ${CPP_SOURCES})
include(BuildHelpers)
include_and_link_dependencies(distributed)
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/src/distributed_join.hpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "communicator.hpp"
#include "compression.hpp"
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cstdint>
#include <memory>
#include <vector>
/**
* Top level interface for distributed inner join.
*
* This function should be called collectively by all processes in MPI_COMM_WORLD. All arguments
* are significant for all ranks.
*
 * The arguments `left` and `right` are the left table and the right table distributed on each
 * rank. In other words, the left (right) table to be joined is the concatenation of `left`
 * (`right`) on all ranks. If the whole tables reside on a single rank, use *distribute_table*
 * to distribute them before calling this function.
*
* @param[in] left The left table distributed on each rank.
* @param[in] right The right table distributed on each rank.
* @param[in] left_on The column indices from `left` to join on.
* The column from `left` indicated by `left_on[i]` will be compared against the column
* from `right` indicated by `right_on[i]`.
* @param[in] right_on The column indices from `right` to join on.
* The column from `right` indicated by `right_on[i]` will be compared against the column
* from `left` indicated by `left_on[i]`.
* @param[in] communicator An instance of `Communicator` used for communication.
* @param[in] left_compression_options Vector of length equal to the number of columns in *left*,
* indicating whether/how each column of the left table needs to be compressed before communication.
* @param[in] right_compression_options Vector of length equal to the number of columns in *right*,
* indicating whether/how each column of the right table needs to be compressed before
* communication.
* @param[in] over_decom_factor Over-decomposition factor used for overlapping computation and
* communication.
 * @param[in] report_timing Whether to collect and print timing information.
* @param[in] preallocated_pinned_buffer Preallocated page-locked host buffer with size at least
* `mpi_size * sizeof(size_t)`, used for holding the compressed sizes.
 * @return Result of joining the `left` and `right` tables on the columns
 * specified by `left_on` and `right_on`. The resulting table contains the columns of
 * `left` (including common columns) followed by the columns of `right` (excluding common
 * columns). The global join result is the concatenation of the returned tables on all ranks.
*/
std::unique_ptr<cudf::table> distributed_inner_join(
cudf::table_view left,
cudf::table_view right,
std::vector<cudf::size_type> const &left_on,
std::vector<cudf::size_type> const &right_on,
Communicator *communicator,
std::vector<ColumnCompressionOptions> left_compression_options,
std::vector<ColumnCompressionOptions> right_compression_options,
int over_decom_factor = 1,
bool report_timing = false,
void *preallocated_pinned_buffer = nullptr,
int nvlink_domain_size = 1);
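/*
 * Illustrative usage sketch (not part of the library API): how a caller might invoke
 * `distributed_inner_join` on tables already distributed across ranks. `left`, `right` and
 * `communicator` are assumed to be set up elsewhere (e.g. via *distribute_table* and the
 * communicator setup helpers); compression is disabled here for simplicity.
 *
 *   std::vector<ColumnCompressionOptions> left_opts =
 *     generate_compression_options_distributed(left, false);
 *   std::vector<ColumnCompressionOptions> right_opts =
 *     generate_compression_options_distributed(right, false);
 *
 *   std::unique_ptr<cudf::table> joined = distributed_inner_join(
 *     left, right, {0}, {0}, communicator, left_opts, right_opts);
 */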
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/src/compression.hpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "error.hpp"
#include <nvcomp.hpp>
#include <nvcomp/cascaded.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <cuda_runtime.h>
#include <cuda/std/type_traits>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <memory>
#include <stdexcept>
#include <type_traits>
#include <vector>
enum class CompressionMethod { none, cascaded, lz4 };
/* A structure outlining how to compress a column */
struct ColumnCompressionOptions {
CompressionMethod compression_method;
nvcompCascadedFormatOpts cascaded_format;
std::vector<ColumnCompressionOptions> children_compression_options;
ColumnCompressionOptions(CompressionMethod compression_method = CompressionMethod::none,
nvcompCascadedFormatOpts cascaded_format = {},
std::vector<ColumnCompressionOptions> children_compression_options = {})
: compression_method(compression_method),
cascaded_format(cascaded_format),
children_compression_options(children_compression_options)
{
}
};
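/*
 * Illustrative sketch (hypothetical values): constructing options for a single numeric column
 * that should be cascaded-compressed with one RLE pass, one delta pass and bit-packing enabled.
 * In practice, `generate_auto_select_compression_options` below can pick these values
 * automatically.
 *
 *   nvcompCascadedFormatOpts format{};
 *   format.num_RLEs   = 1;
 *   format.num_deltas = 1;
 *   format.use_bp     = 1;
 *   ColumnCompressionOptions opts(CompressionMethod::cascaded, format);
 */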
template <typename T>
using is_cascaded_supported = cuda::std::disjunction<std::is_same<int8_t, T>,
std::is_same<uint8_t, T>,
std::is_same<int16_t, T>,
std::is_same<uint16_t, T>,
std::is_same<int32_t, T>,
std::is_same<uint32_t, T>,
std::is_same<int64_t, T>,
std::is_same<uint64_t, T>>;
template <typename T>
using is_time_t = cuda::std::disjunction<cudf::is_timestamp_t<T>, cudf::is_duration_t<T>>;
struct compression_functor {
/**
* Compress a vector of buffers using cascaded compression.
*
* @param[in] uncompressed_data Input buffers to be compressed.
* @param[in] uncompressed_counts Number of elements to be compressed for each buffer in
* *uncompressed_data*. Note that in general this is different from the size of the buffer.
* @param[out] compressed_data Compressed buffers after cascaded compression. This argument does
* not need to be preallocated.
* @param[out] compressed_sizes Number of bytes for each buffer in *compressed_data*.
* @param[in] streams CUDA streams used for the compression kernels.
*/
template <typename T, std::enable_if_t<is_cascaded_supported<T>::value> * = nullptr>
void operator()(std::vector<const void *> const &uncompressed_data,
std::vector<cudf::size_type> const &uncompressed_counts,
std::vector<rmm::device_buffer> &compressed_data,
size_t *compressed_sizes,
std::vector<rmm::cuda_stream_view> const &streams,
nvcompCascadedFormatOpts cascaded_format)
{
size_t npartitions = uncompressed_counts.size();
compressed_data.resize(npartitions);
std::vector<rmm::device_buffer> nvcomp_temp_spaces(npartitions);
std::vector<size_t> nvcomp_temp_sizes(npartitions);
for (size_t ipartition = 0; ipartition < npartitions; ipartition++) {
if (uncompressed_counts[ipartition] == 0) {
compressed_sizes[ipartition] = 0;
continue;
}
nvcomp::CascadedCompressor compressor(nvcomp::TypeOf<T>(),
cascaded_format.num_RLEs,
cascaded_format.num_deltas,
cascaded_format.use_bp);
compressor.configure(uncompressed_counts[ipartition] * sizeof(T),
&nvcomp_temp_sizes[ipartition],
&compressed_sizes[ipartition]);
nvcomp_temp_spaces[ipartition] =
rmm::device_buffer(nvcomp_temp_sizes[ipartition], streams[ipartition]);
compressed_data[ipartition] =
rmm::device_buffer(compressed_sizes[ipartition], streams[ipartition]);
}
for (size_t ipartition = 0; ipartition < npartitions; ipartition++) {
if (uncompressed_counts[ipartition] == 0) continue;
nvcomp::CascadedCompressor compressor(nvcomp::TypeOf<T>(),
cascaded_format.num_RLEs,
cascaded_format.num_deltas,
cascaded_format.use_bp);
compressor.compress_async(uncompressed_data[ipartition],
uncompressed_counts[ipartition] * sizeof(T),
nvcomp_temp_spaces[ipartition].data(),
nvcomp_temp_sizes[ipartition],
compressed_data[ipartition].data(),
&compressed_sizes[ipartition],
streams[ipartition].value());
}
}
template <typename T, std::enable_if_t<is_time_t<T>::value> * = nullptr>
void operator()(std::vector<const void *> const &uncompressed_data,
std::vector<cudf::size_type> const &uncompressed_counts,
std::vector<rmm::device_buffer> &compressed_data,
size_t *compressed_sizes,
std::vector<rmm::cuda_stream_view> const &streams,
nvcompCascadedFormatOpts cascaded_format)
{
// If the data type is duration or time, use the corresponding arithmetic type
operator()<typename T::rep>(uncompressed_data,
uncompressed_counts,
compressed_data,
compressed_sizes,
streams,
cascaded_format);
}
  // Checking whether the type is supported is necessary because T might be an incomplete type.
template <typename T,
std::enable_if_t<!is_cascaded_supported<T>::value && !is_time_t<T>::value> * = nullptr>
void operator()(std::vector<const void *> const &uncompressed_data,
std::vector<cudf::size_type> const &uncompressed_counts,
std::vector<rmm::device_buffer> &compressed_data,
size_t *compressed_sizes,
std::vector<rmm::cuda_stream_view> const &streams,
nvcompCascadedFormatOpts cascaded_format)
{
throw std::runtime_error("Unsupported type for cascaded compressor");
}
};
struct decompression_functor {
/**
* Decompress a vector of buffers previously compressed by `compression_functor{}.operator()`.
*
* @param[in] compressed_data Vector of input data to be decompressed.
* @param[in] compressed_sizes Sizes of *compressed_data* in bytes.
* @param[out] outputs Decompressed outputs. This argument needs to be preallocated.
* @param[in] expected_output_counts Expected number of elements in the decompressed buffers.
*/
template <typename T, std::enable_if_t<is_cascaded_supported<T>::value> * = nullptr>
void operator()(std::vector<const void *> const &compressed_data,
std::vector<int64_t> const &compressed_sizes,
std::vector<void *> const &outputs,
std::vector<int64_t> const &expected_output_counts,
std::vector<rmm::cuda_stream_view> const &streams)
{
size_t npartitions = compressed_sizes.size();
std::vector<rmm::device_buffer> nvcomp_temp_spaces(npartitions);
std::vector<size_t> nvcomp_temp_sizes(npartitions);
    // nvcomp::Decompressor objects are reused in the two passes below since the
    // nvcomp::Decompressor constructor can be synchronous to the host thread. std::make_unique is
    // used instead of std::vector because the copy constructor of nvcomp::Decompressor is deleted.
auto decompressors =
std::make_unique<std::unique_ptr<nvcomp::CascadedDecompressor>[]>(npartitions);
for (size_t ipartition = 0; ipartition < npartitions; ipartition++) {
if (expected_output_counts[ipartition] == 0) continue;
decompressors[ipartition] = std::make_unique<nvcomp::CascadedDecompressor>();
size_t output_bytes;
decompressors[ipartition]->configure(compressed_data[ipartition],
compressed_sizes[ipartition],
&nvcomp_temp_sizes[ipartition],
&output_bytes,
streams[ipartition].value());
assert(output_bytes == expected_output_counts[ipartition] * sizeof(T));
nvcomp_temp_spaces[ipartition] =
rmm::device_buffer(nvcomp_temp_sizes[ipartition], streams[ipartition]);
}
for (size_t ipartition = 0; ipartition < npartitions; ipartition++) {
if (expected_output_counts[ipartition] == 0) continue;
decompressors[ipartition]->decompress_async(compressed_data[ipartition],
compressed_sizes[ipartition],
nvcomp_temp_spaces[ipartition].data(),
nvcomp_temp_sizes[ipartition],
outputs[ipartition],
expected_output_counts[ipartition] * sizeof(T),
streams[ipartition].value());
}
}
template <typename T, std::enable_if_t<is_time_t<T>::value> * = nullptr>
void operator()(std::vector<const void *> const &compressed_data,
std::vector<int64_t> const &compressed_sizes,
std::vector<void *> const &outputs,
std::vector<int64_t> const &expected_output_counts,
std::vector<rmm::cuda_stream_view> const &streams)
{
// If the data type is duration or time, use the corresponding arithmetic type
operator()<typename T::rep>(
compressed_data, compressed_sizes, outputs, expected_output_counts, streams);
}
  // Checking whether the type is supported is necessary because T might be an incomplete type.
template <typename T,
std::enable_if_t<!is_cascaded_supported<T>::value && !is_time_t<T>::value> * = nullptr>
void operator()(std::vector<const void *> const &compressed_data,
std::vector<int64_t> const &compressed_sizes,
std::vector<void *> const &outputs,
std::vector<int64_t> const &expected_output_counts,
std::vector<rmm::cuda_stream_view> const &streams)
{
throw std::runtime_error("Unsupported type for cascaded decompressor");
}
};
struct cascaded_selector_functor {
/**
* Generate cascaded compression configuration options using auto-selector.
*
* @param[in] uncompressed_data Data used by auto-selector.
* @param[in] byte_len Number of bytes in *uncompressed_data*.
*
* @returns Cascaded compression configuration options for *uncompressed_data*.
*/
template <typename T, std::enable_if_t<is_cascaded_supported<T>::value> * = nullptr>
nvcompCascadedFormatOpts operator()(const void *uncompressed_data, size_t byte_len)
{
nvcompCascadedSelectorOpts selector_opts;
selector_opts.sample_size = 1024;
selector_opts.num_samples = 100;
nvcomp::CascadedSelector<T> selector(uncompressed_data, byte_len, selector_opts);
size_t temp_bytes = selector.get_temp_size();
rmm::device_buffer temp_space(temp_bytes);
double estimate_ratio;
return selector.select_config(temp_space.data(), temp_bytes, &estimate_ratio, 0);
}
template <typename T, std::enable_if_t<is_time_t<T>::value> * = nullptr>
nvcompCascadedFormatOpts operator()(const void *uncompressed_data, size_t byte_len)
{
// If the data type is duration or time, use the corresponding arithmetic type
return operator()<typename T::rep>(uncompressed_data, byte_len);
}
template <typename T,
std::enable_if_t<!is_cascaded_supported<T>::value && !is_time_t<T>::value> * = nullptr>
nvcompCascadedFormatOpts operator()(const void *uncompressed_data, size_t byte_len)
{
throw std::runtime_error("Unsupported type for CascadedSelector");
return nvcompCascadedFormatOpts();
}
};
/**
* Generate compression options using auto selector.
*
* @param[in] input_table Table for which to generate compression options.
*
 * @returns Vector of length equal to the number of columns in *input_table*, where each element
 * represents the compression options for the corresponding column.
*/
std::vector<ColumnCompressionOptions> generate_auto_select_compression_options(
cudf::table_view input_table);
/**
 * Generate compression options indicating that no compression should be performed.
*
* @param[in] input_table Table for which to generate compression options.
*
 * @returns Vector of length equal to the number of columns in *input_table*, where each element
 * represents the compression options for the corresponding column.
*/
std::vector<ColumnCompressionOptions> generate_none_compression_options(
cudf::table_view input_table);
/**
* Broadcast the compression options of a column from the root rank to all ranks.
*
* Note: This function needs to be called collectively by all ranks in MPI_COMM_WORLD.
*
* @param[in] input_column Column which *input_options* is associated with. This argument is
* significant on all ranks.
 * @param[in] input_options Compression options associated with *input_column* that need to be
 * broadcast. This argument is only significant on the root rank.
*
* @returns Broadcasted compression options on all ranks.
*/
ColumnCompressionOptions broadcast_compression_options(cudf::column_view input_column,
ColumnCompressionOptions input_options);
/**
* Broadcast the compression options of a table from the root rank to all ranks.
*
* Note: This function needs to be called collectively by all ranks in MPI_COMM_WORLD.
*
* @param[in] input_table Table which *input_options* is associated with. This argument is
* significant on all ranks.
 * @param[in] input_options Vector of length equal to the number of columns in *input_table*,
 * representing the compression options associated with *input_table* that need to be
 * broadcast. Each element represents the compression options of one column in *input_table*. This
 * argument is only significant on the root rank.
*
* @returns Broadcasted compression options on all ranks.
*/
std::vector<ColumnCompressionOptions> broadcast_compression_options(
cudf::table_view input_table, std::vector<ColumnCompressionOptions> input_options);
/**
* Generate the same compression option on all ranks.
*
* Note: This function needs to be called collectively by all ranks in MPI_COMM_WORLD.
*
* @param[in] input_table Table to generate compression options on. This argument is significant on
* all ranks.
* @param[in] compression Whether to use compression. If *true*, the compression options will be
* generated by auto selector on the root rank. If *false*, compression options indicating no
* compression will be generated.
*
* @returns Compression options for *input_table* on all ranks.
*/
std::vector<ColumnCompressionOptions> generate_compression_options_distributed(
cudf::table_view input_table, bool compression);
/**
* This helper function runs compression and decompression on a small buffer to avoid nvcomp's
* setup time during the actual run.
*/
void warmup_nvcomp();
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/src/strings_column.cu
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "strings_column.hpp"
#include "all_to_all_comm.hpp"
#include "communicator.hpp"
#include "error.hpp"
#include <cudf/column/column_view.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/device_vector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/device_ptr.h>
#include <thrust/functional.h>
#include <thrust/gather.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <cstdint>
#include <vector>
void gather_string_offsets(cudf::table_view table,
std::vector<cudf::size_type> const &offsets,
std::vector<std::vector<cudf::size_type>> &string_send_offsets,
std::vector<std::vector<int64_t>> &string_recv_offsets,
CommunicationGroup comm_group,
Communicator *communicator)
{
int comm_group_size = comm_group.size();
rmm::device_vector<cudf::size_type> d_offsets(offsets);
for (cudf::size_type icol = 0; icol < table.num_columns(); icol++) {
cudf::data_type dtype = table.column(icol).type();
if (dtype.id() != cudf::type_id::STRING) {
// 1. If not a string column, push an empty vector
string_send_offsets.emplace_back();
string_recv_offsets.emplace_back();
continue;
} else {
string_send_offsets.emplace_back(comm_group_size + 1);
string_recv_offsets.emplace_back(comm_group_size + 1);
// 2. Gather `string_send_offsets` using the offset subcolumn and `d_offsets`
rmm::device_vector<cudf::size_type> d_string_send_offsets(comm_group_size + 1);
thrust::gather(rmm::exec_policy(),
d_offsets.begin(),
d_offsets.end(),
thrust::device_ptr<const cudf::size_type>(
table.column(icol).child(0).head<cudf::size_type>()),
d_string_send_offsets.begin());
CUDA_RT_CALL(cudaMemcpy(string_send_offsets[icol].data(),
thrust::raw_pointer_cast(d_string_send_offsets.data()),
(comm_group_size + 1) * sizeof(cudf::size_type),
cudaMemcpyDeviceToHost));
// 3. Communicate string_send_offsets and receive string_recv_offsets
communicate_sizes(
string_send_offsets[icol], string_recv_offsets[icol], comm_group, communicator);
}
}
}
void calculate_string_sizes_from_offsets(
cudf::table_view input_table,
cudf::size_type begin,
cudf::size_type end,
std::vector<rmm::device_uvector<cudf::size_type>> &output_sizes)
{
output_sizes.clear();
for (cudf::size_type icol = 0; icol < input_table.num_columns(); icol++) {
cudf::column_view input_column = input_table.column(icol);
if (input_column.type().id() != cudf::type_id::STRING) {
output_sizes.emplace_back(0, rmm::cuda_stream_default);
continue;
}
output_sizes.emplace_back(end - begin, rmm::cuda_stream_default);
thrust::transform(
// rmm::exec_policy(rmm::cuda_stream_default),
thrust::device_ptr<const cudf::size_type>(
input_column.child(0).begin<const cudf::size_type>() + begin + 1),
thrust::device_ptr<const cudf::size_type>(
input_column.child(0).begin<const cudf::size_type>() + end + 1),
thrust::device_ptr<const cudf::size_type>(
input_column.child(0).begin<const cudf::size_type>() + begin),
thrust::device_ptr<cudf::size_type>(output_sizes[icol].data()),
thrust::minus<cudf::size_type>());
}
}
void calculate_string_offsets_from_sizes(
cudf::mutable_table_view output_table,
std::vector<rmm::device_uvector<cudf::size_type>> const &input_sizes)
{
for (cudf::size_type icol = 0; icol < output_table.num_columns(); icol++) {
cudf::mutable_column_view output_column = output_table.column(icol);
if (output_column.type().id() != cudf::type_id::STRING) continue;
cudf::size_type nrows = output_column.size();
const cudf::size_type *sizes_start = input_sizes[icol].data();
const cudf::size_type *sizes_end = sizes_start + nrows;
thrust::inclusive_scan(
// rmm::exec_policy(rmm::cuda_stream_default),
thrust::device_ptr<const cudf::size_type>(sizes_start),
thrust::device_ptr<const cudf::size_type>(sizes_end),
thrust::device_ptr<cudf::size_type>(
static_cast<cudf::size_type *>(output_column.child(0).head())) +
1);
CUDA_RT_CALL(cudaMemsetAsync(output_column.child(0).head(), 0, sizeof(cudf::size_type), 0));
}
}
void allocate_string_sizes_receive_buffer(
cudf::table_view input_table,
std::vector<int64_t> recv_offsets,
std::vector<rmm::device_uvector<cudf::size_type>> &string_sizes_recv)
{
for (cudf::size_type icol = 0; icol < input_table.num_columns(); icol++) {
if (input_table.column(icol).type().id() != cudf::type_id::STRING) {
string_sizes_recv.emplace_back(0, rmm::cuda_stream_default);
} else {
string_sizes_recv.emplace_back(recv_offsets.back(), rmm::cuda_stream_default);
}
}
}
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/src/registered_memory_resource.hpp
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* This file is adapted from "rmm/mr/device/cuda_memory_resource.hpp" */
#pragma once
#include "communicator.hpp"
#include <rmm/cuda_stream_view.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <cuda_runtime.h>
#include <map>
/**
 * @brief `device_memory_resource` derived class that uses cudaMalloc/Free for
 * allocation/deallocation, and registers each allocated buffer through UCX at allocation time.
*/
class registered_memory_resource final : public rmm::mr::device_memory_resource {
public:
registered_memory_resource(UCXCommunicator* communicator) { this->communicator = communicator; }
~registered_memory_resource() = default;
registered_memory_resource(registered_memory_resource const&) = default;
registered_memory_resource(registered_memory_resource&&) = default;
registered_memory_resource& operator=(registered_memory_resource const&) = default;
registered_memory_resource& operator=(registered_memory_resource&&) = default;
/**
* @brief Query whether the resource supports use of non-null CUDA streams for
* allocation/deallocation. `registered_memory_resource` does not support streams.
*
* @returns bool false
*/
bool supports_streams() const noexcept override { return false; }
/**
* @brief Query whether the resource supports the get_mem_info API.
*
* @return true
*/
bool supports_get_mem_info() const noexcept override { return true; }
private:
/**
* @brief Allocates memory of size at least `bytes` using cudaMalloc.
*
* The returned pointer has at least 256B alignment.
*
* @note Stream argument is ignored
*
* @throws `rmm::bad_alloc` if the requested allocation could not be fulfilled
*
* @param bytes The size, in bytes, of the allocation
* @return void* Pointer to the newly allocated memory
*/
void* do_allocate(std::size_t bytes, rmm::cuda_stream_view) override
{
void* p{nullptr};
RMM_CUDA_TRY(cudaMalloc(&p, bytes), rmm::bad_alloc);
ucp_mem_h memory_handle;
communicator->register_buffer(p, bytes, &memory_handle);
registered_handles[p] = memory_handle;
return p;
}
/**
* @brief Deallocate memory pointed to by \p p.
*
* @note Stream argument is ignored.
*
* @throws Nothing.
*
* @param p Pointer to be deallocated
*/
void do_deallocate(void* p, std::size_t, rmm::cuda_stream_view) override
{
ucp_mem_h memory_handle = registered_handles.find(p)->second;
communicator->deregister_buffer(memory_handle);
RMM_ASSERT_CUDA_SUCCESS(cudaFree(p));
}
/**
* @brief Compare this resource to another.
*
   * Two `registered_memory_resource` instances always compare equal, because either can
   * deallocate memory allocated by the other.
*
* @throws Nothing.
*
* @param other The other resource to compare to
* @return true If the two resources are equivalent
* @return false If the two resources are not equal
*/
bool do_is_equal(device_memory_resource const& other) const noexcept override
{
return dynamic_cast<registered_memory_resource const*>(&other) != nullptr;
}
/**
* @brief Get free and available memory for memory resource
*
* @throws `rmm::cuda_error` if unable to retrieve memory info.
*
   * @return std::pair containing free_size and total_size of memory
*/
std::pair<size_t, size_t> do_get_mem_info(rmm::cuda_stream_view) const override
{
std::size_t free_size;
std::size_t total_size;
RMM_CUDA_TRY(cudaMemGetInfo(&free_size, &total_size));
return std::make_pair(free_size, total_size);
}
UCXCommunicator* communicator;
std::map<void*, ucp_mem_h> registered_handles;
};
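/*
 * Illustrative usage sketch (mirrors the "preregistered" path in the benchmark setup code):
 * wrapping the registered resource in an RMM pool so that UCX registration happens on the pool's
 * large upstream allocations rather than on every user allocation. `ucx_communicator` and
 * `pool_size` are assumed to be provided by the caller.
 *
 *   auto *registered_mr = new registered_memory_resource(ucx_communicator);
 *   auto *pool_mr = new rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource>(
 *     registered_mr, pool_size, pool_size);
 *   rmm::mr::set_current_device_resource(pool_mr);
 */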
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/src/compression.cpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "compression.hpp"
#include "error.hpp"
#include <nvcomp.hpp>
#include <nvcomp/cascaded.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <mpi.h>
#include <cstdint>
#include <vector>
std::vector<ColumnCompressionOptions> generate_auto_select_compression_options(
cudf::table_view input_table)
{
std::vector<ColumnCompressionOptions> compression_options;
for (cudf::size_type icol = 0; icol < input_table.num_columns(); icol++) {
cudf::column_view input_column = input_table.column(icol);
cudf::data_type dtype = input_column.type();
if (dtype.id() == cudf::type_id::STRING) {
std::vector<ColumnCompressionOptions> children_options;
// offset subcolumn
cudf::data_type offset_dtype = input_column.child(0).type();
nvcompCascadedFormatOpts offset_cascaded_opts =
cudf::type_dispatcher(offset_dtype,
cascaded_selector_functor{},
input_column.child(0).head(),
input_column.child(0).size() * cudf::size_of(offset_dtype));
children_options.emplace_back(CompressionMethod::cascaded, offset_cascaded_opts);
// do not compress char subcolumn
children_options.emplace_back(CompressionMethod::none);
compression_options.emplace_back(
CompressionMethod::none, nvcompCascadedFormatOpts(), children_options);
} else {
nvcompCascadedFormatOpts column_cascaded_opts =
cudf::type_dispatcher(dtype,
cascaded_selector_functor{},
input_column.head(),
input_column.size() * cudf::size_of(dtype));
compression_options.emplace_back(CompressionMethod::cascaded, column_cascaded_opts);
}
}
return compression_options;
}
std::vector<ColumnCompressionOptions> generate_none_compression_options(
cudf::table_view input_table)
{
std::vector<ColumnCompressionOptions> compression_options;
for (cudf::size_type icol = 0; icol < input_table.num_columns(); icol++) {
if (input_table.column(icol).type().id() == cudf::type_id::STRING) {
std::vector<ColumnCompressionOptions> children_options;
// offset subcolumn
children_options.emplace_back(CompressionMethod::none);
// char subcolumn
children_options.emplace_back(CompressionMethod::none);
compression_options.emplace_back(
CompressionMethod::none, nvcompCascadedFormatOpts(), children_options);
} else {
compression_options.emplace_back(CompressionMethod::none);
}
}
return compression_options;
}
ColumnCompressionOptions broadcast_compression_options(cudf::column_view input_column,
ColumnCompressionOptions input_options)
{
int mpi_rank;
MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank));
cudf::data_type dtype = input_column.type();
CompressionMethod compression_method = input_options.compression_method;
nvcompCascadedFormatOpts cascaded_format = input_options.cascaded_format;
std::vector<ColumnCompressionOptions> children_compression_options;
MPI_CALL(MPI_Bcast(&compression_method, sizeof(compression_method), MPI_CHAR, 0, MPI_COMM_WORLD));
MPI_CALL(MPI_Bcast(&cascaded_format, sizeof(cascaded_format), MPI_CHAR, 0, MPI_COMM_WORLD));
if (dtype.id() == cudf::type_id::STRING) {
ColumnCompressionOptions compression_options;
if (mpi_rank == 0) {
// a string column should always contain two subcolumns
assert(input_options.children_compression_options.size() == 2);
}
for (size_t icol = 0; icol < 2; icol++) {
if (mpi_rank == 0) { compression_options = input_options.children_compression_options[icol]; }
children_compression_options.push_back(
broadcast_compression_options(input_column.child(icol), compression_options));
}
}
return ColumnCompressionOptions(
compression_method, cascaded_format, children_compression_options);
}
std::vector<ColumnCompressionOptions> broadcast_compression_options(
cudf::table_view input_table, std::vector<ColumnCompressionOptions> input_options)
{
int mpi_rank;
MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank));
std::vector<ColumnCompressionOptions> output_options;
output_options.reserve(input_table.num_columns());
for (cudf::size_type icol = 0; icol < input_table.num_columns(); icol++) {
ColumnCompressionOptions input_options_icol;
if (mpi_rank == 0) { input_options_icol = input_options[icol]; }
output_options.push_back(
broadcast_compression_options(input_table.column(icol), input_options_icol));
}
return output_options;
}
std::vector<ColumnCompressionOptions> generate_compression_options_distributed(
cudf::table_view input_table, bool compression)
{
if (!compression) { return generate_none_compression_options(input_table); }
int mpi_rank;
MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank));
std::vector<ColumnCompressionOptions> compression_options;
if (mpi_rank == 0) {
compression_options = generate_auto_select_compression_options(input_table);
}
compression_options = broadcast_compression_options(input_table, compression_options);
return compression_options;
}
void warmup_nvcomp()
{
using T = int;
constexpr size_t warmup_size = 1000;
rmm::device_buffer input_data(warmup_size * sizeof(T));
std::vector<rmm::device_buffer> compressed_data(1);
size_t compressed_size;
nvcompCascadedFormatOpts cascaded_format = {.num_RLEs = 1, .num_deltas = 1, .use_bp = 1};
compression_functor{}.operator()<T>({input_data.data()},
{warmup_size},
compressed_data,
&compressed_size,
{rmm::cuda_stream_default},
cascaded_format);
rmm::device_buffer decompressed_data(warmup_size * sizeof(T));
decompression_functor{}.operator()<T>({compressed_data[0].data()},
{static_cast<int64_t>(compressed_size)},
{decompressed_data.data()},
{warmup_size},
{rmm::cuda_stream_default});
}
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/src/distribute_table.cpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "distribute_table.hpp"
#include "all_to_all_comm.hpp"
#include "communicator.hpp"
#include "error.hpp"
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <mpi.h>
#include <cuda_runtime.h>
#include <memory>
#include <vector>
/**
* Helper function for calculating the number of rows of a local table.
*
 * This function is useful, for example, for calculating the size on the root and allocating
 * receive buffers on workers when distributing tables from the root rank to worker ranks. It
 * mainly handles the case where the number of ranks does not divide the number of rows in the
 * global table.
*
* @param[in] global_table_size Number of rows in the global table.
* @param[in] mpi_rank Target rank for which this function will calculate the local
* table size.
* @param[in] mpi_size Total number of MPI ranks.
*
* @returns Number of rows in the local table of rank *mpi_rank*.
*/
static inline cudf::size_type get_local_table_size(cudf::size_type global_table_size,
int mpi_rank,
int mpi_size)
{
cudf::size_type local_table_size = global_table_size / mpi_size;
if (mpi_rank < global_table_size % mpi_size) { local_table_size++; }
return local_table_size;
}
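// Worked example (illustrative): with global_table_size == 10 and mpi_size == 4, ranks 0 and 1
// each get 3 rows and ranks 2 and 3 each get 2 rows, i.e. local sizes {3, 3, 2, 2}.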
/**
* Distribute a column from root rank to worker ranks.
*
* This function needs to be called collectively by all ranks in MPI_COMM_WORLD.
*
* @param[in] global_col The global table column. Significant only on root rank.
* @param[out] local_col The local table column to be filled with data in *global_col*.
* Significant on all ranks.
* @param[in] communicator An instance of `Communicator` used for communication.
*/
static void distribute_cols(cudf::column_view global_col,
cudf::mutable_column_view local_col,
Communicator *communicator)
{
/* Get MPI information */
int mpi_rank = communicator->mpi_rank;
int mpi_size = communicator->mpi_size;
std::size_t dtype_size = cudf::size_of(local_col.type());
communicator->start();
if (mpi_rank == 0) {
// Send global_col from the root rank to each worker rank
cudf::size_type global_size = global_col.size();
for (cudf::size_type irank = 1; irank < mpi_size; irank++) {
cudf::size_type start_idx =
std::min<cudf::size_type>(irank, global_size % mpi_size) + (global_size / mpi_size) * irank;
cudf::size_type irank_size = get_local_table_size(global_size, irank, mpi_size);
void *start_addr = (void *)(global_col.head<char>() + start_idx * dtype_size);
communicator->send(start_addr, irank_size, dtype_size, irank);
}
// Fill the root rank's local_col
cudf::size_type rank0_size = get_local_table_size(global_size, 0, mpi_size);
CUDA_RT_CALL(cudaMemcpy(
local_col.head(), global_col.head(), rank0_size * dtype_size, cudaMemcpyDeviceToDevice));
} else {
// worker rank
communicator->recv(local_col.head(), local_col.size(), dtype_size, 0);
}
communicator->stop();
}
std::unique_ptr<cudf::table> distribute_table(cudf::table_view global_table,
Communicator *communicator)
{
/* Get MPI information */
int mpi_rank = communicator->mpi_rank;
int mpi_size = communicator->mpi_size;
MPI_Datatype mpi_size_type = mpi_dtype_from_c_type<cudf::size_type>();
/* Broadcast global table size */
cudf::size_type global_table_size{-1};
if (mpi_rank == 0) { global_table_size = global_table.num_rows(); }
MPI_Bcast(&global_table_size, 1, mpi_size_type, 0, MPI_COMM_WORLD);
/* Broadcast number of columns */
cudf::size_type ncols{-1};
if (mpi_rank == 0) { ncols = global_table.num_columns(); }
MPI_Bcast(&ncols, 1, mpi_size_type, 0, MPI_COMM_WORLD);
/* Broadcast column datatype */
std::vector<cudf::data_type> columns_dtype(ncols);
for (cudf::size_type icol = 0; icol < ncols; icol++) {
if (mpi_rank == 0) columns_dtype[icol] = global_table.column(icol).type();
    MPI_Bcast(&columns_dtype[icol], sizeof(cudf::data_type), MPI_CHAR, 0, MPI_COMM_WORLD);
}
/* Allocate local tables across ranks */
cudf::size_type local_table_size = get_local_table_size(global_table_size, mpi_rank, mpi_size);
std::vector<std::unique_ptr<cudf::column>> local_table;
for (int icol = 0; icol < ncols; icol++) {
local_table.push_back(cudf::make_fixed_width_column(columns_dtype[icol], local_table_size));
}
CUDA_RT_CALL(cudaStreamSynchronize(cudaStreamDefault));
/* Send table from the root rank to all ranks */
for (int icol = 0; icol < ncols; icol++) {
cudf::column_view global_col;
if (mpi_rank == 0) global_col = global_table.column(icol);
distribute_cols(global_col, local_table[icol]->mutable_view(), communicator);
}
return std::make_unique<cudf::table>(std::move(local_table));
}
std::unique_ptr<cudf::table> collect_tables(cudf::table_view table, Communicator *communicator)
{
int mpi_rank = communicator->mpi_rank;
int mpi_size = communicator->mpi_size;
int ncols = table.num_columns();
int nrows = table.num_rows();
/* Send the table size to the root */
std::vector<cudf::size_type> table_nrows(mpi_size, -1);
MPI_CALL(MPI_Gather(&nrows,
1,
mpi_dtype_from_c_type<cudf::size_type>(),
table_nrows.data(),
1,
mpi_dtype_from_c_type<cudf::size_type>(),
0,
MPI_COMM_WORLD));
/* Compute the scan of table_size on root */
std::vector<cudf::size_type> table_nrows_scan(mpi_size + 1, -1);
if (mpi_rank == 0) {
table_nrows_scan[0] = 0;
for (int irank = 0; irank < mpi_size; irank++) {
table_nrows_scan[irank + 1] = table_nrows_scan[irank] + table_nrows[irank];
}
}
/* Construct receive buffer on root */
std::vector<std::unique_ptr<cudf::column>> merged_columns;
if (mpi_rank == 0) {
for (int icol = 0; icol < ncols; icol++) {
merged_columns.push_back(
cudf::make_fixed_width_column(table.column(icol).type(), table_nrows_scan.back()));
}
CUDA_RT_CALL(cudaStreamSynchronize(cudaStreamDefault));
}
/* Send table from each rank to root */
for (int icol = 0; icol < ncols; icol++) {
std::size_t dtype_size = cudf::size_of(table.column(icol).type());
communicator->start();
if (mpi_rank == 0) {
for (int irank = 1; irank < mpi_size; irank++) {
void *start_addr = (void *)(merged_columns[icol]->mutable_view().head<char>() +
table_nrows_scan[irank] * dtype_size);
communicator->recv(start_addr, table_nrows[irank], dtype_size, irank);
}
CUDA_RT_CALL(cudaMemcpy(merged_columns[icol]->mutable_view().head(),
table.column(icol).head(),
table_nrows[0] * dtype_size,
cudaMemcpyDeviceToDevice));
} else {
communicator->send(table.column(icol).head(), nrows, dtype_size, 0);
}
communicator->stop();
}
if (mpi_rank == 0) {
return std::make_unique<cudf::table>(std::move(merged_columns));
} else {
return std::unique_ptr<cudf::table>(nullptr);
}
}
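/*
 * Illustrative round-trip sketch (assumes `global_table` exists on the root rank and
 * `communicator` is initialized): scatter the table from the root, operate on the local
 * partitions, then gather the partitions back onto the root.
 *
 *   std::unique_ptr<cudf::table> local = distribute_table(global_table, communicator);
 *   // ... per-rank work on local->view() ...
 *   std::unique_ptr<cudf::table> gathered = collect_tables(local->view(), communicator);
 */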
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/src/generate_table.cuh
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../generate_dataset/generate_dataset.cuh"
#include "all_to_all_comm.hpp"
#include "compression.hpp"
#include "error.hpp"
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/table/table.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <memory>
#include <stdexcept>
#include <tuple>
#include <utility>
#include <vector>
struct generate_payload_functor {
template <typename T,
std::enable_if_t<not cudf::is_timestamp_t<T>::value and
not cudf::is_duration_t<T>::value> * = nullptr>
void operator()(T *ptr, cudf::size_type nelements)
{
thrust::sequence(thrust::device, ptr, ptr + nelements);
}
template <
typename T,
std::enable_if_t<cudf::is_timestamp_t<T>::value or cudf::is_duration_t<T>::value> * = nullptr>
void operator()(T *ptr, cudf::size_type nelements)
{
thrust::sequence(thrust::device,
reinterpret_cast<typename T::rep *>(ptr),
reinterpret_cast<typename T::rep *>(ptr) + nelements);
}
};
/**
* Generate a build table and a probe table for testing distributed join.
*
* Both the build table and the probe table have two columns. The first column is the key column,
* with datatype KEY_T. The second column is the payload column, with datatype PAYLOAD_T.
*
* @param[in] build_table_nrows The number of rows in the build table.
* @param[in] probe_table_nrows The number of rows in the probe table.
 * @param[in] selectivity Probability with which an element of the probe table is present in the
* build table.
* @param[in] rand_max Maximum random number to generate, i.e., random numbers are integers from
* [0, rand_max].
* @param[in] uniq_build_tbl_keys If each key in the build table should appear exactly once.
*
* @return A pair of generated build table and probe table.
*/
template <typename KEY_T, typename PAYLOAD_T>
std::pair<std::unique_ptr<cudf::table>, std::unique_ptr<cudf::table>> generate_build_probe_tables(
cudf::size_type build_table_nrows,
cudf::size_type probe_table_nrows,
double selectivity,
KEY_T rand_max,
bool uniq_build_tbl_keys)
{
// Allocate device memory for the generated columns
std::vector<std::unique_ptr<cudf::column>> build;
std::vector<std::unique_ptr<cudf::column>> probe;
constexpr cudf::data_type key_type = cudf::data_type(cudf::type_to_id<KEY_T>());
constexpr cudf::data_type payload_type = cudf::data_type(cudf::type_to_id<PAYLOAD_T>());
build.push_back(cudf::make_numeric_column(key_type, build_table_nrows));
build.push_back(cudf::make_fixed_width_column(payload_type, build_table_nrows));
probe.push_back(cudf::make_numeric_column(key_type, probe_table_nrows));
probe.push_back(cudf::make_fixed_width_column(payload_type, probe_table_nrows));
// Generate build and probe table data
generate_input_tables<KEY_T, cudf::size_type>(build[0]->mutable_view().head<KEY_T>(),
build_table_nrows,
probe[0]->mutable_view().head<KEY_T>(),
probe_table_nrows,
selectivity,
rand_max,
uniq_build_tbl_keys);
generate_payload_functor{}.operator()<PAYLOAD_T>(build[1]->mutable_view().head<PAYLOAD_T>(),
build_table_nrows);
generate_payload_functor{}.operator()<PAYLOAD_T>(probe[1]->mutable_view().head<PAYLOAD_T>(),
probe_table_nrows);
CUDA_RT_CALL(cudaGetLastError());
CUDA_RT_CALL(cudaDeviceSynchronize());
// return the generated tables
auto build_table = std::make_unique<cudf::table>(std::move(build));
auto probe_table = std::make_unique<cudf::table>(std::move(probe));
return std::make_pair(std::move(build_table), std::move(probe_table));
}
template <typename data_type>
void add_constant_to_column(cudf::mutable_column_view column, data_type constant)
{
auto buffer_ptr = thrust::device_pointer_cast(column.head<data_type>());
thrust::transform(
buffer_ptr, buffer_ptr + column.size(), buffer_ptr, [=] __device__(data_type & i) {
return i + constant;
});
}
/**
 * This function generates the build and probe tables distributed across ranks, and it needs to be
 * called collectively by all ranks in MPI_COMM_WORLD.
*
* @param[in] build_table_nrows_per_rank The number of rows of build table on each rank.
* @param[in] probe_table_nrows_per_rank The number of rows of probe table on each rank.
* @param[in] selectivity The percentage of keys in the probe table present in the
* build table.
 * @param[in] rand_max_per_rank The lottery size on each rank. This argument should be
 * set larger than `build_table_nrows_per_rank`.
* @param[in] uniq_build_tbl_keys Whether the keys in the build table are unique.
* @param[in] communicator An instance of `Communicator` used for communication.
*
 * Note: requires build_table_nrows_per_rank % mpi_size == 0 and probe_table_nrows_per_rank %
 * mpi_size == 0.
*
* @return A pair of generated build and probe table distributed on each rank.
*/
template <typename KEY_T, typename PAYLOAD_T>
std::pair<std::unique_ptr<cudf::table>, std::unique_ptr<cudf::table>> generate_tables_distributed(
cudf::size_type build_table_nrows_per_rank,
cudf::size_type probe_table_nrows_per_rank,
double selectivity,
KEY_T rand_max_per_rank,
bool uniq_build_tbl_keys,
Communicator *communicator)
{
  // Algorithm used for distributed generation:
  // Rank i generates build and probe tables independently with keys randomly selected from the
  // range [i*rand_max_per_rank, (i+1)*rand_max_per_rank] (called pre_shuffle_table). Afterwards,
  // pre_shuffle_table is divided into N chunks with the same number of rows, and chunk j is sent
  // to rank j. This all-to-all communication makes each local table have keys drawn uniformly
  // from the whole range.
// Get MPI information
int mpi_rank, mpi_size;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
CommunicationGroup comm_group(mpi_size);
// Generate local build and probe table on each rank
std::unique_ptr<cudf::table> pre_shuffle_build_table;
std::unique_ptr<cudf::table> pre_shuffle_probe_table;
std::tie(pre_shuffle_build_table, pre_shuffle_probe_table) =
generate_build_probe_tables<KEY_T, PAYLOAD_T>(build_table_nrows_per_rank,
probe_table_nrows_per_rank,
selectivity,
rand_max_per_rank,
uniq_build_tbl_keys);
// Add constant to build and probe table to make sure the range is correct
add_constant_to_column<KEY_T>(pre_shuffle_build_table->mutable_view().column(0),
rand_max_per_rank * mpi_rank);
add_constant_to_column<KEY_T>(pre_shuffle_probe_table->mutable_view().column(0),
rand_max_per_rank * mpi_rank);
add_constant_to_column<PAYLOAD_T>(pre_shuffle_build_table->mutable_view().column(1),
build_table_nrows_per_rank * mpi_rank);
add_constant_to_column<PAYLOAD_T>(pre_shuffle_probe_table->mutable_view().column(1),
probe_table_nrows_per_rank * mpi_rank);
// Construct buffer offset to indicate the start indices to each rank
std::vector<cudf::size_type> build_table_offset(mpi_size + 1);
std::vector<cudf::size_type> probe_table_offset(mpi_size + 1);
for (cudf::size_type irank = 0; irank <= mpi_size; irank++) {
build_table_offset[irank] = build_table_nrows_per_rank / mpi_size * irank;
probe_table_offset[irank] = probe_table_nrows_per_rank / mpi_size * irank;
}
// Allocate memory for the result tables
std::vector<int64_t> build_table_recv_offset;
std::vector<int64_t> probe_table_recv_offset;
communicate_sizes(build_table_offset, build_table_recv_offset, comm_group, communicator);
communicate_sizes(probe_table_offset, probe_table_recv_offset, comm_group, communicator);
std::vector<std::unique_ptr<cudf::column>> build_table_columns;
for (cudf::size_type icol = 0; icol < pre_shuffle_build_table->num_columns(); icol++) {
build_table_columns.push_back(make_fixed_width_column(
pre_shuffle_build_table->view().column(icol).type(), build_table_recv_offset.back()));
}
std::unique_ptr<cudf::table> build_table =
std::make_unique<cudf::table>(std::move(build_table_columns));
std::vector<std::unique_ptr<cudf::column>> probe_table_columns;
for (cudf::size_type icol = 0; icol < pre_shuffle_probe_table->num_columns(); icol++) {
probe_table_columns.push_back(make_fixed_width_column(
pre_shuffle_probe_table->view().column(icol).type(), probe_table_recv_offset.back()));
}
std::unique_ptr<cudf::table> probe_table =
std::make_unique<cudf::table>(std::move(probe_table_columns));
CUDA_RT_CALL(cudaStreamSynchronize(cudaStreamDefault));
// Set compression options to none
std::vector<ColumnCompressionOptions> build_compression_options =
generate_none_compression_options(pre_shuffle_build_table->view());
std::vector<ColumnCompressionOptions> probe_compression_options =
generate_none_compression_options(pre_shuffle_probe_table->view());
// Send each bucket to the desired target rank
if (communicator->group_by_batch()) communicator->start();
std::vector<AllToAllCommBuffer> all_to_all_comm_buffers;
append_to_all_to_all_comm_buffers(pre_shuffle_build_table->view(),
build_table->mutable_view(),
build_table_offset,
build_table_recv_offset,
all_to_all_comm_buffers,
build_compression_options);
append_to_all_to_all_comm_buffers(pre_shuffle_probe_table->view(),
probe_table->mutable_view(),
probe_table_offset,
probe_table_recv_offset,
all_to_all_comm_buffers,
probe_compression_options);
all_to_all_comm(all_to_all_comm_buffers, comm_group, communicator, true);
if (communicator->group_by_batch()) communicator->stop();
return std::make_pair(std::move(build_table), std::move(probe_table));
}
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/src/error.hpp
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdio>
#include <cstdlib>
#ifndef CUDA_RT_CALL
#define CUDA_RT_CALL(call) \
{ \
cudaError_t cudaStatus = call; \
if (cudaSuccess != cudaStatus) { \
fprintf(stderr, \
"ERROR: CUDA RT call \"%s\" in line %d of file %s failed with %s (%d).\n", \
#call, \
__LINE__, \
__FILE__, \
cudaGetErrorString(cudaStatus), \
cudaStatus); \
exit(1); \
} \
}
#endif
#ifndef UCX_CALL
#define UCX_CALL(call) \
{ \
ucs_status_t status = call; \
if (UCS_OK != status) { \
fprintf(stderr, \
"\"%s\" in line %d of file %s failed with %s (%d).\n", \
#call, \
__LINE__, \
__FILE__, \
ucs_status_string(status), \
status); \
exit(1); \
} \
}
#endif
#ifndef MPI_CALL
#define MPI_CALL(call) \
{ \
int status = call; \
if (MPI_SUCCESS != status) { \
int len; \
char estring[MPI_MAX_ERROR_STRING]; \
MPI_Error_string(status, estring, &len); \
fprintf(stderr, \
"\"%s\" in line %d of file %s failed with %s (%d).\n", \
#call, \
__LINE__, \
__FILE__, \
estring, \
status); \
exit(1); \
} \
}
#endif
#ifndef NCCL_CALL
#define NCCL_CALL(call) \
{ \
ncclResult_t status = call; \
if (ncclSuccess != status) { \
fprintf(stderr, \
"ERROR: nccl call \"%s\" in line %d of file %s failed with %s.\n", \
#call, \
__LINE__, \
__FILE__, \
ncclGetErrorString(status)); \
exit(1); \
} \
}
#endif
#define CHECK_ERROR(rtv, expected_value, msg) \
{ \
if (rtv != expected_value) { \
fprintf(stderr, "ERROR on line %d of file %s: %s\n", __LINE__, __FILE__, msg); \
std::cerr << rtv << std::endl; \
exit(1); \
} \
}
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/src/all_to_all_comm.hpp
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "communicator.hpp"
#include "compression.hpp"
#include <nvcomp/cascaded.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <mpi.h>
#include <cstdint>
#include <type_traits>
#include <vector>
enum COMM_TAGS { placeholder_tag, exchange_size_tag };
/**
* Usage: mpi_dtype_from_c_type<input_type>() returns the MPI datatype corresponding to a native C
* type "input_type".
*
* For example, mpi_dtype_from_c_type<int>() would return MPI_INT32_T.
*/
template <typename c_type>
MPI_Datatype mpi_dtype_from_c_type()
{
MPI_Datatype mpi_dtype;
if (std::is_same<c_type, int8_t>::value)
mpi_dtype = MPI_INT8_T;
else if (std::is_same<c_type, uint8_t>::value)
mpi_dtype = MPI_UINT8_T;
else if (std::is_same<c_type, int16_t>::value)
mpi_dtype = MPI_INT16_T;
else if (std::is_same<c_type, uint16_t>::value)
mpi_dtype = MPI_UINT16_T;
else if (std::is_same<c_type, int32_t>::value)
mpi_dtype = MPI_INT32_T;
else if (std::is_same<c_type, uint32_t>::value)
mpi_dtype = MPI_UINT32_T;
else if (std::is_same<c_type, int64_t>::value)
mpi_dtype = MPI_INT64_T;
else if (std::is_same<c_type, uint64_t>::value)
mpi_dtype = MPI_UINT64_T;
else if (std::is_same<c_type, float>::value)
mpi_dtype = MPI_FLOAT;
else if (std::is_same<c_type, double>::value)
mpi_dtype = MPI_DOUBLE;
return mpi_dtype;
}
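/*
 * Illustrative sketch: the helper keeps MPI calls generic over the C types used throughout the
 * code base, e.g. broadcasting a cudf::size_type row count:
 *
 *   cudf::size_type nrows = 0;
 *   MPI_CALL(MPI_Bcast(&nrows, 1, mpi_dtype_from_c_type<cudf::size_type>(), 0, MPI_COMM_WORLD));
 */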
class CommunicationGroup {
public:
/**
* CommunicationGroup represents a group of ranks for all-to-all communication.
*
* The group of ranks is determined by the following two filters on MPI_COMM_WORLD:
*
   * First, all ranks are partitioned into grids of size *grid_size*:
* {{0,1,2,...,grid_size-1},
* {grid_size,grid_size+1,grid_size+2,...,grid_size*2-1},
* ...
* }
* *grid_size* must divide `mpi_size`.
*
   * Second, within each grid, ranks are sampled with spacing *stride*. For example, with grid_size
   * 10 and stride 2, {0,2,4,6,8} is a group, while {1,3,5,7,9} is another group. *stride* must
   * divide *grid_size*.
*
* For example, if there are 16 ranks, with grid_size 8, and stride 2, we have the following
* groups: {0,2,4,6}, {1,3,5,7}, {8,10,12,14}, {9,11,13,15}.
*/
CommunicationGroup(int grid_size, int stride = 1) : grid_size(grid_size), stride(stride)
{
    assert(grid_size % stride == 0 && "Grid size should be a multiple of stride");
MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank));
group_start = mpi_rank / grid_size * grid_size + mpi_rank % stride;
}
// Get the size of the communication group
int size() const { return grid_size / stride; }
// Get the global MPI rank in MPI_COMM_WORLD corresponding to local index in the group
int get_global_rank(int local_idx) const { return group_start + local_idx * stride; }
// Get the local index in the group corresponding to the current rank
int get_local_idx() const { return (mpi_rank - group_start) / stride; }
private:
int mpi_rank;
int group_start;
int grid_size;
int stride;
};
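/*
 * Worked example (illustrative): with 16 ranks, grid_size == 8 and stride == 2, rank 10 belongs
 * to the group {8, 10, 12, 14}:
 *
 *   CommunicationGroup group(8, 2);   // constructed on rank 10
 *   group.size();                     // 4
 *   group.get_local_idx();            // 1, since group_start == 8
 *   group.get_global_rank(3);         // 14
 */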
/**
 * Communicate the number of elements received from each rank during all-to-all communication.
*
* Note: This function needs to be called collectively by all ranks in *comm_group*.
*
* @param[in] send_offset Vector of length `comm_group_size + 1` such that `send_offset[i+1] -
* send_offset[i]` is the number of elements sent from the current rank to local rank `i` during the
* all-to-all communication.
* @param[out] recv_offset Vector of length `comm_group_size + 1` such that `recv_offset[i+1] -
* recv_offset[i]` is the number of elements received from local rank `i` during the all-to-all
* communication. The vector will be resized in this function and does not need to be preallocated.
*/
void communicate_sizes(std::vector<int64_t> const &send_offset,
std::vector<int64_t> &recv_offset,
CommunicationGroup comm_group,
Communicator *communicator);
void communicate_sizes(std::vector<cudf::size_type> const &send_offset,
std::vector<int64_t> &recv_offset,
CommunicationGroup comm_group,
Communicator *communicator);
void warmup_all_to_all(Communicator *communicator);
struct AllToAllCommBuffer {
// the buffer to be all-to-all communicated
const void *send_buffer;
// the receive buffer for all-to-all communication
void *recv_buffer;
// vector of size `comm_group_size + 1`, the start index of items in `send_buffer` to be sent to
// each rank
std::vector<int64_t> send_offsets;
// vector of size `comm_group_size + 1`, the start index of items in `recv_buffer` to receive data
// from each rank
std::vector<int64_t> recv_offsets;
// data type of each element
cudf::data_type dtype;
// the compression method used
CompressionMethod compression_method;
// cascaded compression format
nvcompCascadedFormatOpts cascaded_format;
// compressed `send_buffer` to be all-to-all communicated
rmm::device_buffer compressed_send_buffer;
// the receive buffer for the compressed data
rmm::device_buffer compressed_recv_buffer;
// vector of size `comm_group_size + 1`, the start byte in `compressed_send_buffer` to be sent to
// each rank
std::vector<int64_t> compressed_send_offsets;
// vector of size `comm_group_size + 1`, the start byte in `compressed_recv_buffer` to receive
// data from each rank
std::vector<int64_t> compressed_recv_offsets;
AllToAllCommBuffer(const void *send_buffer,
void *recv_buffer,
std::vector<int64_t> send_offsets,
std::vector<int64_t> recv_offsets,
cudf::data_type dtype,
CompressionMethod compression_method,
nvcompCascadedFormatOpts cascaded_format)
: send_buffer(send_buffer),
recv_buffer(recv_buffer),
send_offsets(send_offsets),
recv_offsets(recv_offsets),
dtype(dtype),
compression_method(compression_method),
cascaded_format(cascaded_format)
{
}
};
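// A minimal sketch (assumed usage, not taken from the library) of building a plan entry for a
// fixed-width INT32 column that is sent uncompressed; the cascaded options are value-initialized
// because they are only consulted when cascaded compression is selected.
inline AllToAllCommBuffer example_uncompressed_comm_buffer(const void *send_buffer,
                                                           void *recv_buffer,
                                                           std::vector<int64_t> send_offsets,
                                                           std::vector<int64_t> recv_offsets)
{
  return AllToAllCommBuffer(send_buffer,
                            recv_buffer,
                            std::move(send_offsets),
                            std::move(recv_offsets),
                            cudf::data_type(cudf::type_id::INT32),
                            CompressionMethod::none,
                            nvcompCascadedFormatOpts{});
}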
/**
* Generate plans for all-to-all communication.
*
* Note: This function does not perform the actual communication. It simply puts required
* information about all-to-all communication (e.g. the send buffer, the receive buffer, offsets,
* whether to use compression etc.) into `all_to_all_comm_buffers`.
*
* @param[in] input Table to be all-to-all communicated.
* @param[in] output Table after all-to-all communication. This argument needs to be preallocated.
* The helper function `allocate_communicated_table` can be used to allocate this table.
 * @param[in] send_offsets Vector of size `comm_group_size + 1` such that `send_offsets[i]` represents
 * the start index of `input` to be sent to local rank `i`.
 * @param[in] recv_offsets Vector of size `comm_group_size + 1` such that `recv_offsets[i]` represents
 * the start index of `output` to receive data from local rank `i`.
* @param[in] string_send_offsets Vector with shape `(num_columns, comm_group_size + 1)`, such
 * that `string_send_offsets[j,k]` represents the start index in the char subcolumn of column
* `j` that needs to be sent to local rank `k`. The helper function `gather_string_offsets` can be
* used to generate this field.
* @param[in] string_recv_offsets Vector with shape `(num_columns, comm_group_size + 1)`, such
 * that `string_recv_offsets[j,k]` represents the start index in the char subcolumn of column
* `j` that receives data from local rank `k`. The helper function `gather_string_offsets` can be
* used to generate this field.
* @param[in] string_sizes_send String sizes of each row for all string columns. The helper function
* `calculate_string_sizes_from_offsets` can be used to generate this field.
* @param[in] string_sizes_recv Receive buffers for string sizes. This argument needs to be
* preallocated. The helper function `allocate_string_sizes_receive_buffer` can be used for
* allocating the buffers.
* @param[out] all_to_all_comm_buffers Each element in this vector represents a buffer that needs to
* be all-to-all communicated.
* @param[in] compression_options Vector of length equal to the number of columns in *input*,
* indicating whether/how each column needs to be compressed before communication.
*/
void append_to_all_to_all_comm_buffers(
cudf::table_view input,
cudf::mutable_table_view output,
std::vector<cudf::size_type> const &send_offsets,
std::vector<int64_t> const &recv_offsets,
std::vector<std::vector<cudf::size_type>> const &string_send_offsets,
std::vector<std::vector<int64_t>> const &string_recv_offsets,
std::vector<rmm::device_uvector<cudf::size_type>> const &string_sizes_send,
std::vector<rmm::device_uvector<cudf::size_type>> &string_sizes_recv,
std::vector<AllToAllCommBuffer> &all_to_all_comm_buffers,
std::vector<ColumnCompressionOptions> const &compression_options);
void append_to_all_to_all_comm_buffers(cudf::table_view input,
cudf::mutable_table_view output,
std::vector<cudf::size_type> const &send_offsets,
std::vector<int64_t> const &recv_offsets,
std::vector<AllToAllCommBuffer> &all_to_all_comm_buffers,
std::vector<ColumnCompressionOptions> compression_options);
/**
* Perform all-to-all communication according to plans.
*
* Note: If the communicator supports grouping by batches, this call is nonblocking and should
* be enclosed by `communicator->start()` and `communicator->stop()`.
*
* This function needs to be called collectively by all ranks in *comm_group*.
*
* @param[in] all_to_all_comm_buffers Plans for all-to-all communication, generated by
* `append_to_all_to_all_comm_buffers`. Note that the send/recv offsets specified must be compatible
* with *comm_group*.
* @param[in] communicator An instance of `Communicator` used for communication.
* @param[in] include_current_rank If true, this function will send the partition destined to the
* current rank.
* @param[in] preallocated_pinned_buffer Preallocated page-locked host buffer with size at least
* `comm_group_size * sizeof(size_t)`, used for holding the compressed sizes.
*/
void all_to_all_comm(std::vector<AllToAllCommBuffer> &all_to_all_comm_buffers,
CommunicationGroup comm_group,
Communicator *communicator,
bool include_current_rank = true,
bool report_timing = false,
void *preallocated_pinned_buffer = nullptr);
/**
* Actions to be performed after all-to-all communication is finished.
*
* Note: The arguments of this function need to match those of `all_to_all_comm`.
*/
void postprocess_all_to_all_comm(std::vector<AllToAllCommBuffer> &all_to_all_comm_buffers,
CommunicationGroup comm_group,
Communicator *communicator,
bool include_current_rank = true,
bool report_timing = false);
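// A minimal sketch of one plan/communicate/postprocess round for a table without string columns
// (it mirrors what AllToAllCommunicator::launch_communication does internally); the offsets and
// compression options are assumed to be prepared by the caller, and every rank in *comm_group*
// must call this collectively.
inline void example_all_to_all_round(cudf::table_view input,
                                     cudf::mutable_table_view output,
                                     std::vector<cudf::size_type> const &send_offsets,
                                     std::vector<int64_t> const &recv_offsets,
                                     std::vector<ColumnCompressionOptions> compression_options,
                                     CommunicationGroup comm_group,
                                     Communicator *communicator)
{
  std::vector<AllToAllCommBuffer> buffers;
  append_to_all_to_all_comm_buffers(
    input, output, send_offsets, recv_offsets, buffers, compression_options);
  // When the communicator batches messages, all sends/recvs must be enclosed by start()/stop().
  if (communicator->group_by_batch()) communicator->start();
  all_to_all_comm(buffers, comm_group, communicator);
  if (communicator->group_by_batch()) communicator->stop();
  // Decompress received buffers (a no-op for columns communicated without compression).
  postprocess_all_to_all_comm(buffers, comm_group, communicator);
}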
/**
* High-level interface for all-to-all communicating a cuDF table.
*/
class AllToAllCommunicator {
  // Note: General strategy for the string columns during all-to-all communication
// Each string column in cuDF consists of two subcolumns: a char subcolumn and an offset
// subcolumn. For the char subcolumn, we need to first gather the offsets in this string
// subcolumn of all ranks by using `gather_string_offsets`, and then it can be all-to-all
// communicated using the gathered offsets. For the offset subcolumn, we can first calculate the
// sizes of all rows by calculating the adjacent differences. Then, the sizes are all-to-all
// communicated. Once the all-to-all communication finishes, on target rank we can reconstruct
// the offset subcolumn by using a scan on sizes.
public:
/**
   * Note: This constructor needs to be called collectively by all ranks in *comm_group*.
*
* @param[in] input_table Table to be all-to-all communicated.
* @param[in] offsets Vector of length `comm_group_size + 1`, indexed into *input_table*,
* representing the start/end row index to send to each rank.
* @param[in] compression_options Vector of length equal to the number of columns, indicating
* whether/how each column needs to be compressed before communication.
* @param[in] explicit_copy_to_current_rank If true, rows destined to the current rank are copied
* using explicit device-to-device memory copy instead of going through communicator.
*/
AllToAllCommunicator(cudf::table_view input_table,
std::vector<cudf::size_type> offsets,
CommunicationGroup comm_group,
Communicator *communicator,
std::vector<ColumnCompressionOptions> compression_options,
bool explicit_copy_to_current_rank = false);
/**
* This variant of *AllToAllCommunicator* uses a communication group with all ranks and
* stride 1.
*/
AllToAllCommunicator(cudf::table_view input_table,
std::vector<cudf::size_type> offsets,
Communicator *communicator,
std::vector<ColumnCompressionOptions> compression_options,
bool explicit_copy_to_current_rank = false);
AllToAllCommunicator(const AllToAllCommunicator &) = delete;
AllToAllCommunicator &operator=(const AllToAllCommunicator &) = delete;
AllToAllCommunicator(AllToAllCommunicator &&) = default;
/**
* Allocate the tables after all-to-all communication.
*
* Note: This function uses the default stream for allocation and is synchronous to the host
* thread.
*
* @return Allocated table.
*/
std::unique_ptr<cudf::table> allocate_communicated_table();
/**
* Launch the all-to-all communication.
*
* Note: This function needs to be called collectively by all ranks in *comm_group*.
* Note: This function will block the host thread until the communication is completed.
*
* @param[in] communicated_table Preallocated table for receiving incoming data.
* @param[in] preallocated_pinned_buffer Preallocated page-locked host buffer with size at least
* `comm_group_size * sizeof(size_t)`, used for holding the compressed sizes.
*/
void launch_communication(cudf::mutable_table_view communicated_table,
bool report_timing = false,
void *preallocated_pinned_buffer = nullptr);
private:
cudf::table_view input_table;
CommunicationGroup comm_group;
Communicator *communicator;
bool explicit_copy_to_current_rank;
std::vector<cudf::size_type> send_offsets;
// Start row index in the communicated table to receive data from each rank.
std::vector<int64_t> recv_offsets;
// `string_send_offsets[j, k]` represents the start index into char subcolumn to be sent to local
// rank `k` for column `j`. If column `j` is not a string column, `string_send_offsets[j]` will be
// an empty vector. Otherwise, `string_send_offsets[j]` will be a vector of length
// `comm_group_size + 1`.
std::vector<std::vector<cudf::size_type>> string_send_offsets;
// `string_recv_offsets[j, k]` represents the start index into char subcolumn
// to receive data from local rank `k` for column `j`. If column `j` is not a string column,
// `string_recv_offsets[j]` will be an empty vector. Otherwise, `string_recv_offsets[j]`
// will be a vector of length `comm_group_size + 1`.
std::vector<std::vector<int64_t>> string_recv_offsets;
std::vector<rmm::device_uvector<cudf::size_type>> string_sizes_to_send;
std::vector<rmm::device_uvector<cudf::size_type>> string_sizes_received;
std::vector<ColumnCompressionOptions> compression_options;
};
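// A minimal usage sketch of AllToAllCommunicator (not part of the library; assumes the full
// definition of cudf::table is visible here): *offsets* must contain comm_group_size + 1 row
// indices into *input*, and every rank in the communication group must call this collectively.
inline std::unique_ptr<cudf::table> example_all_to_all_table(
  cudf::table_view input,
  std::vector<cudf::size_type> offsets,
  CommunicationGroup comm_group,
  Communicator *communicator,
  std::vector<ColumnCompressionOptions> compression_options)
{
  AllToAllCommunicator all_to_all(input, offsets, comm_group, communicator, compression_options);
  // Allocate the receive-side table first, then launch the (blocking) communication into it.
  std::unique_ptr<cudf::table> communicated = all_to_all.allocate_communicated_table();
  all_to_all.launch_communication(communicated->mutable_view());
  return communicated;
}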
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/src/setup.hpp
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "communicator.hpp"
#include "registered_memory_resource.hpp"
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <cstdint>
#include <string>
void set_cuda_device();
/**
* Setup RMM memory pool and communicator.
*
* This function will set the current device's memory pool. The memory pool and communicator
* initialized in this function can be destroyed by *destroy_memory_pool_and_communicator*.
*
* @param[out]: communicator Communicator to be constructed.
 * @param[out]: registered_mr If the memory pool needs to be preregistered, this argument holds a
 * pointer to the registered memory resource. If not preregistered, this argument will be *nullptr*.
* @param[out]: pool_mr RMM memory resource for memory pool.
* @param[in]: communicator_name Can be either "NCCL" or "UCX".
* @param[in]: registration_method If using UCX communicator, this argument can be either "none",
* "buffer" or "preregistered".
* @param[in]: communicator_buffer_size If the registration_method is set to "buffer", this argument
* controls the size of the communication buffer used by the communicator.
*/
void setup_memory_pool_and_communicator(
Communicator *&communicator,
registered_memory_resource *®istered_mr,
rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> *&pool_mr,
std::string communicator_name,
std::string registration_method,
int64_t communicator_buffer_size);
void destroy_memory_pool_and_communicator(
Communicator *communicator,
registered_memory_resource *registered_mr,
rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> *pool_mr,
std::string communicator_name,
std::string registration_method);
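// A minimal usage sketch of the setup/teardown pair (the communicator name, registration method
// and buffer size below are illustrative values from the parameter documentation above); MPI is
// assumed to be initialized before set_cuda_device() is called.
inline void example_setup_and_teardown()
{
  Communicator *communicator = nullptr;
  registered_memory_resource *registered_mr = nullptr;
  rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> *pool_mr = nullptr;
  set_cuda_device();
  setup_memory_pool_and_communicator(
    communicator, registered_mr, pool_mr, "UCX", "preregistered", 0);
  // ... run distributed algorithms through *communicator* ...
  destroy_memory_pool_and_communicator(
    communicator, registered_mr, pool_mr, "UCX", "preregistered");
}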
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/src/distributed_join.cpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "distributed_join.hpp"
#include "all_to_all_comm.hpp"
#include "communicator.hpp"
#include "compression.hpp"
#include "error.hpp"
#include "shuffle_on.hpp"
#include <cudf/column/column.hpp>
#include <cudf/concatenate.hpp>
#include <cudf/join.hpp>
#include <cudf/partitioning.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <cuda_runtime.h>
#include <atomic>
#include <chrono>
#include <cmath>
#include <functional>
#include <iostream>
#include <memory>
#include <thread>
#include <utility>
#include <vector>
using cudf::column;
using cudf::table;
using std::vector;
using std::chrono::duration_cast;
using std::chrono::high_resolution_clock;
using std::chrono::milliseconds;
/**
 * Helper function for getting the number of partitions in the NVLink communication stage.
 *
 * This function returns *mpi_size* when *nvlink_domain_size* is at least *mpi_size*. Otherwise, it
 * returns the largest divisor of *mpi_size* that is no larger than both *nvlink_domain_size* and
 * ceil(sqrt(*mpi_size*)). For example, with 16 ranks and an NVLink domain size of 4, it returns 4.
*/
static int get_nvl_partition_size(int mpi_size, int nvlink_domain_size)
{
if (nvlink_domain_size >= mpi_size) return mpi_size;
for (int size = ceil(sqrt(mpi_size)); size > 0; size--) {
if (mpi_size % size == 0 && size <= nvlink_domain_size) { return size; }
}
return 1;
}
static std::unique_ptr<table> local_join_helper(cudf::table_view left,
cudf::table_view right,
vector<cudf::size_type> const &left_on,
vector<cudf::size_type> const &right_on)
{
if (left.num_rows() && right.num_rows()) {
// Perform local join only when both left and right tables are not empty.
// If either is empty, cuDF's inner join will return the other table, which is not desired.
return cudf::inner_join(left, right, left_on, right_on);
}
return std::make_unique<table>();
}
/**
* Local join thread used for merging incoming partitions and performing local joins.
*
* @param[in] communicated_left Left table after all-to-all communication.
* @param[in] communicated_right Right table after all-to-all communication.
* @param[out] batch_join_results Inner join result of each batch.
* @param[in] left_on Column indices from the left table to join on. This argument will be
* passed directly to *cudf::inner_join*.
* @param[in] right_on Column indices from the right table to join on. This argument will be
* passed directly to *cudf::inner_join*.
* @param[in] flags *flags[i]* is true if and only if the ith batch has finished the all-to-all
* communication.
* @param[in] report_timing Whether to print the local join time.
* @param[in] mr RMM memory resource.
*/
static void inner_join_func(vector<std::unique_ptr<table>> &communicated_left,
vector<std::unique_ptr<table>> &communicated_right,
vector<std::unique_ptr<table>> &batch_join_results,
vector<cudf::size_type> const &left_on,
vector<cudf::size_type> const &right_on,
vector<std::atomic<bool>> const &flags,
Communicator *communicator,
bool report_timing,
rmm::mr::device_memory_resource *mr)
{
CUDA_RT_CALL(cudaSetDevice(communicator->current_device));
rmm::mr::set_current_device_resource(mr);
std::chrono::time_point<high_resolution_clock> start_time;
std::chrono::time_point<high_resolution_clock> stop_time;
for (size_t ibatch = 0; ibatch < flags.size(); ibatch++) {
// busy waiting for all-to-all communication of ibatch to finish
while (!flags[ibatch]) { ; }
if (report_timing) { start_time = high_resolution_clock::now(); }
batch_join_results[ibatch] = local_join_helper(
communicated_left[ibatch]->view(), communicated_right[ibatch]->view(), left_on, right_on);
if (report_timing) {
stop_time = high_resolution_clock::now();
auto duration = duration_cast<milliseconds>(stop_time - start_time);
std::cout << "Rank " << communicator->mpi_rank << ": Local join on batch " << ibatch
<< " takes " << duration.count() << "ms" << std::endl;
}
}
}
std::unique_ptr<table> distributed_inner_join(
cudf::table_view left,
cudf::table_view right,
vector<cudf::size_type> const &left_on,
vector<cudf::size_type> const &right_on,
Communicator *communicator,
vector<ColumnCompressionOptions> left_compression_options,
vector<ColumnCompressionOptions> right_compression_options,
int over_decom_factor,
bool report_timing,
void *preallocated_pinned_buffer,
int nvlink_domain_size)
{
int mpi_rank = communicator->mpi_rank;
int mpi_size = communicator->mpi_size;
std::chrono::time_point<high_resolution_clock> start_time;
std::chrono::time_point<high_resolution_clock> stop_time;
int nvlink_partition_size = get_nvl_partition_size(mpi_size, nvlink_domain_size);
/* Shuffle in Infiniband domain */
std::unique_ptr<table> shuffled_left_ib;
std::unique_ptr<table> shuffled_right_ib;
if (nvlink_partition_size != mpi_size) {
constexpr uint32_t hash_partition_seed_ib = 87654321;
shuffled_left_ib = shuffle_on(left,
left_on,
CommunicationGroup(mpi_size, nvlink_partition_size),
communicator,
left_compression_options,
cudf::hash_id::HASH_MURMUR3,
hash_partition_seed_ib,
report_timing,
preallocated_pinned_buffer);
shuffled_right_ib = shuffle_on(right,
right_on,
CommunicationGroup(mpi_size, nvlink_partition_size),
communicator,
right_compression_options,
cudf::hash_id::HASH_MURMUR3,
hash_partition_seed_ib,
report_timing,
preallocated_pinned_buffer);
left = shuffled_left_ib->view();
right = shuffled_right_ib->view();
}
if (nvlink_partition_size == 1) {
if (report_timing) { start_time = high_resolution_clock::now(); }
auto join_result = local_join_helper(left, right, left_on, right_on);
if (report_timing) {
stop_time = high_resolution_clock::now();
auto duration = duration_cast<milliseconds>(stop_time - start_time);
std::cout << "Rank " << mpi_rank << ": Hash partition takes " << duration.count() << "ms"
<< std::endl;
}
return join_result;
}
/* Hash partition */
if (report_timing) { start_time = high_resolution_clock::now(); }
std::unique_ptr<table> hashed_left;
vector<cudf::size_type> left_offset;
std::unique_ptr<table> hashed_right;
vector<cudf::size_type> right_offset;
constexpr uint32_t hash_partition_seed = 12345678;
std::tie(hashed_left, left_offset) =
cudf::hash_partition(left,
left_on,
nvlink_partition_size * over_decom_factor,
cudf::hash_id::HASH_MURMUR3,
hash_partition_seed);
std::tie(hashed_right, right_offset) =
cudf::hash_partition(right,
right_on,
nvlink_partition_size * over_decom_factor,
cudf::hash_id::HASH_MURMUR3,
hash_partition_seed);
CUDA_RT_CALL(cudaStreamSynchronize(0));
shuffled_left_ib.reset();
shuffled_right_ib.reset();
left_offset.push_back(left.num_rows());
right_offset.push_back(right.num_rows());
if (report_timing) {
stop_time = high_resolution_clock::now();
auto duration = duration_cast<milliseconds>(stop_time - start_time);
std::cout << "Rank " << mpi_rank << ": Hash partition takes " << duration.count() << "ms"
<< std::endl;
}
/* Construct AllToAllCommunicator */
std::vector<AllToAllCommunicator> all_to_all_communicator_left;
std::vector<AllToAllCommunicator> all_to_all_communicator_right;
for (int ibatch = 0; ibatch < over_decom_factor; ibatch++) {
int start_idx = ibatch * nvlink_partition_size;
int end_idx = (ibatch + 1) * nvlink_partition_size + 1;
all_to_all_communicator_left.emplace_back(
hashed_left->view(),
      vector<cudf::size_type>(left_offset.data() + start_idx, left_offset.data() + end_idx),
CommunicationGroup(nvlink_partition_size, 1),
communicator,
generate_none_compression_options(hashed_left->view()),
true);
all_to_all_communicator_right.emplace_back(
hashed_right->view(),
      vector<cudf::size_type>(right_offset.data() + start_idx, right_offset.data() + end_idx),
CommunicationGroup(nvlink_partition_size, 1),
communicator,
generate_none_compression_options(hashed_right->view()),
true);
}
/* Allocate storage for the table after all-to-all communication */
vector<std::unique_ptr<table>> communicated_left;
vector<std::unique_ptr<table>> communicated_right;
for (int ibatch = 0; ibatch < over_decom_factor; ibatch++) {
communicated_left.push_back(all_to_all_communicator_left[ibatch].allocate_communicated_table());
communicated_right.push_back(
all_to_all_communicator_right[ibatch].allocate_communicated_table());
}
// *flags* indicates whether each batch has finished communication
// *flags* uses std::atomic because unsynchronized access to an object which is modified in one
// thread and read in another is undefined behavior.
vector<std::atomic<bool>> flags(over_decom_factor);
vector<std::unique_ptr<table>> batch_join_results(over_decom_factor);
for (auto &flag : flags) { flag = false; }
/* Launch inner join thread */
std::thread inner_join_thread(inner_join_func,
std::ref(communicated_left),
std::ref(communicated_right),
std::ref(batch_join_results),
left_on,
right_on,
std::ref(flags),
communicator,
report_timing,
rmm::mr::get_current_device_resource());
/* Use the current thread for all-to-all communication */
for (int ibatch = 0; ibatch < over_decom_factor; ibatch++) {
if (report_timing) { start_time = high_resolution_clock::now(); }
all_to_all_communicator_left[ibatch].launch_communication(
communicated_left[ibatch]->mutable_view(), report_timing, preallocated_pinned_buffer);
all_to_all_communicator_right[ibatch].launch_communication(
communicated_right[ibatch]->mutable_view(), report_timing, preallocated_pinned_buffer);
// mark the communication of ibatch as finished.
// the join thread is safe to start performing local join on ibatch
flags[ibatch] = true;
if (report_timing) {
stop_time = high_resolution_clock::now();
auto duration = duration_cast<milliseconds>(stop_time - start_time);
std::cout << "Rank " << mpi_rank << ": All-to-all communication on batch " << ibatch
<< " takes " << duration.count() << "ms" << std::endl;
}
}
// hashed left and right tables should not be needed now
hashed_left.reset();
hashed_right.reset();
// wait for all join batches to finish
inner_join_thread.join();
/* Merge join results from different batches into a single table */
vector<cudf::table_view> batch_join_results_view;
for (auto &table_ptr : batch_join_results) {
batch_join_results_view.push_back(table_ptr->view());
}
return cudf::concatenate(batch_join_results_view);
}
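/**
 * Illustrative sketch (not part of the library): join two tables on their first columns with
 * compression disabled. The function name is hypothetical, the tables and communicator are
 * assumed to be prepared by the caller, and every rank must call this collectively.
 */
std::unique_ptr<table> example_distributed_join(cudf::table_view left,
                                                cudf::table_view right,
                                                Communicator *communicator)
{
  return distributed_inner_join(left,
                                right,
                                /*left_on=*/{0},
                                /*right_on=*/{0},
                                communicator,
                                generate_none_compression_options(left),
                                generate_none_compression_options(right),
                                /*over_decom_factor=*/1,
                                /*report_timing=*/false,
                                /*preallocated_pinned_buffer=*/nullptr,
                                /*nvlink_domain_size=*/communicator->mpi_size);
}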
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/src/shuffle_on.hpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "all_to_all_comm.hpp"
#include "communicator.hpp"
#include "compression.hpp"
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <memory>
#include <vector>
/**
* Shuffle the table according to the hash values.
*
 * Note: This function needs to be called collectively by all ranks in *comm_group*.
*
* @param[in] input Input table to be shuffled.
* @param[in] on_columns Columns used when computing the hash value of each row.
* @param[in] compression_options Vector of length equal to the number of columns in *input*,
* indicating whether/how each column needs to be compressed before communication.
* @param[in] hash_function Hash function used for computing the hash value of each row.
* @param[in] hash_seed Hash seed used with *hash_function*.
* @param[in] preallocated_pinned_buffer Preallocated page-locked host buffer with size at least
* `mpi_size * sizeof(size_t)`, used for holding the compressed sizes.
*/
std::unique_ptr<cudf::table> shuffle_on(cudf::table_view const& input,
std::vector<cudf::size_type> const& on_columns,
CommunicationGroup comm_group,
Communicator* communicator,
std::vector<ColumnCompressionOptions> compression_options,
cudf::hash_id hash_function = cudf::hash_id::HASH_MURMUR3,
uint32_t hash_seed = cudf::DEFAULT_HASH_SEED,
bool report_timing = false,
void* preallocated_pinned_buffer = nullptr);
/**
* This variant of *shuffle_on* uses a communication group with all ranks and stride 1.
*/
std::unique_ptr<cudf::table> shuffle_on(cudf::table_view const& input,
std::vector<cudf::size_type> const& on_columns,
Communicator* communicator,
std::vector<ColumnCompressionOptions> compression_options,
cudf::hash_id hash_function = cudf::hash_id::HASH_MURMUR3,
uint32_t hash_seed = cudf::DEFAULT_HASH_SEED,
bool report_timing = false,
void* preallocated_pinned_buffer = nullptr);
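// A minimal usage sketch (hypothetical function name): hash-shuffle a table on its first column
// across all ranks, keeping the default Murmur3 hash and seed. Every rank in MPI_COMM_WORLD must
// call this collectively, and *compression_options* must have one entry per column of *input*.
inline std::unique_ptr<cudf::table> example_shuffle_on_first_column(
  cudf::table_view const& input,
  Communicator* communicator,
  std::vector<ColumnCompressionOptions> compression_options)
{
  return shuffle_on(input, /*on_columns=*/{0}, communicator, compression_options);
}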
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/src/all_to_all_comm.cpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "all_to_all_comm.hpp"
#include "communicator.hpp"
#include "compression.hpp"
#include "error.hpp"
#include "strings_column.hpp"
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <mpi.h>
#include <cuda_runtime.h>
#include <cassert>
#include <cstring>
#include <iostream>
#include <memory>
#include <numeric>
#include <utility>
#include <vector>
using cudf::column;
using cudf::table;
using std::vector;
void communicate_sizes(std::vector<int64_t> const &send_offset,
std::vector<int64_t> &recv_offset,
CommunicationGroup comm_group,
Communicator *communicator)
{
int comm_group_size = comm_group.size();
vector<int64_t> send_count(comm_group_size, -1);
for (int local_idx = 0; local_idx < comm_group_size; local_idx++) {
send_count[local_idx] = send_offset[local_idx + 1] - send_offset[local_idx];
}
vector<int64_t> recv_count(comm_group_size, -1);
// Note: MPI is used for communicating the sizes instead of *Communicator* because
// *Communicator* is not guaranteed to work with host buffers.
vector<MPI_Request> send_req(comm_group_size);
vector<MPI_Request> recv_req(comm_group_size);
for (int local_idx = 0; local_idx < comm_group_size; local_idx++) {
MPI_CALL(MPI_Isend(&send_count[local_idx],
1,
MPI_INT64_T,
comm_group.get_global_rank(local_idx),
exchange_size_tag,
MPI_COMM_WORLD,
&send_req[local_idx]));
}
for (int local_idx = 0; local_idx < comm_group_size; local_idx++) {
MPI_CALL(MPI_Irecv(&recv_count[local_idx],
1,
MPI_INT64_T,
comm_group.get_global_rank(local_idx),
exchange_size_tag,
MPI_COMM_WORLD,
&recv_req[local_idx]));
}
MPI_CALL(MPI_Waitall(comm_group_size, send_req.data(), MPI_STATUSES_IGNORE));
MPI_CALL(MPI_Waitall(comm_group_size, recv_req.data(), MPI_STATUSES_IGNORE));
recv_offset.resize(comm_group_size + 1, -1);
recv_offset[0] = 0;
std::partial_sum(recv_count.begin(), recv_count.end(), recv_offset.begin() + 1);
}
void communicate_sizes(std::vector<cudf::size_type> const &send_offset,
std::vector<int64_t> &recv_offset,
CommunicationGroup comm_group,
Communicator *communicator)
{
communicate_sizes(std::vector<int64_t>(send_offset.begin(), send_offset.end()),
recv_offset,
comm_group,
communicator);
}
/**
* Send data from the current rank to other ranks in *comm_group* according to offset.
*
* Note: This call should be enclosed by communicator->start() and communicator->stop().
*
* @param[in] data The starting address of data to be sent in device buffer.
* @param[in] offset Vector of length `comm_group_size + 1`. Items in *data* with
 * indices from offset[i] to offset[i+1] will be sent to local index i in the communication group.
* @param[in] item_size The size of each item.
* @param[in] communicator An instance of 'Communicator' used for communication.
* @param[in] self_send Whether sending data to itself. If this argument is false, items
* in *data* destined for the current rank will not be copied.
*/
static void send_data_by_offset(const void *data,
std::vector<int64_t> const &offset,
size_t item_size,
CommunicationGroup comm_group,
Communicator *communicator,
bool self_send = true)
{
int mpi_rank = communicator->mpi_rank;
for (int local_idx = 0; local_idx < comm_group.size(); local_idx++) {
int target_rank = comm_group.get_global_rank(local_idx);
if (!self_send && target_rank == mpi_rank) continue;
// calculate the number of elements to send
int64_t count = offset[local_idx + 1] - offset[local_idx];
// calculate the starting address
const void *start_addr =
static_cast<const void *>(static_cast<const char *>(data) + offset[local_idx] * item_size);
// send buffer to the target rank
communicator->send(start_addr, count, item_size, target_rank);
}
}
/**
* Receive data sent by 'send_data_by_offset'.
*
* Note: This call should be enclosed by communicator->start() and communicator->stop().
*
* @param[out] data Items received from all ranks in the communication group will be placed
* contiguously in *data*. This argument needs to be preallocated.
* @param[in] offset The items received from local rank `i` will be stored at the start of
* `data[offset[i]]`.
* @param[in] item_size The size of each item.
* @param[in] communicator An instance of 'Communicator' used for communication.
 * @param[in] self_recv Whether receiving data from itself. If this argument is false, items in
* *data* from the current rank will not be received.
*/
static void recv_data_by_offset(void *data,
std::vector<int64_t> const &offset,
size_t item_size,
CommunicationGroup comm_group,
Communicator *communicator,
bool self_recv = true)
{
int mpi_rank = communicator->mpi_rank;
for (int local_idx = 0; local_idx < comm_group.size(); local_idx++) {
int source_rank = comm_group.get_global_rank(local_idx);
if (!self_recv && mpi_rank == source_rank) continue;
// calculate the number of elements to receive
int64_t count = offset[local_idx + 1] - offset[local_idx];
// calculate the starting address
void *start_addr =
static_cast<void *>(static_cast<char *>(data) + offset[local_idx] * item_size);
communicator->recv(start_addr, count, item_size, source_rank);
}
}
void warmup_all_to_all(Communicator *communicator)
{
int mpi_rank = communicator->mpi_rank;
int mpi_size = communicator->mpi_size;
int64_t size = 10'000'000LL;
rmm::mr::device_memory_resource *mr = rmm::mr::get_current_device_resource();
/* Allocate send/recv buffers */
std::vector<void *> send_buffer(mpi_size, nullptr);
std::vector<void *> recv_buffer(mpi_size, nullptr);
for (int irank = 0; irank < mpi_size; irank++) {
if (irank == mpi_rank) continue;
send_buffer[irank] = mr->allocate(size / mpi_size, rmm::cuda_stream_default);
recv_buffer[irank] = mr->allocate(size / mpi_size, rmm::cuda_stream_default);
}
CUDA_RT_CALL(cudaStreamSynchronize(0));
/* Communication */
communicator->start();
for (int irank = 0; irank < mpi_size; irank++) {
if (irank != mpi_rank) communicator->send(send_buffer[irank], size / mpi_size, 1, irank);
}
for (int irank = 0; irank < mpi_size; irank++) {
if (irank != mpi_rank) communicator->recv(recv_buffer[irank], size / mpi_size, 1, irank);
}
communicator->stop();
/* Deallocate send/recv buffers */
  for (int irank = 0; irank < mpi_size; irank++) {
    if (irank == mpi_rank) continue;
    mr->deallocate(send_buffer[irank], size / mpi_size, rmm::cuda_stream_default);
    mr->deallocate(recv_buffer[irank], size / mpi_size, rmm::cuda_stream_default);
  }
CUDA_RT_CALL(cudaStreamSynchronize(0));
}
void append_to_all_to_all_comm_buffers(
cudf::table_view input,
cudf::mutable_table_view output,
vector<cudf::size_type> const &send_offsets,
vector<int64_t> const &recv_offsets,
vector<vector<cudf::size_type>> const &string_send_offsets,
vector<vector<int64_t>> const &string_recv_offsets,
vector<rmm::device_uvector<cudf::size_type>> const &string_sizes_send,
vector<rmm::device_uvector<cudf::size_type>> &string_sizes_recv,
vector<AllToAllCommBuffer> &all_to_all_comm_buffers,
vector<ColumnCompressionOptions> const &compression_options)
{
for (cudf::size_type icol = 0; icol < input.num_columns(); icol++) {
cudf::data_type dtype = input.column(icol).type();
assert(dtype == output.column(icol).type());
if (dtype.id() != cudf::type_id::STRING) {
// This is a fixed-width column
all_to_all_comm_buffers.emplace_back(
input.column(icol).head(),
output.column(icol).head(),
vector<int64_t>(send_offsets.begin(), send_offsets.end()),
recv_offsets,
dtype,
compression_options[icol].compression_method,
compression_options[icol].cascaded_format);
} else {
// This is a string column
all_to_all_comm_buffers.emplace_back(
string_sizes_send[icol].data(),
string_sizes_recv[icol].data(),
vector<int64_t>(send_offsets.begin(), send_offsets.end()),
recv_offsets,
input.column(icol).child(0).type(),
compression_options[icol].children_compression_options[0].compression_method,
compression_options[icol].children_compression_options[0].cascaded_format);
all_to_all_comm_buffers.emplace_back(
input.column(icol).child(1).head(),
output.column(icol).child(1).head(),
vector<int64_t>(string_send_offsets[icol].begin(), string_send_offsets[icol].end()),
string_recv_offsets[icol],
input.column(icol).child(1).type(),
compression_options[icol].children_compression_options[1].compression_method,
compression_options[icol].children_compression_options[1].cascaded_format);
}
}
}
void append_to_all_to_all_comm_buffers(cudf::table_view input,
cudf::mutable_table_view output,
vector<cudf::size_type> const &send_offsets,
vector<int64_t> const &recv_offsets,
vector<AllToAllCommBuffer> &all_to_all_comm_buffers,
vector<ColumnCompressionOptions> compression_options)
{
// Without string columns, `string_sizes_recv` is not needed. This is only a placeholder passed to
// `append_to_all_to_all_comm_buffers`.
vector<rmm::device_uvector<cudf::size_type>> string_sizes_recv;
append_to_all_to_all_comm_buffers(input,
output,
send_offsets,
recv_offsets,
vector<vector<cudf::size_type>>(),
vector<vector<int64_t>>(),
vector<rmm::device_uvector<cudf::size_type>>(),
string_sizes_recv,
all_to_all_comm_buffers,
compression_options);
}
void all_to_all_comm(vector<AllToAllCommBuffer> &all_to_all_comm_buffers,
CommunicationGroup comm_group,
Communicator *communicator,
bool include_current_rank,
bool report_timing,
void *preallocated_pinned_buffer)
{
int mpi_rank = communicator->mpi_rank;
int comm_group_size = comm_group.size();
double start_time = 0.0;
double stop_time = 0.0;
double total_compression_time = 0.0;
double total_uncompressed_size = 0.0;
double total_compressed_size = 0.0;
size_t *compressed_buffer_sizes_pinned = static_cast<size_t *>(preallocated_pinned_buffer);
bool alloc_compressed_buffer_sizes_pinned = false;
vector<rmm::cuda_stream> compression_streams(comm_group_size);
vector<rmm::cuda_stream_view> compression_stream_views;
compression_stream_views.reserve(comm_group_size);
for (const auto &compression_stream : compression_streams) {
compression_stream_views.push_back(compression_stream.view());
}
for (auto &buffer : all_to_all_comm_buffers) {
if (buffer.compression_method == CompressionMethod::none) {
if (!communicator->group_by_batch()) communicator->start();
send_data_by_offset(buffer.send_buffer,
buffer.send_offsets,
cudf::size_of(buffer.dtype),
comm_group,
communicator,
include_current_rank);
recv_data_by_offset(buffer.recv_buffer,
buffer.recv_offsets,
cudf::size_of(buffer.dtype),
comm_group,
communicator,
include_current_rank);
if (!communicator->group_by_batch()) communicator->stop();
continue;
}
// If the code reaches here, the buffer will be compressed before communication
assert(buffer.compression_method == CompressionMethod::cascaded);
// General strategy of all-to-all with compression:
// The all-to-all interface works on a single buffer with offsets. Since we don't know the
// compressed size without actually doing the compression, we cannot pre-allocate this buffer
// beforehand. Instead we compress each partition in the send buffer separately. Once the
// compression is done, we can allocate the compressed send buffer and copy the compressed
    // data into the buffer. Then, all-to-all communication can reuse the helper functions
    // `send_data_by_offset` and `recv_data_by_offset`.
if (report_timing) { start_time = MPI_Wtime(); }
if (compressed_buffer_sizes_pinned == nullptr) {
CUDA_RT_CALL(
cudaMallocHost(&compressed_buffer_sizes_pinned, comm_group_size * sizeof(size_t)));
alloc_compressed_buffer_sizes_pinned = true;
}
// Compress each partition in the send buffer separately and store the result in
// `compressed_buffers`
vector<const void *> uncompressed_data(comm_group_size);
vector<cudf::size_type> uncompressed_counts(comm_group_size);
for (int local_idx = 0; local_idx < comm_group_size; local_idx++) {
if (!include_current_rank && comm_group.get_global_rank(local_idx) == mpi_rank) {
uncompressed_data[local_idx] = nullptr;
uncompressed_counts[local_idx] = 0;
continue;
}
uncompressed_data[local_idx] = static_cast<const int8_t *>(buffer.send_buffer) +
buffer.send_offsets[local_idx] * cudf::size_of(buffer.dtype);
uncompressed_counts[local_idx] =
buffer.send_offsets[local_idx + 1] - buffer.send_offsets[local_idx];
}
vector<rmm::device_buffer> compressed_buffers;
vector<size_t> compressed_buffer_sizes(comm_group_size);
cudf::type_dispatcher(buffer.dtype,
compression_functor{},
uncompressed_data,
uncompressed_counts,
compressed_buffers,
compressed_buffer_sizes_pinned,
compression_stream_views,
buffer.cascaded_format);
for (auto &stream : compression_streams) stream.synchronize();
memcpy(compressed_buffer_sizes.data(),
compressed_buffer_sizes_pinned,
comm_group_size * sizeof(size_t));
// Calculate and communicate offsets for the compressed buffers
buffer.compressed_send_offsets.resize(comm_group_size + 1);
buffer.compressed_send_offsets[0] = 0;
for (int local_idx = 0; local_idx < comm_group_size; local_idx++) {
buffer.compressed_send_offsets[local_idx + 1] =
buffer.compressed_send_offsets[local_idx] + compressed_buffer_sizes[local_idx];
}
if (report_timing) {
stop_time = MPI_Wtime();
total_compression_time += (stop_time - start_time);
total_uncompressed_size +=
((buffer.send_offsets.back() - buffer.send_offsets[0]) * cudf::size_of(buffer.dtype));
total_compressed_size += buffer.compressed_send_offsets.back();
}
communicate_sizes(
buffer.compressed_send_offsets, buffer.compressed_recv_offsets, comm_group, communicator);
// Merge compressed data of all partitions in `compressed_buffers` into a single buffer
buffer.compressed_send_buffer.resize(buffer.compressed_send_offsets.back());
for (int local_idx = 0; local_idx < comm_group_size; local_idx++) {
if (!include_current_rank && comm_group.get_global_rank(local_idx) == mpi_rank) continue;
CUDA_RT_CALL(cudaMemcpy(static_cast<int8_t *>(buffer.compressed_send_buffer.data()) +
buffer.compressed_send_offsets[local_idx],
compressed_buffers[local_idx].data(),
compressed_buffer_sizes[local_idx],
cudaMemcpyDeviceToDevice));
}
compressed_buffers.clear();
// Allocate receive buffer and launch all-to-all communication on the compressed buffer
buffer.compressed_recv_buffer.resize(buffer.compressed_recv_offsets.back());
CUDA_RT_CALL(cudaStreamSynchronize(0));
if (!communicator->group_by_batch()) communicator->start();
send_data_by_offset(buffer.compressed_send_buffer.data(),
buffer.compressed_send_offsets,
1,
comm_group,
communicator,
include_current_rank);
recv_data_by_offset(buffer.compressed_recv_buffer.data(),
buffer.compressed_recv_offsets,
1,
comm_group,
communicator,
include_current_rank);
if (!communicator->group_by_batch()) communicator->stop();
}
if (alloc_compressed_buffer_sizes_pinned) {
CUDA_RT_CALL(cudaFreeHost(compressed_buffer_sizes_pinned));
}
if (total_uncompressed_size && report_timing) {
std::cout << "Rank " << mpi_rank << ": compression takes " << total_compression_time * 1e3
<< "ms"
<< " with compression ratio " << total_uncompressed_size / total_compressed_size
<< " and throughput " << total_uncompressed_size / total_compression_time / 1e9
<< "GB/s" << std::endl;
}
}
void postprocess_all_to_all_comm(vector<AllToAllCommBuffer> &all_to_all_comm_buffers,
CommunicationGroup comm_group,
Communicator *communicator,
bool include_current_rank,
bool report_timing)
{
int mpi_rank = communicator->mpi_rank;
int comm_group_size = comm_group.size();
double start_time = 0.0;
double stop_time = 0.0;
double total_uncompressed_size = 0.0;
if (report_timing) { start_time = MPI_Wtime(); }
vector<rmm::cuda_stream> decompression_streams(comm_group_size);
vector<rmm::cuda_stream_view> decompression_stream_views;
decompression_stream_views.reserve(comm_group_size);
for (const auto &decompression_stream : decompression_streams) {
decompression_stream_views.push_back(decompression_stream.view());
}
// Decompress compressed data into destination buffer
for (auto &buffer : all_to_all_comm_buffers) {
if (buffer.compression_method == CompressionMethod::none) continue;
vector<const void *> compressed_data(comm_group_size);
vector<int64_t> compressed_sizes(comm_group_size);
vector<void *> outputs(comm_group_size);
vector<int64_t> expected_output_counts(comm_group_size);
for (int local_idx = 0; local_idx < comm_group_size; local_idx++) {
if (!include_current_rank && comm_group.get_global_rank(local_idx) == mpi_rank) {
compressed_sizes[local_idx] = 0;
expected_output_counts[local_idx] = 0;
continue;
}
compressed_data[local_idx] = static_cast<int8_t *>(buffer.compressed_recv_buffer.data()) +
buffer.compressed_recv_offsets[local_idx];
compressed_sizes[local_idx] =
buffer.compressed_recv_offsets[local_idx + 1] - buffer.compressed_recv_offsets[local_idx];
outputs[local_idx] = static_cast<int8_t *>(buffer.recv_buffer) +
buffer.recv_offsets[local_idx] * cudf::size_of(buffer.dtype);
expected_output_counts[local_idx] =
buffer.recv_offsets[local_idx + 1] - buffer.recv_offsets[local_idx];
}
cudf::type_dispatcher(buffer.dtype,
decompression_functor{},
compressed_data,
compressed_sizes,
outputs,
expected_output_counts,
decompression_stream_views);
for (auto &stream : decompression_streams) stream.synchronize();
if (report_timing)
total_uncompressed_size += (buffer.recv_offsets.back() * cudf::size_of(buffer.dtype));
}
if (total_uncompressed_size && report_timing) {
stop_time = MPI_Wtime();
double duration = stop_time - start_time;
std::cout << "Rank " << mpi_rank << ": decompression takes " << duration * 1e3 << "ms"
<< " with throughput " << total_uncompressed_size / duration / 1e9 << "GB/s"
<< std::endl;
}
}
/**
* Allocate the table after all-to-all communication.
*
* @param[in] input_table Table that needs to be all-to-all communicated.
 * @param[in] recv_offsets Vector of size `comm_group_size + 1`, indicating the start row index in
 * the allocated table to receive data from each rank in the communication group.
* @param[in] string_recv_offsets Vector with shape `(num_columns, comm_group_size + 1)`. The output
* of `gather_string_offsets`.
*
* @return Allocated table after all-to-all communication.
*/
static std::unique_ptr<table> allocate_communicated_table_helper(
cudf::table_view input_table,
vector<int64_t> const &recv_offsets,
vector<vector<int64_t>> const &string_recv_offsets)
{
vector<std::unique_ptr<column>> communicated_columns;
for (cudf::size_type icol = 0; icol < input_table.num_columns(); icol++) {
cudf::column_view input_column = input_table.column(icol);
cudf::data_type dtype = input_column.type();
if (dtype.id() == cudf::type_id::STRING) {
std::unique_ptr<column> chars_column = cudf::strings::detail::create_chars_child_column(
recv_offsets.back(), 0, string_recv_offsets[icol].back());
std::unique_ptr<column> offset_column =
cudf::make_numeric_column(input_column.child(0).type(), recv_offsets.back() + 1);
communicated_columns.push_back(cudf::make_strings_column(
recv_offsets.back(), std::move(offset_column), std::move(chars_column), 0, {}));
} else {
communicated_columns.push_back(cudf::make_fixed_width_column(dtype, recv_offsets.back()));
}
}
return std::make_unique<table>(std::move(communicated_columns));
}
/**
* Explicitly copy the part of the input table destined to the current rank during all-to-all
* communication to the communicated table.
*
* This function can be used together with `all_to_all_comm` with `include_current_rank = false` for
* a complete all-to-all communication.
*
* @param[in] input_table Table to be all-to-all communicated.
* @param[in] communicated_table Table after all-to-all communication.
 * @param[in] send_offsets Vector of size `comm_group_size + 1` indicating the start row index of
 * `input_table` to be sent to each local rank.
 * @param[in] recv_offsets Vector of size `comm_group_size + 1` indicating the start row index of
 * `communicated_table` to receive data from each local rank.
* @param[in] string_send_offsets Vector with shape `(num_columns, comm_group_size + 1)`, such that
 * `string_send_offsets[j,k]` represents the start index in the char subcolumn of column `j` that
* needs to be sent to local rank `k`.
* @param[in] string_recv_offsets Vector with shape `(num_columns, comm_group_size + 1)`, such that
 * `string_recv_offsets[j,k]` represents the start index in the char subcolumn of column `j` that
* receives data from local rank `k`.
* @param[in] string_sizes_send String sizes of each row for all string columns.
* @param[in] string_sizes_recv Receive buffers for string sizes. This argument needs to be
* preallocated.
*/
static void copy_table_to_current_rank(
cudf::table_view input_table,
cudf::mutable_table_view communicated_table,
vector<cudf::size_type> const &send_offsets,
vector<int64_t> const &recv_offsets,
vector<vector<cudf::size_type>> const &string_send_offsets,
vector<vector<int64_t>> const &string_recv_offsets,
vector<rmm::device_uvector<cudf::size_type>> const &string_sizes_send,
vector<rmm::device_uvector<cudf::size_type>> &string_sizes_recv,
CommunicationGroup comm_group,
Communicator *communicator)
{
int local_idx = comm_group.get_local_idx();
for (cudf::size_type icol = 0; icol < input_table.num_columns(); icol++) {
cudf::data_type dtype = input_table.column(icol).type();
if (dtype.id() != cudf::type_id::STRING) {
// This is a fixed-width column
cudf::size_type dtype_size = cudf::size_of(dtype);
CUDA_RT_CALL(cudaMemcpy(
static_cast<void *>(communicated_table.column(icol).head<char>() +
recv_offsets[local_idx] * dtype_size),
static_cast<const void *>(input_table.column(icol).head<char>() +
static_cast<int64_t>(send_offsets[local_idx]) * dtype_size),
(recv_offsets[local_idx + 1] - recv_offsets[local_idx]) * dtype_size,
cudaMemcpyDeviceToDevice));
} else {
// This is a string column
CUDA_RT_CALL(cudaMemcpy(
string_sizes_recv[icol].data() + recv_offsets[local_idx],
string_sizes_send[icol].data() + send_offsets[local_idx],
(recv_offsets[local_idx + 1] - recv_offsets[local_idx]) * sizeof(cudf::size_type),
cudaMemcpyDeviceToDevice));
CUDA_RT_CALL(cudaMemcpy(
communicated_table.column(icol).child(1).head<char>() +
string_recv_offsets[icol][local_idx],
input_table.column(icol).child(1).head<char>() + string_send_offsets[icol][local_idx],
string_send_offsets[icol][local_idx + 1] - string_send_offsets[icol][local_idx],
cudaMemcpyDeviceToDevice));
}
}
}
AllToAllCommunicator::AllToAllCommunicator(
cudf::table_view input_table,
std::vector<cudf::size_type> offsets,
CommunicationGroup comm_group,
Communicator *communicator,
std::vector<ColumnCompressionOptions> compression_options,
bool explicit_copy_to_current_rank)
: input_table(input_table),
comm_group(comm_group),
communicator(communicator),
explicit_copy_to_current_rank(explicit_copy_to_current_rank),
send_offsets(offsets),
compression_options(compression_options)
{
/* Communicate number of rows */
communicate_sizes(send_offsets, recv_offsets, comm_group, communicator);
/* Communicate the number of bytes of string columns */
gather_string_offsets(
input_table, send_offsets, string_send_offsets, string_recv_offsets, comm_group, communicator);
/* Calculate the number of bytes from string offsets */
calculate_string_sizes_from_offsets(
input_table, offsets.front(), offsets.back(), string_sizes_to_send);
allocate_string_sizes_receive_buffer(input_table, recv_offsets, string_sizes_received);
}
AllToAllCommunicator::AllToAllCommunicator(
cudf::table_view input_table,
std::vector<cudf::size_type> offsets,
Communicator *communicator,
std::vector<ColumnCompressionOptions> compression_options,
bool explicit_copy_to_current_rank)
: AllToAllCommunicator::AllToAllCommunicator(input_table,
offsets,
CommunicationGroup(communicator->mpi_size, 1),
communicator,
compression_options,
explicit_copy_to_current_rank)
{
}
std::unique_ptr<cudf::table> AllToAllCommunicator::allocate_communicated_table()
{
std::unique_ptr<cudf::table> communicated_table =
allocate_communicated_table_helper(input_table, recv_offsets, string_recv_offsets);
// Synchronization on the default stream is necessary here because subsequently the communicator
// can use a different stream to receive data into allocated tables
CUDA_RT_CALL(cudaStreamSynchronize(0));
if (explicit_copy_to_current_rank) {
// The device-to-device memory copies are performed explicitly here before all-to-all
// communication and local join, because if they are part of the communication, they could block
// the host thread (even if they are launched on different streams) while the local join kernel
// is running, limiting the efficacy of overlapping.
copy_table_to_current_rank(input_table,
communicated_table->mutable_view(),
send_offsets,
recv_offsets,
string_send_offsets,
string_recv_offsets,
string_sizes_to_send,
string_sizes_received,
comm_group,
communicator);
}
return communicated_table;
}
void AllToAllCommunicator::launch_communication(cudf::mutable_table_view communicated_table,
bool report_timing,
void *preallocated_pinned_buffer)
{
vector<AllToAllCommBuffer> all_to_all_comm_buffers;
append_to_all_to_all_comm_buffers(input_table,
communicated_table,
send_offsets,
recv_offsets,
string_send_offsets,
string_recv_offsets,
string_sizes_to_send,
string_sizes_received,
all_to_all_comm_buffers,
compression_options);
if (communicator->group_by_batch()) communicator->start();
all_to_all_comm(all_to_all_comm_buffers,
comm_group,
communicator,
!explicit_copy_to_current_rank,
report_timing,
preallocated_pinned_buffer);
if (communicator->group_by_batch()) communicator->stop();
postprocess_all_to_all_comm(all_to_all_comm_buffers,
comm_group,
communicator,
!explicit_copy_to_current_rank,
report_timing);
calculate_string_offsets_from_sizes(communicated_table, string_sizes_received);
}
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/src/strings_column.hpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "all_to_all_comm.hpp"
#include "communicator.hpp"
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <rmm/device_uvector.hpp>
#include <cstdint>
#include <vector>
/**
* Calculate and communicate the number of bytes sent/received during all-to-all communication for
* all string columns.
*
 * Note: This function needs to be called collectively by all ranks in *comm_group*.
 *
 * @param[in] table Table that needs to be all-to-all communicated.
 * @param[in] offsets Vector of size `comm_group_size + 1` indexed into `table`, indicating the
 * start row index to be sent to each local rank.
 * @param[out] string_send_offsets Vector with shape `(num_columns, comm_group_size + 1)`,
 * such that `string_send_offsets[j,k]` represents the start index in the char subcolumn of
 * column `j` that needs to be sent to local rank `k`.
 * @param[out] string_recv_offsets Vector with shape `(num_columns, comm_group_size + 1)`,
 * such that `string_recv_offsets[j,k]` represents the start index in the char subcolumn of
 * column `j` that receives data from local rank `k`.
*/
void gather_string_offsets(cudf::table_view table,
std::vector<cudf::size_type> const &offsets,
std::vector<std::vector<cudf::size_type>> &string_send_offsets,
std::vector<std::vector<int64_t>> &string_recv_offsets,
CommunicationGroup comm_group,
Communicator *communicator);
/**
* Calculate the string size of each row.
*
* Note: This function is the reverse of `calculate_string_offsets_from_sizes`.
*
* @param[in] input_table Table for which the string sizes are calculated.
* @param[in] start Start row index.
 * @param[in] end End row index. Strings with row indices in [start, end) will be calculated.
* @param[out] output_sizes Vector of size `num_columns`, where `output_sizes[j]` is a device vector
* of size `end - start`, storing the string size of each row in column `j`.
*/
void calculate_string_sizes_from_offsets(
cudf::table_view input_table,
cudf::size_type start,
cudf::size_type end,
std::vector<rmm::device_uvector<cudf::size_type>> &output_sizes);
/**
* Calculate string offsets from sizes.
*
* Note: This function is the reverse of `calculate_string_sizes_from_offsets`.
*
* @param[out] output_table Calculated offsets will be stored in the string columns of
* `output_table`.
* @param[in] input_sizes Vector of size `num_columns`, where `input_sizes[j]` stores the string
* size of each row in column `j`.
*/
void calculate_string_offsets_from_sizes(
cudf::mutable_table_view output_table,
std::vector<rmm::device_uvector<cudf::size_type>> const &input_sizes);
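// A small host-side sketch of the sizes/offsets relationship that the two functions above invert
// (the device columns follow the same convention): offsets {0, 3, 5, 9} describe three strings of
// sizes {3, 2, 4}, and a prefix sum of the sizes recovers the offsets.
inline void example_sizes_offsets_round_trip()
{
  std::vector<cudf::size_type> offsets{0, 3, 5, 9};
  std::vector<cudf::size_type> sizes(offsets.size() - 1);
  std::vector<cudf::size_type> rebuilt(offsets.size(), 0);
  for (int i = 0; i + 1 < static_cast<int>(offsets.size()); i++) {
    sizes[i]       = offsets[i + 1] - offsets[i];  // adjacent difference: offsets -> sizes
    rebuilt[i + 1] = rebuilt[i] + sizes[i];        // prefix sum: sizes -> offsets
  }
  // rebuilt now equals the original offsets vector.
}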
/**
* Helper function for allocating the receive buffer of string sizes.
*/
void allocate_string_sizes_receive_buffer(
cudf::table_view input_table,
std::vector<int64_t> recv_offsets,
std::vector<rmm::device_uvector<cudf::size_type>> &string_sizes_recv);
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/src/communicator.cpp
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "communicator.hpp"
#include "error.hpp"
#include <rmm/mr/device/per_device_resource.hpp>
#include <mpi.h>
#include <ucp/api/ucp.h>
#include <cuda_runtime.h>
#include <cassert>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <stdexcept>
void Communicator::initialize()
{
MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank));
MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &mpi_size));
CUDA_RT_CALL(cudaGetDevice(¤t_device));
}
void MPILikeCommunicator::initialize() { Communicator::initialize(); }
void MPILikeCommunicator::start() { pending_requests.clear(); }
void MPILikeCommunicator::stop() { waitall(pending_requests); }
void MPILikeCommunicator::send(const void *buf, int64_t count, int element_size, int dest)
{
pending_requests.push_back(send(buf, count, element_size, dest, reserved_tag));
}
void MPILikeCommunicator::recv(void *buf, int64_t count, int element_size, int source)
{
pending_requests.push_back(recv(buf, count, element_size, source, reserved_tag));
}
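// Illustrative usage of the batched interface above (not part of the original source): a
// start()/stop() pair brackets a group of untagged sends and receives, e.g.
//   comm->start();
//   comm->send(send_buf, count, element_size, dest);
//   comm->recv(recv_buf, count, element_size, source);
//   comm->stop();  // waits on every request registered since start()
// The tagged send/recv overloads instead return a comm_handle_t that must be completed with
// wait() or waitall().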
void UCXCommunicator::initialize_ucx()
{
ucp_params_t ucp_params;
ucp_config_t *ucp_config;
ucp_worker_params_t ucp_worker_params;
memset(&ucp_params, 0, sizeof(ucp_params));
ucp_params.field_mask = UCP_PARAM_FIELD_FEATURES | UCP_PARAM_FIELD_ESTIMATED_NUM_EPS;
ucp_params.features = UCP_FEATURE_TAG;
ucp_params.estimated_num_eps = mpi_size;
UCX_CALL(ucp_config_read(NULL, NULL, &ucp_config));
UCX_CALL(ucp_init(&ucp_params, ucp_config, &ucp_context));
ucp_config_release(ucp_config);
memset(&ucp_worker_params, 0, sizeof(ucp_worker_params));
ucp_worker_params.field_mask = UCP_WORKER_PARAM_FIELD_THREAD_MODE;
ucp_worker_params.thread_mode = UCS_THREAD_MODE_SINGLE; // only the master thread can access UCX
UCX_CALL(ucp_worker_create(ucp_context, &ucp_worker_params, &ucp_worker));
UCX_CALL(ucp_worker_get_address(ucp_worker, &ucp_worker_address, &ucp_worker_address_len));
}
void UCXCommunicator::create_endpoints()
{
/* Broadcast worker addresses to all ranks */
void *ucp_worker_address_book = malloc(ucp_worker_address_len * mpi_size);
MPI_CALL(MPI_Allgather(ucp_worker_address,
ucp_worker_address_len,
MPI_CHAR,
ucp_worker_address_book,
ucp_worker_address_len,
MPI_CHAR,
MPI_COMM_WORLD));
/* Create endpoints on all ranks */
std::vector<ucp_ep_params_t> ucp_ep_params;
ucp_endpoints.resize(mpi_size);
ucp_ep_params.resize(mpi_size);
for (int irank = 0; irank < mpi_size; irank++) {
    memset(&ucp_ep_params[irank], 0, sizeof(ucp_ep_params_t));
ucp_ep_params[irank].field_mask = UCP_EP_PARAM_FIELD_REMOTE_ADDRESS;
ucp_ep_params[irank].address =
(ucp_address_t *)((char *)ucp_worker_address_book + irank * ucp_worker_address_len);
UCX_CALL(ucp_ep_create(ucp_worker, &ucp_ep_params[irank], &ucp_endpoints[irank]));
}
free(ucp_worker_address_book);
}
void UCXCommunicator::initialize()
{
MPILikeCommunicator::initialize();
initialize_ucx();
create_endpoints();
}
void empty_callback_func() {}
comm_handle_t UCXCommunicator::send(
const void *buf, int64_t count, int element_size, int dest, int tag)
{
// TODO: the design of the communication tag should be in line with the design of
// UCXBufferCommunicator
ucs_status_ptr_t req = ucp_tag_send_nb(ucp_endpoints[dest],
buf,
count,
ucp_dt_make_contig(element_size),
tag * mpi_size + mpi_rank,
(ucp_send_callback_t)empty_callback_func);
CHECK_ERROR(UCS_PTR_IS_ERR(req), false, "ucp_tag_send_nb");
if (UCS_PTR_STATUS(req) == UCS_OK) {
// already locally completed
return nullptr;
} else {
return req;
}
}
comm_handle_t UCXCommunicator::recv(void *buf, int64_t count, int element_size, int source, int tag)
{
// TODO: the design of the communication tag should be in line with the design of
// UCXBufferCommunicator
ucs_status_ptr_t req = ucp_tag_recv_nb(ucp_worker,
buf,
count,
ucp_dt_make_contig(element_size),
tag * mpi_size + source,
(ucp_tag_t)-1,
(ucp_tag_recv_callback_t)empty_callback_func);
  CHECK_ERROR(UCS_PTR_IS_ERR(req), false, "ucp_tag_recv_nb");
return req;
}
comm_handle_t UCXCommunicator::recv(
void **buf, int64_t *count, int element_size, int source, int tag)
{
ucp_tag_message_h ucp_tag_message;
ucp_tag_recv_info_t ucp_probe_info;
/* Probe the size of the incoming message */
while (true) {
ucp_tag_message =
ucp_tag_probe_nb(ucp_worker, tag * mpi_size + source, (ucp_tag_t)-1, 1, &ucp_probe_info);
if (ucp_tag_message != NULL) {
// the message has already arrived
break;
}
ucp_worker_progress(ucp_worker);
}
/* Allocate receive buffer */
*buf = rmm::mr::get_current_device_resource()->allocate(ucp_probe_info.length, cudaStreamDefault);
CUDA_RT_CALL(cudaStreamSynchronize(cudaStreamDefault));
  /* Receive data */
ucs_status_ptr_t req = ucp_tag_msg_recv_nb(ucp_worker,
*buf,
ucp_probe_info.length / element_size,
ucp_dt_make_contig(element_size),
ucp_tag_message,
(ucp_tag_recv_callback_t)empty_callback_func);
CHECK_ERROR(UCS_PTR_IS_ERR(req), false, "ucp_tag_msg_recv_nb");
if (count != nullptr) *count = ucp_probe_info.length / element_size;
return req;
}
void UCXCommunicator::register_buffer(void *buf, size_t size, ucp_mem_h *memory_handle)
{
ucp_mem_map_params_t mem_map_params;
memset(&mem_map_params, 0, sizeof(ucp_mem_map_params_t));
mem_map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS | UCP_MEM_MAP_PARAM_FIELD_LENGTH;
mem_map_params.address = buf;
mem_map_params.length = size;
UCX_CALL(ucp_mem_map(ucp_context, &mem_map_params, memory_handle));
}
void UCXCommunicator::deregister_buffer(ucp_mem_h memory_handle)
{
UCX_CALL(ucp_mem_unmap(ucp_context, memory_handle));
}
void UCXCommunicator::wait(comm_handle_t request)
{
ucs_status_t ucx_status;
if (request == nullptr) return;
while (true) {
ucx_status = ucp_request_check_status(request);
if (ucx_status == UCS_INPROGRESS) {
ucp_worker_progress(ucp_worker);
continue;
}
UCX_CALL(ucx_status);
break;
}
if (request != nullptr) ucp_request_free(request);
}
void UCXCommunicator::waitall(std::vector<comm_handle_t> requests)
{
UCXCommunicator::waitall(requests.begin(), requests.end());
}
void UCXCommunicator::waitall(std::vector<comm_handle_t>::const_iterator begin,
std::vector<comm_handle_t>::const_iterator end)
{
ucs_status_t ucx_status;
while (true) {
bool all_finished = true;
for (auto it = begin; it != end; it++) {
auto &request = *it;
if (request == nullptr) continue;
ucx_status = ucp_request_check_status(request);
if (ucx_status == UCS_INPROGRESS) {
all_finished = false;
break;
}
UCX_CALL(ucx_status);
}
if (all_finished) break;
ucp_worker_progress(ucp_worker);
}
for (auto it = begin; it != end; it++) {
auto &request = *it;
if (request != nullptr) ucp_request_free(request);
}
}
void UCXCommunicator::finalize()
{
std::vector<comm_handle_t> close_nb_reqs(mpi_size, nullptr);
for (int irank = 0; irank < mpi_size; irank++) {
ucs_status_ptr_t ucs_status_ptr =
ucp_ep_close_nb(ucp_endpoints[irank], UCP_EP_CLOSE_MODE_FLUSH);
CHECK_ERROR(UCS_PTR_IS_ERR(ucs_status_ptr), false, "ucp_ep_close_nb");
if (UCS_PTR_STATUS(ucs_status_ptr) != UCS_OK) close_nb_reqs[irank] = ucs_status_ptr;
}
UCXCommunicator::waitall(close_nb_reqs);
// Barrier is necessary here because we do not want to destroy any worker before all ranks have
// closed the endpoints.
MPI_Barrier(MPI_COMM_WORLD);
ucp_worker_release_address(ucp_worker, ucp_worker_address);
ucp_worker_destroy(ucp_worker);
ucp_cleanup(ucp_context);
}
static void request_init(void *request)
{
UCXBufferCommunicator::CommInfo *info = (UCXBufferCommunicator::CommInfo *)request;
info->completed = false;
info->comm = nullptr;
info->orig_info = nullptr;
info->custom_allocated = false;
}
void UCXBufferCommunicator::initialize_ucx()
{
  // Note: This initialization differs from UCXCommunicator in that it requests reserved space in
  // the communication handle.
ucp_params_t ucp_params;
ucp_config_t *ucp_config;
ucp_worker_params_t ucp_worker_params;
assert(sizeof(SendInfo) == sizeof(RecvInfo));
memset(&ucp_params, 0, sizeof(ucp_params));
ucp_params.field_mask = UCP_PARAM_FIELD_FEATURES | UCP_PARAM_FIELD_ESTIMATED_NUM_EPS |
UCP_PARAM_FIELD_REQUEST_INIT | UCP_PARAM_FIELD_REQUEST_SIZE;
ucp_params.features = UCP_FEATURE_TAG;
ucp_params.estimated_num_eps = mpi_size;
ucp_params.request_size = sizeof(SendInfo);
ucp_params.request_init = request_init;
UCX_CALL(ucp_config_read(NULL, NULL, &ucp_config));
UCX_CALL(ucp_init(&ucp_params, ucp_config, &ucp_context));
ucp_config_release(ucp_config);
memset(&ucp_worker_params, 0, sizeof(ucp_worker_params));
ucp_worker_params.field_mask = UCP_WORKER_PARAM_FIELD_THREAD_MODE;
ucp_worker_params.thread_mode = UCS_THREAD_MODE_SINGLE; // only the master thread can access UCX
UCX_CALL(ucp_worker_create(ucp_context, &ucp_worker_params, &ucp_worker));
UCX_CALL(ucp_worker_get_address(ucp_worker, &ucp_worker_address, &ucp_worker_address_len));
}
void UCXBufferCommunicator::initialize()
{
UCXCommunicator::initialize();
if (mpi_size > 65536) {
throw std::runtime_error("Ranks > 65536 is not supported due to tag limitation");
}
  /* Create a priority stream for copying between the user buffer and the comm buffer. Useful for overlapping.
*/
int least_priority;
int greatest_priority;
CUDA_RT_CALL(cudaDeviceGetStreamPriorityRange(&least_priority, &greatest_priority));
CUDA_RT_CALL(
cudaStreamCreateWithPriority(©_stream, cudaStreamNonBlocking, greatest_priority));
}
void UCXBufferCommunicator::setup_cache(int64_t ncaches, int64_t buffer_size)
{
comm_buffer_size = buffer_size;
cache_start_addr =
rmm::mr::get_current_device_resource()->allocate(comm_buffer_size * ncaches, cudaStreamDefault);
CUDA_RT_CALL(cudaStreamSynchronize(cudaStreamDefault));
register_buffer(cache_start_addr, comm_buffer_size * ncaches, &cache_mem_handle);
for (int icache = 0; icache < ncaches; icache++) {
void *current_buffer = (void *)((char *)cache_start_addr + icache * buffer_size);
buffer_cache.push(current_buffer);
}
}
/**
 * Get the communication tag passed to UCX from the user-defined tag and source rank.
 *
 * This function is necessary because the UCX receive API does not accept a source rank.
 * Therefore, the current implementation uses tag matching to differentiate messages
 * coming from different ranks.
*
* @param[in] user_tag User-specified tag
* @param[in] source_rank Rank number of the sender of this message
*
* @returns Communication tag passed to UCX
*/
uint64_t get_comm_tag(int user_tag, int source_rank)
{
uint64_t comm_tag = 0LLU;
// user_tag occupies the most significant 32 bits of comm_tag
comm_tag |= ((uint64_t)user_tag << 32);
// source rank occupies the least significant 32 bits of comm_tag
comm_tag |= (uint64_t)source_rank;
return comm_tag;
}
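// Example (illustrative): with user_tag = 3 and source_rank = 2, the resulting comm_tag is
// (3ULL << 32) | 2 = 0x300000002, i.e. the upper 32 bits carry the user tag and the lower
// 32 bits carry the sender's rank.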
static void send_handler(void *request, ucs_status_t status)
{
UCXBufferCommunicator::SendInfo *info = (UCXBufferCommunicator::SendInfo *)request;
int element_size = info->element_size;
int ibatch = info->ibatch;
const int64_t nelements_per_batch = info->comm->comm_buffer_size / element_size;
int64_t nelements_remaining = *(info->count) - nelements_per_batch * ibatch;
request = nullptr;
while (nelements_remaining > 0) {
int64_t nelements_current_batch =
(nelements_remaining < nelements_per_batch ? nelements_remaining : nelements_per_batch);
int64_t nelements_sent = ibatch * nelements_per_batch;
void *start_addr = (void *)((char *)info->send_buffer + nelements_sent * element_size);
/* Copy data from user buffer to the communication buffer */
CUDA_RT_CALL(cudaMemcpyAsync(info->comm_buffer,
start_addr,
nelements_current_batch * element_size,
cudaMemcpyDeviceToDevice,
info->comm->copy_stream));
CUDA_RT_CALL(cudaStreamSynchronize(info->comm->copy_stream));
/* Construct communication tag */
uint64_t comm_tag = get_comm_tag(info->user_tag, info->comm->mpi_rank);
/* Send the communication buffer to the remote rank */
request = ucp_tag_send_nb(info->comm->ucp_endpoints[info->dest],
info->comm_buffer,
nelements_current_batch,
ucp_dt_make_contig(element_size),
comm_tag,
send_handler);
CHECK_ERROR(UCS_PTR_IS_ERR(request), false, "ucp_tag_send_nb");
if (UCS_PTR_STATUS(request) != UCS_OK) {
// Send is not complete for now. Subsequent batches are handled by continuation.
break;
}
request = nullptr;
ibatch++;
nelements_remaining -= nelements_current_batch;
}
if (request != nullptr) {
    // Copy info from the handle of the last batch to the handle of the current batch
memcpy(request, info, sizeof(UCXBufferCommunicator::SendInfo));
((UCXBufferCommunicator::SendInfo *)request)->ibatch = ibatch + 1;
((UCXBufferCommunicator::SendInfo *)request)->custom_allocated = false;
} else {
info->orig_info->completed = true;
}
// Free the request handle if it is internal (not returned to user)
if ((void *)info != (void *)(info->orig_info)) {
// This handle is internal and no longer needed. Free it.
info->completed = false;
info->comm = nullptr;
info->orig_info = nullptr;
info->custom_allocated = false;
ucp_request_free(info);
}
}
comm_handle_t UCXBufferCommunicator::send(
const void *buf, int64_t count, int element_size, int dest, int tag)
{
// Get the communication tag for sending the number of elements (count)
uint64_t comm_tag = get_comm_tag(tag, mpi_rank);
  // Since the send operation is fully asynchronous to the caller, we need to keep the count buffer alive
int64_t *count_buf = (int64_t *)malloc(sizeof(int64_t)); // TODO: never freed?
*count_buf = count;
// Send the buffer size. This is needed because the receive side may not have information on how
// large the buffer is.
comm_handle_t request = ucp_tag_send_nb(
ucp_endpoints[dest], count_buf, 1, ucp_dt_make_contig(sizeof(int64_t)), comm_tag, send_handler);
CHECK_ERROR(UCS_PTR_IS_ERR(request), false, "ucp_tag_send_nb");
if (UCS_PTR_STATUS(request) == UCS_OK) {
// Sending buffer size is completed locally. Allocate request handle manually.
request = malloc(sizeof(SendInfo));
((SendInfo *)request)->custom_allocated = true;
((SendInfo *)request)->completed = false;
}
// Get the communication buffer
if (buffer_cache.empty()) {
    // TODO: A better way to implement this would be to print a warning and fall back to a normal send.
throw std::runtime_error("No buffered cache available");
}
void *comm_buffer = buffer_cache.front();
buffer_cache.pop();
// Fill in information about this send in the request handle so that the callback can launch
// subsequent batches.
SendInfo *info = (SendInfo *)request;
info->types = SEND;
info->send_buffer = buf;
info->comm_buffer = comm_buffer;
info->count = count_buf;
info->element_size = element_size;
info->dest = dest;
info->user_tag = tag;
info->ibatch = 0;
info->comm = this;
info->orig_info = (UCXBufferCommunicator::CommInfo *)info;
if (info->custom_allocated) {
// Launch callback manually
send_handler(request, UCS_OK);
}
return request;
}
static void recv_handler(void *request, ucs_status_t status, ucp_tag_recv_info_t *recv_info)
{
UCXBufferCommunicator::RecvInfo *info = (UCXBufferCommunicator::RecvInfo *)request;
if (info->orig_info == nullptr) {
    // If the code enters here, the callback has been called by UCX but the necessary
    // information in the request handle hasn't been filled in yet. We mark 'orig_info' here; the
    // receive call will fill in the information and then invoke this callback again manually.
info->orig_info = (UCXBufferCommunicator::CommInfo *)0x1;
return;
}
int element_size = info->element_size;
const int64_t nelements_per_batch = info->comm->comm_buffer_size / element_size;
/* Allocate receive buffer if not available */
if (*(info->recv_buffer) == nullptr && *(info->count) > 0) {
assert(info->ibatch == 0);
*(info->recv_buffer) = rmm::mr::get_current_device_resource()->allocate(
*(info->count) * element_size, cudaStreamDefault);
CUDA_RT_CALL(cudaStreamSynchronize(cudaStreamDefault));
}
/* Copy data from communication buffer to user buffer for the finished batch */
if (info->ibatch > 0) {
// Calculate the start address of the user buffer of the finished batch
int last_batch = info->ibatch - 1;
int64_t nelement_copied = nelements_per_batch * last_batch;
int64_t nelements_uncopied = *(info->count) - nelement_copied;
int64_t nelements_copy_batch =
(nelements_uncopied < nelements_per_batch ? nelements_uncopied : nelements_per_batch);
void *start_addr = (void *)((char *)(*(info->recv_buffer)) + nelement_copied * element_size);
// Copy data from comm buffer to user buffer
CUDA_RT_CALL(cudaMemcpyAsync(start_addr,
info->comm_buffer,
nelements_copy_batch * element_size,
cudaMemcpyDeviceToDevice,
info->comm->copy_stream));
CUDA_RT_CALL(cudaStreamSynchronize(info->comm->copy_stream));
}
/* Recv data from remote rank for the next batch */
int64_t nelement_recved = nelements_per_batch * info->ibatch;
int64_t nelements_remaining = *(info->count) - nelement_recved;
int64_t nelements_current_batch =
(nelements_remaining < nelements_per_batch ? nelements_remaining : nelements_per_batch);
if (nelements_current_batch > 0) {
uint64_t comm_tag = get_comm_tag(info->user_tag, info->source);
request = ucp_tag_recv_nb(info->comm->ucp_worker,
info->comm_buffer,
nelements_current_batch,
ucp_dt_make_contig(element_size),
comm_tag,
(ucp_tag_t)-1,
recv_handler);
CHECK_ERROR(UCS_PTR_IS_ERR(request), false, "ucp_tag_recv_nb");
// It is possible the callback is called inside 'ucp_tag_recv_nb' but the necessary information
// hasn't been filled in the request handle. In that case, this function will manually call the
// callback again after filling the request handle.
bool callback_called = (((UCXBufferCommunicator::RecvInfo *)request)->orig_info != nullptr);
// Fill the request handle of the next batch with the same info of this batch but add 1 to
// info->ibatch
(info->ibatch)++;
memcpy(request, info, sizeof(UCXBufferCommunicator::RecvInfo));
if (callback_called) {
// Call the callback manually
recv_handler(request, UCS_OK, nullptr);
}
} else {
info->orig_info->completed = true;
}
// Free the request handle if it is internal (not returned to user)
if ((void *)info != (void *)(info->orig_info)) {
info->completed = false;
info->comm = nullptr;
info->orig_info = nullptr;
info->custom_allocated = false;
ucp_request_free(info);
}
}
comm_handle_t UCXBufferCommunicator::recv_helper(
void **buf, int64_t *count, int element_size, int source, int tag)
{
// Allocate the receive buffer for receiving message size
int64_t *recved_count;
if (count == nullptr)
recved_count = (int64_t *)malloc(sizeof(int64_t)); // TODO: memory leak, never freed?
else
recved_count = count;
// Construct tag for receiving the number of elements
uint64_t comm_tag = get_comm_tag(tag, source);
// Request to receive the message size
ucs_status_ptr_t request = ucp_tag_recv_nb(ucp_worker,
recved_count,
1,
ucp_dt_make_contig(sizeof(int64_t)),
comm_tag,
(ucp_tag_t)-1,
recv_handler);
  CHECK_ERROR(UCS_PTR_IS_ERR(request), false, "ucp_tag_recv_nb");
// Get the communication buffer
if (buffer_cache.empty()) {
    // TODO: A better way to implement this would be to print a warning and fall back to a normal receive.
throw std::runtime_error("No buffered cache available");
}
void *comm_buffer = buffer_cache.front();
buffer_cache.pop();
// Fill information inside communication handle so that the callbacks can use this to receive
// subsequent batches
RecvInfo *info = (RecvInfo *)request;
// Mark callback_called as true if the message size is received inside ucp_tag_recv_nb
bool callback_called = (info->orig_info != nullptr);
info->types = RECV;
if (*buf == nullptr) {
info->recv_buffer = buf;
} else {
info->recv_buffer = (void **)malloc(sizeof(void *)); // TODO: never freed, memory leak.
*(info->recv_buffer) = *buf;
}
info->comm_buffer = comm_buffer;
info->custom_allocated = false;
info->count = recved_count;
info->element_size = element_size;
info->source = source;
info->user_tag = tag;
info->ibatch = 0;
info->comm = this;
info->orig_info = (UCXBufferCommunicator::CommInfo *)info;
if (callback_called) {
// Manually launch callback again if the callback is called inside ucp_tag_recv_nb
recv_handler(info, UCS_OK, nullptr);
}
return request;
}
comm_handle_t UCXBufferCommunicator::recv(
void *buf, int64_t count, int element_size, int source, int tag)
{
return recv_helper(&buf, nullptr, element_size, source, tag);
}
comm_handle_t UCXBufferCommunicator::recv(
void **buf, int64_t *count, int element_size, int source, int tag)
{
// Set *buf to nullptr so that it's allocated inside callback
*buf = nullptr;
return recv_helper(buf, count, element_size, source, tag);
}
void UCXBufferCommunicator::wait(comm_handle_t request)
{
CommInfo *info = (CommInfo *)request;
if (info == nullptr) return;
// Use busy polling for waiting for completion
while (info->completed == false) { ucp_worker_progress(ucp_worker); }
// Put the comm buffer back to the buffer queue
buffer_cache.push(info->comm_buffer);
// Free the request handle
info->completed = false;
info->comm = nullptr;
info->orig_info = nullptr;
info->comm_buffer = nullptr;
if (info->custom_allocated)
free(request);
else
ucp_request_free(request);
}
void UCXBufferCommunicator::waitall(std::vector<comm_handle_t> requests)
{
waitall(requests.begin(), requests.end());
}
void UCXBufferCommunicator::waitall(std::vector<comm_handle_t>::const_iterator begin,
std::vector<comm_handle_t>::const_iterator end)
{
// Use busy polling for waiting for all request handles
while (true) {
bool all_finished = true;
for (auto it = begin; it != end; it++) {
CommInfo *request = (CommInfo *)*it;
if (request != nullptr && request->completed == false) {
all_finished = false;
break;
}
}
if (all_finished) break;
ucp_worker_progress(ucp_worker);
}
// Put the comm buffers back to the buffer queue and free all request handles
for (auto it = begin; it != end; it++) {
CommInfo *request = (CommInfo *)*it;
if (request != nullptr) {
buffer_cache.push(request->comm_buffer);
request->completed = false;
request->comm = nullptr;
request->orig_info = nullptr;
request->comm_buffer = nullptr;
if (request->custom_allocated)
free(request);
else
ucp_request_free(request);
}
}
}
void UCXBufferCommunicator::finalize()
{
deregister_buffer(cache_mem_handle);
rmm::mr::get_current_device_resource()->deallocate(
cache_start_addr, comm_buffer_size * buffer_cache.size(), cudaStreamDefault);
CUDA_RT_CALL(cudaStreamSynchronize(cudaStreamDefault));
UCXCommunicator::finalize();
}
UCXCommunicator *initialize_ucx_communicator(bool use_buffer_communicator,
int num_comm_buffers,
int64_t comm_buffer_size)
{
if (use_buffer_communicator) {
UCXBufferCommunicator *communicator = new UCXBufferCommunicator();
communicator->initialize();
communicator->setup_cache(num_comm_buffers, comm_buffer_size);
return communicator;
} else {
UCXCommunicator *communicator = new UCXCommunicator();
communicator->initialize();
return communicator;
}
}
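// For reference (mirrors the call sites in the tests): the buffered variant is typically created
// as initialize_ucx_communicator(true, 2 * mpi_size, comm_buffer_size) -- one send and one
// receive buffer per peer rank -- while the unbuffered variant is created as
// initialize_ucx_communicator(false, 0, 0).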
void NCCLCommunicator::initialize()
{
Communicator::initialize();
ncclUniqueId nccl_id;
if (mpi_rank == 0) NCCL_CALL(ncclGetUniqueId(&nccl_id));
MPI_CALL(MPI_Bcast(&nccl_id, sizeof(nccl_id), MPI_BYTE, 0, MPI_COMM_WORLD));
NCCL_CALL(ncclCommInitRank(&nccl_comm, mpi_size, nccl_id, mpi_rank));
CUDA_RT_CALL(cudaStreamCreate(&comm_stream));
}
void NCCLCommunicator::start()
{
NCCL_CALL(ncclGroupStart());
comm_buffers.clear();
comm_buffer_sizes.clear();
recv_buffers.clear();
recv_buffer_idx.clear();
}
void NCCLCommunicator::send(const void *buf, int64_t count, int element_size, int dest)
{
  // Note: NCCL has a performance issue when the buffer is not 128-bit aligned.
  // This implementation works around the issue by forcing the communication to go through an
  // allocated communication buffer. This buffer is 256B aligned to be compatible with RMM.
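  // For example (illustrative), a 100-byte message is padded to 256 bytes and a 300-byte message
  // is padded to 512 bytes before being handed to NCCL.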
std::size_t aligned_size = (count * element_size + 255) / 256 * 256;
comm_buffers.push_back(
rmm::mr::get_current_device_resource()->allocate(aligned_size, comm_stream));
comm_buffer_sizes.push_back(count * element_size);
CUDA_RT_CALL(cudaMemcpyAsync(
comm_buffers.back(), buf, count * element_size, cudaMemcpyDeviceToDevice, comm_stream));
NCCL_CALL(ncclSend(comm_buffers.back(), aligned_size, ncclChar, dest, nccl_comm, comm_stream));
}
void NCCLCommunicator::recv(void *buf, int64_t count, int element_size, int source)
{
std::size_t aligned_size = (count * element_size + 255) / 256 * 256;
recv_buffers.push_back(buf);
recv_buffer_idx.push_back(comm_buffers.size());
comm_buffers.push_back(
rmm::mr::get_current_device_resource()->allocate(aligned_size, comm_stream));
comm_buffer_sizes.push_back(count * element_size);
NCCL_CALL(ncclRecv(comm_buffers.back(), aligned_size, ncclChar, source, nccl_comm, comm_stream));
}
void NCCLCommunicator::stop()
{
NCCL_CALL(ncclGroupEnd());
for (std::size_t ibuffer = 0; ibuffer < recv_buffers.size(); ibuffer++) {
std::size_t idx = recv_buffer_idx[ibuffer];
CUDA_RT_CALL(cudaMemcpyAsync(recv_buffers[ibuffer],
comm_buffers[idx],
comm_buffer_sizes[idx],
cudaMemcpyDeviceToDevice,
comm_stream));
}
for (std::size_t ibuffer = 0; ibuffer < comm_buffers.size(); ibuffer++) {
std::size_t aligned_size = (comm_buffer_sizes[ibuffer] + 255) / 256 * 256;
rmm::mr::get_current_device_resource()->deallocate(
comm_buffers[ibuffer], aligned_size, comm_stream);
}
CUDA_RT_CALL(cudaStreamSynchronize(comm_stream));
}
void NCCLCommunicator::finalize()
{
CUDA_RT_CALL(cudaStreamDestroy(comm_stream));
NCCL_CALL(ncclCommDestroy(nccl_comm));
}
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/test/string_payload.cu
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../src/communicator.hpp"
#include "../src/distributed_join.hpp"
#include "../src/error.hpp"
#include "../src/setup.hpp"
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/table/table.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/device_vector.hpp>
#include <rmm/exec_policy.hpp>
#include <rmm/mr/device/managed_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <thrust/sequence.h>
#include <mpi.h>
#include <cassert>
#include <cstring>
#include <memory>
/**
* Generate an input table used for this test case.
*
* The generated table has `nelements_per_gpu` rows and two columns. The first column is the key
* column, with sequence `start_value`, `start_value+multiple`, `start_value+multiple*2`, etc. For
* example, if `start_value` is `0`, and `multiple` is `3`, the key column will contain the sequence
* 0,3,6,9,12,etc. The payload column is a string column. The string contained in a row with key `k`
 * has length `k % 7 + 1` and is filled with the `k % 26`-th lowercase letter. For example, if `k=2`,
* the string is `ccc`, and if `k=5`, the string is `ffffff`.
*/
std::unique_ptr<cudf::table> generate_table(cudf::size_type nelements_per_gpu,
int start_value,
int multiple)
{
// First pass: calculate string subcolumn size
cudf::size_type string_column_size = 0;
for (cudf::size_type ielement = 0; ielement < nelements_per_gpu; ielement++) {
int current_value = start_value + ielement * multiple;
string_column_size += (current_value % 7 + 1);
}
// Allocate buffers for the string column
rmm::device_vector<char> strings(string_column_size);
rmm::device_vector<cudf::size_type> offsets(nelements_per_gpu + 1);
// Second pass, fill the string subcolumn
cudf::size_type current_offset = 0;
for (cudf::size_type ielement = 0; ielement < nelements_per_gpu; ielement++) {
int current_value = start_value + ielement * multiple;
int current_size = current_value % 7 + 1;
char current_char = 'a' + current_value % 26;
offsets[ielement] = current_offset;
memset(thrust::raw_pointer_cast(strings.data() + current_offset), current_char, current_size);
current_offset += current_size;
}
offsets[nelements_per_gpu] = current_offset;
// Construct the payload column
std::unique_ptr<cudf::column> payload_column = cudf::make_strings_column(
cudf::device_span<char>(strings), cudf::device_span<cudf::size_type>(offsets));
// Construct the key column
std::unique_ptr<cudf::column> key_column =
cudf::make_numeric_column(cudf::data_type(cudf::type_id::INT32), nelements_per_gpu);
int *key_buffer = key_column->mutable_view().head<int>();
thrust::sequence(
rmm::exec_policy(), key_buffer, key_buffer + nelements_per_gpu, start_value, multiple);
std::vector<std::unique_ptr<cudf::column>> new_table;
new_table.push_back(std::move(key_column));
new_table.push_back(std::move(payload_column));
return std::make_unique<cudf::table>(std::move(new_table));
}
inline void check_payload_correctness(cudf::column_view payload_column,
cudf::size_type irow,
int key)
{
cudf::size_type start_idx = *(payload_column.child(0).begin<cudf::size_type>() + irow);
cudf::size_type end_idx = *(payload_column.child(0).begin<cudf::size_type>() + irow + 1);
assert(end_idx - start_idx == key % 7 + 1);
for (; start_idx < end_idx; start_idx++)
assert(*(payload_column.child(1).begin<char>() + start_idx) == 'a' + key % 26);
}
void run_test(cudf::size_type nelements_per_gpu,
bool compression,
Communicator *communicator,
int nvlink_domain_size)
{
int mpi_rank = communicator->mpi_rank;
int mpi_size = communicator->mpi_size;
std::unique_ptr<cudf::table> left_table =
generate_table(nelements_per_gpu, nelements_per_gpu * mpi_rank * 3, 3);
std::unique_ptr<cudf::table> right_table =
generate_table(nelements_per_gpu, nelements_per_gpu * mpi_rank * 5, 5);
/* Generate compression options */
std::vector<ColumnCompressionOptions> left_compression_options =
generate_compression_options_distributed(left_table->view(), compression);
std::vector<ColumnCompressionOptions> right_compression_options =
generate_compression_options_distributed(right_table->view(), compression);
auto join_result = distributed_inner_join(left_table->view(),
right_table->view(),
{0},
{0},
communicator,
left_compression_options,
right_compression_options,
1,
false,
nullptr,
nvlink_domain_size);
assert(join_result->num_columns() == 4);
int num_rows = join_result->num_rows();
int total_nrows;
MPI_CALL(MPI_Allreduce(&num_rows, &total_nrows, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD));
assert(total_nrows == nelements_per_gpu * mpi_size / 5);
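  // (Illustrative reasoning: left keys are the multiples of 3 below 3 * nelements_per_gpu *
  // mpi_size and right keys are multiples of 5, so matching keys are the multiples of 15 in that
  // range -- nelements_per_gpu * mpi_size / 5 of them.)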
if (num_rows == 0) return;
cudf::column_view column0 = join_result->view().column(0);
cudf::column_view column1 = join_result->view().column(1);
cudf::column_view column2 = join_result->view().column(2);
cudf::column_view column3 = join_result->view().column(3);
for (cudf::size_type irow = 0; irow < num_rows; irow++) {
int key = *(column0.begin<int>() + irow);
assert(key % 15 == 0);
assert(key == *(column2.begin<int>() + irow));
check_payload_correctness(column1, irow, key);
check_payload_correctness(column3, irow, key);
}
}
int main(int argc, char *argv[])
{
MPI_CALL(MPI_Init(&argc, &argv));
set_cuda_device();
/* Setup memory pool */
rmm::mr::managed_memory_resource mr;
rmm::mr::set_current_device_resource(&mr);
/* Initialize communicator */
UCXCommunicator *communicator = initialize_ucx_communicator(false, 0, 0);
/* Run tests */
// Note: temporarily disable some test cases because nvcomp's cascaded selector can raise
// "Floating point exception" if the input buffer is smaller than sample_size * num_samples.
// run_test(12'000, true, communicator, 1);
run_test(12'000, false, communicator, 1);
run_test(120'000, true, communicator, 1);
run_test(120'000, false, communicator, 1);
run_test(120'000, true, communicator, 2);
/* Cleanup */
communicator->finalize();
delete communicator;
MPI_CALL(MPI_Finalize());
return 0;
}
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/test/compare_against_analytical.cu
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../src/communicator.hpp"
#include "../src/distribute_table.hpp"
#include "../src/distributed_join.hpp"
#include "../src/error.hpp"
#include "../src/setup.hpp"
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/join.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <mpi.h>
#include <cassert>
#include <iostream>
#include <memory>
#include <vector>
using cudf::table;
__global__ void verify_correctness(
const int *col0, const int *col1, const int *col2, const int *col3, int size)
{
for (size_t i = threadIdx.x + blockDim.x * blockIdx.x; i < size; i += blockDim.x * gridDim.x) {
assert(col0[i] % 15 == 0);
assert(col1[i] == col0[i] / 3);
assert(col2[i] % 15 == 0);
assert(col3[i] == col2[i] / 5);
assert(col0[i] == col2[i]);
}
}
/**
* This helper function generates the left/right table used for testing join.
*
 * There are two columns in each table. The first column is filled with consecutive multiples of
 * the argument *multiple* and is used as the key column. For example, if *multiple* is 3, the
 * column contains 0,3,6,9...etc. The second column is filled with consecutive integers and is
 * used as the payload column.
*/
std::unique_ptr<table> generate_table(cudf::size_type size, int multiple)
{
std::vector<std::unique_ptr<cudf::column>> new_table;
// construct the key column
auto key_column = cudf::make_numeric_column(cudf::data_type(cudf::type_id::INT32), size);
auto key_buffer = key_column->mutable_view().head<int>();
thrust::sequence(thrust::device, key_buffer, key_buffer + size, 0, multiple);
new_table.push_back(std::move(key_column));
// construct the payload column
auto payload_column = cudf::make_numeric_column(cudf::data_type(cudf::type_id::INT32), size);
auto payload_buffer = payload_column->mutable_view().head<int>();
thrust::sequence(thrust::device, payload_buffer, payload_buffer + size);
new_table.push_back(std::move(payload_column));
return std::make_unique<table>(std::move(new_table));
}
void run_test(cudf::size_type size, // must be a multiple of 5
int over_decomposition_factor,
bool compression,
int nvlink_domain_size,
Communicator *communicator)
{
assert(size % 5 == 0);
int mpi_rank;
MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank));
/* Generate input tables */
std::unique_ptr<table> left_table;
std::unique_ptr<table> right_table;
cudf::table_view left_view;
cudf::table_view right_view;
if (mpi_rank == 0) {
left_table = generate_table(size, 3);
right_table = generate_table(size, 5);
left_view = left_table->view();
right_view = right_table->view();
CUDA_RT_CALL(cudaStreamSynchronize(cudaStreamDefault));
}
/* Distribute input tables among ranks */
auto local_left_table = distribute_table(left_view, communicator);
auto local_right_table = distribute_table(right_view, communicator);
/* Generate compression options */
std::vector<ColumnCompressionOptions> left_compression_options =
generate_compression_options_distributed(local_left_table->view(), compression);
std::vector<ColumnCompressionOptions> right_compression_options =
generate_compression_options_distributed(local_right_table->view(), compression);
/* Distributed join */
auto join_result = distributed_inner_join(local_left_table->view(),
local_right_table->view(),
{0},
{0},
communicator,
left_compression_options,
right_compression_options,
over_decomposition_factor,
false,
nullptr,
nvlink_domain_size);
/* Merge table from worker ranks to the root rank */
std::unique_ptr<table> merged_table = collect_tables(join_result->view(), communicator);
/* Verify Correctness */
if (mpi_rank == 0) {
const int block_size{128};
int nblocks{-1};
CUDA_RT_CALL(
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&nblocks, verify_correctness, block_size, 0));
// Since the key has to be a multiple of 5, the join result size is the size of left table
// divided by 5.
assert(merged_table->num_rows() == size / 5);
verify_correctness<<<nblocks, block_size>>>(merged_table->get_column(0).view().head<int>(),
merged_table->get_column(1).view().head<int>(),
merged_table->get_column(2).view().head<int>(),
merged_table->get_column(3).view().head<int>(),
merged_table->num_rows());
CUDA_RT_CALL(cudaDeviceSynchronize());
std::cerr << "Test case (" << size << "," << over_decomposition_factor << "," << compression
<< "," << nvlink_domain_size << ") passes successfully.\n";
}
}
int main(int argc, char *argv[])
{
MPI_CALL(MPI_Init(&argc, &argv));
set_cuda_device();
/* Initialize memory pool */
const size_t pool_size = 960'000'000; // 960MB
rmm::mr::device_memory_resource *mr = rmm::mr::get_current_device_resource();
rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> pool_mr{mr, pool_size, pool_size};
rmm::mr::set_current_device_resource(&pool_mr);
/* Initialize communicator */
int mpi_size;
MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &mpi_size));
UCXCommunicator *communicator = initialize_ucx_communicator(
true,
2 * mpi_size, // *2 because buffers are needed for both sends and receives
100'000LL);
/* Run test */
// Note: temporarily disable some test cases because nvcomp's cascaded selector can raise
// "Floating point exception" if the input buffer is smaller than sample_size * num_samples.
run_test(30'000, 1, false, 1, communicator);
run_test(300'000, 1, false, 1, communicator);
// run_test(300'000, 1, true, 1, communicator);
run_test(300'000, 4, false, 1, communicator);
// run_test(300'000, 4, true, 1, communicator);
run_test(3'000'000, 1, true, 1, communicator);
run_test(3'000'000, 4, true, 1, communicator);
run_test(3'000'000, 4, true, 2, communicator);
/* Cleanup */
communicator->finalize();
delete communicator;
MPI_CALL(MPI_Finalize());
return 0;
}
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/test/CMakeLists.txt
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
include(BuildHelpers)
file(GLOB SOURCES *.cu *.cpp)
build_executables(SOURCES)
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/test/test_shuffle_on.cpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
 * Test the correctness of the shuffle_on implementation. This test has the following steps:
 *
 * 1. Each GPU independently generates a table with a single key column, filled with random integers.
 * 2. The generated table is shuffled across GPUs, using the identity hash function.
 * 3. Each GPU verifies that its shuffled keys all have the same remainder modulo the number of MPI
 * ranks.
*/
#include "../src/communicator.hpp"
#include "../src/error.hpp"
#include "../src/setup.hpp"
#include "../src/shuffle_on.hpp"
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/table/table.hpp>
#include <cudf/types.hpp>
#include <rmm/mr/device/managed_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <mpi.h>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <memory>
#include <vector>
using cudf::column;
using cudf::table;
std::unique_ptr<table> generate_table(cudf::size_type size)
{
std::vector<std::unique_ptr<column>> columns;
auto key_column = cudf::make_numeric_column(cudf::data_type(cudf::type_id::INT32), size);
auto key_buffer = key_column->mutable_view().head<int>();
for (int ielement = 0; ielement < size; ielement++) {
key_buffer[ielement] = rand() % (size * 10);
}
columns.push_back(std::move(key_column));
return std::make_unique<table>(std::move(columns));
}
void run_test(int nrows_per_gpu, bool compression, Communicator *communicator)
{
auto input_table = generate_table(nrows_per_gpu);
auto compression_options =
generate_compression_options_distributed(input_table->view(), compression);
std::unique_ptr<cudf::table> output_table = shuffle_on(
input_table->view(), {0}, communicator, compression_options, cudf::hash_id::HASH_IDENTITY);
assert(output_table->view().num_columns() == 1);
cudf::size_type num_rows_shuffled = output_table->view().column(0).size();
auto key_buffer = output_table->view().column(0).head<int>();
if (num_rows_shuffled != 0) {
int mod_result = key_buffer[0] % communicator->mpi_size;
for (cudf::size_type ielement = 0; ielement < num_rows_shuffled; ielement++) {
assert(key_buffer[ielement] % communicator->mpi_size == mod_result);
}
}
MPI_CALL(MPI_Barrier(MPI_COMM_WORLD));
if (communicator->mpi_rank == 0) {
std::cerr << std::boolalpha;
std::cerr << "Test case (" << nrows_per_gpu << "," << compression << ") passes successfully.\n";
}
}
int main(int argc, char *argv[])
{
MPI_CALL(MPI_Init(&argc, &argv));
set_cuda_device();
rmm::mr::managed_memory_resource mr;
rmm::mr::set_current_device_resource(&mr);
UCXCommunicator *communicator = initialize_ucx_communicator(false, 0, 0);
run_test(1'000'000, false, communicator);
run_test(1'000'000, true, communicator);
communicator->finalize();
delete communicator;
MPI_CALL(MPI_Finalize());
return 0;
}
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/test/buffer_communicator.cu
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../src/communicator.hpp"
#include "../src/error.hpp"
#include "../src/setup.hpp"
#include <rmm/device_buffer.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <mpi.h>
#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>
static int64_t COUNT = 50'000'000LL;
void parse_command_line_arguments(int argc, char *argv[])
{
for (int iarg = 0; iarg < argc; iarg++) {
if (!strcmp(argv[iarg], "--count")) { COUNT = atol(argv[iarg + 1]); }
}
}
__global__ void set_data(uint64_t *start_addr, uint64_t size, uint64_t start_val)
{
const int ithread = threadIdx.x + blockDim.x * blockIdx.x;
const int stride = blockDim.x * gridDim.x;
for (uint64_t ielement = ithread; ielement < size; ielement += stride) {
start_addr[ielement] = (start_val + ielement);
}
}
__global__ void test_correctness(uint64_t *start_addr, uint64_t size, uint64_t start_val)
{
const int ithread = threadIdx.x + blockDim.x * blockIdx.x;
const int stride = blockDim.x * gridDim.x;
for (uint64_t ielement = ithread; ielement < size; ielement += stride) {
assert(start_addr[ielement] == (start_val + ielement));
}
}
int main(int argc, char *argv[])
{
MPI_CALL(MPI_Init(&argc, &argv));
set_cuda_device();
/* Parse command line arguments */
parse_command_line_arguments(argc, argv);
/* Initialize memory pool */
size_t free_memory, total_memory;
CUDA_RT_CALL(cudaMemGetInfo(&free_memory, &total_memory));
const size_t pool_size = free_memory - 5LL * (1LL << 29); // free memory - 500MB
rmm::mr::device_memory_resource *mr = rmm::mr::get_current_device_resource();
rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> pool_mr{mr, pool_size, pool_size};
rmm::mr::set_current_device_resource(&pool_mr);
/* Initialize communicator */
int mpi_rank;
int mpi_size;
MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank));
MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &mpi_size));
UCXCommunicator *communicator = initialize_ucx_communicator(true, 2 * mpi_size, 20'000'000LL);
/* Send and recv data */
rmm::device_buffer send_buf{COUNT * sizeof(uint64_t), 0};
std::vector<uint64_t *> recv_buf(mpi_size, nullptr);
std::vector<comm_handle_t> send_reqs(mpi_size, nullptr);
std::vector<comm_handle_t> recv_reqs(mpi_size, nullptr);
int grid_size{-1};
int block_size{-1};
CUDA_RT_CALL(cudaOccupancyMaxPotentialBlockSize(&grid_size, &block_size, set_data));
set_data<<<grid_size, block_size>>>(
static_cast<uint64_t *>(send_buf.data()), COUNT, COUNT * mpi_rank);
for (int irank = 0; irank < mpi_size; irank++) {
if (irank != mpi_rank) {
send_reqs[irank] = communicator->send(send_buf.data(), COUNT, sizeof(uint64_t), irank, 32);
}
}
int64_t count_received;
for (int irank = mpi_size - 1; irank >= 0; irank--) {
if (irank != mpi_rank) {
recv_reqs[irank] =
communicator->recv((void **)&recv_buf[irank], &count_received, sizeof(uint64_t), irank, 32);
}
}
communicator->waitall(send_reqs);
communicator->waitall(recv_reqs);
assert(count_received == COUNT);
/* Test the correctness */
for (int irank = 0; irank < mpi_size; irank++) {
if (irank != mpi_rank) {
test_correctness<<<grid_size, block_size>>>(recv_buf[irank], COUNT, COUNT * irank);
}
}
/* Cleanup */
for (int irank = 0; irank < mpi_size; irank++) {
if (irank != mpi_rank) {
rmm::mr::get_current_device_resource()->deallocate(recv_buf[irank], COUNT, cudaStreamDefault);
}
}
CUDA_RT_CALL(cudaStreamSynchronize(cudaStreamDefault));
communicator->finalize();
delete communicator;
if (mpi_rank == 0) { std::cerr << "Test case \"buffer_communicator\" passes successfully.\n"; }
MPI_CALL(MPI_Finalize());
return 0;
}
| 0 |
rapidsai_public_repos/distributed-join
|
rapidsai_public_repos/distributed-join/test/compare_against_single_gpu.cu
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
This test case compares the result of distributed join on multiple GPUs to the result of
cudf::inner_join on a single GPU.
Specifically, it takes the following steps:
1. The root rank constructs a random build table and a random probe table.
2. The root rank runs cudf::inner_join on the newly constructed tables.
3. The root rank distributes the build and probe table across all ranks.
4. All ranks run distributed join collectively.
5. Each rank sends the distributed join result to the root rank.
6. The root rank assembles the received results into a single table and compares it to the result of
step 2.
*/
#include "../src/communicator.hpp"
#include "../src/distribute_table.hpp"
#include "../src/distributed_join.hpp"
#include "../src/error.hpp"
#include "../src/generate_table.cuh"
#include "../src/registered_memory_resource.hpp"
#include "../src/setup.hpp"
#include <cudf/join.hpp>
#include <cudf/sorting.hpp>
#include <cudf/table/table.hpp>
#include <cudf/types.hpp>
#include <cudf/wrappers/timestamps.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <cstdint>
#include <iostream>
#include <memory>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
using cudf::table;
template <typename dtype>
std::string dtype_to_string()
{
if (std::is_same<dtype, int32_t>::value) {
return "int32_t";
} else if (std::is_same<dtype, int64_t>::value) {
return "int64_t";
} else if (std::is_same<dtype, cudf::timestamp_D>::value) {
return "timestamp_D";
} else if (std::is_same<dtype, cudf::timestamp_ms>::value) {
return "timestamp_ms";
} else if (std::is_same<dtype, cudf::timestamp_ns>::value) {
return "timestamp_ns";
} else if (std::is_same<dtype, cudf::duration_D>::value) {
return "duration_D";
} else if (std::is_same<dtype, cudf::duration_s>::value) {
return "duration_s";
} else if (std::is_same<dtype, cudf::duration_us>::value) {
return "duration_us";
} else {
return "unknown_t";
}
}
template <typename data_type>
__global__ void verify_correctness(const data_type *data1,
const data_type *data2,
cudf::size_type size)
{
const cudf::size_type start_idx = threadIdx.x + blockDim.x * blockIdx.x;
const cudf::size_type stride = blockDim.x * gridDim.x;
for (cudf::size_type idx = start_idx; idx < size; idx += stride) {
assert(data1[idx] == data2[idx]);
}
}
template <typename KEY_T, typename PAYLOAD_T>
void run_test(cudf::size_type build_table_size,
cudf::size_type probe_table_size,
double selectivity,
bool is_build_table_key_unique,
int over_decomposition_factor,
bool compression,
int nvlink_domain_size,
Communicator *communicator)
{
int mpi_rank;
int mpi_size;
MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank));
MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &mpi_size));
/* Generate build table and probe table and compute reference solution */
std::unique_ptr<table> build;
std::unique_ptr<table> probe;
std::unique_ptr<table> reference;
cudf::table_view build_view;
cudf::table_view probe_view;
if (mpi_rank == 0) {
KEY_T rand_max_val = build_table_size * 2;
std::tie(build, probe) = generate_build_probe_tables<KEY_T, PAYLOAD_T>(
build_table_size, probe_table_size, selectivity, rand_max_val, is_build_table_key_unique);
build_view = build->view();
probe_view = probe->view();
reference = cudf::inner_join(build->view(), probe->view(), {0}, {0});
}
std::unique_ptr<table> local_build = distribute_table(build_view, communicator);
std::unique_ptr<table> local_probe = distribute_table(probe_view, communicator);
/* Generate compression options */
std::vector<ColumnCompressionOptions> build_compression_options =
generate_compression_options_distributed(local_build->view(), compression);
std::vector<ColumnCompressionOptions> probe_compression_options =
generate_compression_options_distributed(local_probe->view(), compression);
/* Distributed join */
std::unique_ptr<table> join_result_all_ranks = distributed_inner_join(local_build->view(),
local_probe->view(),
{0},
{0},
communicator,
build_compression_options,
probe_compression_options,
over_decomposition_factor,
false,
nullptr,
nvlink_domain_size);
/* Send join result from all ranks to the root rank */
std::unique_ptr<table> join_result = collect_tables(join_result_all_ranks->view(), communicator);
/* Verify correctness */
if (mpi_rank == 0) {
// Compare the number of columns
cudf::size_type ncols = reference->num_columns();
assert(join_result->num_columns() == ncols);
assert(ncols == 4);
// Although join_result and reference should contain the same table, rows may be reordered.
// Therefore, we first sort both tables and then compare
cudf::size_type nrows = reference->num_rows();
assert(join_result->num_rows() == nrows);
std::unique_ptr<table> join_sorted = cudf::sort(join_result->view());
std::unique_ptr<table> reference_sorted = cudf::sort(reference->view());
// Get the number of thread blocks based on thread block size
const int block_size = 128;
int nblocks{-1};
CUDA_RT_CALL(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&nblocks, verify_correctness<KEY_T>, block_size, 0));
// There should be four columns in the result table. The first two columns are from the left
// table, and the last two columns are from the right table.
verify_correctness<KEY_T>
<<<nblocks, block_size>>>(join_sorted->view().column(0).head<KEY_T>(),
reference_sorted->view().column(0).head<KEY_T>(),
nrows);
verify_correctness<PAYLOAD_T>
<<<nblocks, block_size>>>(join_sorted->view().column(1).head<PAYLOAD_T>(),
reference_sorted->view().column(1).head<PAYLOAD_T>(),
nrows);
verify_correctness<KEY_T>
<<<nblocks, block_size>>>(join_sorted->view().column(2).head<KEY_T>(),
reference_sorted->view().column(2).head<KEY_T>(),
nrows);
verify_correctness<PAYLOAD_T>
<<<nblocks, block_size>>>(join_sorted->view().column(3).head<PAYLOAD_T>(),
reference_sorted->view().column(3).head<PAYLOAD_T>(),
nrows);
CUDA_RT_CALL(cudaDeviceSynchronize());
std::cerr << std::boolalpha;
std::cerr << "Test case (" << dtype_to_string<KEY_T>() << "," << dtype_to_string<PAYLOAD_T>()
<< "," << build_table_size << "," << probe_table_size << "," << selectivity << ","
<< is_build_table_key_unique << "," << over_decomposition_factor << "," << compression
<< "," << nvlink_domain_size << ") passes successfully.\n";
}
}
int main(int argc, char *argv[])
{
MPI_CALL(MPI_Init(&argc, &argv));
set_cuda_device();
/* Initialize communicator */
UCXCommunicator *communicator = initialize_ucx_communicator(false, 0, 0);
/* Initialize memory pool */
const size_t pool_size = 1'500'000'000; // 1.5GB
registered_memory_resource mr(communicator);
auto *pool_mr =
new rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource>(&mr, pool_size, pool_size);
rmm::mr::set_current_device_resource(pool_mr);
/* run test */
run_test<int32_t, int32_t>(1'000'000, 5'000'000, 0.3, true, 10, false, 1, communicator);
run_test<int32_t, int32_t>(1'000'000, 5'000'000, 0.3, true, 10, true, 1, communicator);
run_test<int64_t, int64_t>(1'000'000, 5'000'000, 0.3, true, 10, false, 1, communicator);
run_test<int64_t, int64_t>(1'000'000, 5'000'000, 0.3, true, 10, true, 1, communicator);
run_test<int32_t, int32_t>(1'000'000, 5'000'000, 1.0, true, 10, false, 1, communicator);
run_test<int32_t, int32_t>(1'000'000, 5'000'000, 1.0, true, 10, true, 1, communicator);
run_test<int64_t, int64_t>(1'000'000, 5'000'000, 1.0, true, 10, false, 1, communicator);
run_test<int64_t, int64_t>(1'000'000, 5'000'000, 1.0, true, 10, true, 1, communicator);
run_test<int32_t, int32_t>(1'000'000, 1'000'000, 0.3, true, 10, false, 1, communicator);
run_test<int32_t, int32_t>(1'000'000, 1'000'000, 0.3, true, 10, true, 1, communicator);
run_test<int64_t, int64_t>(1'000'000, 1'000'000, 0.3, true, 10, false, 1, communicator);
run_test<int64_t, int64_t>(1'000'000, 1'000'000, 0.3, true, 10, true, 1, communicator);
run_test<int32_t, int32_t>(1'000'000, 5'000'000, 0.3, true, 1, false, 1, communicator);
run_test<int32_t, int32_t>(1'000'000, 5'000'000, 0.3, true, 1, true, 1, communicator);
run_test<int64_t, int64_t>(1'000'000, 5'000'000, 0.3, true, 1, false, 1, communicator);
run_test<int64_t, int64_t>(1'000'000, 5'000'000, 0.3, true, 1, true, 1, communicator);
run_test<int64_t, cudf::timestamp_D>(1'000'000, 1'000'000, 0.3, true, 1, false, 1, communicator);
run_test<int64_t, cudf::timestamp_D>(1'000'000, 1'000'000, 0.3, true, 1, true, 1, communicator);
run_test<int64_t, cudf::timestamp_ms>(1'000'000, 1'000'000, 0.3, true, 1, false, 1, communicator);
run_test<int64_t, cudf::timestamp_ms>(1'000'000, 1'000'000, 0.3, true, 1, true, 1, communicator);
run_test<int64_t, cudf::timestamp_ns>(1'000'000, 1'000'000, 0.3, true, 1, false, 1, communicator);
run_test<int64_t, cudf::timestamp_ns>(1'000'000, 1'000'000, 0.3, true, 1, true, 1, communicator);
run_test<int64_t, cudf::duration_D>(1'000'000, 1'000'000, 0.3, true, 1, false, 1, communicator);
run_test<int64_t, cudf::duration_D>(1'000'000, 1'000'000, 0.3, true, 1, true, 1, communicator);
run_test<int64_t, cudf::duration_s>(1'000'000, 1'000'000, 0.3, true, 1, false, 1, communicator);
run_test<int64_t, cudf::duration_s>(1'000'000, 1'000'000, 0.3, true, 1, true, 1, communicator);
run_test<int64_t, cudf::duration_us>(1'000'000, 1'000'000, 0.3, true, 1, false, 1, communicator);
run_test<int64_t, cudf::duration_us>(1'000'000, 1'000'000, 0.3, true, 1, true, 1, communicator);
run_test<int32_t, int32_t>(1'000'000, 1'000'000, 0.3, true, 1, false, 2, communicator);
run_test<int32_t, int32_t>(1'000'000, 1'000'000, 0.3, true, 1, true, 2, communicator);
run_test<int32_t, int32_t>(1'000'000, 1'000'000, 0.3, true, 10, false, 2, communicator);
run_test<int32_t, int32_t>(1'000'000, 1'000'000, 0.3, true, 10, true, 2, communicator);
/* Cleanup */
delete pool_mr;
communicator->finalize();
delete communicator;
MPI_CALL(MPI_Finalize());
return 0;
}
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/sphinx-theme/pyproject.toml
|
[project]
name = "rapidsai-sphinx-theme"
description = "PyData-based Sphinx theme for RAPIDS"
dynamic = ["version"]
readme = "README.md"
requires-python = ">=3.8"
dependencies = [
"sphinx>=5.0",
"pydata-sphinx-theme"
]
license = { file = "LICENSE" }
maintainers = [
{ name = "RAPIDS Team", email = "[email protected]" },
]
classifiers = [
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Framework :: Sphinx",
"Framework :: Sphinx :: Theme",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
]
[project.urls]
"Source" = "https://github.com/rapidsai/sphinx-theme"
[project.entry-points]
"sphinx.html_themes" = { rapidsai_sphinx_theme = "rapidsai_sphinx_theme" }
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/sphinx-theme/README.md
|
# RAPIDS Sphinx Theme
> **Warning**
> Work In Progress & Experimental
Uses the default [PyData Theme](https://pydata-sphinx-theme.readthedocs.io/en/stable/) with a customized configuration to standardize RAPIDS documentation navigation.
- RAPIDS nav section based on [announcements banner](https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/announcements.html)
- Custom colors based on [css styling](https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/styling.html)
- Doc versions based on [version switcher](https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/version-dropdown.html)
## How to Use
Install the theme:
```sh
pip install git+https://github.com/rapidsai/sphinx-theme.git@main
```
Set up the `conf.py` file:
```py
html_theme = "rapidsai_sphinx_theme"
```
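Because the theme inherits from the PyData theme, projects can further customize `html_theme_options` in `conf.py`. A minimal sketch, assuming placeholder values for the logo text, GitHub URL, and version-switcher endpoint (these are not official RAPIDS values):
```py
# Hypothetical example -- replace the placeholder values for your project.
html_theme_options = {
    "logo": {"text": "my-project", "link": "https://rapids.ai/"},
    "github_url": "https://github.com/rapidsai/my-project",
    # The version switcher reads a JSON file listing the available doc versions.
    "switcher": {
        "json_url": "https://example.com/versions.json",  # placeholder URL
        "version_match": "23.06",  # placeholder version
    },
}
```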
## How to Develop
1. Install the theme locally with:
```sh
pip install -e .
```
2. Make any necessary changes to the `rapidsai_sphinx_theme` directory
3. Build the demo docs locally with:
```sh
sphinx-build -b dirhtml docs _html
```
4. Start a development server to view the built docs with:
```sh
python -m http.server -d _html
```
5. View the rendered docs @ <http://0.0.0.0:8000/>
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/sphinx-theme/LICENSE
|
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2023 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
rapidsai_public_repos/sphinx-theme
|
rapidsai_public_repos/sphinx-theme/rapidsai_sphinx_theme/theme.conf
|
[theme]
inherit = pydata_sphinx_theme
stylesheet = rapidsai.css
| 0 |
rapidsai_public_repos/sphinx-theme
|
rapidsai_public_repos/sphinx-theme/rapidsai_sphinx_theme/__init__.py
|
from os import path
from typing import Dict
from sphinx.application import Sphinx
__version__ = "0.0.0"
def setup(app: Sphinx) -> Dict[str, bool]:
    # Register the theme directory and its deferred JavaScript with Sphinx.
    app.add_html_theme("rapidsai_sphinx_theme", path.abspath(path.dirname(__file__)))
app.add_js_file("rapidsai.js", loading_method="defer")
# Override PyData theme defaults here
app.config["html_theme_options"].update(
{
"navbar_align": "left",
"show_toc_level": 2,
"header_links_before_dropdown": 2,
"navbar_start": ["navbar-logo", "version-switcher"],
"external_links": [
{"name": "Ecosystem", "url": "https://rapids.ai/ecosystem"},
{"name": "Learn More", "url": "https://rapids.ai/learn-more"},
{"name": "News", "url": "https://rapids.ai/news"},
{"name": "User Guides", "url": "https://rapids.ai/user-guides"},
{"name": "API Docs", "url": "https://rapids.ai/api-docs"},
{"name": "Install", "url": "https://rapids.ai/install"},
],
}
)
return {"parallel_read_safe": True, "parallel_write_safe": True}
| 0 |
rapidsai_public_repos/sphinx-theme/rapidsai_sphinx_theme
|
rapidsai_public_repos/sphinx-theme/rapidsai_sphinx_theme/static/rapidsai.css
|
/* RAPIDS Custom Docs css */
/* https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/styling.html#custom-css */
body {
margin: 0;
padding: 0;
}
/* RAPIDS custom colors primary */
html[data-theme="light"] {
--pst-color-primary: #7306ff;
--pst-color-secondary: #984dfb;
--pst-color-info: #ffb500;
--pst-color-danger: #d216d2;
--pst-color-target: #37c9dd;
}
html[data-theme="dark"] {
--pst-color-primary: #7306ff;
--pst-color-secondary: #984dfb;
--pst-color-info: #ffb500;
--pst-color-danger: #d216d2;
--pst-color-target: #37c9dd;
}
/* PyData Overrides */
/* NavBar */
.bd-header {
height: 56px;
background-color: #7400ff !important;
box-shadow: 0px 3px 2px rgba(0, 0, 0, 0.2);
}
.bd-header .navbar-nav li a.nav-link,
.bd-header .navbar-nav .dropdown button {
color: white;
font-weight: bold;
}
.bd-header .navbar-nav > .active > .nav-link {
color: #b49fd6 !important;
padding-left: 0.8rem;
padding-right: 0.8rem;
}
.bd-header .navbar-nav li a.nav-link:focus,
.bd-header .navbar-nav li a.nav-link:hover {
color: #b49fd6 !important;
}
.navbar-brand,
.navbar-brand:hover {
padding: 0.6em 0;
text-decoration: none;
}
.navbar-brand p {
color: white;
font-weight: bold;
}
button.btn.version-switcher__button {
color: white;
margin-bottom: unset;
}
.version-switcher__menu {
background-color: rgb(139 47 255);
border-width: 0;
border-top-right-radius: 0;
border-top-left-radius: 0;
box-shadow: 0 4px 4px rgb(10 10 10 / 20%);
}
.version-switcher__menu a.list-group-item:hover {
background-color: rgb(139 47 255);
color: #b49fd6 !important;
}
.dropdown-menu[data-bs-popper] {
margin-top: 0.7rem;
}
.version-switcher__menu a.list-group-item {
color: white;
background-color: inherit;
}
.search-button,
.theme-switch-button span,
.navbar-icon-links i.fa-square-twitter:before {
color: #ebebeb;
}
.bd-header .navbar-nav .dropdown .dropdown-menu {
background-color: rgb(139 47 255);
margin: 0.6rem;
border-width: 0;
min-width: 12rem;
border-top-right-radius: 0;
border-top-left-radius: 0;
box-shadow: 0 4px 4px rgb(10 10 10 / 20%);
}
.bd-header label.sidebar-toggle {
color: white;
}
/* Body Text */
nav.bd-links p.bd-links__title {
color: rgb(48, 48, 48);
}
| 0 |
rapidsai_public_repos/sphinx-theme
|
rapidsai_public_repos/sphinx-theme/docs/Makefile
|
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = source
BUILDDIR = build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
| 0 |
rapidsai_public_repos/sphinx-theme
|
rapidsai_public_repos/sphinx-theme/docs/conf.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
# -- Project information -----------------------------------------------------
project = "cudf"
copyright = "2018-2023, NVIDIA Corporation"
author = "NVIDIA Corporation"
language = "en"
html_theme = "rapidsai_sphinx_theme"
html_title = "cuDF Documentation"
html_theme_options = {
"logo": {"text": project, "link": "https://rapids.ai/"},
"github_url": f"https://github.com/rapidsai/{project}",
}
| 0 |
rapidsai_public_repos/sphinx-theme
|
rapidsai_public_repos/sphinx-theme/docs/index.rst
|
================
Document Heading
================
Example Page
============
Sub-heading
-----------
Paragraphs are separated
by a blank line.
* This is a bulleted list.
* It has two items, the second
item uses two lines.
1. This is a numbered list.
2. It has two items too.
#. This is a numbered list.
#. It has two items too.
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/multi-gpu-tools/test_client_bandwidth.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dask_cuda import LocalCUDACluster
from dask.distributed import Client, wait
import cupy as cp
import numpy as np
import cudf
import dask_cudf
import rmm
from time import perf_counter_ns
def benchmark_func(func, n_times=10):
def wrap_func(*args, **kwargs):
time_ls = []
        # Run n_times + 1 iterations; discard the first (warm-up) run
        # and return the timings of the remaining runs.
for _ in range(0,n_times+1):
t1 = perf_counter_ns()
result = func(*args, **kwargs)
t2 = perf_counter_ns()
time_ls.append(t2-t1)
return result, time_ls[1:]
return wrap_func
def create_dataframe(client):
n_rows = 25_000_000
df = cudf.DataFrame({'src':cp.arange(0,n_rows,dtype=cp.int32), 'dst':cp.arange(0,n_rows, dtype=cp.int32), 'eids':cp.ones(n_rows, cp.int32)})
ddf = dask_cudf.from_cudf(df,npartitions= len(client.scheduler_info()['workers'])).persist()
client.rebalance(ddf)
del df
_ = wait(ddf)
return ddf
@benchmark_func
def get_n_rows(ddf, n):
if n==-1:
df = ddf.compute()
else:
df = ddf.head(n)
return df
def run_bandwidth_test(ddf, n):
df, time_ls = get_n_rows(ddf, n)
time_ar = np.asarray(time_ls)
time_mean = time_ar.mean()
size_bytes = df.memory_usage().sum()
    size_gb = round(size_bytes/(pow(1024,3)), 2)  # size in GiB
    print(f"Getting {len(df):,} rows of size {size_gb} GiB took {time_mean*1e-6:.2f} ms")
    time_mean_s = time_mean*1e-9  # timings are in nanoseconds
    print(f"Bandwidth = {round(size_gb/time_mean_s, 4)} GiB/s")
return
if __name__ == "__main__":
cluster = LocalCUDACluster(protocol='ucx',rmm_pool_size='15GB', CUDA_VISIBLE_DEVICES='1,2,3')
client = Client(cluster)
rmm.reinitialize(pool_allocator=True)
ddf = create_dataframe(client)
run_bandwidth_test(ddf, 1_000_000)
run_bandwidth_test(ddf, 2_000_000)
run_bandwidth_test(ddf, 4_000_000)
run_bandwidth_test(ddf, -1)
print("--"*20+"Completed Test"+"--"*20, flush=True)
client.shutdown()
cluster.close()
| 0 |