repo_id
stringlengths 21
96
| file_path
stringlengths 31
155
| content
stringlengths 1
92.9M
| __index_level_0__
int64 0
0
|
---|---|---|---|
rapidsai_public_repos
|
rapidsai_public_repos/kvikio/build.sh
|
#!/bin/bash
# Copyright (c) 2023, NVIDIA CORPORATION.
# kvikio build script
# This script is used to build the component(s) in this repo from
# source, and can be called with various options to customize the
# build as needed (see the help output for details)
# Abort script on first error
set -e
# Capture the raw argument count and list; the helper functions below
# grep ARGS as a whitespace-delimited string rather than re-parsing "$@".
NUMARGS=$#
ARGS=$*
# NOTE: ensure all dir changes are relative to the location of this
# script, and that this script resides in the repo dir!
REPODIR=$(cd $(dirname $0); pwd)
# NOTE(review): -s and --ptds are accepted here but have no help text and are
# never consumed below — presumably kept for parity with other RAPIDS build
# scripts; confirm before removing.
VALIDARGS="clean libkvikio kvikio legate -v -g -n -s --ptds -h"
HELP="$0 [clean] [libkvikio] [kvikio] [legate] [-v] [-g] [-n] [-s] [--ptds] [--cmake-args=\"<args>\"] [-h]
clean - remove all existing build artifacts and configuration (start over)
libkvikio - build and install the libkvikio C++ code
kvikio - build and install the kvikio Python package
legate - build and install the legate-kvikio Python package
-v - verbose build mode
-g - build for debug
-n - no install step
--cmake-args=\\\"<args>\\\" - pass arbitrary list of CMake configuration options (escape all quotes in argument)
-h - print this text
default action (no args) is to build and install 'libkvikio' and 'kvikio' targets
"
# Build/clean locations. KVIKIO_BUILD_DIR and LEGATE_BUILD_DIR are
# space-separated lists of directories (build + scikit-build artifacts).
LIBKVIKIO_BUILD_DIR=${LIBKVIKIO_BUILD_DIR:=${REPODIR}/cpp/build}
KVIKIO_BUILD_DIR="${REPODIR}/python/build ${REPODIR}/python/_skbuild"
LEGATE_BUILD_DIR="${REPODIR}/legate/build ${REPODIR}/legate/_skbuild"
BUILD_DIRS="${LIBKVIKIO_BUILD_DIR} ${KVIKIO_BUILD_DIR} ${LEGATE_BUILD_DIR}"
# Set defaults for vars modified by flags to this script
VERBOSE_FLAG=""
BUILD_TYPE=Release
INSTALL_TARGET=install
RAN_CMAKE=0
# Set defaults for vars that may not have been defined externally
# If INSTALL_PREFIX is not set, check PREFIX, then check
# CONDA_PREFIX, then fall back to install inside of $LIBKVIKIO_BUILD_DIR
# (the ':=' expansions also assign PREFIX/CONDA_PREFIX as a side effect,
# so later commands see the same fallback values)
INSTALL_PREFIX=${INSTALL_PREFIX:=${PREFIX:=${CONDA_PREFIX:=$LIBKVIKIO_BUILD_DIR/install}}}
export PARALLEL_LEVEL=${PARALLEL_LEVEL:-4}
# Succeeds (exit 0) iff the word given as $1 appears in the script's
# argument list; fails when the script was invoked with no arguments.
function hasArg {
  if (( NUMARGS == 0 )); then
    return 1
  fi
  echo " ${ARGS} " | grep -q " $1 "
}
# Extract a single optional --cmake-args="..." option from ARGS.
# On success, strips the option from ARGS (so later validation passes) and
# leaves the inner argument string, without surrounding quotes, in the
# EXTRA_CMAKE_ARGS global. Exits with an error if the option is repeated.
function cmakeArgs {
# Check for multiple cmake args options
if [[ $(echo $ARGS | { grep -Eo "\-\-cmake\-args" || true; } | wc -l ) -gt 1 ]]; then
echo "Multiple --cmake-args options were provided, please provide only one: ${ARGS}"
exit 1
fi
# Check for cmake args option
if [[ -n $(echo $ARGS | { grep -E "\-\-cmake\-args" || true; } ) ]]; then
# There are possible weird edge cases that may cause this regex filter to output nothing and fail silently
# the true pipe will catch any weird edge cases that may happen and will cause the program to fall back
# on the invalid option error
EXTRA_CMAKE_ARGS=$(echo $ARGS | { grep -Eo "\-\-cmake\-args=\".+\"" || true; })
if [[ -n ${EXTRA_CMAKE_ARGS} ]]; then
# Remove the full EXTRA_CMAKE_ARGS argument from list of args so that it passes validArgs function
ARGS=${ARGS//$EXTRA_CMAKE_ARGS/}
# Filter the full argument down to just the extra string that will be added to cmake call
# (the sed pair trims the leading and trailing double quote)
EXTRA_CMAKE_ARGS=$(echo $EXTRA_CMAKE_ARGS | grep -Eo "\".+\"" | sed -e 's/^"//' -e 's/"$//')
fi
fi
}
# Runs cmake if it has not been run already for build directory
# LIBKVIKIO_BUILD_DIR
# Configure libkvikio with CMake at most once per script invocation.
# Always creates the build dir and moves into cpp/ so callers can rely on
# the working directory; the configure step itself is guarded by RAN_CMAKE.
function ensureCMakeRan {
  mkdir -p "${LIBKVIKIO_BUILD_DIR}"
  cd ${REPODIR}/cpp
  if (( RAN_CMAKE != 0 )); then
    return 0
  fi
  echo "Executing cmake for libkvikio..."
  cmake -B "${LIBKVIKIO_BUILD_DIR}" -S . \
    -DCMAKE_INSTALL_PREFIX="${INSTALL_PREFIX}" \
    -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
    ${EXTRA_CMAKE_ARGS}
  RAN_CMAKE=1
}
# Print usage and stop early when help was requested.
if hasArg -h || hasArg --help; then
  echo "${HELP}"
  exit 0
fi
# Validate every remaining argument against VALIDARGS. cmakeArgs runs first
# because it strips the free-form --cmake-args="..." option out of ARGS.
if (( NUMARGS != 0 )); then
  cmakeArgs
  for arg in ${ARGS}; do
    echo " ${VALIDARGS} " | grep -q " ${arg} " || {
      echo "Invalid option or formatting, check --help: ${arg}"
      exit 1
    }
  done
fi
# Translate recognized flags into the variables the build steps consume.
if hasArg -v; then
  VERBOSE_FLAG="-v"
  set -x
fi
if hasArg -g; then
  BUILD_TYPE="Debug"
fi
if hasArg -n; then
  INSTALL_TARGET=""
fi
# If clean given, run it prior to any other steps.
if hasArg clean; then
  # When a build dir is a mounted dir in a container, only its contents can
  # be removed: `find` deletes everything inside it, then `rmdir` attempts to
  # drop the directory itself and is allowed to fail (e.g. on a mount point).
  for build_dir in ${BUILD_DIRS}; do
    [ -d "${build_dir}" ] || continue
    find "${build_dir}" -mindepth 1 -delete
    rmdir "${build_dir}" || true
  done
  rm -f ${REPODIR}/legate/legate_kvikio/install_info.py
fi
################################################################################
# Configure, build, and install libkvikio
if (( NUMARGS == 0 )) || hasArg libkvikio; then
  ensureCMakeRan
  echo "building libkvikio..."
  cmake --build "${LIBKVIKIO_BUILD_DIR}" -j${PARALLEL_LEVEL} ${VERBOSE_FLAG}
  if [[ ${INSTALL_TARGET} != "" ]]; then
    echo "installing libkvikio..."
    # Honor the -v flag only: a hard-coded -v previously forced verbose
    # output on every install, inconsistent with the build step above.
    cmake --build "${LIBKVIKIO_BUILD_DIR}" --target install ${VERBOSE_FLAG}
  fi
fi
# Build and install the kvikio Python package
if (( NUMARGS == 0 )) || hasArg kvikio; then
  cd "${REPODIR}/python"
  # setup.py reads INSTALL_PREFIX to locate the libkvikio headers.
  export INSTALL_PREFIX
  echo "building kvikio..."
  python setup.py build_ext --inplace
  python setup.py install --single-version-externally-managed --record=record.txt
fi
# Build and install the legate-kvikio Python package (opt-in only; not part
# of the default no-argument build).
if hasArg legate; then
  cd "${REPODIR}/legate"
  export INSTALL_PREFIX
  echo "building legate..."
  python setup.py install --single-version-externally-managed --record=record.txt
fi
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/kvikio/.clang-tidy
|
---
Checks: 'clang-diagnostic-*,
clang-analyzer-*,
cppcoreguidelines-*,
modernize-*,
bugprone-*,
performance-*,
readability-*,
llvm-*,
-cppcoreguidelines-macro-usage,
-llvm-header-guard,
-modernize-use-trailing-return-type,
-readability-named-parameter'
WarningsAsErrors: ''
HeaderFilterRegex: ''
AnalyzeTemporaryDtors: false
FormatStyle: none
CheckOptions:
- key: cert-dcl16-c.NewSuffixes
value: 'L;LL;LU;LLU'
- key: cert-oop54-cpp.WarnOnlyIfThisHasSuspiciousField
value: '0'
- key: cert-str34-c.DiagnoseSignedUnsignedCharComparisons
value: '0'
- key: cppcoreguidelines-explicit-virtual-functions.IgnoreDestructors
value: '1'
- key: cppcoreguidelines-non-private-member-variables-in-classes.IgnoreClassesWithAllMemberVariablesBeingPublic
value: '1'
- key: google-readability-braces-around-statements.ShortStatementLines
value: '1'
- key: google-readability-function-size.StatementThreshold
value: '800'
- key: google-readability-namespace-comments.ShortNamespaceLines
value: '10'
- key: google-readability-namespace-comments.SpacesBeforeComments
value: '2'
- key: llvm-else-after-return.WarnOnConditionVariables
value: '0'
- key: llvm-else-after-return.WarnOnUnfixable
value: '0'
- key: llvm-qualified-auto.AddConstToQualified
value: '0'
- key: modernize-loop-convert.MaxCopySize
value: '16'
- key: modernize-loop-convert.MinConfidence
value: reasonable
- key: modernize-loop-convert.NamingStyle
value: CamelCase
- key: modernize-pass-by-value.IncludeStyle
value: llvm
- key: modernize-replace-auto-ptr.IncludeStyle
value: llvm
- key: modernize-use-nullptr.NullMacros
value: 'NULL'
- key: readability-identifier-length.IgnoredParameterNames
value: 'mr|os'
- key: readability-identifier-length.IgnoredVariableNames
value: 'mr|_'
#- key: readability-function-cognitive-complexity.IgnoreMacros
# value: '1'
- key: bugprone-easily-swappable-parameters.IgnoredParameterNames
value: 'alignment'
- key: cppcoreguidelines-avoid-magic-numbers.IgnorePowersOf2IntegerValues
value: '1'
- key: readability-magic-numbers.IgnorePowersOf2IntegerValues
value: '1'
...
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/kvikio/.codespellrc
|
# Copyright (c) 2017-2022, NVIDIA CORPORATION.
[codespell]
# note: pre-commit passes explicit lists of files here, which this skip file list doesn't override -
# this is only to allow you to run codespell interactively
skip = ./.git,./.github,./cpp/build,.*egg-info.*,./python/tests
# ignore short words, and typename parameters like OffsetT
ignore-regex = \b(.{1,4}|[A-Z]\w*T)\b
ignore-words-list = inout,unparseable
builtin = clear
quiet-level = 3
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/kvikio/dependencies.yaml
|
# Dependency list for https://github.com/rapidsai/dependency-file-generator
files:
all:
output: conda
matrix:
cuda: ["11.8", "12.0"]
arch: [x86_64]
includes:
- build
- checks
- cudatoolkit
- docs
- notebooks
- py_version
- run
- test_python
- test_python_legate
test_cpp:
output: none
includes:
- cudatoolkit
test_python:
output: none
includes:
- cudatoolkit
- py_version
- test_python
checks:
output: none
includes:
- checks
- py_version
docs:
output: none
includes:
- cudatoolkit
- docs
- py_version
py_build:
output: pyproject
pyproject_dir: python
extras:
table: build-system
includes:
- build
py_run:
output: pyproject
pyproject_dir: python
extras:
table: project
includes:
- run
py_optional_test:
output: pyproject
pyproject_dir: python
extras:
table: project.optional-dependencies
key: test
includes:
- test_python
legate_py_build:
output: pyproject
pyproject_dir: legate
extras:
table: build-system
includes:
- build
legate_py_run:
output: pyproject
pyproject_dir: legate
extras:
table: project
includes:
- run
legate_py_optional_test:
output: pyproject
pyproject_dir: legate
extras:
table: project.optional-dependencies
key: test
includes:
- test_python
- test_python_legate
channels:
- rapidsai
- rapidsai-nightly
- conda-forge
- nvidia
dependencies:
build:
common:
- output_types: [conda, requirements, pyproject]
packages:
- cmake>=3.26.4
- cython>=3.0.0
- ninja
- scikit-build>=0.13.1
- output_types: conda
packages:
- c-compiler
- cxx-compiler
- output_types: [requirements, pyproject]
packages:
- setuptools
- wheel
specific:
- output_types: conda
matrices:
- matrix:
arch: x86_64
packages:
- gcc_linux-64=11.*
- sysroot_linux-64=2.17
- matrix:
arch: aarch64
packages:
- gcc_linux-aarch64=11.*
- sysroot_linux-aarch64=2.17
- output_types: conda
matrices:
- matrix:
arch: x86_64
cuda: "11.8"
packages:
- nvcc_linux-64=11.8
- matrix:
arch: aarch64
cuda: "11.8"
packages:
- nvcc_linux-aarch64=11.8
- matrix:
cuda: "12.0"
packages:
- cuda-version=12.0
- cuda-nvcc
checks:
common:
- output_types: [conda, requirements]
packages:
- pre-commit
cudatoolkit:
common:
- output_types: conda
packages:
- nvcomp==3.0.4
specific:
- output_types: conda
matrices:
- matrix:
cuda: "12.0"
packages:
- cuda-version=12.0
- matrix:
cuda: "11.8"
packages:
- cuda-version=11.8
- cudatoolkit
- matrix:
cuda: "11.5"
packages:
- cuda-version=11.5
- cudatoolkit
- matrix:
cuda: "11.4"
packages:
- cuda-version=11.4
- cudatoolkit
- matrix:
cuda: "11.2"
packages:
- cuda-version=11.2
- cudatoolkit
- output_types: conda
matrices:
- matrix:
cuda: "12.0"
arch: x86_64
packages:
- libcufile
- libcufile-dev
- matrix:
cuda: "11.8"
arch: x86_64
packages:
# libcufile package version reference: https://anaconda.org/nvidia/libcufile/files
- libcufile=1.4.0.31
- libcufile-dev=1.4.0.31
- matrix:
cuda: "11.5"
arch: x86_64
packages:
- libcufile>=1.1.0.37,<=1.1.1.25
- libcufile-dev>=1.1.0.37,<=1.1.1.25
- matrix:
cuda: "11.4"
arch: x86_64
packages:
- &libcufile_114 libcufile>=1.0.0.82,<=1.0.2.10
- &libcufile_dev114 libcufile-dev>=1.0.0.82,<=1.0.2.10
- matrix:
cuda: "11.2"
arch: x86_64
packages:
# The NVIDIA channel doesn't publish pkgs older than 11.4 for these libs,
# so 11.2 uses 11.4 packages (the oldest available).
- *libcufile_114
- *libcufile_dev114
# Fallback matrix for aarch64, which doesn't support libcufile.
- matrix:
packages:
docs:
common:
- output_types: [conda, requirements]
packages:
- numpydoc
- sphinx
- sphinx-click
- sphinx_rtd_theme
- output_types: conda
packages:
- doxygen=1.9.1 # pre-commit hook needs a specific version.
py_version:
specific:
- output_types: conda
matrices:
- matrix:
py: "3.9"
packages:
- python=3.9
- matrix:
py: "3.10"
packages:
- python=3.10
- matrix:
packages:
- python>=3.9,<3.11
run:
common:
- output_types: [conda, requirements, pyproject]
packages:
- numpy>=1.21
- zarr
# See https://github.com/zarr-developers/numcodecs/pull/475
- numcodecs <0.12.0
- packaging
- output_types: conda
packages:
- cupy>=12.0.0
- output_types: [requirements, pyproject]
packages:
- cupy-cuda11x>=12.0.0
test_python:
common:
- output_types: [conda, requirements, pyproject]
packages:
- &dask dask>=2022.05.2
- pytest
- pytest-cov
specific:
- output_types: [conda, requirements, pyproject]
matrices:
- matrix:
cuda: "12.0"
packages:
- cuda-python>=12.0,<13.0a0
- matrix: # All CUDA 11 versions
packages:
- cuda-python>=11.7.1,<12.0a0
test_python_legate:
common:
- output_types: [conda, requirements, pyproject]
packages:
- *dask
- distributed>=2022.05.2
notebooks:
common:
- output_types: conda
packages:
- cudf==23.12.*
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/kvikio/CONTRIBUTING.md
|
# Contributing to KvikIO
Contributions to KvikIO fall into the following three categories.
1. To report a bug, request a new feature, or report a problem with
documentation, please file an [issue](https://github.com/rapidsai/kvikio/issues/new/choose)
describing in detail the problem or new feature. The RAPIDS team evaluates
and triages issues, and schedules them for a release. If you believe the
issue needs priority attention, please comment on the issue to notify the
team.
2. To propose and implement a new Feature, please file a new feature request
[issue](https://github.com/rapidsai/kvikio/issues/new/choose). Describe the
intended feature and discuss the design and implementation with the team and
community. Once the team agrees that the plan looks good, go ahead and
implement it, using the [code contributions](#code-contributions) guide below.
3. To implement a feature or bug-fix for an existing outstanding issue, please
follow the [code contributions](#code-contributions) guide below. If you
need more context on a particular issue, please ask in a comment.
As contributors and maintainers to this project,
you are expected to abide by KvikIO's code of conduct.
More information can be found at: [Contributor Code of Conduct](https://docs.rapids.ai/resources/conduct/).
## Code contributions
### Requirements
To install, users should have a working Linux machine with the CUDA Toolkit
installed (v11.4+) and a working compiler toolchain (C++17 and CMake).
#### C++
The C++ bindings are header-only and depend on the CUDA Driver and Runtime API.
In order to build and run the example code, CMake is required.
#### Python
The Python packages depend on the following packages:
* Cython
* Pip
* Setuptools
For testing:
* pytest
* cupy
### Build KvikIO from source
#### C++
To build the C++ example, go to the `cpp` subdirectory and run:
```
mkdir build
cd build
cmake ..
make
```
Then run the example:
```
./examples/basic_io
```
#### Python
To build and install the extension, go to the `python` subdirectory and run:
```
python -m pip install .
```
One might have to define `CUDA_HOME` to the path to the CUDA installation.
In order to test the installation, run the following:
```
pytest tests/
```
And to test performance, run the following:
```
python benchmarks/single-node-io.py
```
### Code Formatting
#### Using pre-commit hooks
KvikIO uses [pre-commit](https://pre-commit.com/) to execute all code linters and formatters. These
tools ensure a consistent code format throughout the project. Using pre-commit ensures that linter
versions and options are aligned for all developers. Additionally, there is a CI check in place to
enforce that committed code follows our standards.
To use `pre-commit`, install via `conda` or `pip`:
```bash
conda install -c conda-forge pre-commit
```
```bash
pip install pre-commit
```
Then run pre-commit hooks before committing code:
```bash
pre-commit run
```
By default, pre-commit runs on staged files (only changes and additions that will be committed).
To run pre-commit checks on all files, execute:
```bash
pre-commit run --all-files
```
Optionally, you may set up the pre-commit hooks to run automatically when you make a git commit. This can be done by running:
```bash
pre-commit install
```
Now code linters and formatters will be run each time you commit changes.
You can skip these checks with `git commit --no-verify` or with the short version `git commit -n`.
#### Summary of pre-commit hooks
The following section describes some of the core pre-commit hooks used by the repository.
See `.pre-commit-config.yaml` for a full list.
C++/CUDA is formatted with [`clang-format`](https://clang.llvm.org/docs/ClangFormat.html).
Python code runs several linters including [Black](https://black.readthedocs.io/en/stable/),
[isort](https://pycqa.github.io/isort/), and [flake8](https://flake8.pycqa.org/en/latest/).
[Codespell](https://github.com/codespell-project/codespell) is used to find spelling
mistakes, and this check is run as a pre-commit hook. To apply the suggested spelling fixes,
you can run `codespell -i 3 -w .` from the repository root directory.
This will bring up an interactive prompt to select which spelling fixes to apply.
## Attribution
* Portions adopted from https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md
* Portions adopted from https://github.com/dask/dask/blob/master/docs/source/develop.rst
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/kvikio/LICENSE
|
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2018 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/kvikio/.clang-format
|
---
# Refer to the following link for the explanation of each params:
# http://releases.llvm.org/8.0.0/tools/clang/docs/ClangFormatStyleOptions.html
Language: Cpp
# BasedOnStyle: Google
AccessModifierOffset: -1
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: true
#AlignConsecutiveBitFields: true
AlignConsecutiveDeclarations: false
AlignConsecutiveMacros: true
AlignEscapedNewlines: Left
AlignOperands: true
AlignTrailingComments: true
AllowAllArgumentsOnNextLine: true
AllowAllConstructorInitializersOnNextLine: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: true
#AllowShortEnumsOnASingleLine: true
AllowShortFunctionsOnASingleLine: All
AllowShortIfStatementsOnASingleLine: true
AllowShortLambdasOnASingleLine: true
AllowShortLoopsOnASingleLine: false
# This is deprecated
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: Yes
BinPackArguments: false
BinPackParameters: false
BraceWrapping:
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: false
AfterNamespace: false
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
AfterExternBlock: false
BeforeCatch: false
BeforeElse: false
IndentBraces: false
# disabling the below splits, else, they'll just add to the vertical length of source files!
SplitEmptyFunction: false
SplitEmptyRecord: false
SplitEmptyNamespace: false
BreakAfterJavaFieldAnnotations: false
BreakBeforeBinaryOperators: None
BreakBeforeBraces: WebKit
BreakBeforeInheritanceComma: false
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakConstructorInitializers: BeforeColon
BreakInheritanceList: BeforeColon
BreakStringLiterals: true
ColumnLimit: 100
CommentPragmas: '^ IWYU pragma:'
CompactNamespaces: false
ConstructorInitializerAllOnOneLineOrOnePerLine: true
# Kept the below 2 to be the same as `IndentWidth` to keep everything uniform
ConstructorInitializerIndentWidth: 2
ContinuationIndentWidth: 2
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros:
- foreach
- Q_FOREACH
- BOOST_FOREACH
IncludeBlocks: Preserve
IncludeCategories:
- Regex: '^<ext/.*\.h>'
Priority: 2
- Regex: '^<.*\.h>'
Priority: 1
- Regex: '^<.*'
Priority: 2
- Regex: '.*'
Priority: 3
IncludeIsMainRegex: '([-_](test|unittest))?$'
IndentCaseLabels: true
IndentPPDirectives: None
IndentWidth: 2
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: false
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBinPackProtocolList: Never
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakAssignment: 2
PenaltyBreakBeforeFirstCallParameter: 1
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyBreakTemplateDeclaration: 10
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Left
RawStringFormats:
- Language: Cpp
Delimiters:
- cc
- CC
- cpp
- Cpp
- CPP
- 'c++'
- 'C++'
CanonicalDelimiter: ''
- Language: TextProto
Delimiters:
- pb
- PB
- proto
- PROTO
EnclosingFunctions:
- EqualsProto
- EquivToProto
- PARSE_PARTIAL_TEXT_PROTO
- PARSE_TEST_PROTO
- PARSE_TEXT_PROTO
- ParseTextOrDie
- ParseTextProtoOrDie
CanonicalDelimiter: ''
BasedOnStyle: google
# NOTE: comment reflow is enabled below; it may rewrap doxygen comments — set to false if they get mangled
ReflowComments: true
SortIncludes: true
SortUsingDeclarations: true
SpaceAfterCStyleCast: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeCpp11BracedList: false
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatements
SpaceBeforeRangeBasedForLoopColon: true
SpaceBeforeSquareBrackets: false
SpaceInEmptyBlock: false
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 2
SpacesInAngles: false
SpacesInConditionalStatement: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: c++17
StatementMacros:
- Q_UNUSED
- QT_REQUIRE_VERSION
# Be consistent with indent-width, even for people who use tab for indentation!
TabWidth: 2
UseTab: Never
| 0 |
rapidsai_public_repos/kvikio
|
rapidsai_public_repos/kvikio/python/pyproject.toml
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
[build-system]
build-backend = "setuptools.build_meta"
requires = [
"cmake>=3.26.4",
"cython>=3.0.0",
"ninja",
"scikit-build>=0.13.1",
"setuptools",
"wheel",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../dependencies.yaml and run `rapids-dependency-file-generator`.
[project]
name = "kvikio"
version = "23.12.00"
description = "KvikIO - GPUDirect Storage"
readme = { file = "README.md", content-type = "text/markdown" }
authors = [
{ name = "NVIDIA Corporation" },
]
license = { text = "Apache 2.0" }
requires-python = ">=3.9"
dependencies = [
"cupy-cuda11x>=12.0.0",
"numcodecs <0.12.0",
"numpy>=1.21",
"packaging",
"zarr",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../dependencies.yaml and run `rapids-dependency-file-generator`.
classifiers = [
"Intended Audience :: Developers",
"Topic :: Database",
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
]
[project.optional-dependencies]
test = [
"cuda-python>=11.7.1,<12.0a0",
"dask>=2022.05.2",
"pytest",
"pytest-cov",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../dependencies.yaml and run `rapids-dependency-file-generator`.
[project.urls]
Homepage = "https://github.com/rapidsai/kvikio"
[tool.black]
line-length = 88
target-version = ["py39"]
include = '\.py?$'
exclude = '''
/(
thirdparty |
\.eggs |
\.git |
\.hg |
\.mypy_cache |
\.tox |
\.venv |
_build |
buck-out |
build |
dist |
_skbuild
)/
'''
[tool.isort]
line_length = 88
multi_line_output = 3
include_trailing_comma = true
force_grid_wrap = 0
combine_as_imports = true
order_by_type = true
known_first_party = [
"kvikio",
]
default_section = "THIRDPARTY"
sections = [
"FUTURE",
"STDLIB",
"THIRDPARTY",
"FIRSTPARTY",
"LOCALFOLDER",
]
skip = [
"thirdparty",
".eggs",
".git",
".hg",
".mypy_cache",
".tox",
".venv",
"_build",
"buck-out",
"build",
"dist",
"__init__.py",
]
[tool.mypy]
ignore_missing_imports = true
[project.entry-points."numcodecs.codecs"]
nvcomp_batch = "kvikio.nvcomp_codec:NvCompBatchCodec"
| 0 |
rapidsai_public_repos/kvikio
|
rapidsai_public_repos/kvikio/python/CMakeLists.txt
|
# =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Minimum CMake pinned to the version required by rapids-cmake.
cmake_minimum_required(VERSION 3.26.4 FATAL_ERROR)

set(kvikio_version 23.12.00)

# Bring in rapids-cmake helpers: CPM package fetching and CUDA utilities.
include(../cpp/cmake/fetch_rapids.cmake)
include(rapids-cpm)
rapids_cpm_init()
include(rapids-cuda)
# Must run before project() so the CUDA architectures are set when the
# CUDA language is enabled.
rapids_cuda_init_architectures(kvikio-python)

project(
  kvikio-python
  VERSION ${kvikio_version}
  LANGUAGES # TODO: Building Python extension modules via the python_extension_module requires the C
            # language to be enabled here. The test project that is built in scikit-build to verify
            # various linking options for the python library is hardcoded to build with C, so until
            # that is fixed we need to keep C.
            C CXX CUDA
)

option(FIND_KVIKIO_CPP
       "Search for existing KVIKIO C++ installations before defaulting to local files" OFF
)

# TODO: Should we symlink FindcuFile.cmake into python/cmake? find cuFile
include(../cpp/cmake/Modules/FindcuFile.cmake)

# Either reuse an installed KvikIO C++ package or force a local build below.
if(FIND_KVIKIO_CPP)
  find_package(KvikIO ${kvikio_version})
else()
  set(KvikIO_FOUND OFF)
endif()

find_package(CUDAToolkit REQUIRED)

# No pre-installed KvikIO C++ found/requested: build it from the adjacent
# source tree and install the library next to the Cython package.
if(NOT KvikIO_FOUND)
  add_subdirectory(../cpp kvikio-cpp)
  set(cython_lib_dir kvikio)
  install(TARGETS kvikio DESTINATION ${cython_lib_dir})
endif()

include(rapids-cython)
rapids_cython_init()

add_subdirectory(cmake)
add_subdirectory(kvikio/_lib)
| 0 |
rapidsai_public_repos/kvikio
|
rapidsai_public_repos/kvikio/python/setup.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
from setuptools import find_packages
from skbuild import setup
# Packages shipped in the wheel; tests are excluded from distribution.
_packages = find_packages(exclude=["tests*"])

# Map every package under kvikio._lib to its own, independent data-file
# list. Each package must get a distinct list object because skbuild
# modifies the list in place; sharing a single list across packages
# (e.g. via dict.fromkeys) would be incorrect.
_package_data = {}
for _pkg in find_packages(include=["kvikio._lib"]):
    _package_data[_pkg] = ["*.pyi", "*.pxd"]

setup(
    packages=_packages,
    package_data=_package_data,
    zip_safe=False,
)
| 0 |
rapidsai_public_repos/kvikio
|
rapidsai_public_repos/kvikio/python/.coveragerc
|
# Configuration file for Python coverage tests
[run]
source = kvikio
| 0 |
rapidsai_public_repos/kvikio/python
|
rapidsai_public_repos/kvikio/python/kvikio/cufile.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import pathlib
from typing import Optional, Union
from ._lib import libkvikio # type: ignore
class IOFuture:
    """Handle to an in-flight CuFile IO operation.

    Instances are produced by non-blocking operations such as
    `CuFile.pread` and `CuFile.pwrite`; they are not meant to be created
    directly. Call `.get()` to block until the operation completes and
    obtain its result.
    """

    __slots__ = ("_handle",)

    def __init__(self, handle):
        self._handle = handle

    def get(self) -> int:
        """Block until the underlying IO operation completes.

        Returns
        -------
        int
            Number of bytes that were read or written successfully.
        """
        return self._handle.get()

    def done(self) -> bool:
        """Check whether the underlying IO operation has completed.

        Returns
        -------
        bool
            True if the operation is done, False otherwise.
        """
        return self._handle.done()
class CuFile:
    """Handle to a file registered for GPUDirect Storage (GDS) IO."""

    def __init__(self, file: Union[pathlib.Path, str], flags: str = "r"):
        """Open `file` and register it for GDS IO operations.

        Internally the file is opened twice, so two file descriptors are
        maintained: one opened with the given `flags` and a second one
        opened with `flags` plus the `O_DIRECT` flag.

        Parameters
        ----------
        file: pathlib.Path or str
            Path-like object giving the pathname (absolute or relative to
            the current working directory) of the file to be opened and
            registered.
        flags: str, optional
            "r" -> "open for reading (default)"
            "w" -> "open for writing, truncating the file first"
            "a" -> "open for writing, appending to the end of file if it exists"
            "+" -> "open for updating (reading and writing)"
        """
        self._handle = libkvikio.CuFile(file, flags)

    def close(self) -> None:
        """Deregister the file from GDS and close it."""
        self._handle.close()

    @property
    def closed(self) -> bool:
        """True when the underlying file has been closed."""
        return self._handle.closed()

    def fileno(self) -> int:
        """Return the file descriptor of the open file."""
        return self._handle.fileno()

    def open_flags(self) -> int:
        """Return the flags the file descriptor was opened with (see open(2))."""
        return self._handle.open_flags()

    def __enter__(self) -> "CuFile":
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        self.close()

    def pread(
        self,
        buf,
        size: Optional[int] = None,
        file_offset: int = 0,
        task_size: Optional[int] = None,
    ) -> IOFuture:
        """Read from the file into device or host memory without blocking.

        Reads `size` bytes starting at `file_offset` into `buf`. The work
        is partitioned into tasks of `task_size` bytes that run on the
        default thread pool, and an `IOFuture` is returned that resolves
        once every task has finished. Unaligned offsets and arbitrary
        sizes are handled correctly, although possibly slower than
        aligned accesses (see notes).

        Parameters
        ----------
        buf: buffer-like or array-like
            Device or host buffer to read into.
        size: int, optional
            Number of bytes to read.
        file_offset: int, optional
            Byte offset in the file to start reading from.
        task_size: int, default=kvikio.defaults.task_size()
            Size of each parallel task in bytes.

        Returns
        -------
        IOFuture
            Future that on completion yields the number of bytes
            successfully read.

        Notes
        -----
        GDS is only engaged for accesses aligned to the 4 KiB GPU page
        size; KvikIO splits unaligned reads into aligned and unaligned
        parts. For best performance make `file_offset` and `size`
        multiples of 4096 — e.g. round the offset down to the nearest
        multiple of 4096 and discard the unwanted leading bytes. When
        GDS is not in use this matters less.
        """
        return IOFuture(self._handle.pread(buf, size, file_offset, task_size))

    def pwrite(
        self,
        buf,
        size: Optional[int] = None,
        file_offset: int = 0,
        task_size: Optional[int] = None,
    ) -> IOFuture:
        """Write device or host memory to the file without blocking.

        Writes `size` bytes from `buf` into the file starting at
        `file_offset`. The work is partitioned into tasks of `task_size`
        bytes that run on the default thread pool, and an `IOFuture` is
        returned that resolves once every task has finished. Unaligned
        offsets and arbitrary sizes are handled correctly, although
        possibly slower than aligned accesses (see notes).

        Parameters
        ----------
        buf: buffer-like or array-like
            Device or host buffer to write from.
        size: int, optional
            Number of bytes to write.
        file_offset: int, optional
            Byte offset in the file to start writing at.
        task_size: int, default=kvikio.defaults.task_size()
            Size of each parallel task in bytes.

        Returns
        -------
        IOFuture
            Future that on completion yields the number of bytes
            successfully written.

        Notes
        -----
        GDS is only engaged for accesses aligned to the 4 KiB GPU page
        size; KvikIO splits unaligned writes into aligned and unaligned
        parts. For best performance make `file_offset` and `size`
        multiples of 4096 — e.g. round the offset down to the nearest
        multiple of 4096 and discard the unwanted leading bytes. When
        GDS is not in use this matters less.
        """
        return IOFuture(self._handle.pwrite(buf, size, file_offset, task_size))

    def read(
        self,
        buf,
        size: Optional[int] = None,
        file_offset: int = 0,
        task_size: Optional[int] = None,
    ) -> int:
        """Read from the file into device memory in parallel, blocking.

        Blocking counterpart of `.pread`.

        Parameters
        ----------
        buf: buffer-like or array-like
            Device buffer to read into.
        size: int, optional
            Number of bytes to read.
        file_offset: int, optional
            Byte offset in the file to start reading from.
        task_size: int, default=kvikio.defaults.task_size()
            Size of each parallel task in bytes.

        Returns
        -------
        int
            Number of bytes successfully read.

        Notes
        -----
        GDS is only engaged for accesses aligned to the 4 KiB GPU page
        size (see `.pread` notes); prefer `file_offset` and `size` that
        are multiples of 4096.
        """
        return self.pread(buf, size, file_offset, task_size).get()

    def write(
        self,
        buf,
        size: Optional[int] = None,
        file_offset: int = 0,
        task_size: Optional[int] = None,
    ) -> int:
        """Write device memory to the file in parallel, blocking.

        Blocking counterpart of `.pwrite`.

        Parameters
        ----------
        buf: buffer-like or array-like
            Device buffer to write from.
        size: int, optional
            Number of bytes to write.
        file_offset: int, optional
            Byte offset in the file to start writing at.
        task_size: int, default=kvikio.defaults.task_size()
            Size of each parallel task in bytes.

        Returns
        -------
        int
            Number of bytes successfully written.

        Notes
        -----
        GDS is only engaged for accesses aligned to the 4 KiB GPU page
        size (see `.pwrite` notes); prefer `file_offset` and `size` that
        are multiples of 4096.
        """
        return self.pwrite(buf, size, file_offset, task_size).get()

    def raw_read(
        self, buf, size: Optional[int] = None, file_offset: int = 0, dev_offset: int = 0
    ) -> int:
        """Read from the file directly into device memory.

        Low-level counterpart of `.read`: single-threaded and device
        memory only (no host-memory support).

        Parameters
        ----------
        buf: buffer-like or array-like
            Device buffer to read into.
        size: int, optional
            Number of bytes to read.
        file_offset: int, optional
            Byte offset in the file to start reading from.
        dev_offset: int, optional
            Byte offset within `buf` to read into.

        Returns
        -------
        int
            Number of bytes successfully read.

        Notes
        -----
        GDS is only engaged for accesses aligned to the 4 KiB GPU page
        size (see `.pread` notes); prefer `file_offset` and `size` that
        are multiples of 4096.
        """
        return self._handle.read(buf, size, file_offset, dev_offset)

    def raw_write(
        self, buf, size: Optional[int] = None, file_offset: int = 0, dev_offset: int = 0
    ) -> int:
        """Write device memory directly into the file.

        Low-level counterpart of `.write`: single-threaded and device
        memory only (no host-memory support).

        Parameters
        ----------
        buf: buffer-like or array-like
            Device buffer to write from.
        size: int, optional
            Number of bytes to write.
        file_offset: int, optional
            Byte offset in the file to start writing at.
        dev_offset: int, optional
            Byte offset within `buf` to write from.

        Returns
        -------
        int
            Number of bytes successfully written.

        Notes
        -----
        GDS is only engaged for accesses aligned to the 4 KiB GPU page
        size (see `.pwrite` notes); prefer `file_offset` and `size` that
        are multiples of 4096.
        """
        return self._handle.write(buf, size, file_offset, dev_offset)
| 0 |
rapidsai_public_repos/kvikio/python
|
rapidsai_public_repos/kvikio/python/kvikio/nvcomp_codec.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
from typing import Any, Mapping, Optional, Sequence
import cupy as cp
import cupy.typing
from numcodecs.compat import ensure_contiguous_ndarray_like
from kvikio._lib.libnvcomp_ll import SUPPORTED_ALGORITHMS
from kvikio.numcodecs import BufferLike, CudaCodec
class NvCompBatchCodec(CudaCodec):
    """Codec that uses batch algorithms from nvCOMP library.

    An algorithm is selected using `algorithm` parameter.
    If the algorithm takes additional options, they can be
    passed to the algorithm using `options` dictionary.
    """

    # Header stores original uncompressed size. This is required to enable
    # data compatibility between existing numcodecs codecs and NvCompBatchCodec.
    HEADER_SIZE_BYTES: int = 4

    # numcodecs registry identifier for this codec.
    codec_id: str = "nvcomp_batch"

    algorithm: str
    options: Mapping[str, Any]

    def __init__(
        self,
        algorithm: str,
        options: Optional[Mapping[str, Any]] = None,
        stream: Optional[cp.cuda.Stream] = None,
    ) -> None:
        # Algorithm names are matched case-insensitively against the
        # registry of supported batch algorithms.
        algo_id = algorithm.lower()
        algo_t = SUPPORTED_ALGORITHMS.get(algo_id, None)
        if algo_t is None:
            raise ValueError(
                f"{algorithm} is not supported. "
                f"Must be one of: {list(SUPPORTED_ALGORITHMS.keys())}"
            )

        self.algorithm = algo_id
        # Copy to a plain dict so the stored options are independent of the
        # caller's mapping.
        self.options = dict(options) if options is not None else {}

        # Create an algorithm.
        self._algo = algo_t(**self.options)
        # Use default stream, if needed (per-thread default stream).
        self._stream = stream if stream is not None else cp.cuda.Stream.ptds

    def encode(self, buf: BufferLike) -> cupy.typing.NDArray:
        # Single-buffer encode is just a batch of one.
        return self.encode_batch([buf])[0]

    def encode_batch(self, bufs: Sequence[Any]) -> Sequence[Any]:
        """Encode data in `bufs` using nvCOMP.

        Parameters
        ----------
        bufs :
            Data to be encoded. Each buffer in the list may be any object
            supporting the new-style buffer protocol.

        Returns
        -------
        List of encoded buffers. Each buffer may be any object supporting
        the new-style buffer protocol.
        """
        num_chunks = len(bufs)
        if num_chunks == 0:
            return []

        # Normalize all inputs to device arrays; host buffers are copied to GPU.
        bufs = [cp.asarray(ensure_contiguous_ndarray_like(b)) for b in bufs]
        buf_sizes = [b.size * b.itemsize for b in bufs]

        max_chunk_size = max(buf_sizes)

        # Get temp and output buffer sizes.
        temp_size = self._algo.get_compress_temp_size(num_chunks, max_chunk_size)
        comp_chunk_size = self._algo.get_compress_chunk_size(max_chunk_size)

        # Prepare data and size buffers.
        # uncomp_chunks is used as a container that stores pointers to actual chunks.
        # nvCOMP requires this and sizes buffers to be in GPU memory.
        uncomp_chunks = cp.array([b.data.ptr for b in bufs], dtype=cp.uintp)
        uncomp_chunk_sizes = cp.array(buf_sizes, dtype=cp.uint64)

        temp_buf = cp.empty(temp_size, dtype=cp.uint8)

        # One worst-case-sized output row per chunk.
        comp_chunks = cp.empty((num_chunks, comp_chunk_size), dtype=cp.uint8)
        # Array of pointers to each compressed chunk.
        comp_chunk_ptrs = cp.array([c.data.ptr for c in comp_chunks], dtype=cp.uintp)
        # Resulting compressed chunk sizes.
        comp_chunk_sizes = cp.empty(num_chunks, dtype=cp.uint64)

        self._algo.compress(
            uncomp_chunks,
            uncomp_chunk_sizes,
            max_chunk_size,
            num_chunks,
            temp_buf,
            comp_chunk_ptrs,
            comp_chunk_sizes,
            self._stream,
        )

        res = []
        # Copy to host to subsequently avoid many smaller D2H copies.
        comp_chunks = cp.asnumpy(comp_chunks, self._stream)
        comp_chunk_sizes = cp.asnumpy(comp_chunk_sizes, self._stream)
        # Ensure the async D2H copies above have completed before slicing.
        self._stream.synchronize()

        for i in range(num_chunks):
            # Trim each row to its actual compressed size.
            res.append(comp_chunks[i, : comp_chunk_sizes[i]].tobytes())
        return res

    def decode(self, buf: BufferLike, out: Optional[BufferLike] = None) -> BufferLike:
        # Single-buffer decode is just a batch of one.
        return self.decode_batch([buf], [out])[0]

    def decode_batch(
        self, bufs: Sequence[Any], out: Optional[Sequence[Any]] = None
    ) -> Sequence[Any]:
        """Decode data in `bufs` using nvCOMP.

        Parameters
        ----------
        bufs :
            Encoded data. Each buffer in the list may be any object
            supporting the new-style buffer protocol.
        out :
            List of writeable buffers to store decoded data.
            N.B. if provided, each buffer must be exactly the right size
            to store the decoded data.

        Returns
        -------
        List of decoded buffers. Each buffer may be any object supporting
        the new-style buffer protocol.
        """
        num_chunks = len(bufs)
        if num_chunks == 0:
            return []

        # TODO(akamenev): check only first buffer, assuming they are all
        # of the same kind.
        is_host_buffer = not hasattr(bufs[0], "__cuda_array_interface__")
        if is_host_buffer:
            bufs = [cp.asarray(ensure_contiguous_ndarray_like(b)) for b in bufs]

        # Prepare compressed chunks buffers.
        comp_chunks = cp.array([b.data.ptr for b in bufs], dtype=cp.uintp)
        comp_chunk_sizes = cp.array([b.size for b in bufs], dtype=cp.uint64)

        # Get uncompressed chunk sizes.
        uncomp_chunk_sizes = self._algo.get_decompress_size(
            comp_chunks,
            comp_chunk_sizes,
            self._stream,
        )

        # Check whether the uncompressed chunks are all the same size.
        # cupy.unique returns sorted sizes.
        sorted_chunk_sizes = cp.unique(uncomp_chunk_sizes)
        max_chunk_size = sorted_chunk_sizes[-1].item()
        is_equal_chunks = sorted_chunk_sizes.shape[0] == 1

        # Get temp buffer size.
        temp_size = self._algo.get_decompress_temp_size(num_chunks, max_chunk_size)
        temp_buf = cp.empty(temp_size, dtype=cp.uint8)

        # Prepare uncompressed chunks buffers.
        # First, allocate chunks of max_chunk_size and then
        # copy the pointers to a pointer array in GPU memory as required by nvCOMP.
        # For performance reasons, we use max_chunk_size so we can create
        # a rectangular array with the same pointer increments.
        uncomp_chunks = cp.empty((num_chunks, max_chunk_size), dtype=cp.uint8)
        p_start = uncomp_chunks.data.ptr
        uncomp_chunk_ptrs = cp.uint64(p_start) + (
            cp.arange(0, num_chunks * max_chunk_size, max_chunk_size, dtype=cp.uint64)
        )

        # TODO(akamenev): currently we provide the following 2 buffers to decompress()
        # but do not check/use them afterwards since some of the algos
        # (e.g. LZ4 and Gdeflate) do not require it and run faster
        # without those arguments passed, while other algos (e.g. zstd) require
        # these buffers to be valid.
        actual_uncomp_chunk_sizes = cp.empty(num_chunks, dtype=cp.uint64)
        statuses = cp.empty(num_chunks, dtype=cp.int32)

        self._algo.decompress(
            comp_chunks,
            comp_chunk_sizes,
            num_chunks,
            temp_buf,
            uncomp_chunk_ptrs,
            uncomp_chunk_sizes,
            actual_uncomp_chunk_sizes,
            statuses,
            self._stream,
        )

        # If all chunks are the same size, we can just return uncomp_chunks.
        if is_equal_chunks and out is None:
            return cp.asnumpy(uncomp_chunks) if is_host_buffer else uncomp_chunks

        res = []
        uncomp_chunk_sizes = uncomp_chunk_sizes.get()
        for i in range(num_chunks):
            ret = uncomp_chunks[i, : uncomp_chunk_sizes[i]]
            if out is None or out[i] is None:
                res.append(cp.asnumpy(ret) if is_host_buffer else ret)
            else:
                o = ensure_contiguous_ndarray_like(out[i])
                if hasattr(o, "__cuda_array_interface__"):
                    # NOTE(review): copyto with casting="no" assumes `o` has
                    # exactly the decoded size/dtype — TODO confirm callers
                    # always pass exact-size buffers.
                    cp.copyto(o, ret.view(dtype=o.dtype), casting="no")
                else:
                    cp.asnumpy(ret.view(dtype=o.dtype), out=o, stream=self._stream)
                res.append(o)
        self._stream.synchronize()

        return res

    def __repr__(self):
        return (
            f"{self.__class__.__name__}"
            f"(algorithm={self.algorithm!r}, options={self.options!r})"
        )
| 0 |
rapidsai_public_repos/kvikio/python
|
rapidsai_public_repos/kvikio/python/kvikio/nvcomp.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
from enum import Enum
import cupy as cp
import numpy as np
import kvikio._lib.libnvcomp as _lib
from kvikio._lib.arr import asarray
# Lookup table from cupy/numpy integral dtypes to the corresponding nvCOMP
# type enum. Only the fixed-width integer dtypes listed here are mapped;
# any other dtype raises KeyError in cp_to_nvcomp_dtype.
_dtype_map = {
    cp.dtype("int8"): _lib.pyNvcompType_t.pyNVCOMP_TYPE_CHAR,
    cp.dtype("uint8"): _lib.pyNvcompType_t.pyNVCOMP_TYPE_UCHAR,
    cp.dtype("int16"): _lib.pyNvcompType_t.pyNVCOMP_TYPE_SHORT,
    cp.dtype("uint16"): _lib.pyNvcompType_t.pyNVCOMP_TYPE_USHORT,
    cp.dtype("int32"): _lib.pyNvcompType_t.pyNVCOMP_TYPE_INT,
    cp.dtype("uint32"): _lib.pyNvcompType_t.pyNVCOMP_TYPE_UINT,
    cp.dtype("int64"): _lib.pyNvcompType_t.pyNVCOMP_TYPE_LONGLONG,
    cp.dtype("uint64"): _lib.pyNvcompType_t.pyNVCOMP_TYPE_ULONGLONG,
}
def cp_to_nvcomp_dtype(in_type: cp.dtype) -> Enum:
    """Map a numpy/cupy dtype to the matching nvCOMP integral type.

    Parameters
    ----------
    in_type
        Anything accepted by ``cupy.dtype`` as a dtype specifier.

    Returns
    -------
    int
        The NVCOMP_TYPE enum member for the supported dtype.

    Raises
    ------
    KeyError
        If the normalized dtype has no nvCOMP equivalent.
    """
    return _dtype_map[cp.dtype(in_type)]
class nvCompManager:
"""Base class for nvComp Compression Managers.
Compression managers compress uncompressed data and decompress the result.
Child types of nvCompManager implement only their constructor, as they each
take different options to build. The rest of their implementation is
in nvCompManager.
nvCompManager also keeps all of the options for its child types.
"""
_manager: _lib._nvcompManager = None
config: dict = {}
decompression_config: dict = {}
# This is a python option: What type was the data when it was passed in?
# This is used only for returning a decompressed view of the original
# datatype. Untested so far.
input_type = cp.int8
# Default options exist for every option type for every class that inherits
# from nvCompManager, which takes advantage of the below property-setting
# code.
stream: cp.cuda.Stream = cp.cuda.Stream()
chunk_size: int = 1 << 16
data_type: _lib.pyNvcompType_t = _lib.pyNvcompType_t.pyNVCOMP_TYPE_UCHAR
# Some classes have this defined as type, some as data_type.
type: _lib.pyNvcompType_t = _lib.pyNvcompType_t.pyNVCOMP_TYPE_UCHAR
device_id: int = 0
# Bitcomp Defaults
bitcomp_algo: int = 0
# Gdeflate defaults
algo: int = 0
def __init__(self, kwargs):
"""Stores the results of all input arguments as class members.
This code does type correction, fixing inputs to have an expected
shape before calling one of the nvCompManager methods on a child
class.
Special case: Convert data_type to a _lib.pyNvcompType_t
"""
# Special case: Throw error if stream or device_id are specified
if kwargs.get("stream") is not None:
raise NotImplementedError(
"stream argument not yet supported: " "Use the default argument"
)
# data_type will be passed in as a python object. Convert it to
# a C++ nvcompType_t here.
if kwargs.get("data_type"):
if not isinstance(kwargs["data_type"], _lib.pyNvcompType_t):
kwargs["input_type"] = kwargs.get("data_type")
kwargs["data_type"] = cp_to_nvcomp_dtype(
cp.dtype(kwargs["data_type"]).type
)
# Special case: Convert type to a _lib.pyNvcompType_t
if kwargs.get("type"):
if not isinstance(kwargs["type"], _lib.pyNvcompType_t):
kwargs["input_type"] = kwargs.get("type")
kwargs["type"] = cp_to_nvcomp_dtype(cp.dtype(kwargs["type"]).type)
for k, v in kwargs.items():
setattr(self, k, v)
def compress(self, data: cp.ndarray) -> cp.ndarray:
"""Compress a buffer.
Parameters
----------
data: cp.ndarray
A GPU buffer of data to compress.
Returns
-------
cp.ndarray
A GPU buffer of compressed bytes.
"""
# TODO: An option: check if incoming data size matches the size of the
# last incoming data, and reuse temp and out buffer if so.
data_size = data.size * data.itemsize
self.config = self._manager.configure_compression(data_size)
self.compress_out_buffer = cp.empty(
self.config["max_compressed_buffer_size"], dtype="uint8"
)
size = self._manager.compress(asarray(data), asarray(self.compress_out_buffer))
return self.compress_out_buffer[0:size]
def decompress(self, data: cp.ndarray) -> cp.ndarray:
"""Decompress a GPU buffer.
Parameters
----------
data: cp.ndarray
A GPU buffer of data to decompress.
Returns
-------
cp.ndarray
An array of `self.dtype` produced after decompressing the input argument.
"""
self.decompression_config = (
self._manager.configure_decompression_with_compressed_buffer(asarray(data))
)
decomp_buffer = cp.empty(
self.decompression_config["decomp_data_size"], dtype="uint8"
)
self._manager.decompress(asarray(decomp_buffer), asarray(data))
return decomp_buffer.view(self.input_type)
def configure_compression(self, data_size: int) -> dict:
"""Return the compression configuration object.
Parameters
----------
data_size: int
The size of the buffer that is staged to be compressed.
Returns
-------
dict {
"uncompressed_buffer_size": The size of the input data
"max_compressed_buffer_size": The maximum size of the compressed data. The
size of the buffer that must be allocated before calling compress.
"num_chunks": The number of configured chunks to compress the data over
}
"""
return self._manager.configure_compression(data_size)
def configure_decompression_with_compressed_buffer(
self, data: cp.ndarray
) -> cp.ndarray:
"""Return the decompression configuration object.
Parameters
----------
data: cp.ndarray
A GPU buffer of previously compressed data.
Returns
-------
dict {
"decomp_data_size": The size of each decompression chunk.
"num_chunks": The number of chunks that the decompressed data is returned
in.
}
"""
return self._manager.configure_decompression_with_compressed_buffer(
asarray(data)
)
def get_compressed_output_size(self, comp_buffer: cp.ndarray) -> int:
    """Return the actual size of compression result.

    Returns the number of bytes that should be copied out of
    `comp_buffer`.

    Parameters
    ----------
    comp_buffer: cp.ndarray
        A GPU buffer that has been previously compressed.

    Returns
    -------
    int
    """
    # Normalize to the array wrapper type expected by the C++ manager,
    # then delegate.
    wrapped = asarray(comp_buffer)
    return self._manager.get_compressed_output_size(wrapped)
class ANSManager(nvCompManager):
    # nvCOMP ANS (entropy-coding) compressor manager.
    def __init__(self, **kwargs):
        """Initialize an ANSManager object.

        Used to compress and decompress GPU buffers.
        All parameters are optional and will be set to usable defaults.

        Parameters
        ----------
        chunk_size: int (optional)
            Defaults to 4096.
        device_id: int (optional)
            Specify which device_id on the node to use for allocation and compression.
            Defaults to 0.
        """
        # NOTE(review): the parent takes the kwargs mapping itself (not **kwargs)
        # and appears to expose entries as attributes (e.g. self.chunk_size) --
        # parent class is outside this chunk, confirm.
        super().__init__(kwargs)

        self._manager = _lib._ANSManager(self.chunk_size, self.stream, self.device_id)
class BitcompManager(nvCompManager):
    # nvCOMP Bitcomp compressor manager.
    def __init__(self, **kwargs):
        """Create a GPU BitcompCompressor object.

        Used to compress and decompress GPU buffers.
        All parameters are optional and will be set to usable defaults.

        Parameters
        ----------
        chunk_size: int (optional)
            Defaults to 4096.
        device_id: int (optional)
            Specify which device_id on the node to use
            Defaults to 0.
        """
        super().__init__(kwargs)

        # `data_type` and `bitcomp_algo` are presumably defaulted by the parent
        # when not supplied -- parent class is outside this chunk, confirm.
        self._manager = _lib._BitcompManager(
            self.chunk_size,
            self.data_type.value,
            self.bitcomp_algo,
            self.stream,
            self.device_id,
        )
class CascadedManager(nvCompManager):
    # nvCOMP Cascaded (RLE/Delta/Bitpacking pipeline) compressor manager.
    def __init__(self, **kwargs):
        """Initialize a CascadedManager for a specific dtype.

        Used to compress and decompress GPU buffers.
        All parameters are optional and will be set to usable defaults.

        Parameters
        ----------
        chunk_size: int (optional)
            Defaults to 4096 and can't currently be changed.
        dtype: cp.dtype (optional)
            The dtype of the input buffer to be compressed.
        num_RLEs: int (optional)
            Number of Run-Length Encoders to use, see [algorithms overview.md](
            https://github.com/NVIDIA/nvcomp/blob/main/doc/algorithms_overview.md#run-length-encoding-rle) # noqa: E501
        num_deltas: int (optional)
            Number of Delta Encoders to use, see [algorithms overview.md](
            https://github.com/NVIDIA/nvcomp/blob/main/doc/algorithms_overview.md#delta-encoding) # noqa: E501
        use_bp: bool (optional)
            Enable Bitpacking, see [algorithms overview.md](
            https://github.com/NVIDIA/nvcomp/blob/main/doc/algorithms_overview.md#bitpacking) # noqa: E501
        device_id: int (optional)
            Specify which device_id on the node to use
            Defaults to 0.
        """
        super().__init__(kwargs)
        default_options = {
            "chunk_size": 1 << 12,
            "type": np.int32,
            "num_RLEs": 2,
            "num_deltas": 1,
            "use_bp": True,
        }
        # Fill in any option the user did not supply; all of them are required
        # by the underlying manager.
        for k, v in default_options.items():
            try:
                getattr(self, k)
            except Exception:
                setattr(self, k, v)
        # Resolved per-instance options: user overrides merged with defaults.
        self.options = {
            "chunk_size": self.chunk_size,
            "type": self.type,
            "num_RLEs": self.num_RLEs,
            "num_deltas": self.num_deltas,
            "use_bp": self.use_bp,
        }
        # Bug fix: pass `self.options` (the merged configuration) instead of
        # `default_options`, which silently ignored every user-supplied setting.
        self._manager = _lib._CascadedManager(
            self.options, self.stream, self.device_id
        )
class GdeflateManager(nvCompManager):
    # nvCOMP Gdeflate compressor manager.
    def __init__(self, **kwargs):
        """Create a GPU GdeflateCompressor object.

        Used to compress and decompress GPU buffers.
        All parameters are optional and will be set to usable defaults.

        Parameters
        ----------
        chunk_size: int (optional)
        algo: int (optional)
            Integer in the range [0, 1, 2]. Only algorithm #0 is currently
            supported.
        stream: cudaStream_t (optional)
            Which CUDA stream to perform the operation on. Not currently
            supported.
        device_id: int (optional)
            Specify which device_id on the node to use
            Defaults to 0.
        """
        super().__init__(kwargs)

        self._manager = _lib._GdeflateManager(
            self.chunk_size, self.algo, self.stream, self.device_id
        )
class LZ4Manager(nvCompManager):
    # nvCOMP LZ4 compressor manager.
    def __init__(self, **kwargs):
        """Create a GPU LZ4Compressor object.

        Used to compress and decompress GPU buffers of a specific dtype.
        All parameters are optional and will be set to usable defaults.

        Parameters
        ----------
        chunk_size: int (optional)
            The size of each chunk of data to decompress independently with
            LZ4. Must be within the range of [32768, 16777216]. Larger sizes will
            result in higher compression, but with decreased parallelism. The
            recommended size is 65536.
            Defaults to the recommended size.
        data_type: pyNVCOMP_TYPE (optional)
            The data type returned for decompression.
            Defaults to pyNVCOMP_TYPE.UCHAR
        stream: cudaStream_t (optional)
            Which CUDA stream to perform the operation on. Not currently
            supported.
        device_id: int (optional)
            Specify which device_id on the node to use
            Defaults to 0.
        """
        super().__init__(kwargs)

        self._manager = _lib._LZ4Manager(
            self.chunk_size, self.data_type.value, self.stream, self.device_id
        )
class SnappyManager(nvCompManager):
    # nvCOMP Snappy compressor manager.
    def __init__(self, **kwargs):
        """Create a GPU SnappyCompressor object.

        Used to compress and decompress GPU buffers.
        All parameters are optional and will be set to usable defaults.

        Parameters
        ----------
        chunk_size: int (optional)
        stream: cudaStream_t (optional)
            Which CUDA stream to perform the operation on. Not currently
            supported.
        device_id: int (optional)
            Specify which device_id on the node to use
            Defaults to 0.
        """
        super().__init__(kwargs)

        self._manager = _lib._SnappyManager(
            self.chunk_size, self.stream, self.device_id
        )
class ManagedDecompressionManager(nvCompManager):
    # Format-autodetecting manager: inspects a compressed buffer and builds
    # the matching nvCOMP manager via the create_manager factory.
    def __init__(self, compressed_buffer):
        """Create a Managed compressor using the
        create_manager factory method.

        This function is used in order to automatically
        identify which compression algorithm was used on
        an input buffer.

        It returns a ManagedDecompressionManager that can
        then be used normally to decompress the unknown
        compressed binary data, or compress other data
        into the same format.

        Parameters
        ----------
        compressed_buffer: cp.ndarray
            A buffer of compressed bytes of unknown origin.
        """
        # No user options apply here; pass an empty mapping to the parent.
        super().__init__({})

        self._manager = _lib._ManagedManager(asarray(compressed_buffer))
| 0 |
rapidsai_public_repos/kvikio/python
|
rapidsai_public_repos/kvikio/python/kvikio/zarr.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
from __future__ import annotations
import contextlib
import os
import os.path
from abc import abstractmethod
from typing import Any, Literal, Mapping, Optional, Sequence, Union
import cupy
import cupy.typing
import numcodecs
import numpy
import numpy as np
import zarr
import zarr.creation
import zarr.errors
import zarr.storage
import zarr.util
from numcodecs.abc import Codec
from numcodecs.compat import ensure_contiguous_ndarray_like
from numcodecs.registry import register_codec
from packaging.version import parse
import kvikio
import kvikio.nvcomp
import kvikio.nvcomp_codec
import kvikio.zarr
from kvikio.numcodecs import BufferLike, CudaCodec
from kvikio.nvcomp_codec import NvCompBatchCodec
# Oldest Zarr release this module works with (see `supported` below).
MINIMUM_ZARR_VERSION = "2.15"

# Is this version of zarr supported? We depend on the `Context`
# argument introduced in https://github.com/zarr-developers/zarr-python/pull/1131
# in zarr v2.15.
supported = parse(zarr.__version__) >= parse(MINIMUM_ZARR_VERSION)
class GDSStore(zarr.storage.DirectoryStore):
    """GPUDirect Storage (GDS) class using directories and files.

    This class works like `zarr.storage.DirectoryStore` but implements
    getitems() in order to support direct reading into device memory.
    It uses KvikIO for reads and writes, which in turn will use GDS
    when applicable.

    Parameters
    ----------
    path : string
        Location of directory to use as the root of the storage hierarchy.
    normalize_keys : bool, optional
        If True, all store keys will be normalized to use lower case characters
        (e.g. 'foo' and 'FOO' will be treated as equivalent). This can be
        useful to avoid potential discrepancies between case-sensitive and
        case-insensitive file system. Default value is False.
    dimension_separator : {'.', '/'}, optional
        Separator placed between the dimensions of a chunk.
    compressor_config_overwrite
        If not None, use this `Mapping` to specify what is written to the Zarr metadata
        file on disk (`.zarray`). Normally, Zarr writes the configuration[1] given by
        the `compressor` argument to the `.zarray` file. Use this argument to overwrite
        the normal configuration and use the specified `Mapping` instead.
    decompressor_config_overwrite
        If not None, use this `Mapping` to specify what compressor configuration[1] is
        used for decompressing no matter the configuration found in the Zarr metadata
        on disk (the `.zarray` file).

    [1] https://github.com/zarr-developers/numcodecs/blob/cb155432/numcodecs/abc.py#L79

    Notes
    -----
    Atomic writes are used, which means that data are first written to a
    temporary file, then moved into place when the write is successfully
    completed. Files are only held open while they are being read or written and are
    closed immediately afterwards, so there is no need to manually close any files.

    Safe to write in multiple threads or processes.
    """

    # The default output array type used by getitems().
    default_meta_array = numpy.empty(())

    def __init__(
        self,
        path,
        normalize_keys=False,
        dimension_separator=None,
        *,
        compressor_config_overwrite: Optional[Mapping] = None,
        decompressor_config_overwrite: Optional[Mapping] = None,
    ) -> None:
        # Requires the `Context` argument of getitems(), added in Zarr v2.15
        # (see the module-level `supported` flag).
        if not kvikio.zarr.supported:
            raise RuntimeError(
                f"GDSStore requires Zarr >={kvikio.zarr.MINIMUM_ZARR_VERSION}"
            )
        super().__init__(
            path, normalize_keys=normalize_keys, dimension_separator=dimension_separator
        )
        self.compressor_config_overwrite = compressor_config_overwrite
        self.decompressor_config_overwrite = decompressor_config_overwrite

    def __eq__(self, other):
        # Stores are equal when they refer to the same directory path.
        return isinstance(other, GDSStore) and self.path == other.path

    def _tofile(self, a, fn):
        # Write buffer `a` to file `fn` via KvikIO (GDS when available);
        # overrides DirectoryStore's write primitive.
        with kvikio.CuFile(fn, "w") as f:
            written = f.write(a)
            assert written == a.nbytes

    def __getitem__(self, key):
        ret = super().__getitem__(key)
        if self.decompressor_config_overwrite and key == ".zarray":
            # Swap the on-disk compressor config for the decompressor
            # overwrite before Zarr parses the metadata.
            meta = self._metadata_class.decode_array_metadata(ret)
            if meta["compressor"]:
                meta["compressor"] = self.decompressor_config_overwrite
                ret = self._metadata_class.encode_array_metadata(meta)
        return ret

    def __setitem__(self, key, value):
        if self.compressor_config_overwrite and key == ".zarray":
            # Rewrite the compressor config before it hits disk.
            meta = self._metadata_class.decode_array_metadata(value)
            if meta["compressor"]:
                meta["compressor"] = self.compressor_config_overwrite
                value = self._metadata_class.encode_array_metadata(meta)
        super().__setitem__(key, value)

    def getitems(
        self,
        keys: Sequence[str],
        *,
        contexts: Mapping[str, Mapping] = {},
    ) -> Mapping[str, Any]:
        """Retrieve data from multiple keys.

        Parameters
        ----------
        keys : Iterable[str]
            The keys to retrieve
        contexts: Mapping[str, Context]
            A mapping of keys to their context. Each context is a mapping of store
            specific information. If the "meta_array" key exist, GDSStore use its
            values as the output array otherwise GDSStore.default_meta_array is used.

        Returns
        -------
        Mapping
            A collection mapping the input keys to their results.
        """
        ret = {}
        io_results = []

        # Issue all reads first (pread is asynchronous), then wait for them,
        # so the reads overlap. Missing files are silently skipped, matching
        # the Mapping semantics of absent keys.
        with contextlib.ExitStack() as stack:
            for key in keys:
                filepath = os.path.join(self.path, key)
                if not os.path.isfile(filepath):
                    continue
                try:
                    meta_array = contexts[key]["meta_array"]
                except KeyError:
                    meta_array = self.default_meta_array

                nbytes = os.path.getsize(filepath)
                f = stack.enter_context(kvikio.CuFile(filepath, "r"))
                ret[key] = numpy.empty_like(meta_array, shape=(nbytes,), dtype="u1")
                io_results.append((f.pread(ret[key]), nbytes))

            for future, nbytes in io_results:
                nbytes_read = future.get()
                if nbytes_read != nbytes:
                    raise RuntimeError(
                        f"Incomplete read ({nbytes_read}) expected {nbytes}"
                    )
        return ret
class NVCompCompressor(CudaCodec):
    """Abstract base class for nvCOMP compressors

    The derived classes must set `codec_id` and implement
    `get_nvcomp_manager`

    Parameters
    ----------
    device_ordinal
        The device that should do the compression/decompression
    """

    def __init__(self, device_ordinal: int = 0):
        self.device_ordinal = device_ordinal

    @abstractmethod
    def get_nvcomp_manager(self) -> kvikio.nvcomp.nvCompManager:
        """Abstract method that should return the nvCOMP compressor manager

        Returns
        -------
        nvCompManager
            The nvCOMP compressor manager to use
        """
        pass  # TODO: cache Manager

    def encode(self, buf: BufferLike) -> cupy.typing.NDArray:
        # Host buffers are copied to the device first; result stays on device.
        buf = cupy.asarray(ensure_contiguous_ndarray_like(buf))
        return self.get_nvcomp_manager().compress(buf)

    def decode(self, buf: BufferLike, out: Optional[BufferLike] = None) -> BufferLike:
        buf = ensure_contiguous_ndarray_like(buf)
        # Track where the input lived so the result is returned in the same
        # memory space (host in -> host out, device in -> device out).
        is_host_buffer = not hasattr(buf, "__cuda_array_interface__")
        if is_host_buffer:
            buf = cupy.asarray(buf)

        ret = self.get_nvcomp_manager().decompress(buf)

        if is_host_buffer:
            ret = cupy.asnumpy(ret)

        if out is not None:
            # Also copy into `out`, host or device as appropriate.
            # NOTE(review): `ret` (not `out`) is returned even when `out` is
            # given -- numcodecs implementations commonly return `out` here;
            # confirm callers rely on the current behavior before changing.
            out = ensure_contiguous_ndarray_like(out)
            if hasattr(out, "__cuda_array_interface__"):
                cupy.copyto(out, ret.view(dtype=out.dtype), casting="no")
            else:
                np.copyto(out, cupy.asnumpy(ret.view(dtype=out.dtype)), casting="no")
        return ret
class ANS(NVCompCompressor):
    # Numcodecs codec backed by nvCOMP's ANS manager.
    codec_id = "nvcomp_ANS"

    def get_nvcomp_manager(self):
        return kvikio.nvcomp.ANSManager(device_id=self.device_ordinal)
class Bitcomp(NVCompCompressor):
    # Numcodecs codec backed by nvCOMP's Bitcomp manager.
    codec_id = "nvcomp_Bitcomp"

    def get_nvcomp_manager(self):
        return kvikio.nvcomp.BitcompManager(device_id=self.device_ordinal)
class Cascaded(NVCompCompressor):
    # Numcodecs codec backed by nvCOMP's Cascaded manager.
    codec_id = "nvcomp_Cascaded"

    def get_nvcomp_manager(self):
        return kvikio.nvcomp.CascadedManager(device_id=self.device_ordinal)
class Gdeflate(NVCompCompressor):
    # Numcodecs codec backed by nvCOMP's Gdeflate manager.
    codec_id = "nvcomp_Gdeflate"

    def get_nvcomp_manager(self):
        return kvikio.nvcomp.GdeflateManager(device_id=self.device_ordinal)
class LZ4(NVCompCompressor):
    # Numcodecs codec backed by nvCOMP's LZ4 manager.
    codec_id = "nvcomp_LZ4"

    def get_nvcomp_manager(self):
        return kvikio.nvcomp.LZ4Manager(device_id=self.device_ordinal)
class Snappy(NVCompCompressor):
    # Numcodecs codec backed by nvCOMP's Snappy manager.
    codec_id = "nvcomp_Snappy"

    def get_nvcomp_manager(self):
        return kvikio.nvcomp.SnappyManager(device_id=self.device_ordinal)
# Expose a list of available nvCOMP compressors and register them as Zarr codecs
nvcomp_compressors = [ANS, Bitcomp, Cascaded, Gdeflate, LZ4, Snappy]
for c in nvcomp_compressors:
    register_codec(c)
class CompatCompressor:
    """A pair of compatible compressors one using the CPU and one using the GPU

    Warning
    -------
    `CompatCompressor` is only supported by KvikIO's `open_cupy_array()` and
    cannot be used as a compressor argument in Zarr functions like `open()`
    and `open_array()` directly. However, it is possible to use its `.cpu`
    like: `open(..., compressor=CompatCompressor.lz4().cpu)`.

    Parameters
    ----------
    cpu
        The CPU compressor.
    gpu
        The GPU compressor.
    """

    def __init__(self, cpu: Codec, gpu: CudaCodec) -> None:
        self.cpu = cpu
        self.gpu = gpu

    @classmethod
    def lz4(cls) -> CompatCompressor:
        """A compatible pair of LZ4 compressors"""
        # These two codecs are treated as interchangeable by
        # `open_cupy_array()`, which swaps one for the other via the GDSStore
        # config-overwrite mechanism.
        return cls(cpu=numcodecs.LZ4(), gpu=NvCompBatchCodec("lz4"))
def open_cupy_array(
    store: Union[os.PathLike, str],
    mode: Literal["r", "r+", "a", "w", "w-"] = "a",
    compressor: Codec | CompatCompressor = Snappy(device_ordinal=0),
    meta_array=cupy.empty(()),
    **kwargs,
) -> zarr.Array:
    """Open an Zarr array as a CuPy-like array using file-mode-like semantics.

    This function is a CUDA friendly version of `zarr.open_array` that reads
    and writes to CuPy arrays. Beside the arguments listed below, the arguments
    have the same semantics as in `zarr.open_array`.

    Parameters
    ----------
    store
        Path to directory in file system. As opposed to `zarr.open_array`,
        Store and path to zip files isn't supported.
    mode
        Persistence mode: 'r' means read only (must exist); 'r+' means
        read/write (must exist); 'a' means read/write (create if doesn't
        exist); 'w' means create (overwrite if exists); 'w-' means create
        (fail if exists).
    compressor
        The compressor used when creating a Zarr file or None if no compressor
        is to be used. If a `CompatCompressor` is given, `CompatCompressor.gpu`
        is used for compression and decompression; and `CompatCompressor.cpu`
        is written as the compressor in the Zarr file metadata on disk.
        This argument is ignored in "r" and "r+" mode. By default the
        Snappy compressor by nvCOMP is used.
    meta_array : array-like, optional
        A CuPy-like array instance to use for determining arrays to create and
        return to users. It must implement `__cuda_array_interface__`.
    **kwargs
        The rest of the arguments are forwarded to `zarr.open_array` as-is.

    Returns
    -------
    Zarr array backed by a GDS file store, nvCOMP compression, and CuPy arrays.
    """
    if not isinstance(store, (str, os.PathLike)):
        raise ValueError("store must be a path")
    store = str(os.fspath(store))
    if not hasattr(meta_array, "__cuda_array_interface__"):
        raise ValueError("meta_array must implement __cuda_array_interface__")

    if mode in ("r", "r+", "a"):
        # In order to handle "a", we start by trying to open the file in read mode.
        try:
            ret = zarr.open_array(
                store=kvikio.zarr.GDSStore(path=store),
                mode="r+",
                meta_array=meta_array,
                **kwargs,
            )
        except (zarr.errors.ContainsGroupError, zarr.errors.ArrayNotFoundError):
            # If we are reading, this is a genuine error.
            if mode in ("r", "r+"):
                raise
        else:
            if ret.compressor is None:
                return ret
            # If we are reading a LZ4-CPU compressed file, we overwrite the
            # metadata on-the-fly to make Zarr use LZ4-GPU for both compression
            # and decompression.
            compat_lz4 = CompatCompressor.lz4()
            if ret.compressor == compat_lz4.cpu:
                ret = zarr.open_array(
                    store=kvikio.zarr.GDSStore(
                        path=store,
                        compressor_config_overwrite=compat_lz4.cpu.get_config(),
                        decompressor_config_overwrite=compat_lz4.gpu.get_config(),
                    ),
                    mode=mode,
                    meta_array=meta_array,
                    **kwargs,
                )
            elif not isinstance(ret.compressor, CudaCodec):
                raise ValueError(
                    "The Zarr file was written using a non-CUDA compatible "
                    f"compressor, {ret.compressor}, please use something "
                    "like kvikio.zarr.CompatCompressor"
                )
            return ret

    # At this point, we know that we are writing a new array
    if mode not in ("w", "w-", "a"):
        raise ValueError(f"Unknown mode: {mode}")
    if isinstance(compressor, CompatCompressor):
        # Write the CPU codec's config to disk but decompress with the GPU codec.
        compressor_config_overwrite = compressor.cpu.get_config()
        decompressor_config_overwrite = compressor.gpu.get_config()
        compressor = compressor.gpu
    else:
        compressor_config_overwrite = None
        decompressor_config_overwrite = None
    return zarr.open_array(
        store=kvikio.zarr.GDSStore(
            path=store,
            compressor_config_overwrite=compressor_config_overwrite,
            decompressor_config_overwrite=decompressor_config_overwrite,
        ),
        mode=mode,
        meta_array=meta_array,
        compressor=compressor,
        **kwargs,
    )
| 0 |
rapidsai_public_repos/kvikio/python
|
rapidsai_public_repos/kvikio/python/kvikio/numpy.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import io
import os
import os.path
from typing import Protocol, Union, runtime_checkable
import numpy as np
from numpy.typing import ArrayLike, DTypeLike
import kvikio
@runtime_checkable
class FileLike(Protocol):
    """File like object that represent a OS-level file"""

    # File descriptor of the underlying OS-level file.
    def fileno(self) -> int:
        ...

    # Path of the underlying OS-level file.
    @property
    def name(self) -> str:
        ...
class LikeWrapper:
    """Wrapper for NumPy's `like` argument introduced in NumPy v1.20

    Wraps an array-like instance in order to seamlessly utilize KvikIO.

    Examples
    --------
    Read file into a NumPy array:

    >>> np.arange(10).tofile("/tmp/myfile")
    >>> np.fromfile("/tmp/myfile", dtype=int)
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> type(_)
    <class 'numpy.ndarray'>

    Read file into a CuPy array using the like argument. The file is read
    directly into device memory using GDS if available:

    >>> import cupy
    >>> np.fromfile("/tmp/myfile", dtype=int, like=cupy.empty(()))
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> type(_)
    <class 'cupy._core.core.ndarray'>

    We can also use CuPy's fromfile function:

    >>> cupy.fromfile("/tmp/myfile", dtype=int, like=cupy.empty(()))
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> type(_)
    <class 'cupy._core.core.ndarray'>
    """

    def __init__(self, like: ArrayLike) -> None:
        self._like = like

    def __array_function__(self, func, types, args, kwargs):
        # Only `np.fromfile` is rerouted through KvikIO; every other NumPy
        # function is dispatched unchanged.
        if func is np.fromfile:
            return fromfile(*args, like=self._like, **kwargs)
        return func(*args, **kwargs)
def fromfile(
    file: Union[str, os.PathLike, io.FileIO],
    dtype: DTypeLike = float,
    count: int = -1,
    sep: str = "",
    offset: int = 0,
    *,
    like: ArrayLike = None,
) -> ArrayLike:
    """Construct an array from file using KvikIO

    Overload `numpy.fromfile` to use KvikIO.

    Parameters
    ----------
    file : FileLike or str or PathLike
        Open file object or filename.
    dtype : data-type
        Data type of the returned array.
        For binary files, it is used to determine the size and byte-order
        of the items in the file.
        Most builtin numeric types are supported and extension types may be supported.
    count : int
        Number of items to read. `-1` means all items (i.e., the complete file).
    sep : str
        Empty ("") separator means the file should be treated as binary. Any other
        value is not supported and will raise NotImplementedError.
    offset : int
        The offset (in bytes) from the file's current position. Defaults to 0.
        Only permitted for binary files.
    like : array_like, optional
        Reference object to allow the creation of arrays which are not
        NumPy arrays.

    Examples
    --------
    Read file into a NumPy array:

    >>> np.arange(10).tofile("/tmp/myfile")
    >>> fromfile("/tmp/myfile", dtype=int)
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> type(_)
    <class 'numpy.ndarray'>

    Read file into a CuPy array using the like argument. The file is read
    directly into device memory using GDS if available:

    >>> import cupy
    >>> fromfile("/tmp/myfile", dtype=int, like=cupy.empty(()))
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> type(_)
    <class 'cupy._core.core.ndarray'>
    """
    if sep != "":
        raise NotImplementedError(
            "Non-default value of the `sep` argument is not supported"
        )
    if isinstance(file, FileLike):
        filepath = file.name
    else:
        # Fix: use `os.fspath` instead of `str` -- for a generic `os.PathLike`
        # with a custom `__str__`, `str(file)` need not be the filesystem path.
        filepath = os.fspath(file)
    nbytes = os.path.getsize(filepath)
    itemsize = np.dtype(dtype).itemsize
    count = nbytes if count == -1 else count
    # Notice, NumPy truncates to the file size silently; the min() below caps
    # `count` to the number of whole items available after `offset`.
    count = min(count, (nbytes - offset) // itemsize)
    ret = np.empty_like(like, shape=(count,), dtype=dtype)
    with kvikio.CuFile(filepath, "r") as f:
        f.read(ret, file_offset=offset)
    return ret
def tofile(
    ary: ArrayLike,
    file: Union[str, os.PathLike, io.FileIO],
) -> None:
    """Write array to a file using KvikIO.

    Overload `numpy.tofile` to use KvikIO.

    Data is always written in 'C' order, independent of the order of `a`.
    The data produced by this method can be recovered using the function
    fromfile().

    Parameters
    ----------
    ary : array_like
        Data to write.
    file : FileLike or str or PathLike
        Open file object or filename.

    Notes
    -----
    This is a convenience function for quick storage of array data.
    Information on endianness and precision is lost, so this method is not a
    good choice for files intended to archive data or transport data between
    machines with different endianness.

    When file is a file object, array contents are directly written to the
    file, bypassing the file object's `write` method. As a result, tofile
    cannot be used with files objects supporting compression (e.g., GzipFile)
    or file-like objects that do not support `fileno()` (e.g., BytesIO).
    """
    # Fix: the docstring previously claimed to overload `numpy.fromfile`.
    if isinstance(file, FileLike):
        filepath = file.name
    else:
        # Fix: `os.fspath` handles `os.PathLike` correctly where `str` may not.
        filepath = os.fspath(file)
    with kvikio.CuFile(filepath, "w") as f:
        f.write(np.ascontiguousarray(ary, like=ary))  # type: ignore
| 0 |
rapidsai_public_repos/kvikio/python
|
rapidsai_public_repos/kvikio/python/kvikio/__init__.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
from ._lib import libkvikio # type: ignore
from .cufile import CuFile # noqa: F401
__version__ = "23.12.00"
def memory_register(buf) -> None:
    """Register a device buffer with the KvikIO C++ layer.

    Parameters
    ----------
    buf
        The buffer to register.
    """
    # Consistency fix: the function is annotated `-> None` and its sibling
    # `memory_deregister` does not return a value, so the redundant `return`
    # of the C++ call's result is dropped.
    libkvikio.memory_register(buf)
def memory_deregister(buf) -> None:
    """Deregister a buffer previously passed to `memory_register`."""
    libkvikio.memory_deregister(buf)
# Re-export of the C++ driver-properties handle.
# TODO: Wrap nicely, maybe as a dataclass?
DriverProperties = libkvikio.DriverProperties
| 0 |
rapidsai_public_repos/kvikio/python
|
rapidsai_public_repos/kvikio/python/kvikio/numcodecs.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
"""
This module implements CUDA compression and transformation codecs for Numcodecs.
See <https://numcodecs.readthedocs.io/en/stable/>
"""
from __future__ import annotations
from abc import abstractmethod
from typing import Any, Optional, Union
import cupy.typing
import numpy.typing
from numcodecs.abc import Codec
# TODO: replace `ANY` with `collections.abc.Buffer` from PEP-688
# when it becomes available.
BufferLike = Union[cupy.typing.NDArray, numpy.typing.ArrayLike, Any]
class CudaCodec(Codec):
"""Abstract base class for CUDA codecs"""
@abstractmethod
def encode(self, buf: BufferLike) -> cupy.typing.NDArray:
"""Encode `buf` using CUDA.
This method should support both device and host buffers.
Parameters
----------
buf
A numpy array like object such as numpy.ndarray, cupy.ndarray,
or any object exporting a buffer interface.
Returns
-------
The compressed buffer wrapped in a CuPy array
"""
@abstractmethod
def decode(self, buf: BufferLike, out: Optional[BufferLike] = None) -> BufferLike:
"""Decode `buf` using CUDA.
This method should support both device and host buffers.
Parameters
----------
buf
A numpy array like object such as numpy.ndarray, cupy.ndarray,
or any object exporting a buffer interface.
out
A numpy array like object such as numpy.ndarray, cupy.ndarray,
or any object exporting a buffer interface. If provided, this buffer must
be exactly the right size to store the decoded data.
Returns
-------
Decoded data, which is either host or device memory based on the type
of `out`. If `out` is None, the type of `buf` determines the return buffer
type.
"""
| 0 |
rapidsai_public_repos/kvikio/python
|
rapidsai_public_repos/kvikio/python/kvikio/defaults.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import contextlib
from ._lib import libkvikio # type: ignore
def compat_mode() -> bool:
    """Check if KvikIO is running in compatibility mode.

    Notice, this is not the same as the compatibility mode in cuFile. That is,
    cuFile can run in compatibility mode while KvikIO is not.

    When KvikIO is running in compatibility mode, it doesn't load `libcufile.so`.
    Instead, reads and writes are done using POSIX.

    Set the environment variable `KVIKIO_COMPAT_MODE` to enable/disable compatibility
    mode. By default, compatibility mode is enabled:
      - when `libcufile` cannot be found
      - when running in Windows Subsystem for Linux (WSL)
      - when `/run/udev` isn't readable, which typically happens when running inside
        a docker image not launched with `--volume /run/udev:/run/udev:ro`

    Return
    ------
    bool
        Whether KvikIO is running in compatibility mode or not.
    """
    # Delegates to the C++ layer, which owns the setting.
    return libkvikio.compat_mode()
def compat_mode_reset(enable: bool) -> None:
    """Reset the compatibility mode.

    Use this function to enable/disable compatibility mode explicitly.

    Parameters
    ----------
    enable : bool
        Set to True to enable and False to disable compatibility mode
    """
    libkvikio.compat_mode_reset(enable)
@contextlib.contextmanager
def set_compat_mode(enable: bool):
    """Context manager that temporarily overrides the compatibility mode.

    The previous setting is restored on exit.

    Parameters
    ----------
    enable : bool
        Set to True to enable and False to disable compatibility mode
    """
    num_threads_reset(get_num_threads())  # Sync all running threads
    saved = compat_mode()
    try:
        compat_mode_reset(enable)
        yield
    finally:
        # Restore whatever was in effect before entering the context.
        compat_mode_reset(saved)
def get_num_threads() -> int:
    """Get the number of threads of the thread pool.

    Set the default value using `num_threads_reset()` or by setting the
    `KVIKIO_NTHREADS` environment variable. If not set, the default value is 1.

    Return
    ------
    nthreads: int
        The number of threads in the current thread pool.
    """
    return libkvikio.thread_pool_nthreads()
def num_threads_reset(nthreads: int) -> None:
    """Reset the number of threads in the default thread pool.

    Waits for all currently running tasks to be completed, then destroys all threads
    in the pool and creates a new thread pool with the new number of threads. Any
    tasks that were waiting in the queue before the pool was reset will then be
    executed by the new threads. If the pool was paused before resetting it, the new
    pool will be paused as well.

    Parameters
    ----------
    nthreads : int
        The number of threads to use. The default value can be specified by setting
        the `KVIKIO_NTHREADS` environment variable. If not set, the default value
        is 1.
    """
    libkvikio.thread_pool_nthreads_reset(nthreads)
@contextlib.contextmanager
def set_num_threads(nthreads: int):
    """Context manager that temporarily resizes the default thread pool.

    The previous pool size is restored on exit.

    Parameters
    ----------
    nthreads : int
        The number of threads to use.
    """
    saved = get_num_threads()
    try:
        num_threads_reset(nthreads)
        yield
    finally:
        num_threads_reset(saved)
def task_size() -> int:
    """Get the default task size used for parallel IO operations.

    Set the default value using `task_size_reset()` or by setting
    the `KVIKIO_TASK_SIZE` environment variable. If not set,
    the default value is 4 MiB.

    Return
    ------
    nbytes: int
        The default task size in bytes.
    """
    return libkvikio.task_size()
def task_size_reset(nbytes: int) -> None:
    """Reset the default task size used for parallel IO operations.

    Parameters
    ----------
    nbytes : int
        The default task size in bytes.
    """
    libkvikio.task_size_reset(nbytes)
@contextlib.contextmanager
def set_task_size(nbytes: int):
    """Context manager that temporarily overrides the parallel-IO task size.

    The previous task size is restored on exit.

    Parameters
    ----------
    nbytes : int
        The default task size in bytes.
    """
    saved = task_size()
    try:
        task_size_reset(nbytes)
        yield
    finally:
        task_size_reset(saved)
def gds_threshold() -> int:
    """Get the default GDS threshold, which is the minimum size to use GDS.

    In order to improve performance of small IO, `.pread()` and `.pwrite()`
    implements a shortcut that circumvent the threadpool and use the POSIX
    backend directly.

    Set the default value using `gds_threshold_reset()` or by setting the
    `KVIKIO_GDS_THRESHOLD` environment variable. If not set, the default value
    is 1 MiB.

    Return
    ------
    nbytes : int
        The default GDS threshold size in bytes.
    """
    # Docstring fix: previously named `KVIKIO_TASK_SIZE` (copy-paste from
    # `task_size()`); this setting is controlled by `KVIKIO_GDS_THRESHOLD`.
    return libkvikio.gds_threshold()
def gds_threshold_reset(nbytes: int) -> None:
    """Reset the default GDS threshold, which is the minimum size to use GDS.

    Parameters
    ----------
    nbytes : int
        The default GDS threshold size in bytes.
    """
    libkvikio.gds_threshold_reset(nbytes)
@contextlib.contextmanager
def set_gds_threshold(nbytes: int):
    """Context manager that temporarily overrides the default GDS threshold.

    The previous threshold is restored on exit.

    Parameters
    ----------
    nbytes : int
        The default GDS threshold size in bytes.
    """
    saved = gds_threshold()
    try:
        gds_threshold_reset(nbytes)
        yield
    finally:
        gds_threshold_reset(saved)
| 0 |
rapidsai_public_repos/kvikio/python/kvikio
|
rapidsai_public_repos/kvikio/python/kvikio/_lib/nvcomp_ll_cxx_api.pxd
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
# distutils: language = c++
# cython: language_level=3
# Minimal CUDA runtime declarations needed by the low-level nvCOMP bindings:
# the opaque stream handle and memcpy direction enum.
cdef extern from "cuda_runtime.h":
    ctypedef void* cudaStream_t

    ctypedef enum cudaMemcpyKind:
        cudaMemcpyHostToHost = 0,
        cudaMemcpyHostToDevice = 1,
        cudaMemcpyDeviceToHost = 2,
        cudaMemcpyDeviceToDevice = 3,
        cudaMemcpyDefault = 4

# Element types understood by nvCOMP (element size noted per value).
cdef extern from "nvcomp.h":
    ctypedef enum nvcompType_t:
        NVCOMP_TYPE_CHAR = 0,       # 1B
        NVCOMP_TYPE_UCHAR = 1,      # 1B
        NVCOMP_TYPE_SHORT = 2,      # 2B
        NVCOMP_TYPE_USHORT = 3,     # 2B
        NVCOMP_TYPE_INT = 4,        # 4B
        NVCOMP_TYPE_UINT = 5,       # 4B
        NVCOMP_TYPE_LONGLONG = 6,   # 8B
        NVCOMP_TYPE_ULONGLONG = 7,  # 8B
        NVCOMP_TYPE_BITS = 0xff     # 1b

# Status codes shared by all nvCOMP entry points.
cdef extern from "nvcomp/shared_types.h":
    ctypedef enum nvcompStatus_t:
        nvcompSuccess = 0,
        nvcompErrorInvalidValue = 10,
        nvcompErrorNotSupported = 11,
        nvcompErrorCannotDecompress = 12,
        nvcompErrorBadChecksum = 13,
        nvcompErrorCannotVerifyChecksums = 14,
        nvcompErrorCudaError = 1000,
        nvcompErrorInternal = 10000,
# nvCOMP Low-Level Interface.
# https://github.com/NVIDIA/nvcomp/blob/main/doc/lowlevel_c_quickstart.md
#
# LZ4 batch compression/decompression API.
#
cdef extern from "nvcomp/lz4.h" nogil:
    # Per-batch LZ4 options (element type of the uncompressed data).
    ctypedef struct nvcompBatchedLZ4Opts_t:
        nvcompType_t data_type

    # Compression API.
    # Query the scratch ("temp") buffer size required by CompressAsync.
    cdef nvcompStatus_t nvcompBatchedLZ4CompressGetTempSize(
        size_t batch_size,
        size_t max_uncompressed_chunk_bytes,
        nvcompBatchedLZ4Opts_t format_opts,
        size_t* temp_bytes
    )

    # Upper bound on the compressed size of a single chunk; used to size
    # the per-chunk output buffers.
    cdef nvcompStatus_t nvcompBatchedLZ4CompressGetMaxOutputChunkSize(
        size_t max_uncompressed_chunk_bytes,
        nvcompBatchedLZ4Opts_t format_opts,
        size_t* max_compressed_bytes
    )

    # Asynchronously compress a batch of chunks on `stream`.
    cdef nvcompStatus_t nvcompBatchedLZ4CompressAsync(
        const void* const* device_uncompressed_ptrs,
        const size_t* device_uncompressed_bytes,
        size_t max_uncompressed_chunk_bytes,
        size_t batch_size,
        void* device_temp_ptr,
        size_t temp_bytes,
        void* const* device_compressed_ptrs,
        size_t* device_compressed_bytes,
        nvcompBatchedLZ4Opts_t format_opts,
        cudaStream_t stream
    )

    # Decompression API.
    # Query the scratch buffer size required by DecompressAsync.
    cdef nvcompStatus_t nvcompBatchedLZ4DecompressGetTempSize(
        size_t num_chunks,
        size_t max_uncompressed_chunk_bytes,
        size_t* temp_bytes
    )

    # Compute the uncompressed size of each chunk (async, on `stream`).
    cdef nvcompStatus_t nvcompBatchedLZ4GetDecompressSizeAsync(
        const void* const* device_compressed_ptrs,
        const size_t* device_compressed_bytes,
        size_t* device_uncompressed_bytes,
        size_t batch_size,
        cudaStream_t stream
    )

    # Asynchronously decompress a batch of chunks on `stream`.
    nvcompStatus_t nvcompBatchedLZ4DecompressAsync(
        const void* const* device_compressed_ptrs,
        const size_t* device_compressed_bytes,
        const size_t* device_uncompressed_bytes,
        size_t* device_actual_uncompressed_bytes,
        size_t batch_size,
        void* const device_temp_ptr,
        size_t temp_bytes,
        void* const* device_uncompressed_ptrs,
        nvcompStatus_t* device_statuses,
        cudaStream_t stream
    )
#
# Gdeflate batch compression/decompression API.
# Same call shape as the LZ4 batch API above, with Gdeflate options.
#
cdef extern from "nvcomp/gdeflate.h" nogil:
    ctypedef struct nvcompBatchedGdeflateOpts_t:
        int algo

    # Compression API.
    cdef nvcompStatus_t nvcompBatchedGdeflateCompressGetTempSize(
        size_t batch_size,
        size_t max_uncompressed_chunk_bytes,
        nvcompBatchedGdeflateOpts_t format_opts,
        size_t* temp_bytes
    )
    cdef nvcompStatus_t nvcompBatchedGdeflateCompressGetMaxOutputChunkSize(
        size_t max_uncompressed_chunk_bytes,
        nvcompBatchedGdeflateOpts_t format_opts,
        size_t* max_compressed_bytes
    )
    cdef nvcompStatus_t nvcompBatchedGdeflateCompressAsync(
        const void* const* device_uncompressed_ptrs,
        const size_t* device_uncompressed_bytes,
        size_t max_uncompressed_chunk_bytes,
        size_t batch_size,
        void* device_temp_ptr,
        size_t temp_bytes,
        void* const* device_compressed_ptrs,
        size_t* device_compressed_bytes,
        nvcompBatchedGdeflateOpts_t format_opts,
        cudaStream_t stream
    )

    # Decompression API.
    cdef nvcompStatus_t nvcompBatchedGdeflateDecompressGetTempSize(
        size_t num_chunks,
        size_t max_uncompressed_chunk_bytes,
        size_t* temp_bytes
    )
    nvcompStatus_t nvcompBatchedGdeflateGetDecompressSizeAsync(
        const void* const* device_compressed_ptrs,
        const size_t* device_compressed_bytes,
        size_t* device_uncompressed_bytes,
        size_t batch_size,
        cudaStream_t stream
    )
    nvcompStatus_t nvcompBatchedGdeflateDecompressAsync(
        const void* const* device_compressed_ptrs,
        const size_t* device_compressed_bytes,
        const size_t* device_uncompressed_bytes,
        size_t* device_actual_uncompressed_bytes,
        size_t batch_size,
        void* const device_temp_ptr,
        size_t temp_bytes,
        void* const* device_uncompressed_ptrs,
        nvcompStatus_t* device_statuses,
        cudaStream_t stream
    )
#
# zstd batch compression/decompression API.
# Same call shape as the LZ4 batch API above, with zstd options.
#
cdef extern from "nvcomp/zstd.h" nogil:
    ctypedef struct nvcompBatchedZstdOpts_t:
        int reserved

    # Compression API.
    cdef nvcompStatus_t nvcompBatchedZstdCompressGetTempSize(
        size_t batch_size,
        size_t max_uncompressed_chunk_bytes,
        nvcompBatchedZstdOpts_t format_opts,
        size_t* temp_bytes
    )
    cdef nvcompStatus_t nvcompBatchedZstdCompressGetMaxOutputChunkSize(
        size_t max_uncompressed_chunk_bytes,
        nvcompBatchedZstdOpts_t format_opts,
        size_t* max_compressed_bytes
    )
    cdef nvcompStatus_t nvcompBatchedZstdCompressAsync(
        const void* const* device_uncompressed_ptrs,
        const size_t* device_uncompressed_bytes,
        size_t max_uncompressed_chunk_bytes,
        size_t batch_size,
        void* device_temp_ptr,
        size_t temp_bytes,
        void* const* device_compressed_ptrs,
        size_t* device_compressed_bytes,
        nvcompBatchedZstdOpts_t format_opts,
        cudaStream_t stream
    )

    # Decompression API.
    cdef nvcompStatus_t nvcompBatchedZstdDecompressGetTempSize(
        size_t num_chunks,
        size_t max_uncompressed_chunk_bytes,
        size_t* temp_bytes
    )
    nvcompStatus_t nvcompBatchedZstdGetDecompressSizeAsync(
        const void* const* device_compressed_ptrs,
        const size_t* device_compressed_bytes,
        size_t* device_uncompressed_bytes,
        size_t batch_size,
        cudaStream_t stream
    )
    nvcompStatus_t nvcompBatchedZstdDecompressAsync(
        const void* const* device_compressed_ptrs,
        const size_t* device_compressed_bytes,
        const size_t* device_uncompressed_bytes,
        size_t* device_actual_uncompressed_bytes,
        size_t batch_size,
        void* const device_temp_ptr,
        size_t temp_bytes,
        void* const* device_uncompressed_ptrs,
        nvcompStatus_t* device_statuses,
        cudaStream_t stream
    )
#
# Snappy batch compression/decompression API.
# Same call shape as the LZ4 batch API above, with Snappy options.
#
cdef extern from "nvcomp/snappy.h" nogil:
    ctypedef struct nvcompBatchedSnappyOpts_t:
        int reserved

    # Compression API.
    cdef nvcompStatus_t nvcompBatchedSnappyCompressGetTempSize(
        size_t batch_size,
        size_t max_uncompressed_chunk_bytes,
        nvcompBatchedSnappyOpts_t format_opts,
        size_t* temp_bytes
    )
    cdef nvcompStatus_t nvcompBatchedSnappyCompressGetMaxOutputChunkSize(
        size_t max_uncompressed_chunk_bytes,
        nvcompBatchedSnappyOpts_t format_opts,
        size_t* max_compressed_bytes
    )
    cdef nvcompStatus_t nvcompBatchedSnappyCompressAsync(
        const void* const* device_uncompressed_ptrs,
        const size_t* device_uncompressed_bytes,
        size_t max_uncompressed_chunk_bytes,
        size_t batch_size,
        void* device_temp_ptr,
        size_t temp_bytes,
        void* const* device_compressed_ptrs,
        size_t* device_compressed_bytes,
        nvcompBatchedSnappyOpts_t format_opts,
        cudaStream_t stream
    )

    # Decompression API.
    cdef nvcompStatus_t nvcompBatchedSnappyDecompressGetTempSize(
        size_t num_chunks,
        size_t max_uncompressed_chunk_bytes,
        size_t* temp_bytes
    )
    nvcompStatus_t nvcompBatchedSnappyGetDecompressSizeAsync(
        const void* const* device_compressed_ptrs,
        const size_t* device_compressed_bytes,
        size_t* device_uncompressed_bytes,
        size_t batch_size,
        cudaStream_t stream
    )
    nvcompStatus_t nvcompBatchedSnappyDecompressAsync(
        const void* const* device_compressed_ptrs,
        const size_t* device_compressed_bytes,
        const size_t* device_uncompressed_bytes,
        size_t* device_actual_uncompressed_bytes,
        size_t batch_size,
        void* const device_temp_ptr,
        size_t temp_bytes,
        void* const* device_uncompressed_ptrs,
        nvcompStatus_t* device_statuses,
        cudaStream_t stream
    )
| 0 |
rapidsai_public_repos/kvikio/python/kvikio
|
rapidsai_public_repos/kvikio/python/kvikio/_lib/CMakeLists.txt
|
# =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Set the list of Cython files to build
set(cython_modules arr.pyx libnvcomp.pyx libnvcomp_ll.pyx libkvikio.pyx)

# Build one Python extension module per .pyx, linking each against the
# kvikio C++ library and nvCOMP.
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_modules}"
  LINKED_LIBRARIES kvikio::kvikio nvcomp::nvcomp
)
| 0 |
rapidsai_public_repos/kvikio/python/kvikio
|
rapidsai_public_repos/kvikio/python/kvikio/_lib/libnvcomp.pyx
|
# Copyright (c) 2022 Carson Swope
# Use, modification, and distribution is subject to the MIT License
# https://github.com/carsonswope/py-nvcomp/blob/main/LICENSE)
# SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: MIT
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from enum import Enum
from libc.stdint cimport uint8_t, uintptr_t
from libcpp cimport nullptr
from libcpp.memory cimport make_shared, shared_ptr
from libcpp.utility cimport move
from kvikio._lib.arr cimport Array
from kvikio._lib.nvcomp_cxx_api cimport (
ANSManager,
BitcompManager,
CascadedManager,
CompressionConfig,
DecompressionConfig,
GdeflateManager,
LZ4Manager,
SnappyManager,
create_manager,
cudaStream_t,
nvcompBatchedANSDefaultOpts,
nvcompBatchedANSOpts_t,
nvcompBatchedBitcompFormatOpts,
nvcompBatchedCascadedDefaultOpts,
nvcompBatchedCascadedOpts_t,
nvcompBatchedGdeflateOpts_t,
nvcompBatchedLZ4Opts_t,
nvcompBatchedSnappyDefaultOpts,
nvcompBatchedSnappyOpts_t,
nvcompManagerBase,
nvcompType_t,
)
class pyNvcompType_t(Enum):
    """Python-visible mirror of the C ``nvcompType_t`` element-type enum."""
    pyNVCOMP_TYPE_CHAR = nvcompType_t.NVCOMP_TYPE_CHAR
    pyNVCOMP_TYPE_UCHAR = nvcompType_t.NVCOMP_TYPE_UCHAR
    pyNVCOMP_TYPE_SHORT = nvcompType_t.NVCOMP_TYPE_SHORT
    pyNVCOMP_TYPE_USHORT = nvcompType_t.NVCOMP_TYPE_USHORT
    pyNVCOMP_TYPE_INT = nvcompType_t.NVCOMP_TYPE_INT
    pyNVCOMP_TYPE_UINT = nvcompType_t.NVCOMP_TYPE_UINT
    pyNVCOMP_TYPE_LONGLONG = nvcompType_t.NVCOMP_TYPE_LONGLONG
    pyNVCOMP_TYPE_ULONGLONG = nvcompType_t.NVCOMP_TYPE_ULONGLONG
    pyNVCOMP_TYPE_BITS = nvcompType_t.NVCOMP_TYPE_BITS
cdef class _nvcompManager:
    """Shared base for the concrete nvCOMP high-level manager wrappers.

    Subclasses allocate a concrete ``nvcomp::*Manager`` in ``__cinit__``
    and store the raw pointer in ``self._impl``.
    """
    # Temporary storage for factory allocated manager to prevent cleanup
    cdef shared_ptr[nvcompManagerBase] _mgr
    cdef nvcompManagerBase* _impl
    cdef shared_ptr[CompressionConfig] _compression_config
    cdef shared_ptr[DecompressionConfig] _decompression_config

    def __dealloc__(self):
        # `_ManagedManager` uses a temporary object, self._mgr
        # to retain a reference count to the Manager created by
        # create_manager. If it is present, then the `shared_ptr`
        # system will free self._impl. Otherwise, we need to free
        # self._impl ourselves.
        if self._mgr == nullptr:
            del self._impl

    def configure_compression(self, decomp_buffer_size):
        """Plan compression of ``decomp_buffer_size`` uncompressed bytes.

        Returns a dict with the uncompressed size, the worst-case
        compressed size, and the number of chunks.
        """
        # Copy the returned-by-value CompressionConfig into a shared_ptr we
        # own, so the pointer handed back below stays valid.
        cdef shared_ptr[CompressionConfig] partial = make_shared[
            CompressionConfig](
                self._impl.configure_compression(decomp_buffer_size)
        )
        self._compression_config = make_shared[CompressionConfig](
            (move(partial.get()[0]))
        )
        cdef const CompressionConfig* compression_config_ptr = \
            self._compression_config.get()
        return {
            "uncompressed_buffer_size": compression_config_ptr.
            uncompressed_buffer_size,
            "max_compressed_buffer_size": compression_config_ptr.
            max_compressed_buffer_size,
            "num_chunks": compression_config_ptr.num_chunks
        }

    def compress(self, Array decomp_buffer, Array comp_buffer):
        """Compress `decomp_buffer` into `comp_buffer`; returns compressed size.

        Requires a prior call to ``configure_compression``.
        """
        cdef uintptr_t comp_buffer_ptr = comp_buffer.ptr
        self._impl.compress(
            <const uint8_t*>decomp_buffer.ptr,
            <uint8_t*>comp_buffer_ptr,
            <CompressionConfig&>self._compression_config.get()[0]
        )
        size = self._impl.get_compressed_output_size(
            <uint8_t*>comp_buffer_ptr
        )
        return size

    def configure_decompression_with_compressed_buffer(
        self,
        Array comp_buffer
    ) -> dict:
        """Plan decompression by inspecting an existing compressed buffer."""
        cdef shared_ptr[DecompressionConfig] partial = make_shared[
            DecompressionConfig](self._impl.configure_decompression(
                <uint8_t*>comp_buffer.ptr
            )
        )
        self._decompression_config = make_shared[DecompressionConfig](
            (move(partial.get()[0]))
        )
        cdef const DecompressionConfig* decompression_config_ptr = \
            self._decompression_config.get()
        return {
            "decomp_data_size": decompression_config_ptr.decomp_data_size,
            "num_chunks": decompression_config_ptr.num_chunks
        }

    def decompress(
        self,
        Array decomp_buffer,
        Array comp_buffer,
    ):
        """Decompress `comp_buffer` into `decomp_buffer`.

        Requires a prior decompression-configure call.
        """
        self._impl.decompress(
            <uint8_t*>decomp_buffer.ptr,
            <const uint8_t*>comp_buffer.ptr,
            <DecompressionConfig&>self._decompression_config.get()[0]
        )

    def get_compressed_output_size(self, Array comp_buffer):
        """Actual compressed size recorded in `comp_buffer`'s header."""
        return self._impl.get_compressed_output_size(
            <uint8_t*>comp_buffer.ptr
        )
cdef class _ANSManager(_nvcompManager):
    """High-level nvCOMP ANS codec manager."""
    def __cinit__(
        self,
        size_t uncomp_chunk_size,
        user_stream,
        const int device_id,
    ):
        # `user_stream` is currently ignored; the default stream is used.
        self._impl = <nvcompManagerBase*>new ANSManager(
            uncomp_chunk_size,
            <nvcompBatchedANSOpts_t>nvcompBatchedANSDefaultOpts,  # TODO
            <cudaStream_t><void*>0,  # TODO
            device_id
        )
cdef class _BitcompManager(_nvcompManager):
    """High-level nvCOMP Bitcomp codec manager."""
    def __cinit__(
        self,
        size_t uncomp_chunk_size,
        nvcompType_t data_type,
        int bitcomp_algo,
        user_stream,
        const int device_id
    ):
        # `user_stream` is currently ignored; the default stream is used.
        cdef opts = nvcompBatchedBitcompFormatOpts(bitcomp_algo, data_type)
        self._impl = <nvcompManagerBase*>new BitcompManager(
            uncomp_chunk_size,
            opts,
            <cudaStream_t><void*>0,  # TODO
            device_id
        )
cdef class _CascadedManager(_nvcompManager):
    """High-level nvCOMP Cascaded codec manager."""
    def __cinit__(
        self,
        _options,
        user_stream,
        const int device_id,
    ):
        # NOTE(review): only _options["chunk_size"] is honored; the rest of
        # `_options` and `user_stream` are ignored (default opts/stream used).
        self._impl = <nvcompManagerBase*>new CascadedManager(
            _options["chunk_size"],
            <nvcompBatchedCascadedOpts_t>nvcompBatchedCascadedDefaultOpts,  # TODO
            <cudaStream_t><void*>0,  # TODO
            device_id,
        )
cdef class _GdeflateManager(_nvcompManager):
    """High-level nvCOMP Gdeflate codec manager."""
    def __cinit__(
        self,
        int chunk_size,
        int algo,
        user_stream,
        const int device_id
    ):
        # `user_stream` is currently ignored; the default stream is used.
        cdef opts = nvcompBatchedGdeflateOpts_t(algo)
        self._impl = <nvcompManagerBase*>new GdeflateManager(
            chunk_size,
            opts,
            <cudaStream_t><void*>0,  # TODO
            device_id
        )
cdef class _LZ4Manager(_nvcompManager):
    """High-level nvCOMP LZ4 codec manager."""
    def __cinit__(
        self,
        size_t uncomp_chunk_size,
        nvcompType_t data_type,
        user_stream,
        const int device_id,
    ):
        # TODO: Doesn't work with user specified streams passed down
        # from anywhere up. I'm not going to rabbit hole on it until
        # everything else works.
        # cdef cudaStream_t stream = <cudaStream_t><void*>user_stream
        cdef opts = nvcompBatchedLZ4Opts_t(data_type)
        self._impl = <nvcompManagerBase*>new LZ4Manager(
            uncomp_chunk_size,
            opts,
            <cudaStream_t><void*>0,  # TODO
            device_id
        )
cdef class _SnappyManager(_nvcompManager):
    """High-level nvCOMP Snappy codec manager."""
    def __cinit__(
        self,
        size_t uncomp_chunk_size,
        user_stream,
        const int device_id,
    ):
        # TODO: Doesn't work with user specified streams passed down
        # from anywhere up. I'm not going to rabbit hole on it until
        # everything else works.
        self._impl = <nvcompManagerBase*>new SnappyManager(
            uncomp_chunk_size,
            <nvcompBatchedSnappyOpts_t>nvcompBatchedSnappyDefaultOpts,
            <cudaStream_t><void*>0,  # TODO
            device_id
        )
cdef class _ManagedManager(_nvcompManager):
    """Manager built by introspecting an existing nvCOMP-compressed buffer."""
    def __init__(self, compressed_buffer):
        # Keeping the shared_ptr in self._mgr makes __dealloc__ skip
        # `del self._impl` (the shared_ptr owns the manager).
        cdef shared_ptr[nvcompManagerBase] _mgr = create_manager(
            <uint8_t*><uintptr_t>compressed_buffer.ptr
        )
        self._mgr = _mgr
        # NOTE(review): `move(_mgr).get()` reads oddly — `_mgr` was already
        # copied into self._mgr above, so a plain `_mgr.get()` would do.
        self._impl = move(_mgr).get()
| 0 |
rapidsai_public_repos/kvikio/python/kvikio
|
rapidsai_public_repos/kvikio/python/kvikio/_lib/nvcomp_cxx_api.pxd
|
# Copyright (c) 2022 Carson Swope
# Use, modification, and distribution is subject to the MIT License
# https://github.com/carsonswope/py-nvcomp/blob/main/LICENSE)
# SPDX-FileCopyrightText: Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: MIT
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from libc.stdint cimport uint8_t, uint32_t
from libcpp.memory cimport shared_ptr
from libcpp.vector cimport vector
# Opaque CUDA stream handle (only the pointer type is needed here).
cdef extern from "cuda_runtime.h":
    ctypedef void* cudaStream_t
# Element types understood by nvCOMP; values match nvcomp.h.
cdef extern from "nvcomp.h":
    ctypedef enum nvcompType_t:
        NVCOMP_TYPE_CHAR = 0,  # 1B
        NVCOMP_TYPE_UCHAR = 1,  # 1B
        NVCOMP_TYPE_SHORT = 2,  # 2B
        NVCOMP_TYPE_USHORT = 3,  # 2B
        NVCOMP_TYPE_INT = 4,  # 4B
        NVCOMP_TYPE_UINT = 5,  # 4B
        NVCOMP_TYPE_LONGLONG = 6,  # 8B
        NVCOMP_TYPE_ULONGLONG = 7,  # 8B
        NVCOMP_TYPE_BITS = 0xff  # 1b
# Status codes shared by all nvCOMP APIs.
cdef extern from "nvcomp/shared_types.h":
    ctypedef enum nvcompStatus_t:
        nvcompSuccess = 0,
        nvcompErrorInvalidValue = 10,
        nvcompErrorNotSupported = 11,
        nvcompErrorCannotDecompress = 12,
        nvcompErrorBadChecksum = 13,
        nvcompErrorCannotVerifyChecksums = 14,
        nvcompErrorCudaError = 1000,
        nvcompErrorInternal = 10000,
# Manager Factory
# Builds the right concrete manager by inspecting a compressed buffer's
# format header on the device.
cdef extern from "nvcomp/nvcompManagerFactory.hpp" namespace 'nvcomp':
    cdef shared_ptr[nvcompManagerBase] create_manager "nvcomp::create_manager"(
        const uint8_t* comp_buffer
    ) except +
# Compression Manager
cdef extern from "nvcomp/nvcompManager.hpp" namespace 'nvcomp':
    # Pool of pinned host status slots; only an opaque handle here.
    cdef cppclass PinnedPtrPool[T]:
        pass

    cdef cppclass CompressionConfig "nvcomp::CompressionConfig":
        const size_t uncompressed_buffer_size
        const size_t max_compressed_buffer_size
        const size_t num_chunks
        CompressionConfig(
            PinnedPtrPool[nvcompStatus_t]* pool,
            size_t uncompressed_buffer_size) except +
        nvcompStatus_t* get_status() const
        CompressionConfig(CompressionConfig& other)
        CompressionConfig& operator=(const CompressionConfig& other) except +
        # Commented as Cython doesn't support rvalues, but a user can call
        # `move` with the existing operator and generate correct C++ code
        # xref: https://github.com/cython/cython/issues/1445
        # CompressionConfig& operator=(CompressionConfig&& other) except +

    cdef cppclass DecompressionConfig "nvcomp::DecompressionConfig":
        size_t decomp_data_size
        uint32_t num_chunks
        DecompressionConfig(PinnedPtrPool[nvcompStatus_t]& pool) except +
        nvcompStatus_t* get_status() const
        DecompressionConfig(DecompressionConfig& other)
        DecompressionConfig& operator=(const DecompressionConfig& other) except +
        # Commented as Cython doesn't support rvalues, but a user can call
        # `move` with the existing operator and generate correct C++ code
        # xref: https://github.com/cython/cython/issues/1445
        # DecompressionConfig& operator=(DecompressionConfig&& other) except +

    # Abstract interface implemented by every concrete codec manager.
    cdef cppclass nvcompManagerBase "nvcomp::nvcompManagerBase":
        CompressionConfig configure_compression(
            const size_t decomp_buffer_size)
        void compress(
            const uint8_t* decomp_buffer,
            uint8_t* comp_buffer,
            const CompressionConfig& comp_config) except +
        DecompressionConfig configure_decompression(
            const uint8_t* comp_buffer)
        DecompressionConfig configure_decompression(
            const CompressionConfig& comp_config)
        void decompress(
            uint8_t* decomp_buffer,
            const uint8_t* comp_buffer,
            const DecompressionConfig& decomp_config)
        size_t get_compressed_output_size(uint8_t* comp_buffer) except +

    cdef cppclass PimplManager "nvcomp::PimplManager":
        CompressionConfig configure_compression(
            const size_t decomp_buffer_size) except +
        void compress(
            const uint8_t* decomp_buffer,
            uint8_t* comp_buffer,
            const CompressionConfig& comp_config) except +
        DecompressionConfig configure_decompression(
            const uint8_t* comp_buffer)
        DecompressionConfig configure_decompression(
            const CompressionConfig& comp_config)
        void decompress(
            uint8_t* decomp_buffer,
            const uint8_t* comp_buffer,
            const DecompressionConfig& decomp_config) except +
        size_t get_compressed_output_size(uint8_t* comp_buffer) except +
# C++ Concrete ANS Manager
cdef extern from "nvcomp/ans.h" nogil:
    ctypedef enum nvcompANSType_t:
        nvcomp_rANS = 0
    ctypedef struct nvcompBatchedANSOpts_t:
        nvcompANSType_t type
    cdef nvcompBatchedANSOpts_t nvcompBatchedANSDefaultOpts
cdef extern from "nvcomp/ans.hpp":
    cdef cppclass ANSManager "nvcomp::ANSManager":
        ANSManager(
            size_t uncomp_chunk_size,
            const nvcompBatchedANSOpts_t& format_opts,
            cudaStream_t user_stream,
            const int device_id
        ) except +
# C++ Concrete Bitcomp Manager
cdef extern from "nvcomp/bitcomp.h" nogil:
    ctypedef struct nvcompBatchedBitcompFormatOpts:
        int algorithm_type
        nvcompType_t data_type
    cdef nvcompBatchedBitcompFormatOpts nvcompBatchedBitcompDefaultOpts
cdef extern from "nvcomp/bitcomp.hpp":
    cdef cppclass BitcompManager "nvcomp::BitcompManager":
        BitcompManager(
            size_t uncomp_chunk_size,
            const nvcompBatchedBitcompFormatOpts& format_opts,
            cudaStream_t user_stream,
            const int device_id
        ) except +
# C++ Concrete Cascaded Manager
cdef extern from "nvcomp/cascaded.h" nogil:
    ctypedef struct nvcompBatchedCascadedOpts_t:
        size_t chunk_size
        nvcompType_t type
        int num_RLEs
        int num_deltas
        int use_bp
    cdef nvcompBatchedCascadedOpts_t nvcompBatchedCascadedDefaultOpts
cdef extern from "nvcomp/cascaded.hpp" nogil:
    cdef cppclass CascadedManager "nvcomp::CascadedManager":
        CascadedManager(
            size_t uncomp_chunk_size,
            const nvcompBatchedCascadedOpts_t& options,
            cudaStream_t user_stream,
            int device_id
        )
# C++ Concrete Gdeflate Manager
cdef extern from "nvcomp/gdeflate.h" nogil:
    ctypedef struct nvcompBatchedGdeflateOpts_t:
        int algo
    cdef nvcompBatchedGdeflateOpts_t nvcompBatchedGdeflateDefaultOpts
cdef extern from "nvcomp/gdeflate.hpp":
    cdef cppclass GdeflateManager "nvcomp::GdeflateManager":
        GdeflateManager(
            int uncomp_chunk_size,
            const nvcompBatchedGdeflateOpts_t& format_opts,
            cudaStream_t user_stream,
            const int device_id
        ) except +
# C++ Concrete LZ4 Manager
# Fix: the LZ4 option struct and its default-opts global are declared in
# "nvcomp/lz4.h", not "nvcomp/gdeflate.h" (copy-paste from the Gdeflate
# section above; nvcomp_ll_cxx_api.pxd already uses the correct header).
cdef extern from "nvcomp/lz4.h" nogil:
    ctypedef struct nvcompBatchedLZ4Opts_t:
        nvcompType_t data_type
    cdef nvcompBatchedLZ4Opts_t nvcompBatchedLZ4DefaultOpts
cdef extern from "nvcomp/lz4.hpp":
    cdef cppclass LZ4Manager "nvcomp::LZ4Manager":
        LZ4Manager(
            size_t uncomp_chunk_size,
            const nvcompBatchedLZ4Opts_t& format_opts,
            cudaStream_t user_stream,
            const int device_id
        ) except +
# C++ Concrete Snappy Manager
cdef extern from "nvcomp/snappy.h" nogil:
    ctypedef struct nvcompBatchedSnappyOpts_t:
        int reserved
    cdef nvcompBatchedSnappyOpts_t nvcompBatchedSnappyDefaultOpts
cdef extern from "nvcomp/snappy.hpp":
    cdef cppclass SnappyManager "nvcomp::SnappyManager":
        SnappyManager(
            size_t uncomp_chunk_size,
            const nvcompBatchedSnappyOpts_t& format_opts,
            cudaStream_t user_stream,
            const int device_id
        ) except +
| 0 |
rapidsai_public_repos/kvikio/python/kvikio
|
rapidsai_public_repos/kvikio/python/kvikio/_lib/arr.pxd
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
# distutils: language = c++
# cython: language_level=3
from libc.stdint cimport uintptr_t
cdef class Array:
    """Cython declaration of the host/device array wrapper (see arr.pyx)."""
    cdef readonly uintptr_t ptr        # raw data pointer
    cdef readonly bint readonly        # True if the buffer is read-only
    cdef readonly object obj           # the wrapped Python object
    cdef readonly Py_ssize_t itemsize  # bytes per element
    cdef readonly Py_ssize_t ndim      # number of dimensions
    cdef Py_ssize_t[::1] shape_mv      # shape, length ndim
    cdef Py_ssize_t[::1] strides_mv    # strides in bytes, length ndim
    cdef readonly bint cuda            # True for device (CUDA) memory
    cpdef bint _c_contiguous(self)
    cpdef bint _f_contiguous(self)
    cpdef bint _contiguous(self)
    cpdef Py_ssize_t _nbytes(self)
| 0 |
rapidsai_public_repos/kvikio/python/kvikio
|
rapidsai_public_repos/kvikio/python/kvikio/_lib/arr.pyi
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
from typing import Generic, Tuple, TypeVar
T = TypeVar("T")

class Array(Generic[T]):
    """Typing stub for the Cython ``Array`` wrapper of host/device buffers.

    Fix: ``shape`` and ``strides`` were annotated ``Tuple[int]`` (a 1-tuple),
    but arrays are n-dimensional — use variable-length ``Tuple[int, ...]``.
    """

    def __init__(self, obj: T): ...
    @property
    def c_contiguous(self) -> bool: ...
    @property
    def f_contiguous(self) -> bool: ...
    @property
    def contiguous(self) -> bool: ...
    @property
    def nbytes(self) -> int: ...
    @property
    def shape(self) -> Tuple[int, ...]: ...
    @property
    def strides(self) -> Tuple[int, ...]: ...
    @property
    def cuda(self) -> bool: ...
    @property
    def obj(self) -> T: ...
def asarray(obj) -> Array: ...
| 0 |
rapidsai_public_repos/kvikio/python/kvikio
|
rapidsai_public_repos/kvikio/python/kvikio/_lib/__init__.pxd
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
# cython: language_level=3
| 0 |
rapidsai_public_repos/kvikio/python/kvikio
|
rapidsai_public_repos/kvikio/python/kvikio/_lib/libkvikio.pyx
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
# distutils: language = c++
# cython: language_level=3
import pathlib
from typing import Optional
from libc.stdint cimport uintptr_t
from libcpp.utility cimport move, pair
from . cimport kvikio_cxx_api
from .arr cimport Array
from .kvikio_cxx_api cimport FileHandle, future, is_future_done
cdef class IOFuture:
    """C++ future for CuFile reads and writes"""
    cdef future[size_t] _handle

    def get(self) -> int:
        """Block until the I/O completes; returns the number of bytes moved."""
        cdef size_t ret
        # Release the GIL while blocking on the C++ future.
        with nogil:
            ret = self._handle.get()
        return ret

    def done(self) -> bool:
        """Non-blocking check of whether the future has completed."""
        return is_future_done(self._handle)
cdef IOFuture _wrap_io_future(future[size_t] &fut):
    """Wrap a C++ future (of a `size_t`) in a `IOFuture` instance"""
    ret = IOFuture()
    # The C++ future is move-only; transfer ownership into the wrapper.
    ret._handle = move(fut)
    return ret
def memory_register(buf) -> None:
    """Register `buf`'s memory with cuFile for GDS transfers."""
    if not isinstance(buf, Array):
        buf = Array(buf)
    cdef Array arr = buf
    kvikio_cxx_api.memory_register(<void*>arr.ptr)
def memory_deregister(buf) -> None:
    """Undo a previous ``memory_register`` of `buf`."""
    if not isinstance(buf, Array):
        buf = Array(buf)
    cdef Array arr = buf
    kvikio_cxx_api.memory_deregister(<void*>arr.ptr)
def compat_mode() -> bool:
    """Whether KvikIO is in compatibility (POSIX fallback) mode."""
    return kvikio_cxx_api.compat_mode()

def compat_mode_reset(enable: bool) -> None:
    """Enable or disable compatibility mode."""
    kvikio_cxx_api.compat_mode_reset(enable)

def thread_pool_nthreads() -> int:
    """Number of threads in KvikIO's internal thread pool."""
    return kvikio_cxx_api.thread_pool_nthreads()

def thread_pool_nthreads_reset(nthreads: int) -> None:
    """Resize KvikIO's internal thread pool."""
    kvikio_cxx_api.thread_pool_nthreads_reset(nthreads)

def task_size() -> int:
    """Default size (bytes) of each parallel I/O task."""
    return kvikio_cxx_api.task_size()

def task_size_reset(nbytes: int) -> None:
    """Set the default parallel I/O task size in bytes."""
    kvikio_cxx_api.task_size_reset(nbytes)

def gds_threshold() -> int:
    """Minimum transfer size (bytes) below which GDS is bypassed."""
    return kvikio_cxx_api.gds_threshold()

def gds_threshold_reset(nbytes: int) -> None:
    """Set the GDS threshold in bytes."""
    kvikio_cxx_api.gds_threshold_reset(nbytes)
cdef pair[uintptr_t, size_t] _parse_buffer(buf, size, bint accept_host_buffer) except *:
    """Parse `buf` and `size` argument and return a pointer and nbytes.

    Wraps `buf` in an Array if needed, validates contiguity (and device
    residency unless `accept_host_buffer`), and clamps `size` to the
    buffer's capacity.
    """
    if not isinstance(buf, Array):
        buf = Array(buf)
    cdef Array arr = buf
    if not arr._contiguous():
        raise ValueError("Array must be contiguous")
    if not accept_host_buffer and not arr.cuda:
        raise ValueError("Non-CUDA buffers not supported")
    cdef size_t nbytes
    if size is None:
        # No explicit size: use the whole buffer.
        nbytes = arr.nbytes
    elif size > arr.nbytes:
        raise ValueError("Size is greater than the size of the buffer")
    else:
        nbytes = size
    return pair[uintptr_t, size_t](arr.ptr, nbytes)
cdef class CuFile:
    """ File handle for GPUDirect Storage (GDS) """
    cdef FileHandle _handle

    def __init__(self, file_path, flags="r"):
        """Open `file_path` with fopen-style `flags` (e.g. "r", "w")."""
        self._handle = move(
            FileHandle(
                str.encode(str(pathlib.Path(file_path))),
                str.encode(str(flags))
            )
        )

    def close(self) -> None:
        """Close the underlying file handle."""
        self._handle.close()

    def closed(self) -> bool:
        """True if the handle has been closed."""
        return self._handle.closed()

    def fileno(self) -> int:
        """The underlying OS file descriptor."""
        return self._handle.fd()

    def open_flags(self) -> int:
        """The O_* flags the file descriptor was opened with."""
        return self._handle.fd_open_flags()

    def pread(self, buf, size: Optional[int], file_offset: int, task_size) -> IOFuture:
        """Parallel read into `buf` (host or device); returns an IOFuture."""
        cdef pair[uintptr_t, size_t] info = _parse_buffer(buf, size, True)
        return _wrap_io_future(
            self._handle.pread(
                <void*>info.first,
                info.second,
                file_offset,
                task_size if task_size else kvikio_cxx_api.task_size()
            )
        )

    def pwrite(self, buf, size: Optional[int], file_offset: int, task_size) -> IOFuture:
        """Parallel write from `buf` (host or device); returns an IOFuture."""
        cdef pair[uintptr_t, size_t] info = _parse_buffer(buf, size, True)
        return _wrap_io_future(
            self._handle.pwrite(
                <void*>info.first,
                info.second,
                file_offset,
                task_size if task_size else kvikio_cxx_api.task_size()
            )
        )

    def read(self, buf, size: Optional[int], file_offset: int, dev_offset: int) -> int:
        """Blocking read into a device-only buffer; returns bytes read."""
        cdef pair[uintptr_t, size_t] info = _parse_buffer(buf, size, False)
        return self._handle.read(
            <void*>info.first,
            info.second,
            file_offset,
            dev_offset,
        )

    def write(self, buf, size: Optional[int], file_offset: int, dev_offset: int) -> int:
        """Blocking write from a device-only buffer; returns bytes written."""
        cdef pair[uintptr_t, size_t] info = _parse_buffer(buf, size, False)
        return self._handle.write(
            <void*>info.first,
            info.second,
            file_offset,
            dev_offset,
        )
cdef class DriverProperties:
    """Query/tune properties of the nvidia-fs (cuFile) driver.

    Fix: ``major_version`` and ``minor_version`` were annotated ``-> bool``
    but return the driver's version numbers (the C++ getters return
    ``unsigned int``); annotate them ``-> int``.
    """
    cdef kvikio_cxx_api.DriverProperties _handle

    @property
    def is_gds_available(self) -> bool:
        """Whether GDS is available; False if the probe itself fails."""
        try:
            return self._handle.is_gds_available()
        except RuntimeError:
            return False

    @property
    def major_version(self) -> int:
        """nvidia-fs driver major version."""
        return self._handle.get_nvfs_major_version()

    @property
    def minor_version(self) -> int:
        """nvidia-fs driver minor version."""
        return self._handle.get_nvfs_minor_version()

    @property
    def allow_compat_mode(self) -> bool:
        """Whether the driver permits compatibility (POSIX) mode."""
        return self._handle.get_nvfs_allow_compat_mode()

    @property
    def poll_mode(self) -> bool:
        """Whether the driver uses poll mode for small I/O."""
        return self._handle.get_nvfs_poll_mode()

    @poll_mode.setter
    def poll_mode(self, enable: bool) -> None:
        self._handle.set_nvfs_poll_mode(enable)

    @property
    def poll_thresh_size(self) -> int:
        """Poll-mode threshold in KiB."""
        return self._handle.get_nvfs_poll_thresh_size()

    @poll_thresh_size.setter
    def poll_thresh_size(self, size_in_kb: int) -> None:
        self._handle.set_nvfs_poll_thresh_size(size_in_kb)

    @property
    def max_device_cache_size(self) -> int:
        """Maximum device-side cache size in KiB."""
        return self._handle.get_max_device_cache_size()

    @max_device_cache_size.setter
    def max_device_cache_size(self, size_in_kb: int) -> None:
        self._handle.set_max_device_cache_size(size_in_kb)

    @property
    def per_buffer_cache_size(self) -> int:
        """Per-buffer cache size in KiB (read-only)."""
        return self._handle.get_per_buffer_cache_size()

    @property
    def max_pinned_memory_size(self) -> int:
        """Maximum pinned host memory in KiB."""
        return self._handle.get_max_pinned_memory_size()

    @max_pinned_memory_size.setter
    def max_pinned_memory_size(self, size_in_kb: int) -> None:
        self._handle.set_max_pinned_memory_size(size_in_kb)
| 0 |
rapidsai_public_repos/kvikio/python/kvikio
|
rapidsai_public_repos/kvikio/python/kvikio/_lib/kvikio_cxx_api.pxd
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
# distutils: language = c++
# cython: language_level=3
from posix cimport fcntl
from libcpp cimport bool
from libcpp.string cimport string
from libcpp.utility cimport pair
from libcpp.vector cimport vector
# Minimal std::future declaration: construction and blocking get().
cdef extern from "<future>" namespace "std" nogil:
    cdef cppclass future[T]:
        future() except +
        T get() except +
# Non-blocking completion check implemented by libkvikio.
cdef extern from "<kvikio/utils.hpp>" namespace "kvikio" nogil:
    bool is_future_done[T](const T& future) except +
# nvidia-fs driver property getters/setters; sizes are in KiB.
cdef extern from "<kvikio/driver.hpp>" namespace "kvikio" nogil:
    cdef cppclass DriverProperties:
        DriverProperties() except +
        bool is_gds_available() except +
        unsigned int get_nvfs_major_version() except +
        unsigned int get_nvfs_minor_version() except +
        bool get_nvfs_allow_compat_mode() except +
        bool get_nvfs_poll_mode() except +
        size_t get_nvfs_poll_thresh_size() except +
        void set_nvfs_poll_mode(bool enable) except +
        void set_nvfs_poll_thresh_size(size_t size_in_kb) except +
        size_t get_max_device_cache_size() except +
        void set_max_device_cache_size(size_t size_in_kb) except +
        size_t get_per_buffer_cache_size() except +
        size_t get_max_pinned_memory_size() except +
        void set_max_pinned_memory_size(size_t size_in_kb) except +
# cuFile buffer (de)registration.
cdef extern from "<kvikio/buffer.hpp>" namespace "kvikio" nogil:
    void memory_register(const void* devPtr) except +
    void memory_deregister(const void* devPtr) except +
# Process-wide KvikIO defaults (compat mode, thread pool, task sizing).
cdef extern from "<kvikio/defaults.hpp>" namespace "kvikio::defaults" nogil:
    bool compat_mode() except +
    void compat_mode_reset(bool enable) except +
    unsigned int thread_pool_nthreads() except +
    void thread_pool_nthreads_reset(unsigned int nthreads) except +
    size_t task_size() except +
    void task_size_reset(size_t nbytes) except +
    size_t gds_threshold() except +
    void gds_threshold_reset(size_t nbytes) except +
# The core GDS file handle: parallel (pread/pwrite, returning futures)
# and blocking (read/write) device I/O.
cdef extern from "<kvikio/file_handle.hpp>" namespace "kvikio" nogil:
    cdef cppclass FileHandle:
        FileHandle() except +
        FileHandle(int fd) except +
        FileHandle(
            string file_path,
            string flags,
        ) except +
        FileHandle(
            string file_path,
            string flags,
            fcntl.mode_t mode
        ) except +
        void close()
        bool closed()
        int fd()
        int fd_open_flags() except +
        future[size_t] pread(
            void* devPtr,
            size_t size,
            size_t file_offset,
            size_t task_size
        ) except +
        future[size_t] pwrite(
            void* devPtr,
            size_t size,
            size_t file_offset,
            size_t task_size
        ) except +
        size_t read(
            void* devPtr_base,
            size_t size,
            size_t file_offset,
            size_t devPtr_offset
        ) except +
        size_t write(
            void* devPtr_base,
            size_t size,
            size_t file_offset,
            size_t devPtr_offset
        ) except +
| 0 |
rapidsai_public_repos/kvikio/python/kvikio
|
rapidsai_public_repos/kvikio/python/kvikio/_lib/arr.pyx
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
# cython: language_level=3
from cpython.array cimport array, newarrayobject
from cpython.buffer cimport PyBuffer_IsContiguous
from cpython.memoryview cimport PyMemoryView_FromObject, PyMemoryView_GET_BUFFER
from cpython.object cimport PyObject
from cpython.ref cimport Py_INCREF
from cpython.tuple cimport PyTuple_New, PyTuple_SET_ITEM
from cython cimport auto_pickle, boundscheck, initializedcheck, nonecheck, wraparound
from libc.stdint cimport uintptr_t
from libc.string cimport memcpy
try:
from numpy import dtype as numpy_dtype
except ImportError:
numpy_dtype = None
# Map NumPy-style typestr codes (byte order + kind + size) to their item size
# in bytes. Lets `Array` resolve common dtypes without requiring NumPy; the
# strings are interned to speed up the dict lookups.
cdef dict itemsize_mapping = {
    intern("|b1"): 1,
    intern("|i1"): 1,
    intern("|u1"): 1,
    intern("<i2"): 2,
    intern(">i2"): 2,
    intern("<u2"): 2,
    intern(">u2"): 2,
    intern("<i4"): 4,
    intern(">i4"): 4,
    intern("<u4"): 4,
    intern(">u4"): 4,
    intern("<i8"): 8,
    intern(">i8"): 8,
    intern("<u8"): 8,
    intern(">u8"): 8,
    intern("<f2"): 2,
    intern(">f2"): 2,
    intern("<f4"): 4,
    intern(">f4"): 4,
    intern("<f8"): 8,
    intern(">f8"): 8,
    intern("<f16"): 16,
    intern(">f16"): 16,
    intern("<c8"): 8,
    intern(">c8"): 8,
    intern("<c16"): 16,
    intern(">c16"): 16,
    intern("<c32"): 32,
    intern(">c32"): 32,
}
# Template array of C `long long` ("q") used to stamp out new arrays below.
cdef array array_Py_ssize_t = array("q")
cdef inline Py_ssize_t[::1] new_Py_ssize_t_array(Py_ssize_t n):
    # Allocate an uninitialized length-`n` Py_ssize_t array via the CPython
    # array C API (cheaper than going through the Python constructor).
    return newarrayobject(
        (<PyObject*>array_Py_ssize_t).ob_type, n, array_Py_ssize_t.ob_descr
    )
@auto_pickle(False)
cdef class Array:
    """ An efficient wrapper for host and device array-like objects

    Parameters
    ----------
    obj: Object exposing the buffer protocol or __cuda_array_interface__
        A host and device array-like object
    """
    def __cinit__(self, obj):
        # Prefer the CUDA Array Interface; otherwise fall back to the CPU
        # buffer protocol via a memoryview.
        cdef dict iface = getattr(obj, "__cuda_array_interface__", None)
        self.cuda = (iface is not None)
        cdef const Py_buffer* pybuf
        cdef str typestr
        cdef tuple data, shape, strides
        cdef Py_ssize_t i
        if self.cuda:
            if iface.get("mask") is not None:
                raise NotImplementedError("mask attribute not supported")
            self.obj = obj
            data = iface["data"]
            self.ptr, self.readonly = data
            typestr = iface["typestr"]
            if typestr is None:
                raise ValueError("Expected `str`, but got `None`")
            elif typestr == "":
                raise ValueError("Got unexpected empty `str`")
            else:
                # Resolve item size from the typestr table; fall back to NumPy
                # for exotic dtypes not in the table.
                try:
                    self.itemsize = itemsize_mapping[typestr]
                except KeyError:
                    if numpy_dtype is not None:
                        self.itemsize = numpy_dtype(typestr).itemsize
                    else:
                        raise ValueError(
                            f"Unexpected data type, '{typestr}'."
                            " Please install NumPy to handle this format."
                        )
            shape = iface["shape"]
            strides = iface.get("strides")
            self.ndim = len(shape)
            if self.ndim > 0:
                self.shape_mv = new_Py_ssize_t_array(self.ndim)
                for i in range(self.ndim):
                    self.shape_mv[i] = shape[i]
                if strides is not None:
                    if len(strides) != self.ndim:
                        raise ValueError(
                            "The length of shape and strides must be equal"
                        )
                    self.strides_mv = new_Py_ssize_t_array(self.ndim)
                    for i in range(self.ndim):
                        self.strides_mv[i] = strides[i]
                else:
                    # strides omitted => the interface guarantees C order.
                    self.strides_mv = None
            else:
                self.shape_mv = None
                self.strides_mv = None
        else:
            mv = PyMemoryView_FromObject(obj)
            pybuf = PyMemoryView_GET_BUFFER(mv)
            if pybuf.suboffsets != NULL:
                raise NotImplementedError("Suboffsets are not supported")
            self.ptr = <uintptr_t>pybuf.buf
            # Keep the underlying exporter alive, not the transient memoryview.
            self.obj = pybuf.obj
            self.readonly = <bint>pybuf.readonly
            self.ndim = <Py_ssize_t>pybuf.ndim
            self.itemsize = <Py_ssize_t>pybuf.itemsize
            if self.ndim > 0:
                self.shape_mv = new_Py_ssize_t_array(self.ndim)
                memcpy(
                    &self.shape_mv[0],
                    pybuf.shape,
                    self.ndim * sizeof(Py_ssize_t)
                )
                # C-contiguous buffers need no explicit strides (None below).
                if not PyBuffer_IsContiguous(pybuf, b"C"):
                    self.strides_mv = new_Py_ssize_t_array(self.ndim)
                    memcpy(
                        &self.strides_mv[0],
                        pybuf.strides,
                        self.ndim * sizeof(Py_ssize_t)
                    )
                else:
                    self.strides_mv = None
            else:
                self.shape_mv = None
                self.strides_mv = None
    cpdef bint _c_contiguous(self):
        # True when the data is laid out in C (row-major) order.
        return _c_contiguous(
            self.itemsize, self.ndim, self.shape_mv, self.strides_mv
        )
    @property
    def c_contiguous(self):
        return self._c_contiguous()
    cpdef bint _f_contiguous(self):
        # True when the data is laid out in Fortran (column-major) order.
        return _f_contiguous(
            self.itemsize, self.ndim, self.shape_mv, self.strides_mv
        )
    @property
    def f_contiguous(self):
        return self._f_contiguous()
    cpdef bint _contiguous(self):
        # True when the data is contiguous in either C or Fortran order.
        return _contiguous(
            self.itemsize, self.ndim, self.shape_mv, self.strides_mv
        )
    @property
    def contiguous(self):
        return self._contiguous()
    cpdef Py_ssize_t _nbytes(self):
        # Total size of the data in bytes.
        return _nbytes(self.itemsize, self.ndim, self.shape_mv)
    @property
    def nbytes(self):
        return self._nbytes()
    @property
    @boundscheck(False)
    @initializedcheck(False)
    @nonecheck(False)
    @wraparound(False)
    def shape(self):
        # Build the shape tuple directly with the C API for speed.
        cdef tuple shape = PyTuple_New(self.ndim)
        cdef Py_ssize_t i
        cdef object o
        for i in range(self.ndim):
            o = self.shape_mv[i]
            Py_INCREF(o)
            PyTuple_SET_ITEM(shape, i, o)
        return shape
    @property
    @boundscheck(False)
    @initializedcheck(False)
    @nonecheck(False)
    @wraparound(False)
    def strides(self):
        cdef tuple strides = PyTuple_New(self.ndim)
        cdef Py_ssize_t i, s
        cdef object o
        if self.strides_mv is not None:
            for i from self.ndim > i >= 0 by 1:
                o = self.strides_mv[i]
                Py_INCREF(o)
                PyTuple_SET_ITEM(strides, i, o)
        else:
            # No explicit strides: synthesize C-order strides from the shape,
            # walking dimensions from last to first.
            s = self.itemsize
            for i from self.ndim > i >= 0 by 1:
                o = s
                Py_INCREF(o)
                PyTuple_SET_ITEM(strides, i, o)
                s *= self.shape_mv[i]
        return strides
@boundscheck(False)
@initializedcheck(False)
@nonecheck(False)
@wraparound(False)
cdef inline bint _c_contiguous(Py_ssize_t itemsize,
                               Py_ssize_t ndim,
                               Py_ssize_t[::1] shape_mv,
                               Py_ssize_t[::1] strides_mv) nogil:
    # Return True when (itemsize, shape, strides) describes a C-contiguous
    # (row-major) layout. A `strides_mv` of None means "C order by
    # construction" (see Array.__cinit__), so it is trivially True.
    #
    # Note: rewritten from the legacy Cython `for i from a > i >= b by 1`
    # syntax (deprecated) to the equivalent typed-range loop.
    cdef Py_ssize_t i, s
    if strides_mv is not None:
        # Walk dims from last to first: the expected stride of dim i is the
        # product of itemsize and all trailing extents.
        s = itemsize
        for i in range(ndim - 1, -1, -1):
            if s != strides_mv[i]:
                return False
            s *= shape_mv[i]
    return True
@boundscheck(False)
@initializedcheck(False)
@nonecheck(False)
@wraparound(False)
cdef inline bint _f_contiguous(Py_ssize_t itemsize,
                               Py_ssize_t ndim,
                               Py_ssize_t[::1] shape_mv,
                               Py_ssize_t[::1] strides_mv) nogil:
    # Return True when (itemsize, shape, strides) describes a
    # Fortran-contiguous (column-major) layout. A `strides_mv` of None means
    # the buffer is C ordered, which is also F ordered only for ndim <= 1.
    #
    # Note: rewritten from the legacy Cython `for i from 0 <= i < n by 1`
    # syntax (deprecated) to the equivalent typed-range loop.
    cdef Py_ssize_t i, s
    if strides_mv is not None:
        # Walk dims from first to last: the expected stride of dim i is the
        # product of itemsize and all leading extents.
        s = itemsize
        for i in range(ndim):
            if s != strides_mv[i]:
                return False
            s *= shape_mv[i]
    elif ndim > 1:
        return False
    return True
cdef inline bint _contiguous(Py_ssize_t itemsize,
                             Py_ssize_t ndim,
                             Py_ssize_t[::1] shape_mv,
                             Py_ssize_t[::1] strides_mv) nogil:
    # Contiguous in either C (row-major) or Fortran (column-major) order.
    # Short-circuits: the F-order check only runs when the C-order check fails.
    if _c_contiguous(itemsize, ndim, shape_mv, strides_mv):
        return True
    return _f_contiguous(itemsize, ndim, shape_mv, strides_mv)
@boundscheck(False)
@initializedcheck(False)
@nonecheck(False)
@wraparound(False)
cdef inline Py_ssize_t _nbytes(Py_ssize_t itemsize,
                               Py_ssize_t ndim,
                               Py_ssize_t[::1] shape_mv) nogil:
    # Total byte count: itemsize multiplied by every extent (itemsize for a
    # zero-dimensional array, since the loop body never runs).
    cdef Py_ssize_t dim, total = itemsize
    for dim in range(ndim):
        total *= shape_mv[dim]
    return total
cpdef asarray(obj):
    # Coerce `obj` to an Array, passing existing Array instances through
    # unchanged (no copy, no re-wrap).
    return obj if isinstance(obj, Array) else Array(obj)
| 0 |
rapidsai_public_repos/kvikio/python/kvikio
|
rapidsai_public_repos/kvikio/python/kvikio/_lib/__init__.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
| 0 |
rapidsai_public_repos/kvikio/python/kvikio
|
rapidsai_public_repos/kvikio/python/kvikio/_lib/libnvcomp_ll.pyx
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
from __future__ import annotations
from abc import ABC, abstractmethod
from enum import IntEnum
from libc.stdint cimport uint32_t, uintptr_t
from kvikio._lib.nvcomp_ll_cxx_api cimport cudaStream_t, nvcompStatus_t, nvcompType_t
import cupy
class nvCompStatus(IntEnum):
    """Status codes returned by the nvCOMP batched C API (`nvcompStatus_t`)."""
    # Fix: the original lines ended with stray trailing commas, which made each
    # member's value a 1-tuple that only resolved to an int via Enum's
    # tuple-unpacking of mixed-in types. Dropped for clarity and for
    # consistency with the sibling `nvCompType` enum; member values are
    # unchanged.
    Success = nvcompStatus_t.nvcompSuccess
    ErrorInvalidValue = nvcompStatus_t.nvcompErrorInvalidValue
    ErrorNotSupported = nvcompStatus_t.nvcompErrorNotSupported
    ErrorCannotDecompress = nvcompStatus_t.nvcompErrorCannotDecompress
    ErrorBadChecksum = nvcompStatus_t.nvcompErrorBadChecksum
    ErrorCannotVerifyChecksums = nvcompStatus_t.nvcompErrorCannotVerifyChecksums
    ErrorCudaError = nvcompStatus_t.nvcompErrorCudaError
    ErrorInternal = nvcompStatus_t.nvcompErrorInternal
class nvCompType(IntEnum):
    """Source data types accepted by nvCOMP (mirrors `nvcompType_t`)."""
    CHAR = nvcompType_t.NVCOMP_TYPE_CHAR
    UCHAR = nvcompType_t.NVCOMP_TYPE_UCHAR
    SHORT = nvcompType_t.NVCOMP_TYPE_SHORT
    USHORT = nvcompType_t.NVCOMP_TYPE_USHORT
    INT = nvcompType_t.NVCOMP_TYPE_INT
    UINT = nvcompType_t.NVCOMP_TYPE_UINT
    LONGLONG = nvcompType_t.NVCOMP_TYPE_LONGLONG
    ULONGLONG = nvcompType_t.NVCOMP_TYPE_ULONGLONG
    BITS = nvcompType_t.NVCOMP_TYPE_BITS
class nvCompBatchAlgorithm(ABC):
    """Abstract class that provides interface to nvCOMP batched algorithms.

    Public methods validate the nvCOMP status code and raise RuntimeError on
    failure; the abstract `_*` counterparts wrap the raw algorithm-specific
    C API calls and return the status (plus any out-values).
    """
    # TODO(akamenev): it might be possible to have a simpler implementation that
    # eliminates the need to have a separate implementation class for each algorithm,
    # potentially using fused types in Cython (similar to C++ templates),
    # but I could not figure out how to do that (e.g. each algorithm API set has
    # a different type for the options and so on).
    def get_compress_temp_size(
        self,
        size_t batch_size,
        size_t max_uncompressed_chunk_bytes,
    ):
        """Get temporary space required for compression.

        Parameters
        ----------
        batch_size: int
            The number of items in the batch.
        max_uncompressed_chunk_bytes: int
            The maximum size in bytes of a chunk in the batch.

        Returns
        -------
        int
            The size in bytes of the required GPU workspace for compression.
        """
        err, temp_size = self._get_comp_temp_size(
            batch_size,
            max_uncompressed_chunk_bytes
        )
        if err != nvcompStatus_t.nvcompSuccess:
            raise RuntimeError(
                f"Could not get compress temp buffer size, "
                f"error: {nvCompStatus(err)!r}."
            )
        return temp_size
    @abstractmethod
    def _get_comp_temp_size(
        self,
        size_t batch_size,
        size_t max_uncompressed_chunk_bytes,
    ) -> tuple[nvcompStatus_t, size_t]:
        """Algorithm-specific implementation."""
        ...
    def get_compress_chunk_size(self, size_t max_uncompressed_chunk_bytes):
        """Get the maximum size any chunk could compress to in the batch.

        Parameters
        ----------
        max_uncompressed_chunk_bytes: int
            The maximum size in bytes of a chunk in the batch.

        Returns
        -------
        int
            The maximum compressed size in bytes of the largest chunk. That is,
            the minimum amount of output memory required to be given to
            the corresponding *CompressAsync function.
        """
        err, comp_chunk_size = self._get_comp_chunk_size(max_uncompressed_chunk_bytes)
        if err != nvcompStatus_t.nvcompSuccess:
            raise RuntimeError(
                f"Could not get output buffer size, "
                f"error: {nvCompStatus(err)!r}."
            )
        return comp_chunk_size
    @abstractmethod
    def _get_comp_chunk_size(self, size_t max_uncompressed_chunk_bytes):
        """Algorithm-specific implementation."""
        ...
    def compress(
        self,
        uncomp_chunks,
        uncomp_chunk_sizes,
        size_t max_uncomp_chunk_bytes,
        size_t batch_size,
        temp_buf,
        comp_chunks,
        comp_chunk_sizes,
        stream,
    ):
        """Perform compression.

        Parameters
        ----------
        uncomp_chunks: cp.ndarray[uintp]
            The pointers on the GPU, to uncompressed batched items.
        uncomp_chunk_sizes: cp.ndarray[uint64]
            The size in bytes of each uncompressed batch item on the GPU.
        max_uncomp_chunk_bytes: int
            The maximum size in bytes of the largest chunk in the batch.
        batch_size: int
            The number of chunks to compress.
        temp_buf: cp.ndarray
            The temporary GPU workspace.
        comp_chunks: cp.ndarray[uintp]
            (output) The list of pointers on the GPU, to the output location for each
            compressed batch item.
        comp_chunk_sizes: cp.ndarray[uint64]
            (output) The compressed size in bytes of each chunk.
        stream: cp.cuda.Stream
            CUDA stream.
        """
        err = self._compress(
            uncomp_chunks,
            uncomp_chunk_sizes,
            max_uncomp_chunk_bytes,
            batch_size,
            temp_buf,
            comp_chunks,
            comp_chunk_sizes,
            stream,
        )
        if err != nvcompStatus_t.nvcompSuccess:
            raise RuntimeError(f"Compression failed, error: {nvCompStatus(err)!r}.")
    @abstractmethod
    def _compress(
        self,
        uncomp_chunks,
        uncomp_chunk_sizes,
        size_t max_uncomp_chunk_bytes,
        size_t batch_size,
        temp_buf,
        comp_chunks,
        comp_chunk_sizes,
        stream
    ):
        """Algorithm-specific implementation."""
        ...
    def get_decompress_temp_size(
        self,
        size_t batch_size,
        size_t max_uncompressed_chunk_bytes,
    ):
        """Get the amount of temp space required on the GPU for decompression.

        Parameters
        ----------
        batch_size: int
            The number of items in the batch.
        max_uncompressed_chunk_bytes: int
            The size in bytes of the largest chunk when uncompressed.

        Returns
        -------
        int
            The amount of temporary GPU space in bytes that will be
            required to decompress.
        """
        err, temp_size = self._get_decomp_temp_size(
            batch_size,
            max_uncompressed_chunk_bytes
        )
        if err != nvcompStatus_t.nvcompSuccess:
            raise RuntimeError(
                f"Could not get decompress temp buffer size, "
                f"error: {nvCompStatus(err)!r}."
            )
        return temp_size
    @abstractmethod
    def _get_decomp_temp_size(
        self,
        size_t batch_size,
        size_t max_uncompressed_chunk_bytes,
    ):
        """Algorithm-specific implementation."""
        ...
    def get_decompress_size(
        self,
        comp_chunks,
        comp_chunk_sizes,
        stream,
    ):
        """Get the amount of space required on the GPU for decompression.

        Parameters
        ----------
        comp_chunks: cp.ndarray[uintp]
            The pointers on the GPU, to compressed batched items.
        comp_chunk_sizes: cp.ndarray[uint64]
            The size in bytes of each compressed batch item.
        stream: cp.cuda.Stream
            CUDA stream.

        Returns
        -------
        cp.ndarray[uint64]
            The amount of GPU space in bytes that will be required
            to decompress each chunk.
        """
        assert len(comp_chunks) == len(comp_chunk_sizes)
        batch_size = len(comp_chunks)
        # nvCOMP requires all buffers to be in GPU memory.
        uncomp_chunk_sizes = cupy.empty_like(comp_chunk_sizes)
        err = self._get_decomp_size(
            comp_chunks,
            comp_chunk_sizes,
            batch_size,
            uncomp_chunk_sizes,
            stream,
        )
        if err != nvcompStatus_t.nvcompSuccess:
            raise RuntimeError(
                f"Could not get decompress buffer size, error: {nvCompStatus(err)!r}."
            )
        return uncomp_chunk_sizes
    @abstractmethod
    def _get_decomp_size(
        self,
        comp_chunks,
        comp_chunk_sizes,
        size_t batch_size,
        uncomp_chunk_sizes,
        stream,
    ):
        """Algorithm-specific implementation."""
        ...
    def decompress(
        self,
        comp_chunks,
        comp_chunk_sizes,
        size_t batch_size,
        temp_buf,
        uncomp_chunks,
        uncomp_chunk_sizes,
        actual_uncomp_chunk_sizes,
        statuses,
        stream,
    ):
        """Perform decompression.

        Parameters
        ----------
        comp_chunks: cp.ndarray[uintp]
            The pointers on the GPU, to compressed batched items.
        comp_chunk_sizes: cp.ndarray[uint64]
            The size in bytes of each compressed batch item.
        batch_size: int
            The number of chunks to decompress.
        temp_buf: cp.ndarray
            The temporary GPU workspace.
        uncomp_chunks: cp.ndarray[uintp]
            (output) The pointers on the GPU, to the output location for each
            decompressed batch item.
        uncomp_chunk_sizes: cp.ndarray[uint64]
            The size in bytes of each decompress chunk location on the GPU.
        actual_uncomp_chunk_sizes: cp.ndarray[uint64]
            (output) The actual decompressed size in bytes of each chunk on the GPU.
        statuses: cp.ndarray
            (output) The status for each chunk of whether it was decompressed or not.
        stream: cp.cuda.Stream
            CUDA stream.
        """
        err = self._decompress(
            comp_chunks,
            comp_chunk_sizes,
            batch_size,
            temp_buf,
            uncomp_chunks,
            uncomp_chunk_sizes,
            actual_uncomp_chunk_sizes,
            statuses,
            stream,
        )
        if err != nvcompStatus_t.nvcompSuccess:
            raise RuntimeError(f"Decompression failed, error: {nvCompStatus(err)!r}.")
    @abstractmethod
    def _decompress(
        self,
        comp_chunks,
        comp_chunk_sizes,
        size_t batch_size,
        temp_buf,
        uncomp_chunks,
        uncomp_chunk_sizes,
        actual_uncomp_chunk_sizes,
        statuses,
        stream,
    ):
        """Algorithm-specific implementation."""
        ...
cdef uintptr_t to_ptr(buf):
    # Raw device pointer of a CuPy array/buffer, as an integer.
    return buf.data.ptr
cdef cudaStream_t to_stream(stream):
    # Convert a CuPy stream object to the raw cudaStream_t handle.
    return <cudaStream_t><size_t>stream.ptr
#
# LZ4 algorithm.
#
from kvikio._lib.nvcomp_ll_cxx_api cimport (
nvcompBatchedLZ4CompressAsync,
nvcompBatchedLZ4CompressGetMaxOutputChunkSize,
nvcompBatchedLZ4CompressGetTempSize,
nvcompBatchedLZ4DecompressAsync,
nvcompBatchedLZ4DecompressGetTempSize,
nvcompBatchedLZ4GetDecompressSizeAsync,
nvcompBatchedLZ4Opts_t,
)
class nvCompBatchAlgorithmLZ4(nvCompBatchAlgorithm):
    """LZ4 algorithm implementation."""
    algo_id: str = "lz4"
    options: nvcompBatchedLZ4Opts_t
    # Size of the optional numcodecs-compatible per-chunk header (one uint32
    # holding the uncompressed size).
    HEADER_SIZE_BYTES: size_t = sizeof(uint32_t)
    def __init__(self, data_type: int = 0, has_header: bool = True):
        """Initialize the codec.

        Parameters
        ----------
        data_type: int
            Source data type.
        has_header: bool
            Whether the compressed data has a header.
            This enables data compatibility between numcodecs LZ4 codec,
            which has the header and nvCOMP LZ4 codec which does not
            require the header.
        """
        self.options = nvcompBatchedLZ4Opts_t(data_type)
        self.has_header = has_header
        # Note on LZ4 header structure: numcodecs LZ4 codec prepends
        # a 4-byte (uint32_t) header to each compressed chunk.
        # The header stores the size of the original (uncompressed) data:
        # https://github.com/zarr-developers/numcodecs/blob/cb155432e36536e17a2d054c8c24b7bf6f4a7347/numcodecs/lz4.pyx#L89
        #
        # The following CUDA kernels read / write chunk header by
        # casting the chunk pointer to a pointer to unsigned int.
        # CUDA kernel that copies uncompressed chunk size from the chunk header.
        self._get_size_from_header_kernel = cupy.ElementwiseKernel(
            "uint64 comp_chunk_ptr",
            "uint64 uncomp_chunk_size",
            "uncomp_chunk_size = *((unsigned int *)comp_chunk_ptr)",
            "get_size_from_header",
        )
        # CUDA kernel that copies uncompressed chunk size to the chunk header.
        self._set_chunk_size_header_kernel = cupy.ElementwiseKernel(
            "uint64 uncomp_chunk_size",
            "uint64 comp_chunk_ptr",
            "((unsigned int *)comp_chunk_ptr)[0] = (unsigned int)uncomp_chunk_size",
            "set_chunk_size_header",
            no_return=True,
        )
    def _get_comp_temp_size(
        self,
        size_t batch_size,
        size_t max_uncompressed_chunk_bytes,
    ) -> tuple[nvcompStatus_t, size_t]:
        cdef size_t temp_bytes = 0
        err = nvcompBatchedLZ4CompressGetTempSize(
            batch_size,
            max_uncompressed_chunk_bytes,
            self.options,
            &temp_bytes
        )
        return (err, temp_bytes)
    def _get_comp_chunk_size(self, size_t max_uncompressed_chunk_bytes):
        cdef size_t max_compressed_bytes = 0
        err = nvcompBatchedLZ4CompressGetMaxOutputChunkSize(
            max_uncompressed_chunk_bytes,
            self.options,
            &max_compressed_bytes
        )
        # Add header size, if needed.
        if err == nvcompStatus_t.nvcompSuccess and self.has_header:
            max_compressed_bytes += self.HEADER_SIZE_BYTES
        return (err, max_compressed_bytes)
    def compress(
        self,
        uncomp_chunks,
        uncomp_chunk_sizes,
        size_t max_uncomp_chunk_bytes,
        size_t batch_size,
        temp_buf,
        comp_chunks,
        comp_chunk_sizes,
        stream,
    ):
        if self.has_header:
            # If there is a header, we need to:
            # 1. Copy the uncompressed chunk size to the compressed chunk header.
            # 2. Update target pointers in comp_chunks to skip the header portion,
            #    which is not compressed.
            #
            self._set_chunk_size_header_kernel(uncomp_chunk_sizes, comp_chunks)
            # Update chunk pointer to skip the header.
            comp_chunks += self.HEADER_SIZE_BYTES
        super().compress(
            uncomp_chunks,
            uncomp_chunk_sizes,
            max_uncomp_chunk_bytes,
            batch_size,
            temp_buf,
            comp_chunks,
            comp_chunk_sizes,
            stream,
        )
        if self.has_header:
            # Update chunk pointer and size to include the header.
            # NOTE: in-place updates on the caller-provided arrays, restoring
            # the pointers shifted above.
            comp_chunks -= self.HEADER_SIZE_BYTES
            comp_chunk_sizes += self.HEADER_SIZE_BYTES
    def _compress(
        self,
        uncomp_chunks,
        uncomp_chunk_sizes,
        size_t max_uncomp_chunk_bytes,
        size_t batch_size,
        temp_buf,
        comp_chunks,
        comp_chunk_sizes,
        stream
    ):
        # Cast buffer pointers that have Python int type to appropriate C types
        # suitable for passing to nvCOMP API.
        return nvcompBatchedLZ4CompressAsync(
            <const void* const*>to_ptr(uncomp_chunks),
            <const size_t*>to_ptr(uncomp_chunk_sizes),
            max_uncomp_chunk_bytes,
            batch_size,
            <void*>to_ptr(temp_buf),
            <size_t>temp_buf.nbytes,
            <void* const*>to_ptr(comp_chunks),
            <size_t*>to_ptr(comp_chunk_sizes),
            self.options,
            to_stream(stream),
        )
    def _get_decomp_temp_size(
        self,
        size_t batch_size,
        size_t max_uncompressed_chunk_bytes,
    ):
        cdef size_t temp_bytes = 0
        err = nvcompBatchedLZ4DecompressGetTempSize(
            batch_size,
            max_uncompressed_chunk_bytes,
            &temp_bytes
        )
        return (err, temp_bytes)
    def get_decompress_size(
        self,
        comp_chunks,
        comp_chunk_sizes,
        stream,
    ):
        if not self.has_header:
            return super().get_decompress_size(
                comp_chunks,
                comp_chunk_sizes,
                stream,
            )
        # With a header, the uncompressed size is read straight from the
        # per-chunk header instead of calling the nvCOMP API.
        return self._get_size_from_header_kernel(comp_chunks)
    def _get_decomp_size(
        self,
        comp_chunks,
        comp_chunk_sizes,
        size_t batch_size,
        uncomp_chunk_sizes,
        stream,
    ):
        return nvcompBatchedLZ4GetDecompressSizeAsync(
            <const void* const*>to_ptr(comp_chunks),
            <const size_t*>to_ptr(comp_chunk_sizes),
            <size_t*>to_ptr(uncomp_chunk_sizes),
            batch_size,
            to_stream(stream),
        )
    def decompress(
        self,
        comp_chunks,
        comp_chunk_sizes,
        size_t batch_size,
        temp_buf,
        uncomp_chunks,
        uncomp_chunk_sizes,
        actual_uncomp_chunk_sizes,
        statuses,
        stream,
    ):
        if self.has_header:
            # Update chunk pointer and size to exclude the header.
            comp_chunks += self.HEADER_SIZE_BYTES
            comp_chunk_sizes -= self.HEADER_SIZE_BYTES
        super().decompress(
            comp_chunks,
            comp_chunk_sizes,
            batch_size,
            temp_buf,
            uncomp_chunks,
            uncomp_chunk_sizes,
            actual_uncomp_chunk_sizes,
            statuses,
            stream,
        )
    def _decompress(
        self,
        comp_chunks,
        comp_chunk_sizes,
        size_t batch_size,
        temp_buf,
        uncomp_chunks,
        uncomp_chunk_sizes,
        actual_uncomp_chunk_sizes,
        statuses,
        stream,
    ):
        # Cast buffer pointers that have Python int type to appropriate C types
        # suitable for passing to nvCOMP API.
        return nvcompBatchedLZ4DecompressAsync(
            <const void* const*>to_ptr(comp_chunks),
            <const size_t*>to_ptr(comp_chunk_sizes),
            <const size_t*>to_ptr(uncomp_chunk_sizes),
            <size_t*>NULL,
            batch_size,
            <void* const>to_ptr(temp_buf),
            <size_t>temp_buf.nbytes,
            <void* const*>to_ptr(uncomp_chunks),
            <nvcompStatus_t*>NULL,
            to_stream(stream),
        )
    def __repr__(self):
        return f"{self.__class__.__name__}(data_type={self.options['data_type']})"
#
# Gdeflate algorithm.
#
from kvikio._lib.nvcomp_ll_cxx_api cimport (
nvcompBatchedGdeflateCompressAsync,
nvcompBatchedGdeflateCompressGetMaxOutputChunkSize,
nvcompBatchedGdeflateCompressGetTempSize,
nvcompBatchedGdeflateDecompressAsync,
nvcompBatchedGdeflateDecompressGetTempSize,
nvcompBatchedGdeflateGetDecompressSizeAsync,
nvcompBatchedGdeflateOpts_t,
)
class nvCompBatchAlgorithmGdeflate(nvCompBatchAlgorithm):
    """Gdeflate algorithm implementation."""
    algo_id: str = "gdeflate"
    options: nvcompBatchedGdeflateOpts_t
    def __init__(self, algo: int = 0):
        # `algo` selects the Gdeflate compression variant; passed through to
        # the nvCOMP options struct unchanged.
        self.options = nvcompBatchedGdeflateOpts_t(algo)
    def _get_comp_temp_size(
        self,
        size_t batch_size,
        size_t max_uncompressed_chunk_bytes,
    ) -> tuple[nvcompStatus_t, size_t]:
        cdef size_t temp_bytes = 0
        err = nvcompBatchedGdeflateCompressGetTempSize(
            batch_size,
            max_uncompressed_chunk_bytes,
            self.options,
            &temp_bytes
        )
        return (err, temp_bytes)
    def _get_comp_chunk_size(self, size_t max_uncompressed_chunk_bytes):
        cdef size_t max_compressed_bytes = 0
        err = nvcompBatchedGdeflateCompressGetMaxOutputChunkSize(
            max_uncompressed_chunk_bytes,
            self.options,
            &max_compressed_bytes
        )
        return (err, max_compressed_bytes)
    def _compress(
        self,
        uncomp_chunks,
        uncomp_chunk_sizes,
        size_t max_uncomp_chunk_bytes,
        size_t batch_size,
        temp_buf,
        comp_chunks,
        comp_chunk_sizes,
        stream
    ):
        return nvcompBatchedGdeflateCompressAsync(
            <const void* const*>to_ptr(uncomp_chunks),
            <const size_t*>to_ptr(uncomp_chunk_sizes),
            max_uncomp_chunk_bytes,
            batch_size,
            <void*>to_ptr(temp_buf),
            <size_t>temp_buf.nbytes,
            <void* const*>to_ptr(comp_chunks),
            <size_t*>to_ptr(comp_chunk_sizes),
            self.options,
            to_stream(stream),
        )
    def _get_decomp_temp_size(
        self,
        size_t num_chunks,
        size_t max_uncompressed_chunk_bytes,
    ):
        cdef size_t temp_bytes = 0
        err = nvcompBatchedGdeflateDecompressGetTempSize(
            num_chunks,
            max_uncompressed_chunk_bytes,
            &temp_bytes
        )
        return (err, temp_bytes)
    def _get_decomp_size(
        self,
        comp_chunks,
        comp_chunk_sizes,
        size_t batch_size,
        uncomp_chunk_sizes,
        stream,
    ):
        return nvcompBatchedGdeflateGetDecompressSizeAsync(
            <const void* const*>to_ptr(comp_chunks),
            <const size_t*>to_ptr(comp_chunk_sizes),
            <size_t*>to_ptr(uncomp_chunk_sizes),
            batch_size,
            to_stream(stream),
        )
    def _decompress(
        self,
        comp_chunks,
        comp_chunk_sizes,
        size_t batch_size,
        temp_buf,
        uncomp_chunks,
        uncomp_chunk_sizes,
        actual_uncomp_chunk_sizes,
        statuses,
        stream,
    ):
        # Per-chunk actual sizes and statuses are not requested (NULL).
        return nvcompBatchedGdeflateDecompressAsync(
            <const void* const*>to_ptr(comp_chunks),
            <const size_t*>to_ptr(comp_chunk_sizes),
            <const size_t*>to_ptr(uncomp_chunk_sizes),
            <size_t*>NULL,
            batch_size,
            <void* const>to_ptr(temp_buf),
            <size_t>temp_buf.nbytes,
            <void* const*>to_ptr(uncomp_chunks),
            <nvcompStatus_t*>NULL,
            to_stream(stream),
        )
    def __repr__(self):
        return f"{self.__class__.__name__}(algo={self.options['algo']})"
#
# zstd algorithm.
#
from kvikio._lib.nvcomp_ll_cxx_api cimport (
nvcompBatchedZstdCompressAsync,
nvcompBatchedZstdCompressGetMaxOutputChunkSize,
nvcompBatchedZstdCompressGetTempSize,
nvcompBatchedZstdDecompressAsync,
nvcompBatchedZstdDecompressGetTempSize,
nvcompBatchedZstdGetDecompressSizeAsync,
nvcompBatchedZstdOpts_t,
)
class nvCompBatchAlgorithmZstd(nvCompBatchAlgorithm):
    """zstd algorithm implementation."""
    algo_id: str = "zstd"
    options: nvcompBatchedZstdOpts_t
    def __init__(self):
        self.options = nvcompBatchedZstdOpts_t(0)
    def _get_comp_temp_size(
        self,
        size_t batch_size,
        size_t max_uncompressed_chunk_bytes,
    ) -> tuple[nvcompStatus_t, size_t]:
        cdef size_t temp_bytes = 0
        err = nvcompBatchedZstdCompressGetTempSize(
            batch_size,
            max_uncompressed_chunk_bytes,
            self.options,
            &temp_bytes
        )
        return (err, temp_bytes)
    def _get_comp_chunk_size(self, size_t max_uncompressed_chunk_bytes):
        cdef size_t max_compressed_bytes = 0
        err = nvcompBatchedZstdCompressGetMaxOutputChunkSize(
            max_uncompressed_chunk_bytes,
            self.options,
            &max_compressed_bytes
        )
        return (err, max_compressed_bytes)
    def _compress(
        self,
        uncomp_chunks,
        uncomp_chunk_sizes,
        size_t max_uncomp_chunk_bytes,
        size_t batch_size,
        temp_buf,
        comp_chunks,
        comp_chunk_sizes,
        stream
    ):
        return nvcompBatchedZstdCompressAsync(
            <const void* const*>to_ptr(uncomp_chunks),
            <const size_t*>to_ptr(uncomp_chunk_sizes),
            max_uncomp_chunk_bytes,
            batch_size,
            <void*>to_ptr(temp_buf),
            <size_t>temp_buf.nbytes,
            <void* const*>to_ptr(comp_chunks),
            <size_t*>to_ptr(comp_chunk_sizes),
            self.options,
            to_stream(stream),
        )
    def _get_decomp_temp_size(
        self,
        size_t num_chunks,
        size_t max_uncompressed_chunk_bytes,
    ):
        cdef size_t temp_bytes = 0
        err = nvcompBatchedZstdDecompressGetTempSize(
            num_chunks,
            max_uncompressed_chunk_bytes,
            &temp_bytes
        )
        return (err, temp_bytes)
    def _get_decomp_size(
        self,
        comp_chunks,
        comp_chunk_sizes,
        size_t batch_size,
        uncomp_chunk_sizes,
        stream,
    ):
        return nvcompBatchedZstdGetDecompressSizeAsync(
            <const void* const*>to_ptr(comp_chunks),
            <const size_t*>to_ptr(comp_chunk_sizes),
            <size_t*>to_ptr(uncomp_chunk_sizes),
            batch_size,
            to_stream(stream),
        )
    def _decompress(
        self,
        comp_chunks,
        comp_chunk_sizes,
        size_t batch_size,
        temp_buf,
        uncomp_chunks,
        uncomp_chunk_sizes,
        actual_uncomp_chunk_sizes,
        statuses,
        stream,
    ):
        # Unlike LZ4/Gdeflate/Snappy here, zstd fills in the per-chunk actual
        # sizes and statuses output arrays.
        return nvcompBatchedZstdDecompressAsync(
            <const void* const*>to_ptr(comp_chunks),
            <const size_t*>to_ptr(comp_chunk_sizes),
            <const size_t*>to_ptr(uncomp_chunk_sizes),
            <size_t*>to_ptr(actual_uncomp_chunk_sizes),
            batch_size,
            <void* const>to_ptr(temp_buf),
            <size_t>temp_buf.nbytes,
            <void* const*>to_ptr(uncomp_chunks),
            <nvcompStatus_t*>to_ptr(statuses),
            to_stream(stream),
        )
    def __repr__(self):
        return f"{self.__class__.__name__}()"
#
# Snappy algorithm.
#
from kvikio._lib.nvcomp_ll_cxx_api cimport (
nvcompBatchedSnappyCompressAsync,
nvcompBatchedSnappyCompressGetMaxOutputChunkSize,
nvcompBatchedSnappyCompressGetTempSize,
nvcompBatchedSnappyDecompressAsync,
nvcompBatchedSnappyDecompressGetTempSize,
nvcompBatchedSnappyGetDecompressSizeAsync,
nvcompBatchedSnappyOpts_t,
)
class nvCompBatchAlgorithmSnappy(nvCompBatchAlgorithm):
    """Snappy algorithm implementation."""
    algo_id: str = "snappy"
    options: nvcompBatchedSnappyOpts_t
    def __init__(self):
        self.options = nvcompBatchedSnappyOpts_t(0)
    def _get_comp_temp_size(
        self,
        size_t batch_size,
        size_t max_uncompressed_chunk_bytes,
    ) -> tuple[nvcompStatus_t, size_t]:
        cdef size_t temp_bytes = 0
        err = nvcompBatchedSnappyCompressGetTempSize(
            batch_size,
            max_uncompressed_chunk_bytes,
            self.options,
            &temp_bytes
        )
        return (err, temp_bytes)
    def _get_comp_chunk_size(self, size_t max_uncompressed_chunk_bytes):
        cdef size_t max_compressed_bytes = 0
        err = nvcompBatchedSnappyCompressGetMaxOutputChunkSize(
            max_uncompressed_chunk_bytes,
            self.options,
            &max_compressed_bytes
        )
        return (err, max_compressed_bytes)
    def _compress(
        self,
        uncomp_chunks,
        uncomp_chunk_sizes,
        size_t max_uncomp_chunk_bytes,
        size_t batch_size,
        temp_buf,
        comp_chunks,
        comp_chunk_sizes,
        stream
    ):
        return nvcompBatchedSnappyCompressAsync(
            <const void* const*>to_ptr(uncomp_chunks),
            <const size_t*>to_ptr(uncomp_chunk_sizes),
            max_uncomp_chunk_bytes,
            batch_size,
            <void*>to_ptr(temp_buf),
            <size_t>temp_buf.nbytes,
            <void* const*>to_ptr(comp_chunks),
            <size_t*>to_ptr(comp_chunk_sizes),
            self.options,
            to_stream(stream),
        )
    def _get_decomp_temp_size(
        self,
        size_t num_chunks,
        size_t max_uncompressed_chunk_bytes,
    ):
        cdef size_t temp_bytes = 0
        err = nvcompBatchedSnappyDecompressGetTempSize(
            num_chunks,
            max_uncompressed_chunk_bytes,
            &temp_bytes
        )
        return (err, temp_bytes)
    def _get_decomp_size(
        self,
        comp_chunks,
        comp_chunk_sizes,
        size_t batch_size,
        uncomp_chunk_sizes,
        stream,
    ):
        return nvcompBatchedSnappyGetDecompressSizeAsync(
            <const void* const*>to_ptr(comp_chunks),
            <const size_t*>to_ptr(comp_chunk_sizes),
            <size_t*>to_ptr(uncomp_chunk_sizes),
            batch_size,
            to_stream(stream),
        )
    def _decompress(
        self,
        comp_chunks,
        comp_chunk_sizes,
        size_t batch_size,
        temp_buf,
        uncomp_chunks,
        uncomp_chunk_sizes,
        actual_uncomp_chunk_sizes,
        statuses,
        stream,
    ):
        # Per-chunk actual sizes and statuses are not requested (NULL).
        return nvcompBatchedSnappyDecompressAsync(
            <const void* const*>to_ptr(comp_chunks),
            <const size_t*>to_ptr(comp_chunk_sizes),
            <const size_t*>to_ptr(uncomp_chunk_sizes),
            <size_t*>NULL,
            batch_size,
            <void* const>to_ptr(temp_buf),
            <size_t>temp_buf.nbytes,
            <void* const*>to_ptr(uncomp_chunks),
            <nvcompStatus_t*>NULL,
            to_stream(stream),
        )
    def __repr__(self):
        return f"{self.__class__.__name__}()"
# Registry of available batched algorithms, keyed by their `algo_id`.
SUPPORTED_ALGORITHMS = {
    algorithm.algo_id: algorithm
    for algorithm in (
        nvCompBatchAlgorithmLZ4,
        nvCompBatchAlgorithmGdeflate,
        nvCompBatchAlgorithmZstd,
        nvCompBatchAlgorithmSnappy,
    )
}
| 0 |
rapidsai_public_repos/kvikio/python
|
rapidsai_public_repos/kvikio/python/tests/test_examples.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import os
from importlib import import_module
from pathlib import Path
import pytest
examples_path = Path(os.path.realpath(__file__)).parent / ".." / "examples"
def test_hello_world(tmp_path, monkeypatch):
    """Test examples/hello_world.py"""
    pytest.importorskip("cupy")  # `examples/hello_world.py` requires CuPy
    # Make the examples directory importable, then run the example's main().
    monkeypatch.syspath_prepend(str(examples_path))
    import_module("hello_world").main(tmp_path / "test-file")
def test_zarr_cupy_nvcomp(tmp_path, monkeypatch):
    """Test examples/zarr_cupy_nvcomp.py"""
    # `examples/zarr_cupy_nvcomp.py` requires the Zarr submodule
    pytest.importorskip("kvikio.zarr")
    # Make the examples directory importable, then run the example's main().
    monkeypatch.syspath_prepend(str(examples_path))
    import_module("zarr_cupy_nvcomp").main(tmp_path / "test-file")
| 0 |
rapidsai_public_repos/kvikio/python
|
rapidsai_public_repos/kvikio/python/tests/test_numpy.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import pytest
from kvikio.numpy import LikeWrapper, tofile
@pytest.mark.parametrize("dtype", ["u1", "int64", "float32", "float64"])
def test_tofile(tmp_path, xp, dtype):
    """Test tofile()"""
    filepath = str(tmp_path / "test_tofile")
    src = xp.arange(100, dtype=dtype)
    # Round-trip a contiguous array through tofile()/fromfile().
    tofile(src, filepath)
    dst = xp.fromfile(filepath, dtype=dtype)
    xp.testing.assert_array_equal(src, dst)
    # Also round-trip a non-contiguous (strided) view.
    tofile(src[::2], filepath)
    dst = xp.fromfile(filepath, dtype=dtype)
    xp.testing.assert_array_equal(src[::2], dst)
@pytest.mark.parametrize("dtype", ["u1", "int64", "float32", "float64"])
def test_fromfile(tmp_path, xp, dtype):
    """Test NumPy's and CuPy's fromfile() with LikeWrapper"""
    filepath = str(tmp_path / "test_fromfile")
    src = xp.arange(100, dtype=dtype)
    src.tofile(filepath)
    like = LikeWrapper(like=xp.empty(()))
    # Exercise the positional, keyword, and file= calling conventions.
    dst = xp.fromfile(filepath, dtype, like=like)
    xp.testing.assert_array_equal(src, dst)
    dst = xp.fromfile(filepath, dtype=dtype, like=like)
    xp.testing.assert_array_equal(src, dst)
    dst = xp.fromfile(file=filepath, dtype=dtype, like=like)
    xp.testing.assert_array_equal(src, dst)
    # `count` limits how many items are read.
    dst = xp.fromfile(file=filepath, dtype=dtype, count=100 - 42, like=like)
    xp.testing.assert_array_equal(src[:-42], dst)
    # `offset` skips bytes from the start of the file.
    dst = xp.fromfile(file=filepath, dtype=dtype, offset=src.itemsize, like=like)
    xp.testing.assert_array_equal(src[1:], dst)
    dst = xp.fromfile(file=filepath, dtype=dtype, offset=1, count=10, like=like)
    assert len(dst) == 10
    # Test non-divisible offset
    dst = xp.fromfile(file=filepath, dtype="u1", offset=7, like=like)
    xp.testing.assert_array_equal(src.view(dtype="u1")[7:], dst)
    # Reading from an already-open file object should also work.
    filepath = str(tmp_path / "test_fromfile")
    with open(filepath, mode="rb") as f:
        dst = xp.fromfile(file=f, dtype=dtype, like=like)
        xp.testing.assert_array_equal(src, dst)
def test_fromfile_error(tmp_path, xp):
    """Error paths of fromfile() with LikeWrapper: missing file,
    unsupported `sep`, and negative `count`."""
    filepath = str(tmp_path / "test_fromfile")
    src = xp.arange(1, dtype="u1")
    src.tofile(filepath)
    like = LikeWrapper(like=src)
    with pytest.raises(FileNotFoundError, match="no file"):
        xp.fromfile("no file", like=like)
    # Text-mode parsing (non-default `sep`) is not implemented
    with pytest.raises(NotImplementedError, match="Non-default value of the `sep`"):
        xp.fromfile(file=filepath, sep=",", like=like)
    with pytest.raises(ValueError, match="[Nn]egative dimensions are not allowed"):
        xp.fromfile(file=filepath, like=like, count=-42)
| 0 |
rapidsai_public_repos/kvikio/python
|
rapidsai_public_repos/kvikio/python/tests/test_basic_io.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import os
import random
from contextlib import contextmanager
import pytest
import kvikio
import kvikio.defaults
cupy = pytest.importorskip("cupy")
numpy = pytest.importorskip("numpy")
def check_bit_flags(x: int, y: int) -> bool:
    """Check that the bits set in `y` is also set in `x`"""
    # `y` is a subset of `x` exactly when no bit of `y` is missing from `x`
    missing_bits = y & ~x
    return missing_bits == 0
@pytest.mark.parametrize("size", [1, 10, 100, 1000, 1024, 4096, 4096 * 10])
@pytest.mark.parametrize("nthreads", [1, 3, 4, 16])
@pytest.mark.parametrize("tasksize", [199, 1024])
def test_read_write(tmp_path, xp, gds_threshold, size, nthreads, tasksize):
    """Test basic read/write

    Parametrized over array size, thread-pool size, and task size to
    cover both single-task and multi-task code paths.
    """
    filename = tmp_path / "test-file"
    with kvikio.defaults.set_num_threads(nthreads):
        with kvikio.defaults.set_task_size(tasksize):
            # Write file
            a = xp.arange(size)
            f = kvikio.CuFile(filename, "w")
            assert not f.closed
            assert check_bit_flags(f.open_flags(), os.O_WRONLY)
            assert f.write(a) == a.nbytes
            # Try to read file opened in write-only mode
            with pytest.raises(RuntimeError, match="unsupported file open flags"):
                f.read(a)
            # Close file
            f.close()
            assert f.closed
            # Read file into a new array and compare
            b = xp.empty_like(a)
            f = kvikio.CuFile(filename, "r")
            assert check_bit_flags(f.open_flags(), os.O_RDONLY)
            assert f.read(b) == b.nbytes
            assert all(a == b)
def test_file_handle_context(tmp_path):
    """Open a CuFile in a context

    The context manager must close the file on exit.
    """
    filename = tmp_path / "test-file"
    a = cupy.arange(200)
    b = cupy.empty_like(a)
    with kvikio.CuFile(filename, "w+") as f:
        assert not f.closed
        assert check_bit_flags(f.open_flags(), os.O_RDWR)
        assert f.write(a) == a.nbytes
        assert f.read(b) == b.nbytes
        assert all(a == b)
    assert f.closed
@pytest.mark.skipif(
    kvikio.defaults.compat_mode(),
    reason="cannot test `set_compat_mode` when already running in compatibility mode",
)
def test_set_compat_mode_between_io(tmp_path):
    """Test changing `compat_mode`

    A file opened with compat mode disabled must still be writable after
    compat mode is enabled.
    """
    with kvikio.defaults.set_compat_mode(False):
        f = kvikio.CuFile(tmp_path / "test-file", "w")
        assert not f.closed
        assert f.open_flags() & os.O_WRONLY != 0
        with kvikio.defaults.set_compat_mode(True):
            a = cupy.arange(10)
            assert f.write(a) == a.nbytes
def test_write_to_files_in_chunks(tmp_path, xp, gds_threshold):
    """Write to a file in equally-sized chunks, in random order, then read
    the whole file back and compare."""
    filename = tmp_path / "test-file"
    a = xp.arange(200)
    f = kvikio.CuFile(filename, "w")
    nchunks = 20
    chunks = []
    file_offsets = []
    order = list(range(nchunks))
    random.shuffle(order)
    for i in order:
        chunk_size = len(a) // nchunks
        offset = i * chunk_size
        chunks.append(a[offset : offset + chunk_size])
        # File offsets are in bytes; use the array's actual itemsize
        # instead of hardcoding 8 — the default integer dtype is
        # platform dependent (e.g. int32 on Windows).
        file_offsets.append(offset * a.itemsize)
    for i in range(nchunks):
        f.write(chunks[i], file_offset=file_offsets[i])
    f.close()
    assert f.closed
    # Read file into a new array and compare
    b = xp.empty_like(a)
    f = kvikio.CuFile(filename, "r")
    assert f.read(b) == b.nbytes
    assert all(a == b)
@pytest.mark.parametrize("nthreads", [1, 3, 16])
@pytest.mark.parametrize("tasksize", [1000, 4096, int(1.5 * 4096), int(2.3 * 4096)])
@pytest.mark.parametrize(
    "start,end",
    [(0, 10 * 4096), (1, int(1.3 * 4096)), (int(2.1 * 4096), int(5.6 * 4096))],
)
def test_read_write_slices(tmp_path, xp, gds_threshold, nthreads, tasksize, start, end):
    """Read and write different slices

    Slice bounds are chosen to be both page-aligned and deliberately
    unaligned relative to the 4096-byte page size.
    """
    with kvikio.defaults.set_num_threads(nthreads):
        with kvikio.defaults.set_task_size(tasksize):
            filename = tmp_path / "test-file"
            a = xp.arange(10 * 4096)  # 10 page-sizes
            b = a.copy()
            # Mark the slice so a successful round-trip is detectable
            a[start:end] = 42
            with kvikio.CuFile(filename, "w") as f:
                assert f.write(a[start:end]) == a[start:end].nbytes
            with kvikio.CuFile(filename, "r") as f:
                assert f.read(b[start:end]) == b[start:end].nbytes
            assert all(a == b)
@pytest.mark.parametrize("size", [1, 10, 100, 1000, 1024, 4096, 4096 * 10])
def test_raw_read_write(tmp_path, size):
    """Test raw read/write

    raw_read()/raw_write() bypass the thread pool and operate on the
    whole buffer in one call.
    """
    filename = tmp_path / "test-file"
    a = cupy.arange(size)
    with kvikio.CuFile(filename, "w") as f:
        assert f.raw_write(a) == a.nbytes
    with kvikio.CuFile(filename, "r") as f:
        assert f.raw_read(a) == a.nbytes
def test_raw_read_write_of_host_memory(tmp_path):
    """Test raw read/write of host memory, which isn't supported"""
    filename = tmp_path / "test-file"
    # NumPy array => host (non-CUDA) memory
    a = numpy.arange(1024)
    with kvikio.CuFile(filename, "w") as f:
        with pytest.raises(ValueError, match="Non-CUDA buffers not supported"):
            f.raw_write(a)
    with kvikio.CuFile(filename, "r") as f:
        with pytest.raises(ValueError, match="Non-CUDA buffers not supported"):
            assert f.raw_read(a) == a.nbytes
@contextmanager
def with_no_cuda_context():
    """Context manager that pops all CUDA contexts before the body runs
    and pushes them back on afterwards, restoring the original order."""
    cuda = pytest.importorskip("cuda.cuda")
    assert cuda.cuInit(0)[0] == cuda.CUresult.CUDA_SUCCESS
    ctx_stack = []
    # Pop until no context is current
    while True:
        err, ctx = cuda.cuCtxPopCurrent()
        if err == cuda.CUresult.CUDA_ERROR_INVALID_CONTEXT:
            break
        assert err == cuda.CUresult.CUDA_SUCCESS
        ctx_stack.append(ctx)
    yield
    # Re-push in reverse pop order to restore the original stack
    for ctx in reversed(ctx_stack):
        (err,) = cuda.cuCtxPushCurrent(ctx)
        assert err == cuda.CUresult.CUDA_SUCCESS
def test_no_current_cuda_context(tmp_path, xp, gds_threshold):
    """Test IO when no CUDA context is current"""
    filename = tmp_path / "test-file"
    a = xp.arange(100)
    b = xp.empty_like(a)
    with kvikio.CuFile(filename, "w+") as f:
        # IO must still work with the CUDA context stack emptied
        with with_no_cuda_context():
            f.write(a)
        f.read(b)
    assert all(a == b)
@pytest.mark.skipif(
    cupy.cuda.runtime.getDeviceCount() < 2, reason="requires multiple GPUs"
)
def test_multiple_gpus(tmp_path, xp, gds_threshold):
    """Test IO from two different GPUs

    Covers both the case where the current device matches the buffer's
    device and the case where it does not.
    """
    filename = tmp_path / "test-file"
    with kvikio.defaults.set_num_threads(10):
        with kvikio.defaults.set_task_size(10):
            # Allocate an array on each device
            with cupy.cuda.Device(0):
                a0 = xp.arange(200)
            with cupy.cuda.Device(1):
                a1 = xp.zeros(200, dtype=a0.dtype)
            # Test when the device match the allocation
            with kvikio.CuFile(filename, "w") as f:
                with cupy.cuda.Device(0):
                    assert f.write(a0) == a0.nbytes
            with kvikio.CuFile(filename, "r") as f:
                with cupy.cuda.Device(1):
                    assert f.read(a1) == a1.nbytes
            assert bytes(a0) == bytes(a1)
            # Test when the device doesn't match the allocation
            with kvikio.CuFile(filename, "w") as f:
                with cupy.cuda.Device(1):
                    assert f.write(a0) == a0.nbytes
            with kvikio.CuFile(filename, "r") as f:
                with cupy.cuda.Device(0):
                    assert f.read(a1) == a1.nbytes
            assert bytes(a0) == bytes(a1)
| 0 |
rapidsai_public_repos/kvikio/python
|
rapidsai_public_repos/kvikio/python/tests/conftest.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import contextlib
import multiprocessing as mp
import subprocess
from typing import Iterable
import pytest
import kvikio.defaults
mp = mp.get_context("spawn") # type: ignore
def command_server(conn):
    """Run commands received over `conn` and reply with their return codes"""
    while True:
        # Each request is a (cmd, cwd, verbose) tuple
        request = conn.recv()
        cmd, cwd, verbose = request
        completed: subprocess.CompletedProcess = subprocess.run(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd
        )  # type: ignore
        if verbose:
            header = f"{cwd}$ " + " ".join(completed.args)
            print(header)
            print(completed.stdout.decode(), end="")
        # Reply with the exit status
        conn.send(completed.returncode)
@pytest.fixture(scope="session", autouse=True)
def run_cmd():
    """Provide a `run_cmd` function to run commands in a separate process
    Use `run_cmd(cmd, cwd, verbose)` to run a command.
    Notice, the server that runs the commands is spawned before CUDA
    initialization (session-scoped, autouse fixture).
    """
    # Start the command server before the very first test
    client_conn, server_conn = mp.Pipe()
    p = mp.Process(
        target=command_server,
        args=(server_conn,),
    )
    p.start()
    def run_cmd(cmd: Iterable[str], cwd, verbose=True):
        # Forward the request to the server and wait for the return code
        client_conn.send((cmd, cwd, verbose))
        return client_conn.recv()
    yield run_cmd
    # Kill the command server after the last test
    p.kill()
@pytest.fixture()
def managers():
    """Fixture returning all nvCOMP manager classes (skips if nvcomp
    bindings are unavailable)."""
    libnvcomp = pytest.importorskip("kvikio.nvcomp")
    return [
        libnvcomp.ANSManager,
        libnvcomp.BitcompManager,
        libnvcomp.CascadedManager,
        libnvcomp.GdeflateManager,
        libnvcomp.LZ4Manager,
        libnvcomp.SnappyManager,
    ]
@pytest.fixture(
    params=[("cupy", False), ("cupy", True), ("numpy", False)],
    ids=["cupy", "cupy_async", "numpy"],
)
def xp(request):
    """Fixture to parametrize over numpy-like libraries

    The "cupy_async" variant runs with CuPy's stream-ordered
    (malloc_async) allocator enabled.
    """
    module_name, async_malloc = request.param
    if async_malloc:
        cupy = pytest.importorskip("cupy")
        ctx = cupy.cuda.using_allocator(cupy.cuda.malloc_async)
    else:
        ctx = contextlib.nullcontext()
    with ctx:
        yield pytest.importorskip(module_name)
@pytest.fixture(
    params=[0, 2**20],
    ids=["gds_threshold=0MB", "gds_threshold=1MB"],
)
def gds_threshold(request):
    """Fixture to parametrize over GDS threshold values

    Threshold 0 forces GDS for all transfers; 1 MiB routes small
    transfers through the POSIX fallback path.
    """
    with kvikio.defaults.set_gds_threshold(request.param):
        yield request.param
| 0 |
rapidsai_public_repos/kvikio/python
|
rapidsai_public_repos/kvikio/python/tests/test_zarr.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import math
import numpy
import pytest
cupy = pytest.importorskip("cupy")
zarr = pytest.importorskip("zarr")
kvikio_zarr = pytest.importorskip("kvikio.zarr")
kvikio_nvcomp_codec = pytest.importorskip("kvikio.nvcomp_codec")
numcodecs = pytest.importorskip("numcodecs")
if not kvikio_zarr.supported:
pytest.skip(
f"requires Zarr >={kvikio_zarr.MINIMUM_ZARR_VERSION}",
allow_module_level=True,
)
@pytest.fixture
def store(tmp_path):
    """Fixture that creates a GDS Store backed by a temporary directory"""
    return kvikio_zarr.GDSStore(tmp_path / "test-file.zarr")
def test_direct_store_access(store, xp):
    """Test accessing the GDS Store directly"""
    a = xp.arange(5, dtype="u1")
    store["a"] = a
    b = store["a"]
    # Notice, unless using getitems(), GDSStore always returns bytes
    assert isinstance(b, bytes)
    assert (xp.frombuffer(b, dtype="u1") == a).all()
@pytest.mark.parametrize("xp_write", ["numpy", "cupy"])
@pytest.mark.parametrize("xp_read_a", ["numpy", "cupy"])
@pytest.mark.parametrize("xp_read_b", ["numpy", "cupy"])
def test_direct_store_access_getitems(store, xp_write, xp_read_a, xp_read_b):
    """Test accessing the GDS Store directly using getitems()

    The `meta_array` context selects the output array type per key.
    """
    xp_read_a = pytest.importorskip(xp_read_a)
    xp_read_b = pytest.importorskip(xp_read_b)
    xp_write = pytest.importorskip(xp_write)
    a = xp_write.arange(5, dtype="u1")
    b = a * 2
    store["a"] = a
    store["b"] = b
    res = store.getitems(
        keys=["a", "b"],
        contexts={
            "a": {"meta_array": xp_read_a.empty(())},
            "b": {"meta_array": xp_read_b.empty(())},
        },
    )
    assert isinstance(res["a"], xp_read_a.ndarray)
    assert isinstance(res["b"], xp_read_b.ndarray)
    cupy.testing.assert_array_equal(res["a"], a)
    cupy.testing.assert_array_equal(res["b"], b)
def test_array(store, xp):
    """Test Zarr array round-trip through the GDS Store"""
    a = xp.arange(100)
    z = zarr.array(a, chunks=10, compressor=None, store=store, meta_array=xp.empty(()))
    # meta_array controls the array type returned by reads
    assert isinstance(z.meta_array, type(a))
    assert a.shape == z.shape
    assert a.dtype == z.dtype
    assert isinstance(a, type(z[:]))
    xp.testing.assert_array_equal(a, z[:])
def test_group(store, xp):
    """Test Zarr group creation and reads through the GDS Store"""
    g = zarr.open_group(store, meta_array=xp.empty(()))
    g.ones("data", shape=(10, 11), dtype=int, compressor=None)
    a = g["data"]
    assert a.shape == (10, 11)
    assert a.dtype == int
    assert isinstance(a, zarr.Array)
    # Reads must come back as the meta_array's type
    assert isinstance(a.meta_array, xp.ndarray)
    assert isinstance(a[:], xp.ndarray)
    assert (a[:] == 1).all()
def test_open_array(store, xp):
    """Test Zarr's open_array() with the GDS Store"""
    a = xp.arange(10)
    z = zarr.open_array(
        store,
        shape=a.shape,
        dtype=a.dtype,
        chunks=(10,),
        compressor=None,
        meta_array=xp.empty(()),
    )
    z[:] = a
    assert a.shape == z.shape
    assert a.dtype == z.dtype
    assert isinstance(a, type(z[:]))
    xp.testing.assert_array_equal(a, z[:])
@pytest.mark.parametrize("inline_array", [True, False])
def test_dask_read(store, xp, inline_array):
    """Test Zarr read in Dask (with and without inlined array metadata)"""
    da = pytest.importorskip("dask.array")
    a = xp.arange(100)
    z = zarr.array(a, chunks=10, compressor=None, store=store, meta_array=xp.empty(()))
    d = da.from_zarr(z, inline_array=inline_array)
    d += 1
    xp.testing.assert_array_equal(a + 1, d.compute())
def test_dask_write(store, xp):
    """Test Zarr write in Dask"""
    da = pytest.importorskip("dask.array")
    # Write dask array to disk using Zarr
    a = xp.arange(100)
    d = da.from_array(a, chunks=10)
    da.to_zarr(d, store, compressor=None, meta_array=xp.empty(()))
    # Validate the written Zarr array
    z = zarr.open_array(store)
    xp.testing.assert_array_equal(a, z[:])
@pytest.mark.parametrize("xp_read", ["numpy", "cupy"])
@pytest.mark.parametrize("xp_write", ["numpy", "cupy"])
@pytest.mark.parametrize("compressor", kvikio_zarr.nvcomp_compressors)
def test_compressor(store, xp_write, xp_read, compressor):
    """Round-trip each nvCOMP compressor over all write/read array-type
    combinations."""
    xp_read = pytest.importorskip(xp_read)
    xp_write = pytest.importorskip(xp_write)
    shape = (10, 1)
    chunks = (10, 1)
    a = xp_write.arange(math.prod(shape)).reshape(shape)
    z = zarr.creation.create(
        shape=shape,
        chunks=chunks,
        compressor=compressor(),
        store=store,
        meta_array=xp_read.empty(()),
    )
    z[:] = a
    b = z[:]
    # Output type is determined by meta_array, not by the input type
    assert isinstance(b, xp_read.ndarray)
    cupy.testing.assert_array_equal(b, a)
@pytest.mark.parametrize("algo", ["lz4", "zstd"])
def test_decompressor_config_overwrite(tmp_path, xp, algo):
    """Write with a CPU codec, then read through GDSStore while
    overwriting the decompressor config with the GPU codec."""
    cpu_codec = numcodecs.registry.get_codec({"id": algo})
    gpu_codec = kvikio_nvcomp_codec.NvCompBatchCodec(algo)
    # Write using Zarr's default file store and the `cpu_codec` compressor
    z = zarr.open_array(tmp_path, mode="w", shape=(10,), compressor=cpu_codec)
    z[:] = range(10)
    assert z.compressor == cpu_codec
    # Open file using GDSStore and use `gpu_codec` as decompressor.
    z = zarr.open_array(
        kvikio_zarr.GDSStore(
            tmp_path,
            decompressor_config_overwrite=gpu_codec.get_config(),
        ),
        mode="r",
        meta_array=xp.empty(()),
    )
    assert z.compressor == gpu_codec
    assert isinstance(z[:], xp.ndarray)
    xp.testing.assert_array_equal(z[:], range(10))
@pytest.mark.parametrize("algo", ["lz4"])
def test_compressor_config_overwrite(tmp_path, xp, algo):
    """Write with the GPU codec while recording a CPU-compatible codec
    config in the on-disk metadata, then read back with plain Zarr."""
    cpu_codec = numcodecs.registry.get_codec({"id": algo})
    gpu_codec = kvikio_nvcomp_codec.NvCompBatchCodec(algo)
    # Write file using GDSStore and the `gpu_codec` compressor. In order
    # to make the file compatible with Zarr's builtin CPU decompressor,
    # we set `cpu_codec` as the compressor in the meta file on disk.
    z = zarr.open_array(
        kvikio_zarr.GDSStore(
            tmp_path,
            compressor_config_overwrite=cpu_codec.get_config(),
            decompressor_config_overwrite=gpu_codec.get_config(),
        ),
        mode="w",
        shape=10,
        compressor=gpu_codec,
        meta_array=xp.empty(()),
    )
    assert z.compressor == gpu_codec
    z[:] = xp.arange(10)
    # We can now open the file using Zarr's builtin CPU decompressor
    z = zarr.open_array(tmp_path, mode="r")
    assert isinstance(z[:], numpy.ndarray)
    numpy.testing.assert_array_equal(z[:], range(10))
@pytest.mark.parametrize("write_mode", ["w", "w-", "a"])
@pytest.mark.parametrize("read_mode", ["r", "r+", "a"])
def test_open_cupy_array(tmp_path, write_mode, read_mode):
    """open_cupy_array() round-trip: GPU write, GPU read, then CPU read
    via plain Zarr using the compatible LZ4 codec pair."""
    a = cupy.arange(10)
    z = kvikio_zarr.open_cupy_array(
        tmp_path,
        mode=write_mode,
        shape=a.shape,
        dtype=a.dtype,
        chunks=(2,),
        compressor=kvikio_zarr.CompatCompressor.lz4(),
    )
    z[:] = a
    assert a.shape == z.shape
    assert a.dtype == z.dtype
    assert isinstance(z[:], type(a))
    assert z.compressor == kvikio_nvcomp_codec.NvCompBatchCodec("lz4")
    cupy.testing.assert_array_equal(a, z[:])
    # Re-open on the GPU
    z = kvikio_zarr.open_cupy_array(
        tmp_path,
        mode=read_mode,
    )
    assert a.shape == z.shape
    assert a.dtype == z.dtype
    assert isinstance(z[:], type(a))
    assert z.compressor == kvikio_nvcomp_codec.NvCompBatchCodec("lz4")
    cupy.testing.assert_array_equal(a, z[:])
    # Plain Zarr must read the same data on the CPU
    z = zarr.open_array(tmp_path, mode=read_mode)
    assert a.shape == z.shape
    assert a.dtype == z.dtype
    assert isinstance(z[:], numpy.ndarray)
    assert z.compressor == kvikio_zarr.CompatCompressor.lz4().cpu
    numpy.testing.assert_array_equal(a.get(), z[:])
@pytest.mark.parametrize("compressor", [None, kvikio_zarr.CompatCompressor.lz4().cpu])
def test_open_cupy_array_written_by_zarr(tmp_path, compressor):
    """Arrays written with plain (CPU) Zarr must be readable through
    open_cupy_array()."""
    data = numpy.arange(100)
    z = zarr.open_array(
        tmp_path,
        shape=data.shape,
        mode="w",
        compressor=compressor,
    )
    z[:] = data
    z = kvikio_zarr.open_cupy_array(tmp_path, mode="r")
    assert isinstance(z[:], cupy.ndarray)
    cupy.testing.assert_array_equal(z[:], data)
@pytest.mark.parametrize("mode", ["r", "r+", "a"])
def test_open_cupy_array_incompatible_compressor(tmp_path, mode):
    """open_cupy_array() must reject arrays written with a compressor
    that has no CUDA-compatible counterpart (here: Blosc)."""
    zarr.create((10,), store=tmp_path, compressor=numcodecs.Blosc())
    with pytest.raises(ValueError, match="non-CUDA compatible compressor"):
        kvikio_zarr.open_cupy_array(tmp_path, mode=mode)
def test_open_cupy_array_unknown_mode(tmp_path):
    """open_cupy_array() must reject unsupported open modes"""
    a = cupy.arange(10)
    with pytest.raises(ValueError, match="Unknown mode: x"):
        kvikio_zarr.open_cupy_array(
            tmp_path,
            mode="x",
            shape=a.shape,
            dtype=a.dtype,
            chunks=(2,),
        )
| 0 |
rapidsai_public_repos/kvikio/python
|
rapidsai_public_repos/kvikio/python/tests/test_nvcomp.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import pytest
np = pytest.importorskip("numpy")
cupy = pytest.importorskip("cupy")
kvikio = pytest.importorskip("kvikio")
libnvcomp = pytest.importorskip("kvikio.nvcomp")
# TODO: don't hardcode the following expected values
LEN = {
"ANS": 11144,
"Bitcomp": 3208,
"Cascaded": 600,
"Gdeflate": 760,
"LZ4": 393,
"Snappy": 3548,
}
def assert_compression_size(actual, desired, rtol=0.1):
    """Check that `actual` is within `rtol` of the expected compressed size.

    Compression ratios might change slightly between library versions,
    so we mark a mismatch as "xfail" instead of a hard failure.
    """
    try:
        np.testing.assert_allclose(actual, desired, rtol=rtol)
    except AssertionError:
        # pytest.xfail() raises immediately, ending the test as an
        # "expected failure" — the trailing `raise` in the original
        # implementation was unreachable and has been removed.
        pytest.xfail("mismatch in compression ratios is acceptable")
def managers():
    """Return all nvCOMP compression-manager classes under test."""
    manager_names = (
        "ANSManager",
        "BitcompManager",
        "CascadedManager",
        "GdeflateManager",
        "LZ4Manager",
        "SnappyManager",
    )
    return [getattr(libnvcomp, name) for name in manager_names]
def dtypes():
    """Return the dtype names paired with the managers in parametrized tests."""
    unsigned = ["uint8", "uint16", "uint32"]
    signed = ["int8", "int16", "int32"]
    return unsigned + signed
@pytest.mark.parametrize("manager, dtype", zip(managers(), dtypes()))
def test_round_trip_dtypes(manager, dtype):
    """Each manager must round-trip (compress then decompress) data of
    its paired dtype without loss."""
    length = 10000
    # Scale the element count so the buffer is ~`length` bytes
    data = cupy.array(
        np.arange(
            0,
            length // cupy.dtype(dtype).type(0).itemsize,
            dtype=dtype,
        )
    )
    compressor_instance = manager(data_type=dtype)
    compressed = compressor_instance.compress(data)
    decompressed = compressor_instance.decompress(compressed)
    assert (data == decompressed).all()
#
# ANS Options test
#
@pytest.mark.parametrize(
    "inputs",
    [
        {},
        {"chunk_size": 1 << 16, "device_id": 0},
        {
            "chunk_size": 1 << 16,
        },
        {
            "device_id": 0,
        },
    ],
)
def test_ans_inputs(inputs):
    """ANSManager accepts each keyword-argument combination and produces
    the expected compressed size."""
    size = 10000
    dtype = inputs.get("data_type") if inputs.get("data_type") else np.int8
    data = cupy.array(np.arange(0, size // dtype(0).itemsize, dtype=dtype))
    compressor = libnvcomp.ANSManager(**inputs)
    final = compressor.compress(data)
    assert_compression_size(len(final), LEN["ANS"])
@pytest.mark.parametrize(
    "inputs",
    [
        {},
        {"data_type": np.uint8, "algo": 0, "device_id": 0},
        {"data_type": np.uint8},
        {
            "algo": 0,
        },
        {
            "device_id": 0,
        },
    ],
)
def test_bitcomp_inputs(inputs):
    """BitcompManager accepts each keyword-argument combination and
    produces the expected compressed size."""
    size = 10000
    dtype = inputs.get("data_type") if inputs.get("data_type") else np.int8
    data = cupy.array(np.arange(0, size // dtype(0).itemsize, dtype=dtype))
    compressor = libnvcomp.BitcompManager(**inputs)
    final = compressor.compress(data)
    assert_compression_size(len(final), LEN["Bitcomp"])
@pytest.mark.parametrize(
    "inputs, expected",
    zip(
        [
            {"algo": 0},
            {"algo": 1},
            {"algo": 2},
        ],
        [LEN["Bitcomp"], LEN["Bitcomp"], LEN["Bitcomp"]],
    ),
)
def test_bitcomp_algorithms(inputs, expected):
    """Each Bitcomp algorithm variant compresses to the expected size."""
    size = 10000
    dtype = inputs.get("data_type") if inputs.get("data_type") else np.int8
    data = cupy.array(np.arange(0, size // dtype(0).itemsize, dtype=dtype))
    compressor = libnvcomp.BitcompManager(**inputs)
    final = compressor.compress(data)
    assert_compression_size(len(final), expected)
@pytest.mark.parametrize(
    "inputs",
    [
        {},
        {
            "options": {
                "chunk_size": 1 << 12,
                "type": np.uint32,
                "num_RLEs": 2,
                "num_deltas": 1,
                "use_bp": True,
            },
        },
        {
            "options": {
                "chunk_size": 1 << 12,
                "type": np.uint32,
                "num_RLEs": 2,
                "num_deltas": 1,
                "use_bp": True,
            },
            "chunk_size": 1 << 16,
        },
        {
            "options": {
                "chunk_size": 1 << 12,
                "type": np.uint32,
                "num_RLEs": 2,
                "num_deltas": 1,
                "use_bp": True,
            },
            "data_type": np.uint8,
        },
        {
            "options": {
                "chunk_size": 1 << 12,
                "type": np.uint32,
                "num_RLEs": 2,
                "num_deltas": 1,
                "use_bp": True,
            },
            "device_id": 0,
        },
    ],
)
def test_cascaded_inputs(inputs):
    """CascadedManager accepts each options/keyword combination and
    produces the expected compressed size."""
    size = 10000
    dtype = inputs.get("data_type") if inputs.get("data_type") else np.int8
    data = cupy.array(np.arange(0, size // dtype(0).itemsize, dtype=dtype))
    compressor = libnvcomp.CascadedManager(**inputs)
    final = compressor.compress(data)
    assert_compression_size(len(final), LEN["Cascaded"])
@pytest.mark.parametrize(
    "inputs",
    [
        {},
        {"chunk_size": 1 << 16, "algo": 0, "device_id": 0},
        {
            "chunk_size": 1 << 16,
        },
        {
            "algo": 0,
        },
        {
            "device_id": 0,
        },
    ],
)
def test_gdeflate_inputs(inputs):
    """GdeflateManager accepts each keyword-argument combination and
    produces the expected compressed size."""
    size = 10000
    dtype = inputs.get("data_type") if inputs.get("data_type") else np.int8
    data = cupy.array(np.arange(0, size // dtype(0).itemsize, dtype=dtype))
    compressor = libnvcomp.GdeflateManager(**inputs)
    final = compressor.compress(data)
    assert_compression_size(len(final), LEN["Gdeflate"])
@pytest.mark.parametrize(
    "inputs, expected",
    zip(
        [
            {"algo": 0},
        ],
        [LEN["Gdeflate"]],
    ),
)
def test_gdeflate_algorithms(inputs, expected):
    """The supported Gdeflate algorithm compresses to the expected size."""
    size = 10000
    dtype = np.int8
    data = cupy.array(np.arange(0, size // dtype(0).itemsize, dtype=dtype))
    compressor = libnvcomp.GdeflateManager(**inputs)
    final = compressor.compress(data)
    assert_compression_size(len(final), expected)
@pytest.mark.xfail(raises=ValueError)
@pytest.mark.parametrize(
    "inputs, expected",
    zip([{"algo": 1}, {"algo": 2}], [LEN["Gdeflate"], LEN["Gdeflate"]]),
)
def test_gdeflate_algorithms_not_implemented(inputs, expected):
    """Gdeflate algo 1 and 2 are expected to raise ValueError (xfail)."""
    size = 10000
    dtype = np.int8
    data = cupy.array(np.arange(0, size // dtype(0).itemsize, dtype=dtype))
    compressor = libnvcomp.GdeflateManager(**inputs)
    final = compressor.compress(data)
    assert_compression_size(len(final), expected)
@pytest.mark.parametrize(
    "inputs",
    [
        {},
        {"chunk_size": 1 << 16, "data_type": np.uint8, "device_id": 0},
        {
            "chunk_size": 1 << 16,
        },
        {
            "data_type": np.uint8,
        },
        {
            "device_id": 0,
        },
    ],
)
def test_lz4_inputs(inputs):
    """LZ4Manager accepts each keyword-argument combination and produces
    the expected compressed size."""
    size = 10000
    dtype = inputs.get("data_type") if inputs.get("data_type") else np.int8
    data = cupy.array(np.arange(0, size // dtype(0).itemsize, dtype=dtype))
    compressor = libnvcomp.LZ4Manager(**inputs)
    final = compressor.compress(data)
    assert_compression_size(len(final), LEN["LZ4"])
@pytest.mark.parametrize(
    "inputs",
    [
        {},
        {"chunk_size": 1 << 16, "device_id": 0},
        {
            "chunk_size": 1 << 16,
        },
        {"device_id": 0},
    ],
)
def test_snappy_inputs(inputs):
    """SnappyManager accepts each keyword-argument combination and
    produces the expected compressed size."""
    size = 10000
    dtype = np.int8
    data = cupy.array(np.arange(0, size // dtype(0).itemsize, dtype=dtype))
    compressor = libnvcomp.SnappyManager(**inputs)
    final = compressor.compress(data)
    assert_compression_size(len(final), LEN["Snappy"])
@pytest.mark.parametrize(
    "compressor_size",
    zip(
        managers(),
        [
            {  # ANS
                "max_compressed_buffer_size": 89373,
                "num_chunks": 1,
                "uncompressed_buffer_size": 10000,
            },
            {  # Bitcomp
                "max_compressed_buffer_size": 16432,
                "num_chunks": 1,
                "uncompressed_buffer_size": 10000,
            },
            {  # Cascaded
                "max_compressed_buffer_size": 12460,
                "num_chunks": 3,
                "uncompressed_buffer_size": 10000,
            },
            {  # Gdeflate
                "max_compressed_buffer_size": 131160,
                "num_chunks": 1,
                "uncompressed_buffer_size": 10000,
            },
            {  # LZ4
                "max_compressed_buffer_size": 65888,
                "num_chunks": 1,
                "uncompressed_buffer_size": 10000,
            },
            {  # Snappy
                "max_compressed_buffer_size": 76575,
                "num_chunks": 1,
                "uncompressed_buffer_size": 10000,
            },
        ],
    ),
)
def test_get_compression_config_with_default_options(compressor_size):
    """configure_compression() reports the expected sizes per manager.

    The buffer-size entry is compared with tolerance (it may drift
    between nvCOMP versions); the remaining entries must match exactly.
    """
    compressor = compressor_size[0]
    expected = compressor_size[1]
    length = 10000
    dtype = cupy.uint8
    data = cupy.array(
        np.arange(
            0,
            length // cupy.dtype(dtype).type(0).itemsize,
            dtype=dtype,
        )
    )
    compressor_instance = compressor()
    result = compressor_instance.configure_compression(len(data))
    assert_compression_size(
        result.pop("max_compressed_buffer_size"),
        expected.pop("max_compressed_buffer_size"),
    )
    assert result == expected
@pytest.mark.parametrize(
    "manager,expected",
    zip(
        managers(),
        [
            {  # ANS
                "num_chunks": 1,
                "decomp_data_size": 10000,
            },
            {  # Bitcomp
                "num_chunks": 1,
                "decomp_data_size": 10000,
            },
            {  # Cascaded
                "num_chunks": 3,
                "decomp_data_size": 10000,
            },
            {  # Gdeflate
                "num_chunks": 1,
                "decomp_data_size": 10000,
            },
            {  # LZ4
                "num_chunks": 1,
                "decomp_data_size": 10000,
            },
            {  # Snappy
                "num_chunks": 1,
                "decomp_data_size": 10000,
            },
        ],
    ),
)
def test_get_decompression_config_with_default_options(manager, expected):
    """configure_decompression_with_compressed_buffer() reports the
    expected decompressed size and chunk count per manager."""
    length = 10000
    dtype = cupy.uint8
    data = cupy.array(
        np.arange(
            0,
            length // cupy.dtype(dtype).type(0).itemsize,
            dtype=dtype,
        )
    )
    compressor_instance = manager()
    compressed = compressor_instance.compress(data)
    result = compressor_instance.configure_decompression_with_compressed_buffer(
        compressed
    )
    assert_compression_size(
        result.pop("decomp_data_size"), expected.pop("decomp_data_size")
    )
    assert result == expected
@pytest.mark.parametrize(
    "manager, expected",
    zip(managers(), list(LEN.values())),
)
def test_get_compressed_output_size(manager, expected):
    """get_compressed_output_size() matches the known compressed sizes."""
    length = 10000
    dtype = cupy.uint8
    data = cupy.array(
        np.arange(
            0,
            length // cupy.dtype(dtype).type(0).itemsize,
            dtype=dtype,
        )
    )
    compressor_instance = manager()
    compressed = compressor_instance.compress(data)
    buffer_size = compressor_instance.get_compressed_output_size(compressed)
    assert_compression_size(buffer_size, expected)
@pytest.mark.parametrize("manager", managers())
def test_managed_manager(manager):
    """ManagedDecompressionManager can decompress buffers produced by any
    of the compression managers (format auto-detection)."""
    length = 10000
    dtype = cupy.uint8
    data = cupy.array(
        np.arange(
            0,
            length // cupy.dtype(dtype).type(0).itemsize,
            dtype=dtype,
        )
    )
    compressor_instance = manager()
    compressed = compressor_instance.compress(data)
    manager = libnvcomp.ManagedDecompressionManager(compressed)
    decompressed = manager.decompress(compressed)
    assert len(decompressed) == 10000
| 0 |
rapidsai_public_repos/kvikio/python
|
rapidsai_public_repos/kvikio/python/tests/test_benchmarks.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import os
import os.path
import sys
from pathlib import Path
import pytest
benchmarks_path = Path(os.path.realpath(__file__)).parent / ".." / "benchmarks"
pytest.importorskip("cupy")
pytest.importorskip("dask")
@pytest.mark.parametrize(
    "api",
    [
        "cufile",
        "posix",
        "cufile-mfma",
        "cufile-mf",
        "cufile-ma",
        "zarr",
    ],
)
def test_single_node_io(run_cmd, tmp_path, api):
    """Test benchmarks/single-node-io.py

    Runs the benchmark script as a subprocess (via the `run_cmd`
    fixture) for each supported IO API and checks the exit status.
    """
    if "zarr" in api:
        kz = pytest.importorskip("kvikio.zarr")
        if not kz.supported:
            pytest.skip(f"requires Zarr >={kz.MINIMUM_ZARR_VERSION}")
    retcode = run_cmd(
        cmd=[
            sys.executable or "python",
            "single-node-io.py",
            "-n",
            "1MiB",
            "-d",
            str(tmp_path),
            "--api",
            api,
        ],
        cwd=benchmarks_path,
    )
    assert retcode == 0
| 0 |
rapidsai_public_repos/kvikio/python
|
rapidsai_public_repos/kvikio/python/tests/test_nvcomp_codec.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import itertools as it
import json
import cupy as cp
import numcodecs
import numpy as np
import pytest
import zarr
from numpy.testing import assert_equal
from kvikio.nvcomp_codec import NvCompBatchCodec
NVCOMP_CODEC_ID = "nvcomp_batch"
LZ4_ALGO = "LZ4"
GDEFLATE_ALGO = "Gdeflate"
SNAPPY_ALGO = "snappy"
ZSTD_ALGO = "zstd"
SUPPORTED_CODECS = [LZ4_ALGO, GDEFLATE_ALGO, SNAPPY_ALGO, ZSTD_ALGO]
def _get_codec(algo: str, **kwargs):
    """Look up an nvCOMP batch codec through the numcodecs registry."""
    return numcodecs.registry.get_codec(
        {"id": NVCOMP_CODEC_ID, "algorithm": algo, "options": kwargs}
    )
@pytest.fixture(params=[(32,), (8, 16), (16, 16)])
def shape(request):
    """Fixture parametrizing over 1-D and 2-D data shapes"""
    return request.param
# Separate fixture for combinations of shapes and chunks, since
# chunks array must have the same rank as data array.
@pytest.fixture(
    params=it.chain(
        it.product([(64,)], [(64,), (100,)]),
        it.product([(16, 8), (16, 16)], [(8, 16), (16, 16), (40, 12)]),
    )
)
def shape_chunks(request):
    """Fixture yielding (shape, chunks) pairs of matching rank"""
    return request.param
@pytest.mark.parametrize("algo", SUPPORTED_CODECS)
def test_codec_registry(algo: str):
    """Each supported algorithm is resolvable via the numcodecs registry."""
    codec = _get_codec(algo)
    assert isinstance(codec, numcodecs.abc.Codec)
@pytest.mark.parametrize("algo", SUPPORTED_CODECS)
def test_basic(algo: str, shape):
    """Basic encode/decode round-trip for each supported algorithm."""
    codec = NvCompBatchCodec(algo)
    # Create data.
    dtype = np.float32
    data = np.ones(shape, dtype=dtype)
    # Do roundtrip.
    comp_data = codec.encode(data)
    # Decompress and cast to original data type/shape.
    decomp_data = codec.decode(comp_data).view(dtype).reshape(shape)
    assert_equal(decomp_data, data)
@pytest.mark.parametrize("algo", SUPPORTED_CODECS)
def test_basic_zarr(algo: str, shape_chunks):
    """Round-trip each supported algorithm through a Zarr array."""
    shape, chunks = shape_chunks
    codec = NvCompBatchCodec(algo)
    data = np.ones(shape, dtype=np.float32)
    # This will do the compression.
    z = zarr.array(data, chunks=chunks, compressor=codec)
    # Test the decompression.
    assert_equal(z[:], data[:])
@pytest.mark.parametrize("algo", SUPPORTED_CODECS)
@pytest.mark.parametrize("chunk_sizes", [(100, 100), (100, 150)])
@pytest.mark.parametrize("out", [None, "cpu", "gpu"])
def test_batch_comp_decomp(algo: str, chunk_sizes, out: str):
    """Batched encode/decode, optionally decoding into preallocated
    host ("cpu") or device ("gpu") output buffers."""
    codec = _get_codec(algo)
    np.random.seed(1)
    dtype = np.float32
    chunks = [np.random.randn(s).astype(dtype) for s in chunk_sizes]
    out_buf = None
    if out == "cpu":
        out_buf = [np.empty_like(c) for c in chunks]
    elif out == "gpu":
        out_buf = [cp.empty_like(c) for c in chunks]
    comp_chunks = codec.encode_batch([c.tobytes() for c in chunks])
    assert len(comp_chunks) == 2
    decomp_chunks = codec.decode_batch(comp_chunks, out=out_buf)
    assert len(decomp_chunks) == 2
    for i, dc in enumerate(decomp_chunks):
        dc = dc.view(dtype=dtype)
        # Move device results to host before comparing
        if isinstance(dc, cp.ndarray):
            dc = dc.get()
        assert_equal(dc, chunks[i], f"{i=}")
        # When provided, the preallocated buffers must also hold the data
        if out_buf is not None:
            ob = out_buf[i]
            if isinstance(ob, cp.ndarray):
                ob = ob.get()
            assert_equal(ob, chunks[i], f"{i=}")
@pytest.mark.parametrize("algo", SUPPORTED_CODECS)
def test_comp_decomp(algo: str, shape_chunks):
    """Write a compressed Zarr array to a MemoryStore, validate the
    stored metadata, and read it back."""
    shape, chunks = shape_chunks
    codec = _get_codec(algo)
    np.random.seed(1)
    data = np.random.randn(*shape).astype(np.float32)
    z1 = zarr.array(data, chunks=chunks, compressor=codec)
    zarr_store = zarr.MemoryStore()
    zarr.save_array(zarr_store, z1, compressor=codec)
    # Check the store.
    meta = json.loads(zarr_store[".zarray"])
    assert meta["compressor"]["id"] == NVCOMP_CODEC_ID
    assert meta["compressor"]["algorithm"] == algo.lower()
    # Read back/decompress.
    z2 = zarr.open_array(zarr_store)
    assert_equal(z1[:], z2[:])
@pytest.mark.parametrize(
    "algo, options",
    [
        ("lz4", {"data_type": 4}),  # NVCOMP_TYPE_INT data type.
        ("gdeflate", {"algo": 1}),  # low-throughput, high compression ratio algo
    ],
)
def test_codec_options(algo, options):
    """Algorithm-specific option dicts are honored by the codec."""
    codec = NvCompBatchCodec(algo, options)
    shape = (16, 16)
    chunks = (8, 8)
    data = np.ones(shape, dtype=np.float32)
    z = zarr.array(data, chunks=chunks, compressor=codec)
    assert_equal(z[:], data[:])
def test_codec_invalid_options():
    """Invalid algorithm-specific options must raise at compression time."""
    # There are currently only 3 supported algos in Gdeflate
    codec = NvCompBatchCodec(GDEFLATE_ALGO, options={"algo": 10})
    data = np.ones((16, 16), dtype=np.float32)
    with pytest.raises(RuntimeError):
        zarr.array(data, compressor=codec)
@pytest.mark.parametrize(
    "cpu_algo, gpu_algo",
    [
        ("lz4", LZ4_ALGO),
        ("zstd", ZSTD_ALGO),
    ],
)
def test_cpu_comp_gpu_decomp(cpu_algo, gpu_algo):
    """Data compressed with a CPU codec must decompress with the
    corresponding GPU codec (format compatibility)."""
    cpu_codec = numcodecs.registry.get_codec({"id": cpu_algo})
    gpu_codec = _get_codec(gpu_algo)
    shape = (16, 16)
    chunks = (8, 8)
    data = np.ones(shape, dtype=np.float32)
    z1 = zarr.array(data, chunks=chunks)
    store = {}
    zarr.save_array(store, z1, compressor=cpu_codec)
    meta = json.loads(store[".zarray"])
    assert meta["compressor"]["id"] == cpu_algo
    # Rewrite the on-disk metadata to point at the GPU codec
    meta["compressor"] = {"id": NVCOMP_CODEC_ID, "algorithm": gpu_algo}
    store[".zarray"] = json.dumps(meta).encode()
    z2 = zarr.open_array(store, compressor=gpu_codec)
    assert_equal(z1[:], z2[:])
def test_lz4_codec_header(shape_chunks):
    """LZ4 must round-trip identically with and without the nvCOMP header."""
    shape, chunks = shape_chunks
    codec_with_header = _get_codec(LZ4_ALGO, has_header=True)
    codec_without_header = _get_codec(LZ4_ALGO, has_header=False)
    np.random.seed(1)
    payload = np.random.randn(*shape).astype(np.float32)
    arr_with = zarr.array(payload, chunks=chunks, compressor=codec_with_header)
    arr_without = zarr.array(payload, chunks=chunks, compressor=codec_without_header)
    # Result must be the same regardless of the header presence.
    assert_equal(arr_with[:], arr_without[:])
def test_empty_batch():
    """Encoding or decoding zero chunks must return zero results."""
    codec = _get_codec(LZ4_ALGO)
    for batch_op in (codec.encode_batch, codec.decode_batch):
        assert len(batch_op([])) == 0
| 0 |
rapidsai_public_repos/kvikio/python
|
rapidsai_public_repos/kvikio/python/tests/test_defaults.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import pytest
import kvikio.defaults
@pytest.mark.skipif(
    kvikio.defaults.compat_mode(),
    reason="cannot test `compat_mode` when already running in compatibility mode",
)
def test_compat_mode():
    """The compat-mode context manager and reset must restore the prior value."""
    original = kvikio.defaults.compat_mode()
    with kvikio.defaults.set_compat_mode(True):
        # Inside the context the flag is forced on, then explicitly reset off.
        assert kvikio.defaults.compat_mode()
        kvikio.defaults.compat_mode_reset(False)
        assert not kvikio.defaults.compat_mode()
    # Leaving the context restores whatever value was active before.
    assert kvikio.defaults.compat_mode() == original
def test_num_threads():
    """The thread-count context manager and reset must restore the prior value."""
    initial = kvikio.defaults.get_num_threads()
    with kvikio.defaults.set_num_threads(3):
        assert kvikio.defaults.get_num_threads() == 3
        # An explicit reset inside the context overrides the context value.
        kvikio.defaults.num_threads_reset(4)
        assert kvikio.defaults.get_num_threads() == 4
    assert kvikio.defaults.get_num_threads() == initial
def test_task_size():
    """The task-size context manager and reset must restore the prior value."""
    initial = kvikio.defaults.task_size()
    with kvikio.defaults.set_task_size(3):
        assert kvikio.defaults.task_size() == 3
        # An explicit reset inside the context overrides the context value.
        kvikio.defaults.task_size_reset(4)
        assert kvikio.defaults.task_size() == 4
    assert kvikio.defaults.task_size() == initial
| 0 |
rapidsai_public_repos/kvikio/python
|
rapidsai_public_repos/kvikio/python/cli/gpu_compressor.py
|
# NVIDIA 2022
import argparse
import os
import sys
import time
import cupy
import kvikio
import kvikio.nvcomp as nvcomp
def get_parser():
    """Build and return the CLI argument parser for the GPU compressor tool."""
    class NvcompParser(argparse.ArgumentParser):
        """
        Handle special case and show help on invalid argument
        """
        def error(self, message):
            # Replace argparse's terse error with the full help text, then exit 2.
            sys.stderr.write("\nERROR: {}\n\n".format(message))
            self.print_help()
            sys.exit(2)
    parser = NvcompParser()
    parser.add_argument("-v", "--verbose", action="store_true", help="Verbose Output")
    parser.add_argument(
        "-o",
        "--out_file",
        action="store",
        dest="out_file",
        help="Output filename",
    )
    parser.add_argument(
        "-c",
        choices=["ans", "bitcomp", "cascaded", "gdeflate", "lz4", "snappy"],
        action="store",
        dest="compression",
        help="Which GPU algorithm to use for compression.",
    )
    parser.add_argument(
        "-d",
        action="store_true",
        help="Decompress the incoming file",
    )
    # Positional argument: the input file to (de)compress.
    parser.add_argument(action="store", dest="filename", help="Relative Filename")
    return parser
def main():
    """Compress or decompress ``args.filename`` on the GPU and report timings.

    Reads the input file into device memory via KvikIO, runs the selected
    nvCOMP manager, writes the result back out, and (when ``-v``) prints
    per-stage timings and sizes.
    """
    parser = get_parser()
    args = parser.parse_args()

    def vprint(msg):
        # Emit progress messages only when the user asked for verbose output.
        if args.verbose:
            print(msg)

    vprint("GPU Compression Initialized")
    file_size = os.path.getsize(args.filename)
    # Read the whole input file straight into device memory.
    data = cupy.zeros(file_size, dtype=cupy.int8)
    t = time.time()
    f = kvikio.CuFile(args.filename, "r")
    f.read(data)
    f.close()
    read_time = time.time() - t
    vprint(f"File read time: {read_time:.3} seconds.")
    # Pick the nvCOMP manager: decompression auto-detects the format from the
    # data; otherwise use the algorithm requested via -c (LZ4 when omitted).
    if args.d:
        compressor = nvcomp.ManagedDecompressionManager(data)
    elif args.compression == "ans":
        compressor = nvcomp.ANSManager()
    elif args.compression == "bitcomp":
        compressor = nvcomp.BitcompManager()
    elif args.compression == "cascaded":
        compressor = nvcomp.CascadedManager()
    elif args.compression == "gdeflate":
        compressor = nvcomp.GdeflateManager()
    elif args.compression == "snappy":
        compressor = nvcomp.SnappyManager()
    else:
        compressor = nvcomp.LZ4Manager(chunk_size=1 << 16)
    if args.d:
        vprint(f"Decompressing {file_size} bytes")
        t = time.time()
        converted = compressor.decompress(data)
        decompress_time = time.time() - t
        vprint(f"Decompression time: {decompress_time:.3} seconds")
        if not args.out_file:
            raise ValueError("Must specify filename with -o for decompression.")
        t = time.time()
        o = kvikio.CuFile(args.out_file, "w")
        o.write(converted)
        o.close()
        io_time = time.time() - t
        vprint(f"File write time: {io_time:.3} seconds")
        vprint(f"Decompressed file size {os.path.getsize(args.out_file)}")
    else:
        vprint(f"Compressing {file_size} bytes")
        t = time.time()
        converted = compressor.compress(data)
        compress_time = time.time() - t
        vprint(f"Compression time: {compress_time:.3} seconds")
        # Default output name is "<input>.gpc" when -o was not given.
        end_name = args.out_file if args.out_file else args.filename + ".gpc"
        t = time.time()
        o = kvikio.CuFile(end_name, "w")
        o.write(converted)
        o.close()
        io_time = time.time() - t
        vprint(f"File write time: {io_time:.3} seconds")
        vprint(f"Compressed file size {compressor.get_compressed_output_size(converted)}")
        vprint(f"Created file {end_name}")
# Script entry point.
if __name__ == "__main__":
    main()
| 0 |
rapidsai_public_repos/kvikio/python
|
rapidsai_public_repos/kvikio/python/benchmarks/zarr-io.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import argparse
import contextlib
import os
import os.path
import pathlib
import shutil
import statistics
import subprocess
import tempfile
from time import perf_counter as clock
from typing import ContextManager, Union
import cupy
import numcodecs.blosc
import numpy
import zarr
from dask.utils import format_bytes, parse_bytes
import kvikio
import kvikio.defaults
import kvikio.zarr
# Bail out early when the installed Zarr is too old for the KvikIO integration.
if not kvikio.zarr.supported:
    raise RuntimeError(f"requires Zarr >={kvikio.zarr.MINIMUM_ZARR_VERSION}")
# Benchmark compressors: name -> (CPU codec, GPU codec); "none" disables compression.
compressors = {
    "none": (None, None),
    "lz4": (numcodecs.blosc.Blosc(cname="lz4"), kvikio.zarr.LZ4()),
}
def drop_vm_cache(args):
    """Flush the Linux VM page cache when ``--drop-vm-cache`` was requested.

    Requires passwordless sudo access to /sbin/sysctl; raises
    ``CalledProcessError`` if the command fails.
    """
    if args.drop_vm_cache:
        # With shell=True the command must be a single string; a list would make
        # all but the first element arguments to /bin/sh itself.
        subprocess.check_output("sudo /sbin/sysctl vm.drop_caches=3", shell=True)
def create_src_data(args):
    """Return a random CuPy device array of ``args.nelem`` elements of ``args.dtype``."""
    return cupy.random.random(args.nelem, dtype=args.dtype)
def run_kvikio(args):
    """Benchmark Zarr writes/reads through KvikIO's GDSStore.

    Returns a ``(read_time, write_time)`` tuple in seconds.
    """
    dir_path = args.dir / "kvikio"
    shutil.rmtree(str(dir_path), ignore_errors=True)
    # Get the GPU compressor
    compressor = compressors[args.compressor][1]
    src = create_src_data(args)
    # Write
    drop_vm_cache(args)
    t0 = clock()
    z = zarr.create(
        shape=(args.nelem,),
        chunks=(args.chunksize,),
        dtype=args.dtype,
        compressor=compressor,
        store=kvikio.zarr.GDSStore(dir_path),
        meta_array=cupy.empty(()),
    )
    z[:] = src
    # Include flushing dirty pages to disk in the write timing.
    os.sync()
    write_time = clock() - t0
    # Read
    drop_vm_cache(args)
    t0 = clock()
    res = z[:]
    read_time = clock() - t0
    assert res.nbytes == args.nbytes
    return read_time, write_time
def run_posix(args):
    """Benchmark Zarr writes/reads through a plain POSIX DirectoryStore.

    Data is staged through host memory (``src.get()`` on write,
    ``cupy.asarray`` on read) so the device<->host copies are included in
    the timings. Returns a ``(read_time, write_time)`` tuple in seconds.
    """
    dir_path = args.dir / "posix"
    shutil.rmtree(str(dir_path), ignore_errors=True)
    # Get the CPU compressor
    compressor = compressors[args.compressor][0]
    src = create_src_data(args)
    # Write
    drop_vm_cache(args)
    t0 = clock()
    z = zarr.create(
        shape=(args.nelem,),
        chunks=(args.chunksize,),
        dtype=args.dtype,
        compressor=compressor,
        store=zarr.DirectoryStore(dir_path),
        meta_array=numpy.empty(()),
    )
    # Copy device data to host before handing it to the CPU store.
    z[:] = src.get()
    os.sync()
    write_time = clock() - t0
    # Read
    drop_vm_cache(args)
    t0 = clock()
    res = cupy.asarray(z[:])
    read_time = clock() - t0
    assert res.nbytes == args.nbytes
    return read_time, write_time
# Dispatch table: CLI API name -> benchmark runner.
API = {
    "kvikio": run_kvikio,
    "posix": run_posix,
}
def main(args):
    """Print system/GDS info, run each requested API, and report throughput."""
    cupy.cuda.set_allocator(None)  # Disable CuPy's default memory pool
    cupy.arange(10)  # Make sure CUDA is initialized
    kvikio.defaults.num_threads_reset(args.nthreads)
    props = kvikio.DriverProperties()
    # Query GPU name/memory via pynvml when available; otherwise print a hint.
    try:
        import pynvml.smi
        nvsmi = pynvml.smi.nvidia_smi.getInstance()
    except ImportError:
        gpu_name = "Unknown (install pynvml)"
        mem_total = gpu_name
        bar1_total = gpu_name
    else:
        info = nvsmi.DeviceQuery()["gpu"][0]
        gpu_name = f"{info['product_name']} (dev #0)"
        mem_total = format_bytes(
            parse_bytes(
                str(info["fb_memory_usage"]["total"]) + info["fb_memory_usage"]["unit"]
            )
        )
        bar1_total = format_bytes(
            parse_bytes(
                str(info["bar1_memory_usage"]["total"])
                + info["bar1_memory_usage"]["unit"]
            )
        )
    gds_version = "N/A (Compatibility Mode)"
    if props.is_gds_available:
        gds_version = f"v{props.major_version}.{props.minor_version}"
    gds_config_json_path = os.path.realpath(
        os.getenv("CUFILE_ENV_PATH_JSON", "/etc/cufile.json")
    )
    drop_vm_cache_msg = str(args.drop_vm_cache)
    if not args.drop_vm_cache:
        drop_vm_cache_msg += " (use --drop-vm-cache for better accuracy!)"
    # Convert chunk size from elements back to bytes for display.
    chunksize = args.chunksize * args.dtype.itemsize
    print("Roundtrip benchmark")
    print("----------------------------------")
    if kvikio.defaults.compat_mode():
        print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
        print(" WARNING - KvikIO compat mode ")
        print(" libcufile.so not used ")
        print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
    elif not props.is_gds_available:
        print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
        print(" WARNING - cuFile compat mode ")
        print(" GDS not enabled ")
        print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
    print(f"GPU | {gpu_name}")
    print(f"GPU Memory Total | {mem_total}")
    print(f"BAR1 Memory Total | {bar1_total}")
    print(f"GDS driver | {gds_version}")
    print(f"GDS config.json | {gds_config_json_path}")
    print("----------------------------------")
    print(f"nbytes | {args.nbytes} bytes ({format_bytes(args.nbytes)})")
    print(f"chunksize | {chunksize} bytes ({format_bytes(chunksize)})")
    print(f"4K aligned | {args.nbytes % 4096 == 0}")
    print(f"drop-vm-cache | {drop_vm_cache_msg}")
    print(f"directory | {args.dir}")
    print(f"nthreads | {args.nthreads}")
    print(f"nruns | {args.nruns}")
    print(f"compressor | {args.compressor}")
    print("==================================")
    # Run each benchmark using the requested APIs
    for api in args.api:
        rs = []
        ws = []
        # Warmup runs are timed but their results are discarded.
        for _ in range(args.n_warmup_runs):
            read, write = API[api](args)
        for _ in range(args.nruns):
            read, write = API[api](args)
            rs.append(args.nbytes / read)
            ws.append(args.nbytes / write)
        def pprint_api_res(name, samples):
            # Format mean throughput, relative stdev, and the raw samples.
            mean = statistics.mean(samples) if len(samples) > 1 else samples[0]
            ret = f"{api} {name}".ljust(18)
            ret += f"| {format_bytes(mean).rjust(10)}/s".ljust(14)
            if len(samples) > 1:
                stdev = statistics.stdev(samples) / mean * 100
                ret += " ± %5.2f %%" % stdev
            ret += " ("
            for sample in samples:
                ret += f"{format_bytes(sample)}/s, "
            ret = ret[:-2] + ")"  # Replace trailing comma
            return ret
        print(pprint_api_res("read", rs))
        print(pprint_api_res("write", ws))
if __name__ == "__main__":
    def parse_directory(x):
        """argparse type: pass None through, else require an existing directory."""
        if x is None:
            return x
        else:
            p = pathlib.Path(x)
            if not p.is_dir():
                raise argparse.ArgumentTypeError("Must be a directory")
            return p
    parser = argparse.ArgumentParser(description="Roundtrip benchmark")
    parser.add_argument(
        "-n",
        "--nbytes",
        metavar="BYTES",
        default="10 MiB",
        type=parse_bytes,
        help="Message size, which must be a multiple of 8 (default: %(default)s).",
    )
    parser.add_argument(
        "--chunksize",
        metavar="BYTES",
        default="10 MiB",
        type=parse_bytes,
        help="Chunk size (default: %(default)s).",
    )
    parser.add_argument(
        "--dtype",
        default="float32",
        type=numpy.dtype,
        help="NumPy datatype to use (default: '%(default)s')",
    )
    parser.add_argument(
        "-d",
        "--dir",
        metavar="PATH",
        default=None,
        type=parse_directory,
        help="Path to the directory to r/w from (default: tempfile.TemporaryDirectory)",
    )
    parser.add_argument(
        "--nruns",
        metavar="RUNS",
        default=1,
        type=int,
        help="Number of runs per API (default: %(default)s).",
    )
    parser.add_argument(
        "--n-warmup-runs",
        default=0,
        type=int,
        help="Number of warmup runs (default: %(default)s).",
    )
    parser.add_argument(
        "-t",
        "--nthreads",
        metavar="THREADS",
        default=1,
        type=int,
        help="Number of threads to use (default: %(default)s).",
    )
    parser.add_argument(
        "--api",
        metavar="API",
        default=("kvikio", "posix"),
        nargs="+",
        choices=tuple(API.keys()) + ("all",),
        help="List of APIs to use {%(choices)s}",
    )
    parser.add_argument(
        "--compressor",
        metavar="COMPRESSOR",
        default="none",
        choices=tuple(compressors.keys()),
        help=(
            "Set a nvCOMP compressor to use with Zarr "
            "{%(choices)s} (default: %(default)s)"
        ),
    )
    parser.add_argument(
        "--drop-vm-cache",
        action="store_true",
        default=False,
        help=(
            "Drop the VM cache between writes and reads, "
            "requires sudo access to /sbin/sysctl"
        ),
    )
    args = parser.parse_args()
    # "all" expands to every registered API runner.
    if "all" in args.api:
        args.api = tuple(API.keys())
    # Check if size is divisible by size of datatype
    assert args.nbytes % args.dtype.itemsize == 0
    assert args.chunksize % args.dtype.itemsize == 0
    # Compute/convert to number of elements
    args.nelem = args.nbytes // args.dtype.itemsize
    args.chunksize = args.chunksize // args.dtype.itemsize
    # Create a temporary directory if user didn't specify a directory
    temp_dir: Union[tempfile.TemporaryDirectory, ContextManager]
    if args.dir is None:
        temp_dir = tempfile.TemporaryDirectory()
        args.dir = pathlib.Path(temp_dir.name)
    else:
        # nullcontext keeps the user-supplied directory untouched on exit.
        temp_dir = contextlib.nullcontext()
    with temp_dir:
        main(args)
| 0 |
rapidsai_public_repos/kvikio/python
|
rapidsai_public_repos/kvikio/python/benchmarks/single-node-io.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import argparse
import contextlib
import os
import os.path
import pathlib
import shutil
import statistics
import tempfile
from time import perf_counter as clock
from typing import Any, ContextManager, Dict, Union
import cupy
from dask.utils import format_bytes, parse_bytes
import kvikio
import kvikio.defaults
def get_zarr_compressors() -> Dict[str, Any]:
    """Returns a dict of available Zarr compressors"""
    try:
        import kvikio.zarr
    except ImportError:
        # KvikIO's Zarr integration is optional; no compressors without it.
        return {}
    return {cls.__name__.lower(): cls for cls in kvikio.zarr.nvcomp_compressors}
def create_data(nbytes):
    """Return a sequential (0, 1, 2, ...) uint8 CuPy array of ``nbytes`` elements."""
    return cupy.arange(nbytes, dtype="uint8")
def run_cufile(args):
    """Single file and array"""
    file_path = args.dir / "kvikio-single-file"
    data = create_data(args.nbytes)
    if args.pre_register_buffer:
        kvikio.memory_register(data)
    # Write
    f = kvikio.CuFile(file_path, flags="w")
    t0 = clock()
    res = f.write(data)
    f.close()
    write_time = clock() - t0
    assert res == args.nbytes, f"IO mismatch, expected {args.nbytes} got {res}"
    # Read (reuses `data` as the destination buffer)
    f = kvikio.CuFile(file_path, flags="r")
    t0 = clock()
    res = f.read(data)
    f.close()
    read_time = clock() - t0
    assert res == args.nbytes, f"IO mismatch, expected {args.nbytes} got {res}"
    if args.pre_register_buffer:
        kvikio.memory_deregister(data)
    # Times are in seconds.
    return read_time, write_time
def run_cufile_multiple_files_multiple_arrays(args):
    """One file and array per thread"""
    chunksize = args.nbytes // args.nthreads
    assert args.nbytes % args.nthreads == 0, "--nbytes must be divisible by --nthreads"
    # Create a file path and CuPy array per thread
    file_path = str(args.dir / "cufile-p-%03d")
    arrays = [create_data(chunksize) for _ in range(args.nthreads)]
    if args.pre_register_buffer:
        for array in arrays:
            kvikio.memory_register(array)
    # Write
    files = [kvikio.CuFile(file_path % i, flags="w") for i in range(args.nthreads)]
    t0 = clock()
    futures = [f.pwrite(a, task_size=a.nbytes) for f, a in zip(files, arrays)]
    res = sum(f.get() for f in futures)
    # Dropping the CuFile objects closes every file before the clock stops.
    del files
    write_time = clock() - t0
    assert res == args.nbytes
    # Read
    files = [kvikio.CuFile(file_path % i, flags="r") for i in range(args.nthreads)]
    t0 = clock()
    futures = [f.pread(a, task_size=a.nbytes) for f, a in zip(files, arrays)]
    res = sum(f.get() for f in futures)
    del files
    read_time = clock() - t0
    assert res == args.nbytes
    if args.pre_register_buffer:
        for array in arrays:
            kvikio.memory_deregister(array)
    return read_time, write_time
def run_cufile_multiple_files(args):
    """Single array but one file per thread"""
    chunksize = args.nbytes // args.nthreads
    assert args.nbytes % args.nthreads == 0, "--nbytes must be divisible by --nthreads"
    file_path = str(args.dir / "cufile-p-%03d")
    data = create_data(args.nbytes)
    if args.pre_register_buffer:
        kvikio.memory_register(data)
    # Write: each file receives its own contiguous slice of `data`.
    files = [kvikio.CuFile(file_path % i, flags="w") for i in range(args.nthreads)]
    t0 = clock()
    futures = [
        f.pwrite(data[i * chunksize : (i + 1) * chunksize]) for i, f in enumerate(files)
    ]
    res = sum(f.get() for f in futures)
    # Dropping the CuFile objects closes every file before the clock stops.
    del files
    write_time = clock() - t0
    assert res == args.nbytes, f"IO mismatch, expected {args.nbytes} got {res}"
    # Read: each file fills back its own slice of `data`.
    files = [kvikio.CuFile(file_path % i, flags="r") for i in range(args.nthreads)]
    t0 = clock()
    futures = [
        f.pread(data[i * chunksize : (i + 1) * chunksize]) for i, f in enumerate(files)
    ]
    res = sum(f.get() for f in futures)
    del files
    read_time = clock() - t0
    assert res == args.nbytes, f"IO mismatch, expected {args.nbytes} got {res}"
    if args.pre_register_buffer:
        kvikio.memory_deregister(data)
    return read_time, write_time
def run_cufile_multiple_arrays(args):
    """A single file but one array per thread.

    Returns a ``(read_time, write_time)`` tuple in seconds.
    """
    chunksize = args.nbytes // args.nthreads
    assert args.nbytes % args.nthreads == 0, "--nbytes must be divisible by --nthreads"
    file_path = args.dir / "kvikio-multiple-arrays"
    # Create a CuPy array per thread
    arrays = [create_data(chunksize) for _ in range(args.nthreads)]
    if args.pre_register_buffer:
        for array in arrays:
            kvikio.memory_register(array)
    # Write: each array goes to its own region of the single file.
    f = kvikio.CuFile(file_path, flags="w")
    t0 = clock()
    futures = [
        f.pwrite(a, task_size=a.nbytes, file_offset=i * chunksize)
        for i, a in enumerate(arrays)
    ]
    res = sum(f.get() for f in futures)
    f.close()
    write_time = clock() - t0
    assert res == args.nbytes
    # Read: each array must read back its own region; without file_offset every
    # read would target offset 0 and re-read the same (likely cached) region.
    f = kvikio.CuFile(file_path, flags="r")
    t0 = clock()
    futures = [
        f.pread(a, task_size=a.nbytes, file_offset=i * chunksize)
        for i, a in enumerate(arrays)
    ]
    res = sum(f.get() for f in futures)
    f.close()
    read_time = clock() - t0
    assert res == args.nbytes
    if args.pre_register_buffer:
        for array in arrays:
            kvikio.memory_deregister(array)
    return read_time, write_time
def run_posix(args):
    """Use the posix API, no calls to kvikio.

    Writes the data through a host copy and reads it back into device memory.
    Returns a ``(read_time, write_time)`` tuple in seconds.
    """
    file_path = args.dir / "posix-single-file"
    data = create_data(args.nbytes)
    # Write
    f = open(file_path, "wb")
    t0 = clock()
    res = f.write(data.tobytes())
    f.close()
    write_time = clock() - t0
    assert res == args.nbytes, f"IO mismatch, expected {args.nbytes} got {res}"
    # Read
    f = open(file_path, "rb")
    t0 = clock()
    a = cupy.fromfile(f, dtype="uint8", count=len(data))
    f.close()
    read_time = clock() - t0
    # One assertion on the read size; the original had a duplicate check whose
    # message reported the wrong variable.
    assert a.nbytes == args.nbytes, f"IO mismatch, expected {args.nbytes} got {a.nbytes}"
    return read_time, write_time
def run_zarr(args):
    """Use the Zarr API"""
    # Imported lazily so the other benchmarks work without Zarr installed.
    import zarr
    import kvikio.zarr
    dir_path = args.dir / "zarr"
    if not kvikio.zarr.supported:
        raise RuntimeError(f"requires Zarr >={kvikio.zarr.MINIMUM_ZARR_VERSION}")
    compressor = None
    if args.zarr_compressor is not None:
        compressor = get_zarr_compressors()[args.zarr_compressor]()
    a = create_data(args.nbytes)
    shutil.rmtree(str(dir_path), ignore_errors=True)
    # Write (chunks=False -> a single chunk holding the whole array)
    t0 = clock()
    z = zarr.array(
        a,
        chunks=False,
        compressor=compressor,
        store=kvikio.zarr.GDSStore(dir_path),
        meta_array=cupy.empty(()),
    )
    write_time = clock() - t0
    # Read
    t0 = clock()
    res = z[:]
    read_time = clock() - t0
    assert res.nbytes == args.nbytes
    # Times are in seconds.
    return read_time, write_time
# Dispatch table: CLI API name -> benchmark runner.
API = {
    "cufile": run_cufile,
    "zarr": run_zarr,
    "posix": run_posix,
    "cufile-mfma": run_cufile_multiple_files_multiple_arrays,
    "cufile-mf": run_cufile_multiple_files,
    "cufile-ma": run_cufile_multiple_arrays,
}
def main(args):
    """Print system/GDS info, run each requested API, and report throughput."""
    cupy.cuda.set_allocator(None)  # Disable CuPy's default memory pool
    cupy.arange(10)  # Make sure CUDA is initialized
    kvikio.defaults.num_threads_reset(args.nthreads)
    props = kvikio.DriverProperties()
    # Query GPU name/memory via pynvml when available; otherwise print a hint.
    try:
        import pynvml.smi
        nvsmi = pynvml.smi.nvidia_smi.getInstance()
    except ImportError:
        gpu_name = "Unknown (install pynvml)"
        mem_total = gpu_name
        bar1_total = gpu_name
    else:
        info = nvsmi.DeviceQuery()["gpu"][0]
        gpu_name = f"{info['product_name']} (dev #0)"
        mem_total = format_bytes(
            parse_bytes(
                str(info["fb_memory_usage"]["total"]) + info["fb_memory_usage"]["unit"]
            )
        )
        bar1_total = format_bytes(
            parse_bytes(
                str(info["bar1_memory_usage"]["total"])
                + info["bar1_memory_usage"]["unit"]
            )
        )
    gds_version = "N/A (Compatibility Mode)"
    if props.is_gds_available:
        gds_version = f"v{props.major_version}.{props.minor_version}"
    gds_config_json_path = os.path.realpath(
        os.getenv("CUFILE_ENV_PATH_JSON", "/etc/cufile.json")
    )
    print("Roundtrip benchmark")
    print("----------------------------------")
    if kvikio.defaults.compat_mode():
        print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
        print(" WARNING - KvikIO compat mode ")
        print(" libcufile.so not used ")
        print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
    elif not props.is_gds_available:
        print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
        print(" WARNING - cuFile compat mode ")
        print(" GDS not enabled ")
        print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
    print(f"GPU | {gpu_name}")
    print(f"GPU Memory Total | {mem_total}")
    print(f"BAR1 Memory Total | {bar1_total}")
    print(f"GDS driver | {gds_version}")
    print(f"GDS config.json | {gds_config_json_path}")
    print("----------------------------------")
    print(f"nbytes | {args.nbytes} bytes ({format_bytes(args.nbytes)})")
    print(f"4K aligned | {args.nbytes % 4096 == 0}")
    print(f"pre-reg-buf | {args.pre_register_buffer}")
    print(f"directory | {args.dir}")
    print(f"nthreads | {args.nthreads}")
    print(f"nruns | {args.nruns}")
    if args.zarr_compressor is not None:
        print(f"Zarr compressor | {args.zarr_compressor}")
    print("==================================")
    # Run each benchmark using the requested APIs
    for api in args.api:
        rs = []
        ws = []
        for _ in range(args.nruns):
            read, write = API[api](args)
            rs.append(args.nbytes / read)
            ws.append(args.nbytes / write)
        def pprint_api_res(name, samples):
            # Format mean throughput, relative stdev, and the raw samples.
            mean = statistics.mean(samples) if len(samples) > 1 else samples[0]
            ret = f"{api} {name}".ljust(18)
            ret += f"| {format_bytes(mean).rjust(10)}/s".ljust(14)
            if len(samples) > 1:
                stdev = statistics.stdev(samples) / mean * 100
                ret += " ± %5.2f %%" % stdev
            ret += " ("
            for sample in samples:
                ret += f"{format_bytes(sample)}/s, "
            ret = ret[:-2] + ")"  # Replace trailing comma
            return ret
        print(pprint_api_res("read", rs))
        print(pprint_api_res("write", ws))
if __name__ == "__main__":
    def parse_directory(x):
        """argparse type: pass None through, else require an existing directory."""
        if x is None:
            return x
        else:
            p = pathlib.Path(x)
            if not p.is_dir():
                raise argparse.ArgumentTypeError("Must be a directory")
            return p
    parser = argparse.ArgumentParser(description="Roundtrip benchmark")
    parser.add_argument(
        "-n",
        "--nbytes",
        metavar="BYTES",
        default="10 MiB",
        type=parse_bytes,
        help="Message size, which must be a multiple of 8 (default: %(default)s).",
    )
    parser.add_argument(
        "-d",
        "--dir",
        metavar="PATH",
        default=None,
        type=parse_directory,
        help="Path to the directory to r/w from (default: tempfile.TemporaryDirectory)",
    )
    parser.add_argument(
        "--nruns",
        metavar="RUNS",
        default=1,
        type=int,
        help="Number of runs per API (default: %(default)s).",
    )
    parser.add_argument(
        "--no-pre-register-buffer",
        action="store_true",
        default=False,
        help="Disable pre-register of device buffer",
    )
    parser.add_argument(
        "-t",
        "--nthreads",
        metavar="THREADS",
        default=1,
        type=int,
        help="Number of threads to use (default: %(default)s).",
    )
    parser.add_argument(
        "--api",
        metavar="API",
        default=("cufile", "posix"),
        nargs="+",
        choices=tuple(API.keys()) + ("all",),
        help="List of APIs to use {%(choices)s}",
    )
    parser.add_argument(
        "--zarr-compressor",
        metavar="COMPRESSOR",
        default=None,
        choices=tuple(get_zarr_compressors().keys()),
        help=(
            "Set a nvCOMP compressor to use with Zarr "
            "{%(choices)s} (default: %(default)s)"
        ),
    )
    args = parser.parse_args()
    # Derive the positive flag from the negated CLI option.
    args.pre_register_buffer = args.no_pre_register_buffer is False
    # "all" expands to every registered API runner.
    if "all" in args.api:
        args.api = tuple(API.keys())
    # Create a temporary directory if user didn't specify a directory
    temp_dir: Union[tempfile.TemporaryDirectory, ContextManager]
    if args.dir is None:
        temp_dir = tempfile.TemporaryDirectory()
        args.dir = pathlib.Path(temp_dir.name)
    else:
        # nullcontext keeps the user-supplied directory untouched on exit.
        temp_dir = contextlib.nullcontext()
    with temp_dir:
        main(args)
| 0 |
rapidsai_public_repos/kvikio/python
|
rapidsai_public_repos/kvikio/python/cmake/CMakeLists.txt
|
# =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
include(thirdparty/get_nvcomp.cmake)
| 0 |
rapidsai_public_repos/kvikio/python/cmake
|
rapidsai_public_repos/kvikio/python/cmake/thirdparty/get_nvcomp.cmake
|
# =============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Prefer NVIDIA's prebuilt proprietary nvCOMP binary over building from source.
set(KVIKIO_USE_PROPRIETARY_BINARY ON)
# This function finds nvcomp and sets any additional necessary environment variables.
function(find_and_configure_nvcomp)
include(${rapids-cmake-dir}/cpm/nvcomp.cmake)
rapids_cpm_nvcomp(
BUILD_EXPORT_SET kvikio-exports
INSTALL_EXPORT_SET kvikio-exports
USE_PROPRIETARY_BINARY ${KVIKIO_USE_PROPRIETARY_BINARY}
)
# Per-thread default stream
if(TARGET nvcomp AND PER_THREAD_DEFAULT_STREAM)
target_compile_definitions(nvcomp PRIVATE CUDA_API_PER_THREAD_DEFAULT_STREAM)
endif()
endfunction()
find_and_configure_nvcomp()
| 0 |
rapidsai_public_repos/kvikio/python
|
rapidsai_public_repos/kvikio/python/examples/hello_world.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import cupy
import kvikio
def main(path):
    """Demonstrate blocking, context-managed, and non-blocking KvikIO I/O on ``path``."""
    a = cupy.arange(100)
    f = kvikio.CuFile(path, "w")
    # Write whole array to file
    f.write(a)
    f.close()
    b = cupy.empty_like(a)
    f = kvikio.CuFile(path, "r")
    # Read whole array from file
    f.read(b)
    # Close explicitly; the original left this handle open until GC.
    f.close()
    assert all(a == b)
    # Use context manager
    c = cupy.empty_like(a)
    with kvikio.CuFile(path, "r") as f:
        f.read(c)
    assert all(a == c)
    # Non-blocking read
    d = cupy.empty_like(a)
    with kvikio.CuFile(path, "r") as f:
        future1 = f.pread(d[:50])
        future2 = f.pread(d[50:], file_offset=d[:50].nbytes)
        future1.get()  # Wait for first read
        future2.get()  # Wait for second read
    assert all(a == d)
# Script entry point: run the demo against a fixed temp-file path.
if __name__ == "__main__":
    main("/tmp/kvikio-hello-world-file")
| 0 |
rapidsai_public_repos/kvikio/python
|
rapidsai_public_repos/kvikio/python/examples/zarr_cupy_nvcomp.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import cupy
import numpy
import zarr
import kvikio
import kvikio.zarr
def main(path):
    """Round-trip Zarr arrays between GPU (KvikIO/nvCOMP) and CPU (plain Zarr)."""
    a = cupy.arange(20)
    # Let's use KvikIO's convenience function `open_cupy_array()` to create
    # a new Zarr file on disk. Its semantics are the same as `zarr.open_array()`
    # but uses a GDS file store, nvCOMP compression, and CuPy arrays.
    z = kvikio.zarr.open_cupy_array(store=path, mode="w", shape=(20,), chunks=(5,))
    # `z` is a regular Zarr Array that we can write to as usual
    z[0:10] = numpy.arange(0, 10)
    # but it also supports direct reads and writes of CuPy arrays
    z[10:20] = cupy.arange(10, 20)
    # Reading `z` returns a CuPy array
    assert isinstance(z[:], cupy.ndarray)
    assert (a == z[:]).all()
    # Normally, we cannot assume that GPU and CPU compressors are compatible.
    # E.g., `open_cupy_array()` uses nvCOMP's Snappy GPU compression by default,
    # which, as far as we know, isn’t compatible with any CPU compressor. Thus,
    # let's re-write our Zarr array using a CPU and GPU compatible compressor.
    #
    # Warning: it isn't possible to use `CompatCompressor` as a compressor argument
    # in Zarr directly. It is only meant for `open_cupy_array()`. However,
    # in an example further down, we show how to write using regular Zarr.
    z = kvikio.zarr.open_cupy_array(
        store=path,
        mode="w",
        shape=(20,),
        chunks=(5,),
        compressor=kvikio.zarr.CompatCompressor.lz4(),
    )
    z[:] = a
    # Because we are using a CompatCompressor, it is now possible to open the file
    # using Zarr's built-in LZ4 decompressor that uses the CPU.
    z = zarr.open_array(path)
    # `z` is now read as a regular NumPy array
    assert isinstance(z[:], numpy.ndarray)
    assert (a.get() == z[:]).all()
    # and we can write to it as usual
    z[:] = numpy.arange(20, 40)
    # And we can read the Zarr file back into a CuPy array.
    z = kvikio.zarr.open_cupy_array(store=path, mode="r")
    assert isinstance(z[:], cupy.ndarray)
    assert (cupy.arange(20, 40) == z[:]).all()
    # Similarly, we can also open a file written by regular Zarr.
    # Let's write the file without any compressor.
    ary = numpy.arange(10)
    z = zarr.open(store=path, mode="w", shape=ary.shape, compressor=None)
    z[:] = ary
    # This works as before where the file is read as a CuPy array
    z = kvikio.zarr.open_cupy_array(store=path)
    assert isinstance(z[:], cupy.ndarray)
    assert (z[:] == cupy.asarray(ary)).all()
    # Using a compressor is a bit more tricky since not all CPU compressors
    # are GPU compatible. To make sure we use a compatible compressor, we use
    # the CPU-part of `CompatCompressor.lz4()`.
    ary = numpy.arange(10)
    z = zarr.open(
        store=path,
        mode="w",
        shape=ary.shape,
        compressor=kvikio.zarr.CompatCompressor.lz4().cpu,
    )
    z[:] = ary
    # This works as before where the file is read as a CuPy array
    z = kvikio.zarr.open_cupy_array(store=path)
    assert isinstance(z[:], cupy.ndarray)
    assert (z[:] == cupy.asarray(ary)).all()
if __name__ == "__main__":
    main("/tmp/zarr-cupy-nvcomp")
| 0 |
rapidsai_public_repos/kvikio/conda
|
rapidsai_public_repos/kvikio/conda/environments/all_cuda-120_arch-x86_64.yaml
|
# This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- conda-forge
- nvidia
dependencies:
- c-compiler
- cmake>=3.26.4
- cuda-nvcc
- cuda-python>=12.0,<13.0a0
- cuda-version=12.0
- cudf==23.12.*
- cupy>=12.0.0
- cxx-compiler
- cython>=3.0.0
- dask>=2022.05.2
- distributed>=2022.05.2
- doxygen=1.9.1
- gcc_linux-64=11.*
- libcufile
- libcufile-dev
- ninja
- numcodecs <0.12.0
- numpy>=1.21
- numpydoc
- nvcomp==3.0.4
- packaging
- pre-commit
- pytest
- pytest-cov
- python>=3.9,<3.11
- scikit-build>=0.13.1
- sphinx
- sphinx-click
- sphinx_rtd_theme
- sysroot_linux-64=2.17
- zarr
name: all_cuda-120_arch-x86_64
| 0 |
rapidsai_public_repos/kvikio/conda
|
rapidsai_public_repos/kvikio/conda/environments/all_cuda-118_arch-x86_64.yaml
|
# This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- conda-forge
- nvidia
dependencies:
- c-compiler
- cmake>=3.26.4
- cuda-python>=11.7.1,<12.0a0
- cuda-version=11.8
- cudatoolkit
- cudf==23.12.*
- cupy>=12.0.0
- cxx-compiler
- cython>=3.0.0
- dask>=2022.05.2
- distributed>=2022.05.2
- doxygen=1.9.1
- gcc_linux-64=11.*
- libcufile-dev=1.4.0.31
- libcufile=1.4.0.31
- ninja
- numcodecs <0.12.0
- numpy>=1.21
- numpydoc
- nvcc_linux-64=11.8
- nvcomp==3.0.4
- packaging
- pre-commit
- pytest
- pytest-cov
- python>=3.9,<3.11
- scikit-build>=0.13.1
- sphinx
- sphinx-click
- sphinx_rtd_theme
- sysroot_linux-64=2.17
- zarr
name: all_cuda-118_arch-x86_64
| 0 |
rapidsai_public_repos/kvikio/conda/recipes
|
rapidsai_public_repos/kvikio/conda/recipes/kvikio/conda_build_config.yaml
|
c_compiler_version:
- 11
cxx_compiler_version:
- 11
cuda_compiler:
- cuda-nvcc
cuda11_compiler:
- nvcc
sysroot_version:
- "2.17"
cmake_version:
- ">=3.26.4"
nvcomp_version:
- "=3.0.4"
| 0 |
rapidsai_public_repos/kvikio/conda/recipes
|
rapidsai_public_repos/kvikio/conda/recipes/kvikio/meta.yaml
|
# Copyright (c) 2023, NVIDIA CORPORATION.
{% set version = environ.get('GIT_DESCRIBE_TAG', '0.0.0.dev').lstrip('v') %}
{% set py_version = environ['CONDA_PY'] %}
{% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %}
{% set cuda_major = cuda_version.split('.')[0] %}
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: kvikio
version: {{ version }}
source:
git_url: ../../..
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_py{{ py_version }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
script_env:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
- AWS_SESSION_TOKEN
- CMAKE_C_COMPILER_LAUNCHER
- CMAKE_CUDA_COMPILER_LAUNCHER
- CMAKE_CXX_COMPILER_LAUNCHER
- CMAKE_GENERATOR
- PARALLEL_LEVEL
- SCCACHE_BUCKET
- SCCACHE_IDLE_TIMEOUT
- SCCACHE_REGION
- SCCACHE_S3_KEY_PREFIX=kvikio-aarch64 # [aarch64]
- SCCACHE_S3_KEY_PREFIX=kvikio-linux64 # [linux64]
- SCCACHE_S3_USE_SSL
- SCCACHE_S3_NO_CREDENTIALS
script:
- cd python
- python -m pip install . -vv
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- cmake {{ cmake_version }}
- ninja
- {{ compiler('c') }}
- {{ compiler('cxx') }}
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- python
- setuptools
- pip
- cython >=3.0.0
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- cuda-version ={{ cuda_version }}
- nvcomp {{ nvcomp_version }}
- scikit-build >=0.13.1
- libkvikio ={{ version }}
run:
- python
- numpy >=1.20
- cupy >=12.0.0
- zarr
# See https://github.com/zarr-developers/numcodecs/pull/475
- numcodecs <0.12.0
- packaging
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
test:
requires:
- pytest
imports:
- kvikio
about:
home: https://rapids.ai
license: Apache-2.0
license_family: Apache
license_file: LICENSE
summary: KvikIO - GPUDirect Storage
| 0 |
rapidsai_public_repos/kvikio/conda/recipes
|
rapidsai_public_repos/kvikio/conda/recipes/libkvikio/conda_build_config.yaml
|
c_compiler_version:
- 11
cxx_compiler_version:
- 11
cmake_version:
- ">=3.26.4"
cuda_compiler:
- cuda-nvcc
cuda11_compiler:
- nvcc
sysroot_version:
- "2.17"
# The CTK libraries below are missing from the conda-forge::cudatoolkit package
# for CUDA 11. The "*_host_*" version specifiers correspond to `11.8` packages
# and the "*_run_*" version specifiers correspond to `11.x` packages.
cuda11_libcufile_host_version:
- "1.4.0.31"
cuda11_libcufile_run_version:
- ">=1.0.0.82,<=1.4.0.31"
| 0 |
rapidsai_public_repos/kvikio/conda/recipes
|
rapidsai_public_repos/kvikio/conda/recipes/libkvikio/build.sh
|
#!/bin/bash
# Copyright (c) 2023, NVIDIA CORPORATION.
# Conda-build entry point: compile libkvikio via the repo's top-level build.sh.
# The -n flag skips the install step; installation is handled separately by the
# install_libkvikio*.sh output scripts of this recipe.
./build.sh -n libkvikio
| 0 |
rapidsai_public_repos/kvikio/conda/recipes
|
rapidsai_public_repos/kvikio/conda/recipes/libkvikio/meta.yaml
|
# Copyright (c) 2023, NVIDIA CORPORATION.
{% set version = environ.get('GIT_DESCRIBE_TAG', '0.0.0.dev').lstrip('v') %}
{% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %}
{% set cuda_major = cuda_version.split('.')[0] %}
{% set cuda_spec = ">=" + cuda_major ~ ",<" + (cuda_major | int + 1) ~ ".0a0" %} # i.e. >=11,<12.0a0
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: libkvikio-split
source:
git_url: ../../..
build:
script_env:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
- AWS_SESSION_TOKEN
- CMAKE_GENERATOR
- CMAKE_C_COMPILER_LAUNCHER
- CMAKE_CXX_COMPILER_LAUNCHER
- CMAKE_CUDA_COMPILER_LAUNCHER
- PARALLEL_LEVEL
- RAPIDS_ARTIFACTS_DIR
- SCCACHE_BUCKET
- SCCACHE_IDLE_TIMEOUT
- SCCACHE_REGION
- SCCACHE_S3_KEY_PREFIX=libkvikio-aarch64 # [aarch64]
- SCCACHE_S3_KEY_PREFIX=libkvikio-linux64 # [linux64]
- SCCACHE_S3_USE_SSL
- SCCACHE_S3_NO_CREDENTIALS
requirements:
build:
- cmake {{ cmake_version }}
- {{ compiler('c') }}
- {{ compiler('cxx') }}
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
- cudatoolkit
- libcufile {{ cuda11_libcufile_host_version }} # [linux64]
- libcufile-dev {{ cuda11_libcufile_host_version }} # [linux64]
{% else %}
- libcufile-dev # [linux64]
{% endif %}
outputs:
- name: libkvikio
version: {{ version }}
script: install_libkvikio.sh
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
run_exports:
- {{ pin_subpackage("libkvikio", max_pin="x.x") }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- cmake {{ cmake_version }}
run:
- cuda-version {{ cuda_spec }}
{% if cuda_major == "11" %}
- cudatoolkit
- libcufile {{ cuda11_libcufile_run_version }} # [linux64]
- libcufile-dev {{ cuda11_libcufile_run_version }} # [linux64]
{% else %}
- libcufile-dev # [linux64]
{% endif %}
test:
commands:
- test -f $PREFIX/include/kvikio/file_handle.hpp
about:
home: https://rapids.ai
license: Apache-2.0
license_family: Apache
license_file: LICENSE
summary: libkvikio library
- name: libkvikio-tests
version: {{ version }}
script: install_libkvikio_tests.sh
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- cmake {{ cmake_version }}
host:
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
- cudatoolkit
- libcufile {{ cuda11_libcufile_run_version }} # [linux64]
{% else %}
- cuda-cudart-dev
- libcufile-dev # [linux64]
{% endif %}
run:
- cuda-version {{ cuda_spec }}
{% if cuda_major == "11" %}
- cudatoolkit
- libcufile {{ cuda11_libcufile_run_version }} # [linux64]
{% endif %}
about:
home: https://rapids.ai
license: Apache-2.0
license_family: Apache
license_file: LICENSE
summary: libkvikio tests
| 0 |
rapidsai_public_repos/kvikio/conda/recipes
|
rapidsai_public_repos/kvikio/conda/recipes/libkvikio/install_libkvikio.sh
|
#!/bin/bash
# Copyright (c) 2023, NVIDIA CORPORATION.
# Install the previously-built libkvikio artifacts from cpp/build into $PREFIX.
cmake --install cpp/build
| 0 |
rapidsai_public_repos/kvikio/conda/recipes
|
rapidsai_public_repos/kvikio/conda/recipes/libkvikio/install_libkvikio_tests.sh
|
#!/bin/bash
# Copyright (c) 2023, NVIDIA CORPORATION.
# Install only the "testing" component (test binaries) from cpp/build into $PREFIX.
cmake --install cpp/build --component testing
| 0 |
rapidsai_public_repos/kvikio
|
rapidsai_public_repos/kvikio/cpp/CMakeLists.txt
|
# =============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
cmake_minimum_required(VERSION 3.26.4 FATAL_ERROR)
# Pull in rapids-cmake helpers (version pinned by cmake/fetch_rapids.cmake).
include(cmake/fetch_rapids.cmake)
include(rapids-cmake)
include(rapids-cpm)
include(rapids-export)
include(rapids-find)
project(
KvikIO
VERSION 23.12.00
LANGUAGES CXX
)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules/")
# Write the version header
rapids_cmake_write_version_file(include/kvikio/version_config.hpp)
# Set a default build type if none was specified
rapids_cmake_build_type(Release)
# build options
option(KvikIO_BUILD_EXAMPLES "Configure CMake to build examples" ON)
rapids_cmake_support_conda_env(conda_env MODIFY_PREFIX_PATH)
# find packages we depend on
rapids_cpm_init()
rapids_find_package(
CUDAToolkit REQUIRED
BUILD_EXPORT_SET kvikio-exports
INSTALL_EXPORT_SET kvikio-exports
)
rapids_find_package(
Threads REQUIRED
BUILD_EXPORT_SET kvikio-exports
INSTALL_EXPORT_SET kvikio-exports
)
# cuFile (GDS) is optional — KvikIO falls back to POSIX IO when it is absent.
rapids_find_package(
cuFile
BUILD_EXPORT_SET kvikio-exports
INSTALL_EXPORT_SET kvikio-exports
)
if(NOT cuFile_FOUND)
message(WARNING "Building KvikIO without cuFile")
else()
# Probe cufile.h for optional APIs: older CUDA toolkits ship cuFile without
# the batch and stream entry points, so detect them by symbol name.
file(READ "${cuFile_INCLUDE_DIRS}/cufile.h" CUFILE_H_STR)
string(FIND "${CUFILE_H_STR}" "cuFileBatchIOSetUp" cuFileBatchIOSetUp_location)
if(cuFileBatchIOSetUp_location EQUAL "-1")
set(cuFile_BATCH_API_FOUND FALSE)
else()
set(cuFile_BATCH_API_FOUND TRUE)
endif()
message(STATUS "Found cuFile's Batch API: ${cuFile_BATCH_API_FOUND}")
string(FIND "${CUFILE_H_STR}" "cuFileReadAsync" cuFileReadAsync_location)
if(cuFileReadAsync_location EQUAL "-1")
set(cuFile_STREAM_API_FOUND FALSE)
else()
set(cuFile_STREAM_API_FOUND TRUE)
endif()
message(STATUS "Found cuFile's Stream API: ${cuFile_STREAM_API_FOUND}")
endif()
# library targets
# KvikIO is header-only, hence an INTERFACE library.
add_library(kvikio INTERFACE)
add_library(kvikio::kvikio ALIAS kvikio)
target_include_directories(
kvikio INTERFACE "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>"
"$<INSTALL_INTERFACE:include>"
)
target_link_libraries(kvikio INTERFACE Threads::Threads)
target_link_libraries(kvikio INTERFACE CUDA::toolkit)
# Propagate cuFile usage and feature availability to consumers via compile
# definitions matching the feature probes above.
if(cuFile_FOUND)
target_link_libraries(kvikio INTERFACE cufile::cuFile_interface)
target_compile_definitions(kvikio INTERFACE KVIKIO_CUFILE_FOUND)
if(cuFile_BATCH_API_FOUND)
target_compile_definitions(kvikio INTERFACE KVIKIO_CUFILE_BATCH_API_FOUND)
endif()
if(cuFile_STREAM_API_FOUND)
target_compile_definitions(kvikio INTERFACE KVIKIO_CUFILE_STREAM_API_FOUND)
endif()
endif()
# dlopen/dlsym support for the runtime shims.
target_link_libraries(kvikio INTERFACE ${CMAKE_DL_LIBS})
target_compile_features(kvikio INTERFACE cxx_std_17)
# optionally build examples
if(KvikIO_BUILD_EXAMPLES)
add_subdirectory(examples)
endif()
# optionally build tests
# Only when KvikIO is the top-level project (not when vendored by another build).
if(KvikIO_BUILD_TESTS AND CMAKE_PROJECT_NAME STREQUAL PROJECT_NAME)
include(cmake/thirdparty/get_gtest.cmake)
include(CTest) # calls enable_testing()
add_subdirectory(tests)
endif()
include(CPack)
# install export targets
install(TARGETS kvikio EXPORT kvikio-exports)
install(DIRECTORY include/kvikio/ DESTINATION include/kvikio)
install(FILES ${KvikIO_BINARY_DIR}/include/kvikio/version_config.hpp DESTINATION include/kvikio)
# Ship our FindcuFile module with both export sets so consumers can locate cuFile.
include("${rapids-cmake-dir}/export/find_package_file.cmake")
rapids_export_find_package_file(
BUILD "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules/FindcuFile.cmake" EXPORT_SET kvikio-exports
)
rapids_export_find_package_file(
INSTALL "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules/FindcuFile.cmake" EXPORT_SET kvikio-exports
)
set(doc_string
[=[
Provide targets for KvikIO: C++ bindings for cuFile.
]=]
)
rapids_export(
INSTALL kvikio
EXPORT_SET kvikio-exports
GLOBAL_TARGETS kvikio
NAMESPACE kvikio::
DOCUMENTATION doc_string
)
# build export targets
rapids_export(
BUILD kvikio
EXPORT_SET kvikio-exports
GLOBAL_TARGETS kvikio
NAMESPACE kvikio::
DOCUMENTATION doc_string
)
| 0 |
rapidsai_public_repos/kvikio/cpp/include
|
rapidsai_public_repos/kvikio/cpp/include/kvikio/stream.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <sys/types.h>

#include <algorithm>
#include <cstddef>
#include <cstdlib>
#include <iostream>
#include <new>
#include <tuple>
#include <utility>

#include <kvikio/error.hpp>
#include <kvikio/shim/cuda.hpp>
#include <kvikio/shim/cufile.hpp>
namespace kvikio {
/**
* @brief Future of an asynchronous IO operation
*
* This class shouldn't be used directly, instead some stream operations such as
* `FileHandle.read_async` and `FileHandle.write_async` returns an instance of this class. Use
* `.check_bytes_done()` to synchronize the associated CUDA stream and return the number of bytes
* read or written by the operation.
*
* The goal of this class is twofold:
* - Have `read_async` and `write_async` return an object that clearly associates the function
* arguments with the CUDA stream used. This is useful because the current validity of the
* arguments depends on the stream.
* - Support of by-value arguments. In many cases, a user will use `read_async` and `write_async`
* like most other asynchronous CUDA functions that take by-value arguments.
*
* To support by-value arguments, we allocate the arguments on the heap (malloc `ArgByVal`) and have
* the by-reference arguments points into `ArgByVal`. This way, the `read_async` and `write_async`
* can call `.get_args()` to get the by-reference arguments required by cuFile's stream API.
*/
class StreamFuture {
 private:
  struct ArgByVal {
    std::size_t size;
    off_t file_offset;
    off_t devPtr_offset;
    ssize_t bytes_done;
  };

  void* _devPtr_base{nullptr};
  CUstream _stream{nullptr};
  ArgByVal* _val{nullptr};  // heap-allocated as required by cuFile's stream API
  bool _stream_synchronized{false};

  /**
   * @brief Synchronize the stream (best effort) and free the by-value arguments.
   *
   * Shared by the destructor and move assignment so that overwriting an active
   * StreamFuture neither leaks `_val` nor skips stream synchronization.
   */
  void release() noexcept
  {
    if (_val == nullptr) { return; }
    try {
      check_bytes_done();
    } catch (const kvikio::CUfileException& e) {
      std::cerr << e.what() << std::endl;
    }
    std::free(_val);
    _val = nullptr;
  }

 public:
  StreamFuture() noexcept = default;

  /**
   * @brief Create a future associated with `stream`.
   *
   * @param devPtr_base Base address of the device buffer.
   * @param size Size in bytes to read or write.
   * @param file_offset Byte offset into the file.
   * @param devPtr_offset Byte offset relative to `devPtr_base`.
   * @param stream CUDA stream the operation is ordered on.
   *
   * @throws std::bad_alloc if the by-value argument block cannot be allocated.
   */
  StreamFuture(
    void* devPtr_base, std::size_t size, off_t file_offset, off_t devPtr_offset, CUstream stream)
    : _devPtr_base{devPtr_base}, _stream{stream}
  {
    // Notice, we allocate the arguments using malloc() as specified in the cuFile docs:
    // <https://docs.nvidia.com/gpudirect-storage/api-reference-guide/index.html#cufilewriteasync>
    if ((_val = static_cast<ArgByVal*>(std::malloc(sizeof(ArgByVal)))) == nullptr) {
      throw std::bad_alloc{};
    }
    *_val = {
      .size = size, .file_offset = file_offset, .devPtr_offset = devPtr_offset, .bytes_done = 0};
  }

  /**
   * @brief StreamFuture support move semantic but isn't copyable
   */
  StreamFuture(const StreamFuture&)            = delete;
  StreamFuture& operator=(const StreamFuture&) = delete;

  StreamFuture(StreamFuture&& o) noexcept
    : _devPtr_base{std::exchange(o._devPtr_base, nullptr)},
      _stream{std::exchange(o._stream, nullptr)},
      _val{std::exchange(o._val, nullptr)},
      _stream_synchronized{o._stream_synchronized}
  {
  }

  StreamFuture& operator=(StreamFuture&& o) noexcept
  {
    if (this != &o) {
      // BUGFIX: previously `_val` was overwritten without being freed, leaking
      // the allocation and skipping synchronization of the in-flight operation.
      release();
      _devPtr_base         = std::exchange(o._devPtr_base, nullptr);
      _stream              = std::exchange(o._stream, nullptr);
      _val                 = std::exchange(o._val, nullptr);
      _stream_synchronized = o._stream_synchronized;
    }
    return *this;
  }

  /**
   * @brief Return the arguments of the future call
   *
   * @return Tuple of the arguments in the order matching `FileHandle.read()` and
   * `FileHandle.write()`
   */
  std::tuple<void*, std::size_t*, off_t*, off_t*, ssize_t*, CUstream> get_args() const
  {
    if (_val == nullptr) {
      throw kvikio::CUfileException("cannot get arguments from an uninitialized StreamFuture");
    }
    return {_devPtr_base,
            &_val->size,
            &_val->file_offset,
            &_val->devPtr_offset,
            &_val->bytes_done,
            _stream};
  }

  /**
   * @brief Return the number of bytes read or written by the future operation.
   *
   * Synchronize the associated CUDA stream.
   *
   * @return Number of bytes read or written by the future operation.
   */
  std::size_t check_bytes_done()
  {
    if (_val == nullptr) {
      throw kvikio::CUfileException("cannot check bytes done on an uninitialized StreamFuture");
    }
    if (!_stream_synchronized) {
      _stream_synchronized = true;
      CUDA_DRIVER_TRY(cudaAPI::instance().StreamSynchronize(_stream));
    }
    CUFILE_CHECK_STREAM_IO(&_val->bytes_done);
    // At this point, we know `*_val->bytes_done` is a positive value otherwise
    // CUFILE_CHECK_STREAM_IO() would have raised an exception.
    return static_cast<std::size_t>(_val->bytes_done);
  }

  /**
   * @brief Free the by-value arguments and make sure the associated CUDA stream has been
   * synchronized.
   */
  ~StreamFuture() noexcept { release(); }
};
} // namespace kvikio
| 0 |
rapidsai_public_repos/kvikio/cpp/include
|
rapidsai_public_repos/kvikio/cpp/include/kvikio/posix_io.hpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <unistd.h>
#include <cstddef>
#include <cstdlib>
#include <mutex>
#include <stack>
#include <cstring>
#include <kvikio/error.hpp>
#include <kvikio/shim/cuda.hpp>
#include <kvikio/utils.hpp>
namespace kvikio {
inline constexpr std::size_t posix_bounce_buffer_size = 2 << 23; // 16 MiB
namespace detail {
/**
* @brief Class to retain host memory allocations
*
* Call `AllocRetain::get` to get an allocation that will be retained when it
* goes out of scope (RAII). The size of all allocations are `posix_bounce_buffer_size`.
*/
// Pool of page-locked (pinned) host buffers reused as bounce buffers for
// POSIX<->CUDA transfers. All buffers have size `posix_bounce_buffer_size`.
class AllocRetain {
private:
std::stack<void*> _free_allocs;
std::mutex _mutex;
public:
// RAII handle over one pooled buffer; returns it to the pool on destruction.
class Alloc {
private:
AllocRetain* _manager;
void* _alloc;
public:
Alloc(AllocRetain* manager, void* alloc) : _manager(manager), _alloc{alloc} {}
Alloc(const Alloc&) = delete;
Alloc& operator=(Alloc const&) = delete;
Alloc(Alloc&& o) = delete;
Alloc& operator=(Alloc&& o) = delete;
~Alloc() noexcept { _manager->put(_alloc); }
void* get() noexcept { return _alloc; }
};
AllocRetain() = default;
// Pop a retained buffer, or allocate a new pinned buffer if the pool is empty.
[[nodiscard]] Alloc get()
{
const std::lock_guard lock(_mutex);
// Check if we have an allocation available
if (!_free_allocs.empty()) {
void* ret = _free_allocs.top();
_free_allocs.pop();
return Alloc(this, ret);
}
// If no available allocation, allocate and register a new one
void* alloc{};
// Allocate page-locked host memory
CUDA_DRIVER_TRY(cudaAPI::instance().MemHostAlloc(
&alloc, posix_bounce_buffer_size, CU_MEMHOSTREGISTER_PORTABLE));
return Alloc(this, alloc);
}
// Return a buffer to the pool. Called by Alloc's destructor.
void put(void* alloc)
{
const std::lock_guard lock(_mutex);
_free_allocs.push(alloc);
}
// Free all retained buffers. Note the default destructor does NOT call this —
// presumably to avoid CUDA calls during static destruction; TODO confirm.
void clear()
{
const std::lock_guard lock(_mutex);
while (!_free_allocs.empty()) {
CUDA_DRIVER_TRY(cudaAPI::instance().MemFreeHost(_free_allocs.top()));
_free_allocs.pop();
}
}
AllocRetain(const AllocRetain&) = delete;
AllocRetain& operator=(AllocRetain const&) = delete;
AllocRetain(AllocRetain&& o) = delete;
AllocRetain& operator=(AllocRetain&& o) = delete;
~AllocRetain() noexcept = default;
};
// Process-wide bounce-buffer pool shared by the POSIX IO helpers below.
inline AllocRetain manager; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables)
/**
* @brief Read or write host memory to or from disk using POSIX
*
* @tparam IsReadOperation Whether the operation is a read or a write
* @param fd File descriptor
* @param buf Buffer to write
* @param count Number of bytes to write
* @param offset File offset
* @param partial If false, all of `count` bytes are read or written.
* @return The number of bytes read or written (always greater than zero)
*/
template <bool IsReadOperation>
ssize_t posix_host_io(int fd, const void* buf, size_t count, off_t offset, bool partial)
{
off_t cur_offset = offset;
size_t byte_remaining = count;
// const_cast is needed because `buf` is const for the write path; for the read
// path the caller passes a writable buffer through the same parameter.
char* buffer = const_cast<char*>(static_cast<const char*>(buf));
// Loop until all bytes are transferred (pread/pwrite may return short counts).
while (byte_remaining > 0) {
ssize_t nbytes = 0;
if constexpr (IsReadOperation) {
nbytes = ::pread(fd, buffer, byte_remaining, cur_offset);
} else {
nbytes = ::pwrite(fd, buffer, byte_remaining, cur_offset);
}
if (nbytes == -1) {
const std::string name = IsReadOperation ? "pread" : "pwrite";
// EBADF here typically means the fd's open flags don't permit the operation.
if (errno == EBADF) {
throw CUfileException{std::string{"POSIX error on " + name + " at: "} + __FILE__ + ":" +
KVIKIO_STRINGIFY(__LINE__) + ": unsupported file open flags"};
}
throw CUfileException{std::string{"POSIX error on " + name + " at: "} + __FILE__ + ":" +
KVIKIO_STRINGIFY(__LINE__) + ": " + strerror(errno)};
}
// A zero-byte pread means EOF before `count` bytes were available.
if constexpr (IsReadOperation) {
if (nbytes == 0) {
throw CUfileException{std::string{"POSIX error on pread at: "} + __FILE__ + ":" +
KVIKIO_STRINGIFY(__LINE__) + ": EOF"};
}
}
// In partial mode, return after the first successful transfer.
if (partial) { return nbytes; }
buffer += nbytes; // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic)
cur_offset += nbytes;
byte_remaining -= nbytes;
}
return convert_size2ssize(count);
}
/**
* @brief Read or write device memory to or from disk using POSIX
*
* @tparam IsReadOperation Whether the operation is a read or a write
* @param fd File descriptor
* @param devPtr_base Device pointer to read or write to.
* @param size Number of bytes to read or write.
* @param file_offset Byte offset to the start of the file.
* @param devPtr_offset Byte offset to the start of the device pointer.
* @return Number of bytes read or written.
*/
template <bool IsReadOperation>
std::size_t posix_device_io(int fd,
const void* devPtr_base,
std::size_t size,
std::size_t file_offset,
std::size_t devPtr_offset)
{
// Borrow a pinned bounce buffer from the global pool (returned on scope exit).
auto alloc = manager.get();
CUdeviceptr devPtr = convert_void2deviceptr(devPtr_base) + devPtr_offset;
off_t cur_file_offset = convert_size2off(file_offset);
off_t byte_remaining = convert_size2off(size);
const off_t chunk_size2 = convert_size2off(posix_bounce_buffer_size);
// Transfer in bounce-buffer-sized chunks: disk <-> pinned host <-> device.
while (byte_remaining > 0) {
const off_t nbytes_requested = std::min(chunk_size2, byte_remaining);
ssize_t nbytes_got = nbytes_requested;
if constexpr (IsReadOperation) {
// partial=true: accept a short read and copy only what was read.
nbytes_got = posix_host_io<true>(fd, alloc.get(), nbytes_requested, cur_file_offset, true);
CUDA_DRIVER_TRY(cudaAPI::instance().MemcpyHtoD(devPtr, alloc.get(), nbytes_got));
} else { // Is a write operation
// partial=false: the full chunk is written, so nbytes_got stays as requested.
CUDA_DRIVER_TRY(cudaAPI::instance().MemcpyDtoH(alloc.get(), devPtr, nbytes_requested));
posix_host_io<false>(fd, alloc.get(), nbytes_requested, cur_file_offset, false);
}
cur_file_offset += nbytes_got;
devPtr += nbytes_got;
byte_remaining -= nbytes_got;
}
return size;
}
} // namespace detail
/**
* @brief Read from disk to host memory using POSIX
*
* If `size` or `file_offset` isn't aligned with `page_size` then
* `fd` cannot have been opened with the `O_DIRECT` flag.
*
* @param fd File descriptor
* @param buf Base address of buffer in host memory.
* @param size Size in bytes to read.
* @param file_offset Offset in the file to read from.
* @param partial If false, all of `size` bytes are read.
* @return Size of bytes that were successfully read.
*/
inline std::size_t posix_host_read(
  int fd, void* buf, std::size_t size, std::size_t file_offset, bool partial)
{
  // Select the read direction of the shared helper at compile time.
  const off_t signed_offset = convert_size2off(file_offset);
  return detail::posix_host_io<true>(fd, buf, size, signed_offset, partial);
}
/**
* @brief Write host memory to disk using POSIX
*
* If `size` or `file_offset` isn't aligned with `page_size` then
* `fd` cannot have been opened with the `O_DIRECT` flag.
*
* @param fd File descriptor
* @param buf Base address of buffer in host memory.
* @param size Size in bytes to write.
* @param file_offset Offset in the file to write to.
* @param partial If false, all of `size` bytes are written.
* @return Size of bytes that were successfully read.
*/
inline std::size_t posix_host_write(
  int fd, const void* buf, std::size_t size, std::size_t file_offset, bool partial)
{
  // Select the write direction of the shared helper at compile time.
  const off_t signed_offset = convert_size2off(file_offset);
  return detail::posix_host_io<false>(fd, buf, size, signed_offset, partial);
}
/**
* @brief Read from disk to device memory using POSIX
*
* If `size` or `file_offset` isn't aligned with `page_size` then
* `fd` cannot have been opened with the `O_DIRECT` flag.
*
* @param fd File descriptor
* @param devPtr_base Base address of buffer in device memory.
* @param size Size in bytes to read.
* @param file_offset Offset in the file to read from.
* @param devPtr_offset Offset relative to the `devPtr_base` pointer to read into.
* @return Size of bytes that were successfully read.
*/
inline std::size_t posix_device_read(int fd,
                                     const void* devPtr_base,
                                     std::size_t size,
                                     std::size_t file_offset,
                                     std::size_t devPtr_offset)
{
  // Delegate to the chunked bounce-buffer implementation, read direction.
  constexpr bool is_read = true;
  return detail::posix_device_io<is_read>(fd, devPtr_base, size, file_offset, devPtr_offset);
}
/**
* @brief Write device memory to disk using POSIX
*
* If `size` or `file_offset` isn't aligned with `page_size` then
* `fd` cannot have been opened with the `O_DIRECT` flag.
*
* @param fd File descriptor
* @param devPtr_base Base address of buffer in device memory.
* @param size Size in bytes to write.
* @param file_offset Offset in the file to write to.
* @param devPtr_offset Offset relative to the `devPtr_base` pointer to write into.
* @return Size of bytes that were successfully written.
*/
inline std::size_t posix_device_write(int fd,
                                      const void* devPtr_base,
                                      std::size_t size,
                                      std::size_t file_offset,
                                      std::size_t devPtr_offset)
{
  // Delegate to the chunked bounce-buffer implementation, write direction.
  constexpr bool is_read = false;
  return detail::posix_device_io<is_read>(fd, devPtr_base, size, file_offset, devPtr_offset);
}
} // namespace kvikio
| 0 |
rapidsai_public_repos/kvikio/cpp/include
|
rapidsai_public_repos/kvikio/cpp/include/kvikio/driver.hpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <iostream>
#include <vector>
#include <kvikio/error.hpp>
#include <kvikio/shim/cufile.hpp>
#include <kvikio/shim/cufile_h_wrapper.hpp>
namespace kvikio {
namespace detail {
// Test whether bit `flag` is set in the driver-property bitmask `prop`.
[[nodiscard]] inline bool get_driver_flag(unsigned int prop, unsigned int flag) noexcept
{
  return ((prop >> flag) & 1U) != 0U;
}
// Set (val=true) or clear (val=false) bit `flag` in the bitmask `prop`.
inline void set_driver_flag(unsigned int& prop, unsigned int flag, bool val) noexcept
{
  const unsigned int mask = 1U << flag;
  prop = val ? (prop | mask) : (prop & ~mask);
}
} // namespace detail
#ifdef KVIKIO_CUFILE_FOUND
// RAII wrapper that opens the cuFile driver on construction and closes it on
// destruction. Close errors are logged to stderr, never thrown (dtor safety).
class DriverInitializer {
// Optional, if not used cuFiles opens the driver automatically
public:
DriverInitializer() { CUFILE_TRY(cuFileAPI::instance().DriverOpen()); }
DriverInitializer(DriverInitializer const&) = delete;
DriverInitializer& operator=(DriverInitializer const&) = delete;
DriverInitializer(DriverInitializer&&) noexcept = delete;
DriverInitializer& operator=(DriverInitializer&&) noexcept = delete;
~DriverInitializer()
{
try {
CUFILE_TRY(cuFileAPI::instance().DriverClose());
} catch (const CUfileException& e) {
std::cerr << "Unable to close GDS file driver: ";
std::cerr << e.what();
std::cerr << std::endl;
}
}
};
/**
 * @brief Accessor for the cuFile driver properties (GDS availability, poll mode,
 * cache and pinned-memory limits).
 *
 * Properties are fetched from the driver lazily on first access and cached in
 * `_props`; setters update both the driver and the cached copy.
 */
class DriverProperties {
 private:
  CUfileDrvProps_t _props{};
  bool _initialized{false};
  // Because Cython does not handle exceptions in the default
  // constructor, we initialize `_props` lazily.
  void lazy_init()
  {
    if (_initialized) { return; }
    _initialized = true;
    CUFILE_TRY(cuFileAPI::instance().DriverGetProperties(&_props));
  }
 public:
  DriverProperties() = default;
  /// @brief Whether the GDS (nvidia-fs) driver is loaded.
  bool is_gds_available()
  {
    // If both the major and minor version is zero, the GDS driver isn't loaded.
    return !(get_nvfs_major_version() == 0 && get_nvfs_minor_version() == 0);
  }
  /// @brief Major version of the nvidia-fs driver (0 when not loaded).
  [[nodiscard]] unsigned int get_nvfs_major_version()
  {
    lazy_init();
    return _props.nvfs.major_version;
  }
  /// @brief Minor version of the nvidia-fs driver (0 when not loaded).
  [[nodiscard]] unsigned int get_nvfs_minor_version()
  {
    lazy_init();
    return _props.nvfs.minor_version;
  }
  /// @brief Whether the driver control flags allow compatibility mode.
  [[nodiscard]] bool get_nvfs_allow_compat_mode()
  {
    lazy_init();
    return detail::get_driver_flag(_props.nvfs.dcontrolflags, CU_FILE_ALLOW_COMPAT_MODE);
  }
  /// @brief Whether the driver control flags enable poll mode.
  [[nodiscard]] bool get_nvfs_poll_mode()
  {
    lazy_init();
    return detail::get_driver_flag(_props.nvfs.dcontrolflags, CU_FILE_USE_POLL_MODE);
  }
  /// @brief The driver's poll-mode threshold size (`poll_thresh_size`).
  [[nodiscard]] std::size_t get_nvfs_poll_thresh_size()
  {
    lazy_init();
    return _props.nvfs.poll_thresh_size;
  }
  /// @brief Enable/disable poll mode, keeping the current poll threshold.
  void set_nvfs_poll_mode(bool enable)
  {
    lazy_init();
    CUFILE_TRY(cuFileAPI::instance().DriverSetPollMode(enable, get_nvfs_poll_thresh_size()));
    // Keep the cached copy in sync with the driver.
    detail::set_driver_flag(_props.nvfs.dcontrolflags, CU_FILE_USE_POLL_MODE, enable);
  }
  /// @brief Set the poll-mode threshold size, keeping the current poll mode.
  void set_nvfs_poll_thresh_size(std::size_t size_in_kb)
  {
    lazy_init();
    CUFILE_TRY(cuFileAPI::instance().DriverSetPollMode(get_nvfs_poll_mode(), size_in_kb));
    _props.nvfs.poll_thresh_size = size_in_kb;
  }
  // NOTE(review): despite the name, this reads the *control* flags (`dcontrolflags`),
  // not a status-flags field — confirm which was intended.
  [[nodiscard]] std::vector<CUfileDriverControlFlags> get_nvfs_statusflags()
  {
    lazy_init();
    std::vector<CUfileDriverControlFlags> ret;
    if (detail::get_driver_flag(_props.nvfs.dcontrolflags, CU_FILE_USE_POLL_MODE)) {
      ret.push_back(CU_FILE_USE_POLL_MODE);
    }
    if (detail::get_driver_flag(_props.nvfs.dcontrolflags, CU_FILE_ALLOW_COMPAT_MODE)) {
      ret.push_back(CU_FILE_ALLOW_COMPAT_MODE);
    }
    return ret;
  }
  /// @brief The driver's maximum device cache size.
  [[nodiscard]] std::size_t get_max_device_cache_size()
  {
    lazy_init();
    return _props.max_device_cache_size;
  }
  /// @brief Set the driver's maximum device cache size (`size_in_kb` per the cuFile API).
  void set_max_device_cache_size(std::size_t size_in_kb)
  {
    lazy_init();
    CUFILE_TRY(cuFileAPI::instance().DriverSetMaxCacheSize(size_in_kb));
    _props.max_device_cache_size = size_in_kb;
  }
  /// @brief The driver's per-buffer cache size (read-only; no setter exists).
  [[nodiscard]] std::size_t get_per_buffer_cache_size()
  {
    lazy_init();
    return _props.per_buffer_cache_size;
  }
  /// @brief The driver's maximum pinned memory size (`max_device_pinned_mem_size`).
  [[nodiscard]] std::size_t get_max_pinned_memory_size()
  {
    lazy_init();
    return _props.max_device_pinned_mem_size;
  }
  /// @brief Set the driver's maximum pinned memory size (`size_in_kb` per the cuFile API).
  void set_max_pinned_memory_size(std::size_t size_in_kb)
  {
    lazy_init();
    CUFILE_TRY(cuFileAPI::instance().DriverSetMaxPinnedMemSize(size_in_kb));
    _props.max_device_pinned_mem_size = size_in_kb;
  }
  /// @brief The driver's maximum batch IO size, or 0 when the batch API isn't compiled in.
  [[nodiscard]] std::size_t get_max_batch_io_size()
  {
#ifdef KVIKIO_CUFILE_BATCH_API_FOUND
    lazy_init();
    return _props.max_batch_io_size;
#else
    return 0;
#endif
  }
};
#else
/**
 * @brief Fallback `DriverInitializer` used when KvikIO is compiled without cuFile.
 *
 * Does nothing — there is no driver to open or close.
 */
struct DriverInitializer {
  // Implement a non-default constructor to avoid `unused variable` warnings downstream
  DriverInitializer() {}
};
/**
 * @brief Fallback `DriverProperties` used when KvikIO is compiled without cuFile.
 *
 * `is_gds_available()` always reports false; every other accessor throws
 * `CUfileException`, since no driver properties exist without `cuFile.h`.
 */
struct DriverProperties {
  // Implement a non-default constructor to avoid `unused variable` warnings downstream
  DriverProperties() {}
  static bool is_gds_available() { return false; }
  [[nodiscard]] static unsigned int get_nvfs_major_version()
  {
    throw CUfileException("KvikIO not compiled with cuFile.h");
  }
  [[nodiscard]] static unsigned int get_nvfs_minor_version()
  {
    throw CUfileException("KvikIO not compiled with cuFile.h");
  }
  [[nodiscard]] static bool get_nvfs_allow_compat_mode()
  {
    throw CUfileException("KvikIO not compiled with cuFile.h");
  }
  [[nodiscard]] static bool get_nvfs_poll_mode()
  {
    throw CUfileException("KvikIO not compiled with cuFile.h");
  }
  [[nodiscard]] static std::size_t get_nvfs_poll_thresh_size()
  {
    throw CUfileException("KvikIO not compiled with cuFile.h");
  }
  static void set_nvfs_poll_mode(bool enable)
  {
    throw CUfileException("KvikIO not compiled with cuFile.h");
  }
  static void set_nvfs_poll_thresh_size(std::size_t size_in_kb)
  {
    throw CUfileException("KvikIO not compiled with cuFile.h");
  }
  [[nodiscard]] static std::vector<CUfileDriverControlFlags> get_nvfs_statusflags()
  {
    throw CUfileException("KvikIO not compiled with cuFile.h");
  }
  [[nodiscard]] static std::size_t get_max_device_cache_size()
  {
    throw CUfileException("KvikIO not compiled with cuFile.h");
  }
  static void set_max_device_cache_size(std::size_t size_in_kb)
  {
    throw CUfileException("KvikIO not compiled with cuFile.h");
  }
  [[nodiscard]] static std::size_t get_per_buffer_cache_size()
  {
    throw CUfileException("KvikIO not compiled with cuFile.h");
  }
  [[nodiscard]] static std::size_t get_max_pinned_memory_size()
  {
    throw CUfileException("KvikIO not compiled with cuFile.h");
  }
  static void set_max_pinned_memory_size(std::size_t size_in_kb)
  {
    throw CUfileException("KvikIO not compiled with cuFile.h");
  }
  // `static` for consistency with every other accessor of this fallback implementation
  // (it was the only non-static member). Static members remain callable through an
  // instance, so existing callers are unaffected.
  [[nodiscard]] static std::size_t get_max_batch_io_size()
  {
    throw CUfileException("KvikIO not compiled with cuFile.h");
  }
};
#endif
} // namespace kvikio
| 0 |
rapidsai_public_repos/kvikio/cpp/include
|
rapidsai_public_repos/kvikio/cpp/include/kvikio/parallel_operation.hpp
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cassert>
#include <future>
#include <numeric>
#include <system_error>
#include <utility>
#include <vector>
#include <kvikio/defaults.hpp>
#include <kvikio/error.hpp>
#include <kvikio/utils.hpp>
namespace kvikio {
/**
 * @brief Apply read or write operation in parallel.
 *
 * @tparam F The type of the function applying the read or write operation.
 * @tparam T The type of the memory pointer.
 * @param op The function applying the read or write operation.
 * @param buf Buffer pointer to read or write to.
 * @param size Number of bytes to read or write.
 * @param file_offset Byte offset to the start of the file.
 * @param task_size Size of each task in bytes.
 * @param devPtr_offset Byte offset forwarded to `op` alongside each chunk; advanced by
 * `task_size` per submitted task, in lockstep with `file_offset`.
 * @return A future to be used later to check if the operation has finished its execution.
 * @throws std::invalid_argument if `task_size` is zero.
 */
template <typename F, typename T>
std::future<std::size_t> parallel_io(F op,
                                     T buf,
                                     std::size_t size,
                                     std::size_t file_offset,
                                     std::size_t task_size,
                                     std::size_t devPtr_offset)
{
  if (task_size == 0) { throw std::invalid_argument("`task_size` cannot be zero"); }
  // Single-task guard: if the whole operation fits in one task, or is no larger than a
  // page, skip the chunking below and submit it as a single task.
  if (task_size >= size || page_size >= size) {
    return defaults::thread_pool().submit(op, buf, size, file_offset, devPtr_offset);
  }
  // We know an upper bound of the total number of tasks
  std::vector<std::future<std::size_t>> tasks;
  tasks.reserve(size / task_size + 2);
  // 1) Submit `task_size` sized tasks
  while (size >= task_size) {
    tasks.push_back(defaults::thread_pool().submit(op, buf, task_size, file_offset, devPtr_offset));
    file_offset += task_size;
    devPtr_offset += task_size;
    size -= task_size;
  }
  // 2) Submit a task for the remainder
  if (size > 0) {
    tasks.push_back(defaults::thread_pool().submit(op, buf, size, file_offset, devPtr_offset));
  }
  // Finally, we sum the result of all tasks. The gather is deferred: the per-task
  // `get()` calls (and any task exception) happen when the caller waits on the future.
  auto gather_tasks = [](std::vector<std::future<std::size_t>>&& tasks) -> std::size_t {
    std::size_t ret = 0;
    for (auto& task : tasks) {
      ret += task.get();
    }
    return ret;
  };
  return std::async(std::launch::deferred, gather_tasks, std::move(tasks));
}
} // namespace kvikio
| 0 |
rapidsai_public_repos/kvikio/cpp/include
|
rapidsai_public_repos/kvikio/cpp/include/kvikio/thread_pool.hpp
|
#pragma once
/**
* @file thread_pool.hpp
* @author Barak Shoshany ([email protected]) (http://baraksh.com)
* @version 2.0.0
* @date 2021-08-14
* @copyright Copyright (c) 2021 Barak Shoshany. Licensed under the MIT license. If you use this
* library in published research, please cite it as follows:
* - Barak Shoshany, "A C++17 Thread Pool for High-Performance Scientific Computing",
* doi:10.5281/zenodo.4742687, arXiv:2105.00613 (May 2021)
*
* @brief A C++17 thread pool for high-performance scientific computing.
* @details A modern C++17-compatible thread pool implementation, built from scratch with
* high-performance scientific computing in mind. The thread pool is implemented as a single
* lightweight and self-contained class, and does not have any dependencies other than the C++17
* standard library, thus allowing a great degree of portability. In particular, this implementation
* does not utilize OpenMP or any other high-level multithreading APIs, and thus gives the
* programmer precise low-level control over the details of the parallelization, which permits more
* robust optimizations. The thread pool was extensively tested on both AMD and Intel CPUs with up
* to 40 cores and 80 threads. Other features include automatic generation of futures and easy
* parallelization of loops. Two helper classes enable synchronizing printing to an output stream by
* different threads and measuring execution time for benchmarking purposes. Please visit the GitHub
* repository at https://github.com/bshoshany/thread-pool for documentation and updates, or to
* submit feature requests and bug reports.
*/
#define THREAD_POOL_VERSION "v2.0.0 (2021-08-14)"
#include <atomic> // std::atomic
#include <chrono> // std::chrono
#include <cstdint> // std::int_fast64_t, std::uint_fast32_t
#include <functional> // std::function
#include <future> // std::future, std::promise
#include <iostream> // std::cout, std::ostream
#include <memory> // std::shared_ptr, std::unique_ptr
#include <mutex> // std::mutex, std::scoped_lock
#include <queue> // std::queue
#include <thread> // std::this_thread, std::thread
#include <type_traits> // std::common_type_t, std::decay_t, std::enable_if_t, std::is_void_v, std::invoke_result_t
#include <utility> // std::move
// ============================================================================================= //
// Begin class thread_pool //
namespace kvikio::third_party {
/**
* @brief A C++17 thread pool class. The user submits tasks to be executed into a queue. Whenever a
* thread becomes available, it pops a task from the queue and executes it. Each task is
* automatically assigned a future, which can be used to wait for the task to finish executing
* and/or obtain its eventual return value.
*/
class thread_pool {
  // Fast unsigned integer aliases used throughout the pool.
  typedef std::uint_fast32_t ui32;
  typedef std::uint_fast64_t ui64;
 public:
  // ============================
  // Constructors and destructors
  // ============================
  /**
   * @brief Construct a new thread pool.
   *
   * @param _thread_count The number of threads to use. The default value is the total number of
   * hardware threads available, as reported by the implementation. With a hyperthreaded CPU, this
   * will be twice the number of CPU cores. If the argument is zero, the default value will be used
   * instead.
   */
  // NOTE(review): `std::thread::hardware_concurrency()` may itself return 0 on some
  // platforms, in which case the pool is created with zero threads — confirm callers
  // guard against this.
  thread_pool(const ui32& _thread_count = std::thread::hardware_concurrency())
    : thread_count(_thread_count ? _thread_count : std::thread::hardware_concurrency()),
      threads(new std::thread[_thread_count ? _thread_count : std::thread::hardware_concurrency()])
  {
    create_threads();
  }
  /**
   * @brief Destruct the thread pool. Waits for all tasks to complete, then destroys all threads.
   * Note that if the variable paused is set to true, then any tasks still in the queue will never
   * be executed.
   */
  ~thread_pool()
  {
    wait_for_tasks();
    running = false;
    destroy_threads();
  }
  // =======================
  // Public member functions
  // =======================
  /**
   * @brief Get the number of tasks currently waiting in the queue to be executed by the threads.
   *
   * @return The number of queued tasks.
   */
  ui64 get_tasks_queued() const
  {
    const std::scoped_lock lock(queue_mutex);
    return tasks.size();
  }
  /**
   * @brief Get the number of tasks currently being executed by the threads.
   *
   * @return The number of running tasks.
   */
  ui32 get_tasks_running() const { return tasks_total - (ui32)get_tasks_queued(); }
  /**
   * @brief Get the total number of unfinished tasks - either still in the queue, or running in a
   * thread.
   *
   * @return The total number of tasks.
   */
  ui32 get_tasks_total() const { return tasks_total; }
  /**
   * @brief Get the number of threads in the pool.
   *
   * @return The number of threads.
   */
  ui32 get_thread_count() const { return thread_count; }
  /**
   * @brief Parallelize a loop by splitting it into blocks, submitting each block separately to the
   * thread pool, and waiting for all blocks to finish executing. The user supplies a loop function,
   * which will be called once per block and should iterate over the block's range.
   *
   * @tparam T1 The type of the first index in the loop. Should be a signed or unsigned integer.
   * @tparam T2 The type of the index after the last index in the loop. Should be a signed or
   * unsigned integer. If T1 is not the same as T2, a common type will be automatically inferred.
   * @tparam F The type of the function to loop through.
   * @param first_index The first index in the loop.
   * @param index_after_last The index after the last index in the loop. The loop will iterate from
   * first_index to (index_after_last - 1) inclusive. In other words, it will be equivalent to "for
   * (T i = first_index; i < index_after_last; i++)". Note that if first_index == index_after_last,
   * the function will terminate without doing anything.
   * @param loop The function to loop through. Will be called once per block. Should take exactly
   * two arguments: the first index in the block and the index after the last index in the block.
   * loop(start, end) should typically involve a loop of the form "for (T i = start; i < end; i++)".
   * @param num_blocks The maximum number of blocks to split the loop into. The default is to use
   * the number of threads in the pool.
   */
  template <typename T1, typename T2, typename F>
  void parallelize_loop(const T1& first_index,
                        const T2& index_after_last,
                        const F& loop,
                        ui32 num_blocks = 0)
  {
    typedef std::common_type_t<T1, T2> T;
    T the_first_index = (T)first_index;
    T last_index      = (T)index_after_last;
    if (the_first_index == last_index) return;
    // Normalize so that the_first_index <= last_index.
    if (last_index < the_first_index) {
      T temp          = last_index;
      last_index      = the_first_index;
      the_first_index = temp;
    }
    last_index--;
    if (num_blocks == 0) num_blocks = thread_count;
    ui64 total_size = (ui64)(last_index - the_first_index + 1);
    ui64 block_size = (ui64)(total_size / num_blocks);
    if (block_size == 0) {
      block_size = 1;
      num_blocks = (ui32)total_size > 1 ? (ui32)total_size : 1;
    }
    std::atomic<ui32> blocks_running = 0;
    for (ui32 t = 0; t < num_blocks; t++) {
      T start = ((T)(t * block_size) + the_first_index);
      // The final block absorbs any remainder from the integer division above.
      T end =
        (t == num_blocks - 1) ? last_index + 1 : ((T)((t + 1) * block_size) + the_first_index);
      blocks_running++;
      push_task([start, end, &loop, &blocks_running] {
        loop(start, end);
        blocks_running--;
      });
    }
    // Busy-wait (sleep/yield) until all blocks of this loop have completed.
    while (blocks_running != 0) {
      sleep_or_yield();
    }
  }
  /**
   * @brief Push a function with no arguments or return value into the task queue.
   *
   * @tparam F The type of the function.
   * @param task The function to push.
   */
  template <typename F>
  void push_task(const F& task)
  {
    // `tasks_total` is incremented before the task is enqueued; the worker decrements it
    // only after the task has run, so it counts queued + running tasks.
    tasks_total++;
    {
      const std::scoped_lock lock(queue_mutex);
      tasks.push(std::function<void()>(task));
    }
  }
  /**
   * @brief Push a function with arguments, but no return value, into the task queue.
   * @details The function is wrapped inside a lambda in order to hide the arguments, as the tasks
   * in the queue must be of type std::function<void()>, so they cannot have any arguments or return
   * value. If no arguments are provided, the other overload will be used, in order to avoid the
   * (slight) overhead of using a lambda.
   *
   * @tparam F The type of the function.
   * @tparam A The types of the arguments.
   * @param task The function to push.
   * @param args The arguments to pass to the function.
   */
  template <typename F, typename... A>
  void push_task(const F& task, const A&... args)
  {
    push_task([task, args...] { task(args...); });
  }
  /**
   * @brief Reset the number of threads in the pool. Waits for all currently running tasks to be
   * completed, then destroys all threads in the pool and creates a new thread pool with the new
   * number of threads. Any tasks that were waiting in the queue before the pool was reset will then
   * be executed by the new threads. If the pool was paused before resetting it, the new pool will
   * be paused as well.
   *
   * @param _thread_count The number of threads to use. The default value is the total number of
   * hardware threads available, as reported by the implementation. With a hyperthreaded CPU, this
   * will be twice the number of CPU cores. If the argument is zero, the default value will be used
   * instead.
   */
  void reset(const ui32& _thread_count = std::thread::hardware_concurrency())
  {
    // Pause so workers drain only their running tasks; queued tasks survive the reset.
    bool was_paused = paused;
    paused          = true;
    wait_for_tasks();
    running = false;
    destroy_threads();
    thread_count = _thread_count ? _thread_count : std::thread::hardware_concurrency();
    threads.reset(new std::thread[thread_count]);
    paused  = was_paused;
    running = true;
    create_threads();
  }
  /**
   * @brief Submit a function with zero or more arguments and no return value into the task queue,
   * and get an std::future<bool> that will be set to true upon completion of the task.
   *
   * @tparam F The type of the function.
   * @tparam A The types of the zero or more arguments to pass to the function.
   * @param task The function to submit.
   * @param args The zero or more arguments to pass to the function.
   * @return A future to be used later to check if the function has finished its execution.
   */
  template <typename F,
            typename... A,
            typename = std::enable_if_t<
              std::is_void_v<std::invoke_result_t<std::decay_t<F>, std::decay_t<A>...>>>>
  std::future<bool> submit(const F& task, const A&... args)
  {
    std::shared_ptr<std::promise<bool>> task_promise(new std::promise<bool>);
    std::future<bool> future = task_promise->get_future();
    push_task([task, args..., task_promise] {
      try {
        task(args...);
        task_promise->set_value(true);
      } catch (...) {
        // Forward the task's exception to the future; the inner try guards against
        // set_exception itself throwing (e.g. promise already satisfied).
        try {
          task_promise->set_exception(std::current_exception());
        } catch (...) {
        }
      }
    });
    return future;
  }
  /**
   * @brief Submit a function with zero or more arguments and a return value into the task queue,
   * and get a future for its eventual returned value.
   *
   * @tparam F The type of the function.
   * @tparam A The types of the zero or more arguments to pass to the function.
   * @tparam R The return type of the function.
   * @param task The function to submit.
   * @param args The zero or more arguments to pass to the function.
   * @return A future to be used later to obtain the function's returned value, waiting for it to
   * finish its execution if needed.
   */
  template <typename F,
            typename... A,
            typename R = std::invoke_result_t<std::decay_t<F>, std::decay_t<A>...>,
            typename   = std::enable_if_t<!std::is_void_v<R>>>
  std::future<R> submit(const F& task, const A&... args)
  {
    std::shared_ptr<std::promise<R>> task_promise(new std::promise<R>);
    std::future<R> future = task_promise->get_future();
    push_task([task, args..., task_promise] {
      try {
        task_promise->set_value(task(args...));
      } catch (...) {
        // Forward the task's exception to the future; see the void overload above.
        try {
          task_promise->set_exception(std::current_exception());
        } catch (...) {
        }
      }
    });
    return future;
  }
  /**
   * @brief Wait for tasks to be completed. Normally, this function waits for all tasks, both those
   * that are currently running in the threads and those that are still waiting in the queue.
   * However, if the variable paused is set to true, this function only waits for the currently
   * running tasks (otherwise it would wait forever). To wait for a specific task, use submit()
   * instead, and call the wait() member function of the generated future.
   */
  void wait_for_tasks()
  {
    while (true) {
      if (!paused) {
        if (tasks_total == 0) break;
      } else {
        if (get_tasks_running() == 0) break;
      }
      sleep_or_yield();
    }
  }
  // ===========
  // Public data
  // ===========
  /**
   * @brief An atomic variable indicating to the workers to pause. When set to true, the workers
   * temporarily stop popping new tasks out of the queue, although any tasks already executed will
   * keep running until they are done. Set to false again to resume popping tasks.
   */
  std::atomic<bool> paused = false;
  /**
   * @brief The duration, in microseconds, that the worker function should sleep for when it cannot
   * find any tasks in the queue. If set to 0, then instead of sleeping, the worker function will
   * execute std::this_thread::yield() if there are no tasks in the queue. The default value is
   * 1000.
   */
  ui32 sleep_duration = 1000;
 private:
  // ========================
  // Private member functions
  // ========================
  /**
   * @brief Create the threads in the pool and assign a worker to each thread.
   */
  void create_threads()
  {
    for (ui32 i = 0; i < thread_count; i++) {
      threads[i] = std::thread(&thread_pool::worker, this);
    }
  }
  /**
   * @brief Destroy the threads in the pool by joining them.
   */
  void destroy_threads()
  {
    for (ui32 i = 0; i < thread_count; i++) {
      threads[i].join();
    }
  }
  /**
   * @brief Try to pop a new task out of the queue.
   *
   * @param task A reference to the task. Will be populated with a function if the queue is not
   * empty.
   * @return true if a task was found, false if the queue is empty.
   */
  bool pop_task(std::function<void()>& task)
  {
    const std::scoped_lock lock(queue_mutex);
    if (tasks.empty())
      return false;
    else {
      task = std::move(tasks.front());
      tasks.pop();
      return true;
    }
  }
  /**
   * @brief Sleep for sleep_duration microseconds. If that variable is set to zero, yield instead.
   *
   */
  void sleep_or_yield()
  {
    if (sleep_duration)
      std::this_thread::sleep_for(std::chrono::microseconds(sleep_duration));
    else
      std::this_thread::yield();
  }
  /**
   * @brief A worker function to be assigned to each thread in the pool. Continuously pops tasks out
   * of the queue and executes them, as long as the atomic variable running is set to true.
   */
  void worker()
  {
    while (running) {
      std::function<void()> task;
      // While paused, workers skip the queue entirely and just sleep/yield.
      if (!paused && pop_task(task)) {
        task();
        tasks_total--;
      } else {
        sleep_or_yield();
      }
    }
  }
  // ============
  // Private data
  // ============
  /**
   * @brief A mutex to synchronize access to the task queue by different threads.
   */
  mutable std::mutex queue_mutex = {};
  /**
   * @brief An atomic variable indicating to the workers to keep running. When set to false, the
   * workers permanently stop working.
   */
  std::atomic<bool> running = true;
  /**
   * @brief A queue of tasks to be executed by the threads.
   */
  std::queue<std::function<void()>> tasks = {};
  /**
   * @brief The number of threads in the pool.
   */
  ui32 thread_count;
  /**
   * @brief A smart pointer to manage the memory allocated for the threads.
   */
  std::unique_ptr<std::thread[]> threads;
  /**
   * @brief An atomic variable to keep track of the total number of unfinished tasks - either still
   * in the queue, or running in a thread.
   */
  std::atomic<ui32> tasks_total = 0;
};
} // namespace kvikio::third_party
// End class thread_pool //
// ============================================================================================= //
| 0 |
rapidsai_public_repos/kvikio/cpp/include
|
rapidsai_public_repos/kvikio/cpp/include/kvikio/error.hpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <exception>
#include <system_error>
#include <kvikio/shim/cuda.hpp>
#include <kvikio/shim/cufile_h_wrapper.hpp>
namespace kvikio {
/**
 * @brief Exception type thrown by KvikIO on CUDA driver and cuFile errors.
 */
struct CUfileException : public std::runtime_error {
  using std::runtime_error::runtime_error;
};
// `CUDA_DRIVER_TRY(call [, exception_type])`: evaluate a CUDA driver API call and, on
// failure, throw `exception_type` (default `kvikio::CUfileException`) with file/line
// context. The one- and two-argument forms are dispatched via GET_CUDA_DRIVER_TRY_MACRO.
// NOTE(review): these macros expand `KVIKIO_STRINGIFY`, which is not defined in this
// header — confirm that every includer also sees its definition (e.g. via utils.hpp).
#ifndef CUDA_DRIVER_TRY
#define CUDA_DRIVER_TRY(...)                                                 \
  GET_CUDA_DRIVER_TRY_MACRO(__VA_ARGS__, CUDA_DRIVER_TRY_2, CUDA_DRIVER_TRY_1) \
  (__VA_ARGS__)
#define GET_CUDA_DRIVER_TRY_MACRO(_1, _2, NAME, ...) NAME
// Two-argument form: CUDA_ERROR_STUB_LIBRARY gets a dedicated message (the stub driver
// means no real CUDA driver is installed); all other non-success codes are reported with
// the driver's own error name/string, falling back to "unknown" if those lookups fail.
#define CUDA_DRIVER_TRY_2(_call, _exception_type)                                         \
  do {                                                                                    \
    CUresult const error = (_call);                                                       \
    if (error == CUDA_ERROR_STUB_LIBRARY) {                                               \
      throw(_exception_type){std::string{"CUDA error at: "} + __FILE__ + ":" +            \
                             KVIKIO_STRINGIFY(__LINE__) +                                 \
                             ": CUDA_ERROR_STUB_LIBRARY("                                 \
                             "The CUDA driver loaded is a stub library)"};                \
    }                                                                                     \
    if (error != CUDA_SUCCESS) {                                                          \
      const char* err_name     = nullptr;                                                 \
      const char* err_str      = nullptr;                                                 \
      CUresult err_name_status = cudaAPI::instance().GetErrorName(error, &err_name);      \
      CUresult err_str_status  = cudaAPI::instance().GetErrorString(error, &err_str);     \
      if (err_name_status == CUDA_ERROR_INVALID_VALUE) { err_name = "unknown"; }          \
      if (err_str_status == CUDA_ERROR_INVALID_VALUE) { err_str = "unknown"; }            \
      throw(_exception_type){std::string{"CUDA error at: "} + __FILE__ + ":" +            \
                             KVIKIO_STRINGIFY(__LINE__) + ": " + std::string(err_name) +  \
                             "(" + std::string(err_str) + ")"};                           \
    }                                                                                     \
  } while (0)
#define CUDA_DRIVER_TRY_1(_call) CUDA_DRIVER_TRY_2(_call, kvikio::CUfileException)
#endif
// `CUFILE_TRY(call [, exception_type])`: same pattern for cuFile API calls. A
// CU_FILE_CUDA_DRIVER_ERROR is unwrapped and re-checked through CUDA_DRIVER_TRY so the
// underlying CUDA error is reported; only available when cuFile was found at build time.
#ifdef KVIKIO_CUFILE_FOUND
#ifndef CUFILE_TRY
#define CUFILE_TRY(...)                                   \
  GET_CUFILE_TRY_MACRO(__VA_ARGS__, CUFILE_TRY_2, CUFILE_TRY_1) \
  (__VA_ARGS__)
#define GET_CUFILE_TRY_MACRO(_1, _2, NAME, ...) NAME
#define CUFILE_TRY_2(_call, _exception_type)                                  \
  do {                                                                        \
    CUfileError_t const error = (_call);                                      \
    if (error.err != CU_FILE_SUCCESS) {                                       \
      if (error.err == CU_FILE_CUDA_DRIVER_ERROR) {                           \
        CUresult const cuda_error = error.cu_err;                             \
        CUDA_DRIVER_TRY(cuda_error);                                          \
      }                                                                       \
      throw(_exception_type){std::string{"cuFile error at: "} + __FILE__ + ":" + \
                             KVIKIO_STRINGIFY(__LINE__) + ": " +              \
                             cufileop_status_error(error.err)};               \
    }                                                                         \
  } while (0)
#define CUFILE_TRY_1(_call) CUFILE_TRY_2(_call, kvikio::CUfileException)
#endif
#endif
// `CUFILE_CHECK_STREAM_IO(nbytes_done [, exception_type])`: validate the byte count
// written back by a cuFile stream operation — negative values encode an error. When
// cuFile isn't available the macro compiles to a no-op so call sites need no #ifdefs.
#ifndef CUFILE_CHECK_STREAM_IO
#define CUFILE_CHECK_STREAM_IO(...)                          \
  GET_CUFILE_CHECK_STREAM_IO_MACRO(                          \
    __VA_ARGS__, CUFILE_CHECK_STREAM_IO_2, CUFILE_CHECK_STREAM_IO_1) \
  (__VA_ARGS__)
#define GET_CUFILE_CHECK_STREAM_IO_MACRO(_1, _2, NAME, ...) NAME
#ifdef KVIKIO_CUFILE_FOUND
#define CUFILE_CHECK_STREAM_IO_2(_nbytes_done, _exception_type)                  \
  do {                                                                           \
    auto const _nbytes = *(_nbytes_done);                                        \
    if (_nbytes < 0) {                                                           \
      throw(_exception_type){std::string{"cuFile error at: "} + __FILE__ + ":" + \
                             KVIKIO_STRINGIFY(__LINE__) + ": " + std::to_string(_nbytes)}; \
    }                                                                            \
  } while (0)
#else
// if cufile isn't available, we don't do anything in the body
#define CUFILE_CHECK_STREAM_IO_2(_nbytes_done, _exception_type) \
  do {                                                          \
  } while (0)
#endif
#define CUFILE_CHECK_STREAM_IO_1(_call) CUFILE_CHECK_STREAM_IO_2(_call, kvikio::CUfileException)
#endif
} // namespace kvikio
| 0 |
rapidsai_public_repos/kvikio/cpp/include
|
rapidsai_public_repos/kvikio/cpp/include/kvikio/defaults.hpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm>
#include <cctype>
#include <cstddef>
#include <cstdlib>
#include <sstream>
#include <stdexcept>
#include <string>
#include <string_view>
#include <utility>
#include <kvikio/shim/cufile.hpp>
#include <kvikio/thread_pool.hpp>
namespace kvikio {
namespace detail {
/**
 * @brief Return the value of the environment variable `env_var_name` parsed as a `T`,
 * or `default_val` when the variable isn't set.
 *
 * @tparam T Type the environment variable is parsed as (via `operator>>`).
 * @param env_var_name Name of the environment variable.
 * @param default_val Value returned when the variable isn't set.
 * @return The parsed value.
 * @throws std::invalid_argument if the variable is set but cannot be parsed as a `T`.
 */
template <typename T>
T getenv_or(std::string_view env_var_name, T default_val)
{
  // A `std::string_view` is not guaranteed to be null-terminated, so copy it into a
  // `std::string` before handing it to `std::getenv` (which requires a C string).
  const auto* env_val = std::getenv(std::string{env_var_name}.c_str());
  if (env_val == nullptr) { return default_val; }
  std::stringstream sstream(env_val);
  T converted_val;
  sstream >> converted_val;
  if (sstream.fail()) {
    throw std::invalid_argument("unknown config value " + std::string{env_var_name} + "=" +
                                std::string{env_val});
  }
  return converted_val;
}
/**
 * @brief Specialization of `getenv_or` for `bool`.
 *
 * Accepts anything `std::stoi` parses (nonzero means true) as well as the
 * case-insensitive, whitespace-trimmed strings "true"/"on"/"yes" and
 * "false"/"off"/"no".
 *
 * @param env_var_name Name of the environment variable.
 * @param default_val Value returned when the variable isn't set.
 * @return The parsed boolean.
 * @throws std::invalid_argument if the variable is set but matches none of the accepted
 * forms.
 */
template <>
inline bool getenv_or(std::string_view env_var_name, bool default_val)
{
  // See the primary template: ensure null-termination before calling `std::getenv`.
  const auto* env_val = std::getenv(std::string{env_var_name}.c_str());
  if (env_val == nullptr) { return default_val; }
  try {
    // Try parsing `env_var_name` as a integer
    return static_cast<bool>(std::stoi(env_val));
  } catch (const std::invalid_argument&) {
  }
  // Convert to lowercase
  std::string str{env_val};
  std::transform(str.begin(), str.end(), str.begin(), ::tolower);
  // Trim whitespaces
  std::stringstream trimmer;
  trimmer << str;
  str.clear();
  trimmer >> str;
  // Match value
  if (str == "true" || str == "on" || str == "yes") { return true; }
  if (str == "false" || str == "off" || str == "no") { return false; }
  throw std::invalid_argument("unknown config value " + std::string{env_var_name} + "=" +
                              std::string{env_val});
}
} // namespace detail
/**
* @brief Singleton class of default values used thoughtout KvikIO.
*
*/
class defaults {
private:
kvikio::third_party::thread_pool _thread_pool{get_num_threads_from_env()};
bool _compat_mode;
std::size_t _task_size;
std::size_t _gds_threshold;
static unsigned int get_num_threads_from_env()
{
const int ret = detail::getenv_or("KVIKIO_NTHREADS", 1);
if (ret <= 0) { throw std::invalid_argument("KVIKIO_NTHREADS has to be a positive integer"); }
return ret;
}
defaults()
{
// Determine the default value of `compat_mode`
{
if (std::getenv("KVIKIO_COMPAT_MODE") != nullptr) {
// Setting `KVIKIO_COMPAT_MODE` take precedence
_compat_mode = detail::getenv_or("KVIKIO_COMPAT_MODE", false);
} else {
// If `KVIKIO_COMPAT_MODE` isn't set, we infer based on runtime environment
_compat_mode = !is_cufile_available();
}
}
// Determine the default value of `task_size`
{
const ssize_t env = detail::getenv_or("KVIKIO_TASK_SIZE", 4 * 1024 * 1024);
if (env <= 0) {
throw std::invalid_argument("KVIKIO_TASK_SIZE has to be a positive integer");
}
_task_size = env;
}
// Determine the default value of `gds_threshold`
{
const ssize_t env = detail::getenv_or("KVIKIO_GDS_THRESHOLD", 1024 * 1024);
if (env <= 0) {
throw std::invalid_argument("KVIKIO_GDS_THRESHOLD has to be a positive integer");
}
_gds_threshold = env;
}
}
static defaults* instance()
{
static defaults _instance;
return &_instance;
}
public:
/**
* @brief Return whether the KvikIO library is running in compatibility mode or not
*
* Notice, this is not the same as the compatibility mode in cuFile. That is,
* cuFile can run in compatibility mode while KvikIO is not.
*
* When KvikIO is running in compatibility mode, it doesn't load `libcufile.so`. Instead,
* reads and writes are done using POSIX.
*
* Set the environment variable `KVIKIO_COMPAT_MODE` to enable/disable compatibility mode.
* By default, compatibility mode is enabled:
* - when `libcufile` cannot be found
* - when running in Windows Subsystem for Linux (WSL)
* - when `/run/udev` isn't readable, which typically happens when running inside a docker
* image not launched with `--volume /run/udev:/run/udev:ro`
*
* @return The boolean answer
*/
[[nodiscard]] static bool compat_mode() { return instance()->_compat_mode; }
/**
* @brief Reset the value of `kvikio::defaults::compat_mode()`
*
* Changing compatibility mode, effects all new FileHandles that doesn't sets the
* `compat_mode` argument explicitly but it never effect existing FileHandles.
*
* @param enable Whether to enable compatibility mode or not.
*/
static void compat_mode_reset(bool enable) { instance()->_compat_mode = enable; }
/**
* @brief Get the default thread pool.
*
* Notice, it is not possible to change the default thread pool. KvikIO will
* always use the same thread pool however it is possible to change number of
* threads in the pool (see `kvikio::default::thread_pool_nthreads_reset()`).
*
* @return The the default thread pool instance.
*/
[[nodiscard]] static kvikio::third_party::thread_pool& thread_pool()
{
return instance()->_thread_pool;
}
/**
* @brief Get the number of threads in the default thread pool.
*
* Set the default value using `kvikio::default::thread_pool_nthreads_reset()` or by
* setting the `KVIKIO_NTHREADS` environment variable. If not set, the default value is 1.
*
* @return The number of threads.
*/
[[nodiscard]] static unsigned int thread_pool_nthreads()
{
return thread_pool().get_thread_count();
}
  /**
   * @brief Reset the number of threads in the default thread pool.
   *
   * Waits for all currently running tasks to be completed, then destroys all threads
   * in the pool and creates a new thread pool with the new number of threads. Any
   * tasks that were waiting in the queue before the pool was reset will then be
   * executed by the new threads. If the pool was paused before resetting it, the new
   * pool will be paused as well.
   *
   * @param nthreads The number of threads to use.
   */
  static void thread_pool_nthreads_reset(unsigned int nthreads) { thread_pool().reset(nthreads); }
  /**
   * @brief Get the default task size used for parallel IO operations.
   *
   * Set the default value using `kvikio::defaults::task_size_reset()` or by setting
   * the `KVIKIO_TASK_SIZE` environment variable. If not set, the default value is 4 MiB.
   *
   * @return The default task size in bytes.
   */
  [[nodiscard]] static std::size_t task_size() { return instance()->_task_size; }
  /**
   * @brief Reset the default task size used for parallel IO operations.
   *
   * Only affects operations that read their task size through
   * `kvikio::defaults::task_size()`; an explicitly passed `task_size` argument wins.
   *
   * @param nbytes The default task size in bytes.
   */
  static void task_size_reset(std::size_t nbytes) { instance()->_task_size = nbytes; }
  /**
   * @brief Get the default GDS threshold, which is the minimum size to use GDS (in bytes).
   *
   * In order to improve performance of small IO, `.pread()` and `.pwrite()` implement a
   * shortcut that circumvents the threadpool and uses the POSIX backend directly.
   *
   * Set the default value using `kvikio::defaults::gds_threshold_reset()` or by setting the
   * `KVIKIO_GDS_THRESHOLD` environment variable. If not set, the default value is 1 MiB.
   *
   * @return The default GDS threshold size in bytes.
   */
  [[nodiscard]] static std::size_t gds_threshold() { return instance()->_gds_threshold; }
  /**
   * @brief Reset the default GDS threshold, which is the minimum size to use GDS (in bytes).
   *
   * @param nbytes The default GDS threshold size in bytes.
   */
  static void gds_threshold_reset(std::size_t nbytes) { instance()->_gds_threshold = nbytes; }
};
} // namespace kvikio
| 0 |
rapidsai_public_repos/kvikio/cpp/include
|
rapidsai_public_repos/kvikio/cpp/include/kvikio/utils.hpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <chrono>
#include <cstddef>
#include <cstring>
#include <future>
#include <iostream>
#include <limits>
#include <map>
#include <optional>
#include <tuple>

#include <kvikio/error.hpp>
#include <kvikio/shim/cuda.hpp>
namespace kvikio {
// cuFile defines its page size to be 4 KiB
inline constexpr std::size_t page_size = 4096;
/**
 * @brief Convert a `std::size_t` to an `off_t`, throwing on overflow.
 *
 * @param x Value to convert.
 * @return `x` as an `off_t`.
 * @throws kvikio::CUfileException If `x` cannot be represented by `off_t`.
 */
[[nodiscard]] inline off_t convert_size2off(std::size_t x)
{
  constexpr auto off_max = static_cast<std::size_t>(std::numeric_limits<off_t>::max());
  if (x < off_max) { return static_cast<off_t>(x); }
  throw CUfileException("size_t argument too large to fit off_t");
}
/**
 * @brief Convert a `std::size_t` to a `ssize_t`, throwing on overflow.
 *
 * @param x Value to convert.
 * @return `x` as a `ssize_t`.
 * @throws kvikio::CUfileException If `x` cannot be represented by `ssize_t`.
 */
[[nodiscard]] inline ssize_t convert_size2ssize(std::size_t x)
{
  constexpr auto ssize_max = static_cast<std::size_t>(std::numeric_limits<ssize_t>::max());
  if (x < ssize_max) { return static_cast<ssize_t>(x); }
  throw CUfileException("size_t argument too large to fit ssize_t");
}
/**
 * @brief Reinterpret a `void*` as a CUDA device pointer (`CUdeviceptr`).
 *
 * @param devPtr Pointer to reinterpret.
 * @return The same address as a `CUdeviceptr`.
 */
[[nodiscard]] inline CUdeviceptr convert_void2deviceptr(const void* devPtr)
{
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
  return reinterpret_cast<CUdeviceptr>(devPtr);
}
/**
 * @brief Check if `ptr` points to host memory (as opposed to device memory)
 *
 * In this context, managed memory counts as device memory
 *
 * @param ptr Memory pointer to query
 * @return The boolean answer
 */
inline bool is_host_memory(const void* ptr)
{
  CUpointer_attribute attrs[1] = {
    CU_POINTER_ATTRIBUTE_MEMORY_TYPE,
  };
  CUmemorytype memtype{};
  void* data[1] = {&memtype};
  CUresult result =
    cudaAPI::instance().PointerGetAttributes(1, attrs, data, convert_void2deviceptr(ptr));
  // We assume that `ptr` is host memory when CUDA_ERROR_NOT_INITIALIZED — without an
  // initialized driver there can be no device allocations.
  if (result == CUDA_ERROR_NOT_INITIALIZED) { return true; }
  CUDA_DRIVER_TRY(result);
  // Notice, querying `CU_POINTER_ATTRIBUTE_MEMORY_TYPE` returns zero when the memory
  // is unregistered host memory. This is undocumented but how the Runtime CUDA API
  // does it to support `cudaMemoryTypeUnregistered`.
  return memtype == 0 || memtype == CU_MEMORYTYPE_HOST;
}
/**
 * @brief Return the device owning the pointer
 *
 * @param dev_ptr Device pointer to query
 * @return The device ordinal
 */
[[nodiscard]] inline int get_device_ordinal_from_pointer(CUdeviceptr dev_ptr)
{
  int ret = 0;
  CUDA_DRIVER_TRY(
    cudaAPI::instance().PointerGetAttribute(&ret, CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL, dev_ptr));
  return ret;
}
/**
 * @brief RAII wrapper for a CUDA primary context
 *
 * Retains the primary context of the given device on construction and releases it
 * on destruction. Neither copyable nor movable.
 */
class CudaPrimaryContext {
 public:
  CUdevice dev{};
  CUcontext ctx{};

  /**
   * @brief Retain the primary context of the given device.
   *
   * @param device_ordinal Device ordinal - an integer between 0 and the number of CUDA devices.
   */
  CudaPrimaryContext(int device_ordinal)
  {
    CUDA_DRIVER_TRY(cudaAPI::instance().DeviceGet(&dev, device_ordinal));
    CUDA_DRIVER_TRY(cudaAPI::instance().DevicePrimaryCtxRetain(&ctx, dev));
  }
  CudaPrimaryContext(const CudaPrimaryContext&) = delete;
  CudaPrimaryContext& operator=(CudaPrimaryContext const&) = delete;
  CudaPrimaryContext(CudaPrimaryContext&&) = delete;
  // Fixed: deleted move-assignment previously declared an rvalue-reference return
  // type (`CudaPrimaryContext&&`); assignment operators conventionally return `&`.
  CudaPrimaryContext& operator=(CudaPrimaryContext&&) = delete;
  ~CudaPrimaryContext()
  {
    // Destructors must not throw; log and swallow release failures instead.
    try {
      CUDA_DRIVER_TRY(cudaAPI::instance().DevicePrimaryCtxRelease(dev), CUfileException);
    } catch (const CUfileException& e) {
      std::cerr << e.what() << std::endl;
    }
  }
};
/**
 * @brief Given a device ordinal, return the primary context of the device.
 *
 * This function caches the primary contexts retrieved until program exit
 *
 * @param ordinal Device ordinal - an integer between 0 and the number of CUDA devices
 * @return Primary CUDA context
 */
[[nodiscard]] inline CUcontext get_primary_cuda_context(int ordinal)
{
  // NOTE(review): this static cache is not guarded by a mutex — presumably callers
  // serialize the first lookup per ordinal; confirm before relying on it from
  // multiple threads.
  static std::map<int, CudaPrimaryContext> _primary_contexts;
  _primary_contexts.try_emplace(ordinal, ordinal);
  return _primary_contexts.at(ordinal).ctx;
}
/**
 * @brief Return the CUDA context associated with the given device pointer, if any.
 *
 * @param dev_ptr Device pointer to query
 * @return Usable CUDA context, if one was found.
 */
[[nodiscard]] inline std::optional<CUcontext> get_context_associated_pointer(CUdeviceptr dev_ptr)
{
  CUcontext ctx{nullptr};
  const CUresult status =
    cudaAPI::instance().PointerGetAttribute(&ctx, CU_POINTER_ATTRIBUTE_CONTEXT, dev_ptr);
  const bool found = (status == CUDA_SUCCESS) && (ctx != nullptr);
  if (found) { return std::optional<CUcontext>{ctx}; }
  // `CUDA_ERROR_INVALID_VALUE` simply means no context is associated with the pointer;
  // every other failure is propagated.
  if (status != CUDA_ERROR_INVALID_VALUE) { CUDA_DRIVER_TRY(status); }
  return std::nullopt;
}
/**
 * @brief Check if the current CUDA context can access the given device pointer
 *
 * @param dev_ptr Device pointer to query
 * @return The boolean answer
 */
[[nodiscard]] inline bool current_context_can_access_pointer(CUdeviceptr dev_ptr)
{
  CUdeviceptr current_ctx_dev_ptr{};
  // Fixed: the first argument was the mojibake token `¤t_ctx_dev_ptr` (a garbled
  // `&current_ctx_dev_ptr`), which does not compile.
  const CUresult err = cudaAPI::instance().PointerGetAttribute(
    &current_ctx_dev_ptr, CU_POINTER_ATTRIBUTE_DEVICE_POINTER, dev_ptr);
  // If the pointer maps to itself in the current context, it is accessible.
  if (err == CUDA_SUCCESS && current_ctx_dev_ptr == dev_ptr) { return true; }
  // `CUDA_ERROR_INVALID_VALUE` just means "not accessible"; other errors are propagated.
  if (err != CUDA_ERROR_INVALID_VALUE) { CUDA_DRIVER_TRY(err); }
  return false;
}
/**
 * @brief Return a CUDA context that can be used with the given device pointer
 *
 * For robustness, we look for a usable context in the following order:
 * 1) If a context has been associated with `devPtr`, it is returned.
 * 2) If the current context exists and can access `devPtr`, it is returned.
 * 3) Return the primary context of the device that owns `devPtr`. We assume the
 * primary context can access `devPtr`, which might not be true in the exceptional
 * disjoint addressing cases mentioned in the CUDA docs[1]. In these cases, the user
 * has to set a usable current context before reading/writing using KvikIO.
 *
 * [1] <https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__UNIFIED.html>
 *
 * @param devPtr Device pointer to query
 * @return Usable CUDA context
 */
[[nodiscard]] inline CUcontext get_context_from_pointer(const void* devPtr)
{
  CUdeviceptr dev_ptr = convert_void2deviceptr(devPtr);
  // First we check if a context has been associated with `devPtr`.
  {
    auto ctx = get_context_associated_pointer(dev_ptr);
    if (ctx.has_value()) { return ctx.value(); }
  }
  // If this isn't the case, we check the current context. If it exists and can access
  // `devPtr`, we return the current context.
  {
    CUcontext ctx = nullptr;
    CUDA_DRIVER_TRY(cudaAPI::instance().CtxGetCurrent(&ctx));
    if (ctx != nullptr && current_context_can_access_pointer(dev_ptr)) { return ctx; }
  }
  // Finally, if we didn't find any usable context, we return the primary context of the
  // device that owns `devPtr`. If the primary context cannot access `devPtr`, we accept failure.
  return get_primary_cuda_context(get_device_ordinal_from_pointer(dev_ptr));
}
/**
 * @brief Push CUDA context on creation and pop it on destruction
 *
 * RAII guard that makes `ctx` the calling thread's current context for the
 * lifetime of the object. Neither copyable nor movable.
 */
class PushAndPopContext {
 private:
  CUcontext _ctx;

 public:
  /**
   * @brief Push `ctx` as the current CUDA context of the calling thread.
   *
   * @param ctx The context to make current.
   */
  PushAndPopContext(CUcontext ctx) : _ctx{ctx}
  {
    CUDA_DRIVER_TRY(cudaAPI::instance().CtxPushCurrent(_ctx));
  }
  PushAndPopContext(const PushAndPopContext&) = delete;
  PushAndPopContext& operator=(PushAndPopContext const&) = delete;
  PushAndPopContext(PushAndPopContext&&) = delete;
  // Fixed: deleted move-assignment previously declared an rvalue-reference return
  // type (`PushAndPopContext&&`); assignment operators conventionally return `&`.
  PushAndPopContext& operator=(PushAndPopContext&&) = delete;
  ~PushAndPopContext()
  {
    // Destructors must not throw; log and swallow pop failures instead.
    try {
      CUDA_DRIVER_TRY(cudaAPI::instance().CtxPopCurrent(&_ctx), CUfileException);
    } catch (const CUfileException& e) {
      std::cerr << e.what() << std::endl;
    }
  }
};
/**
 * @brief Find the base address, size, and offset of the allocation `devPtr` is in.
 *
 * @param devPtr Device pointer to query.
 * @param ctx Optional CUDA context to use for the query; when null, a context is
 * derived from `devPtr` via `get_context_from_pointer()`.
 * @return Tuple of (allocation base address, allocation size in bytes, byte offset
 * of `devPtr` within the allocation).
 */
inline std::tuple<void*, std::size_t, std::size_t> get_alloc_info(const void* devPtr,
                                                                  CUcontext* ctx = nullptr)
{
  auto dev = convert_void2deviceptr(devPtr);
  CUdeviceptr base_ptr{};
  std::size_t base_size{};
  CUcontext _ctx{};
  if (ctx != nullptr) {
    _ctx = *ctx;
  } else {
    _ctx = get_context_from_pointer(devPtr);
  }
  // The address-range query must run with a context that can access `devPtr`.
  PushAndPopContext context(_ctx);
  CUDA_DRIVER_TRY(cudaAPI::instance().MemGetAddressRange(&base_ptr, &base_size, dev));
  std::size_t offset = dev - base_ptr;
  // NOLINTNEXTLINE(performance-no-int-to-ptr, cppcoreguidelines-pro-type-reinterpret-cast)
  return std::make_tuple(reinterpret_cast<void*>(base_ptr), base_size, offset);
}
/**
 * @brief Check whether a future is ready, i.e. waiting on it would not block.
 *
 * @param future The future (or shared_future) to query; must be valid.
 * @return true when the future's result is available (or its task is deferred).
 */
template <typename T>
inline bool is_future_done(const T& future)
{
  const auto state = future.wait_for(std::chrono::seconds(0));
  return state != std::future_status::timeout;
}
} // namespace kvikio
| 0 |
rapidsai_public_repos/kvikio/cpp/include
|
rapidsai_public_repos/kvikio/cpp/include/kvikio/batch.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstddef>
#include <ctime>
#include <utility>
#include <vector>
#include <kvikio/error.hpp>
#include <kvikio/file_handle.hpp>
#include <kvikio/shim/cufile.hpp>
namespace kvikio {
/**
 * @brief IO operation used when submitting batches
 *
 * @note `file_handle` is held by reference; the referenced FileHandle must
 * outlive any batch submission that uses this operation.
 */
struct BatchOp {
  // The file handle of the file to read or write
  FileHandle& file_handle;
  // Base address of buffer in device memory (host memory not supported).
  void* devPtr_base;
  // Offset in the file to read from or write to.
  off_t file_offset;
  // Offset relative to the `devPtr_base` pointer to write into or read from.
  off_t devPtr_offset;
  // Size in bytes to read or write.
  size_t size;
  // The operation type: CUFILE_READ or CUFILE_WRITE.
  CUfileOpcode_t opcode;
};
#ifdef KVIKIO_CUFILE_BATCH_API_FOUND
/**
* @brief Handle of an cuFile batch using semantic.
*
* The workflow is as follows:
* 1) Create a batch with a large enough `max_num_events`.
* 2) Call `.submit()` with a vector of operations (`vector.size() <= max_num_events`).
* 3) Call `.status()` to wait on the operations to finish, or
* 3) Call `.cancel()` to cancel the operations.
* 4) Go to step 2 or call `.close()` to free up resources.
*
* Notice, a batch handle can only handle one "submit" at a time and is closed
* in the destructor automatically.
*/
class BatchHandle {
private:
bool _initialized{false};
int _max_num_events{};
CUfileBatchHandle_t _handle{};
public:
BatchHandle() noexcept = default;
/**
* @brief Construct a batch handle
*
* @param max_num_events The maximum number of operations supported by this instance.
*/
BatchHandle(int max_num_events) : _initialized{true}, _max_num_events{max_num_events}
{
CUFILE_TRY(cuFileAPI::instance().BatchIOSetUp(&_handle, max_num_events));
}
/**
* @brief BatchHandle support move semantic but isn't copyable
*/
BatchHandle(const BatchHandle&) = delete;
BatchHandle& operator=(BatchHandle const&) = delete;
BatchHandle(BatchHandle&& o) noexcept
: _initialized{std::exchange(o._initialized, false)},
_max_num_events{std::exchange(o._max_num_events, 0)}
{
_handle = std::exchange(o._handle, CUfileBatchHandle_t{});
}
~BatchHandle() noexcept { close(); }
[[nodiscard]] bool closed() const noexcept { return !_initialized; }
/**
* @brief Destroy the batch handle and free up resources
*/
void close() noexcept
{
if (closed()) { return; }
_initialized = false;
cuFileAPI::instance().BatchIODestroy(_handle);
}
/**
* @brief Submit a vector of batch operations
*
* @param operations The vector of batch operations, which must not exceed the
* `max_num_events`.
*/
void submit(const std::vector<BatchOp>& operations)
{
if (convert_size2ssize(operations.size()) > _max_num_events) {
throw CUfileException("Cannot submit more than the max_num_events)");
}
std::vector<CUfileIOParams_t> io_batch_params;
io_batch_params.reserve(operations.size());
for (const auto& op : operations) {
if (op.file_handle.is_compat_mode_on()) {
throw CUfileException("Cannot submit a FileHandle opened in compatibility mode");
}
io_batch_params.push_back(CUfileIOParams_t{.mode = CUFILE_BATCH,
.u = {.batch = {.devPtr_base = op.devPtr_base,
.file_offset = op.file_offset,
.devPtr_offset = op.devPtr_offset,
.size = op.size}},
.fh = op.file_handle.handle(),
.opcode = op.opcode,
.cookie = nullptr});
}
CUFILE_TRY(cuFileAPI::instance().BatchIOSubmit(
_handle, io_batch_params.size(), io_batch_params.data(), 0));
}
/**
* @brief Get status of submitted operations
*
* @param min_nr The minimum number of IO entries for which status is requested.
* @param max_nr The maximum number of IO requests to poll for.
* @param timeout This parameter is used to specify the amount of time to wait for
* in this API, even if the minimum number of requests have not completed. If the
* timeout hits, it is possible that the number of returned IOs can be less than `min_nr`
* @return Vector of the status of the completed I/Os in the batch.
*/
std::vector<CUfileIOEvents_t> status(unsigned min_nr,
unsigned max_nr,
struct timespec* timeout = nullptr)
{
std::vector<CUfileIOEvents_t> ret;
ret.resize(_max_num_events);
CUFILE_TRY(cuFileAPI::instance().BatchIOGetStatus(_handle, min_nr, &max_nr, &ret[0], timeout));
ret.resize(max_nr);
return ret;
}
void cancel() { CUFILE_TRY(cuFileAPI::instance().BatchIOCancel(_handle)); }
};
#else
// Fallback stub used when cuFile's batch API isn't available (requires CUDA v12.1+).
// Only the default constructor is usable; constructing with `max_num_events` throws.
class BatchHandle {
 public:
  BatchHandle() noexcept = default;
  BatchHandle(int max_num_events)
  {
    throw CUfileException("BatchHandle requires cuFile's batch API, please build with CUDA v12.1+");
  }
  // A stub handle is always considered closed.
  [[nodiscard]] bool closed() const noexcept { return true; }
  // No-op: nothing to release.
  void close() noexcept {}
  // No-op: unreachable in practice since construction with events throws.
  void submit(const std::vector<BatchOp>& operations) {}
  std::vector<CUfileIOEvents_t> status(unsigned min_nr,
                                       unsigned max_nr,
                                       struct timespec* timeout = nullptr)
  {
    return std::vector<CUfileIOEvents_t>{};
  }
  void cancel() {}
};
#endif
} // namespace kvikio
| 0 |
rapidsai_public_repos/kvikio/cpp/include
|
rapidsai_public_repos/kvikio/cpp/include/kvikio/buffer.hpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm>
#include <iostream>
#include <map>
#include <vector>
#include <kvikio/defaults.hpp>
#include <kvikio/error.hpp>
#include <kvikio/shim/cufile.hpp>
#include <kvikio/shim/cufile_h_wrapper.hpp>
#include <kvikio/utils.hpp>
namespace kvikio {
/**
 * @brief Register an existing device memory allocation with cuFile, pinning it for
 * GPUDirect Storage access.
 *
 * A no-op when KvikIO's compatibility mode is enabled or KvikIO wasn't built with cuFile.
 *
 * @param devPtr_base Device pointer to the allocation to register
 * @param size Size in bytes of the memory region starting at `devPtr_base`
 * @param flags Should be zero or `CU_FILE_RDMA_REGISTER` (experimental)
 * @param errors_to_ignore cuFile errors to ignore such as `CU_FILE_MEMORY_ALREADY_REGISTERED`
 * or `CU_FILE_INVALID_MAPPING_SIZE`
 *
 * @note This memory will be used to perform GPU direct DMA from the supported storage.
 * @warning This API is intended for use cases where the memory is used as a streaming
 * buffer that is reused across multiple cuFile IO operations.
 */
/*NOLINTNEXTLINE(readability-function-cognitive-complexity)*/
inline void buffer_register(const void* devPtr_base,
                            std::size_t size,
                            int flags = 0,
                            const std::vector<int>& errors_to_ignore = std::vector<int>())
{
  if (defaults::compat_mode()) { return; }
#ifdef KVIKIO_CUFILE_FOUND
  const CUfileError_t status = cuFileAPI::instance().BufRegister(devPtr_base, size, flags);
  if (status.err == CU_FILE_SUCCESS) { return; }
  // Swallow the error if it is listed in `errors_to_ignore`, otherwise propagate it.
  const bool ignored = std::find(errors_to_ignore.begin(), errors_to_ignore.end(), status.err) !=
                       errors_to_ignore.end();
  if (!ignored) { CUFILE_TRY(status); }
#endif
}
/**
 * @brief Deregister an already registered device memory from cuFile
 *
 * A no-op when KvikIO's compatibility mode is enabled or KvikIO wasn't built with cuFile.
 *
 * @param devPtr_base Device pointer to deregister
 */
inline void buffer_deregister(const void* devPtr_base)
{
  if (defaults::compat_mode()) { return; }
#ifdef KVIKIO_CUFILE_FOUND
  CUFILE_TRY(cuFileAPI::instance().BufDeregister(devPtr_base));
#endif
}
/**
 * @brief Register the device memory allocation which `devPtr` is part of. Use this
 * together with FileHandle::pread() and FileHandle::pwrite().
 *
 * The whole allocation containing `devPtr` (found via `get_alloc_info`) is registered.
 *
 * @param devPtr Device pointer
 * @param flags Should be zero or `CU_FILE_RDMA_REGISTER` (experimental)
 * @param errors_to_ignore cuFile errors to ignore such as `CU_FILE_MEMORY_ALREADY_REGISTERED`
 * or `CU_FILE_INVALID_MAPPING_SIZE`
 *
 * @note This memory will be used to perform GPU direct DMA from the supported storage.
 * @warning This API is intended for use cases where the memory is used as a streaming
 * buffer that is reused across multiple cuFile IO operations.
 */
inline void memory_register(const void* devPtr,
                            int flags = 0,
                            const std::vector<int>& errors_to_ignore = {})
{
  auto [base, nbytes, offset] = get_alloc_info(devPtr);
  buffer_register(base, nbytes, flags, errors_to_ignore);
}
/**
 * @brief Deregister an already registered device memory from cuFile.
 *
 * The whole allocation containing `devPtr` (found via `get_alloc_info`) is deregistered.
 *
 * @param devPtr Device pointer to deregister
 */
inline void memory_deregister(const void* devPtr)
{
  auto [base, nbytes, offset] = get_alloc_info(devPtr);
  buffer_deregister(base);
}
} // namespace kvikio
| 0 |
rapidsai_public_repos/kvikio/cpp/include
|
rapidsai_public_repos/kvikio/cpp/include/kvikio/file_handle.hpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <cstddef>
#include <cstdlib>
#include <iostream>
#include <numeric>
#include <optional>
#include <system_error>
#include <utility>
#include <kvikio/buffer.hpp>
#include <kvikio/defaults.hpp>
#include <kvikio/error.hpp>
#include <kvikio/parallel_operation.hpp>
#include <kvikio/posix_io.hpp>
#include <kvikio/shim/cufile.hpp>
#include <kvikio/stream.hpp>
#include <kvikio/utils.hpp>
namespace kvikio {
namespace detail {
/**
 * @brief Parse open file flags given as a string and return oflags
 *
 * Supported strings are "r", "r+", "w", and "w+" (see `fopen(3)`). `O_CLOEXEC` is
 * always appended and `O_DIRECT` optionally.
 *
 * @param flags The flags
 * @param o_direct Append O_DIRECT to the open flags
 * @return oflags
 * @throws std::invalid_argument If `flags` is empty or cannot be parsed.
 */
inline int open_fd_parse_flags(const std::string& flags, bool o_direct)
{
  if (flags.empty()) { throw std::invalid_argument("Unknown file open flag"); }
  // NB: `flags[flags.size()]` is the null terminator, so indexing [1] is safe for "r"/"w".
  const bool updating = flags[1] == '+';
  int file_flags      = 0;
  if (flags[0] == 'r') {
    file_flags = updating ? O_RDWR : O_RDONLY;
  } else if (flags[0] == 'w') {
    file_flags = (updating ? O_RDWR : O_WRONLY) | O_CREAT | O_TRUNC;
  } else if (flags[0] == 'a') {
    throw std::invalid_argument("Open flag 'a' isn't supported");
  } else {
    throw std::invalid_argument("Unknown file open flag");
  }
  file_flags |= O_CLOEXEC;
  if (o_direct) { file_flags |= O_DIRECT; }
  return file_flags;
}
/**
 * @brief Open file using `open(2)`
 *
 * @param file_path Path of the file to open
 * @param flags Open flags given as a string
 * @param o_direct Append O_DIRECT to `flags`
 * @param mode Access modes
 * @return File descriptor
 * @throws std::system_error If the file cannot be opened.
 */
inline int open_fd(const std::string& file_path,
                   const std::string& flags,
                   bool o_direct,
                   mode_t mode)
{
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg)
  int fd = ::open(file_path.c_str(), open_fd_parse_flags(flags, o_direct), mode);
  if (fd == -1) { throw std::system_error(errno, std::generic_category(), "Unable to open file"); }
  return fd;
}
/**
 * @brief Get the flags of the file descriptor (see `open(2)`)
 *
 * @param fd Open file descriptor to query
 * @return Open flags
 * @throws std::system_error If the flags cannot be retrieved.
 */
[[nodiscard]] inline int open_flags(int fd)
{
  int ret = fcntl(fd, F_GETFL);  // NOLINT(cppcoreguidelines-pro-type-vararg)
  if (ret == -1) {
    throw std::system_error(errno, std::generic_category(), "Unable to retrieve open flags");
  }
  return ret;
}
/**
 * @brief Get file size from file descriptor `fstat(3)`
 *
 * @param file_descriptor Open file descriptor
 * @return The number of bytes
 * @throws std::system_error If the file size cannot be queried.
 */
[[nodiscard]] inline std::size_t get_file_size(int file_descriptor)
{
  struct stat st {};
  int ret = fstat(file_descriptor, &st);
  if (ret == -1) {
    throw std::system_error(errno, std::generic_category(), "Unable to query file size");
  }
  return static_cast<std::size_t>(st.st_size);
}
} // namespace detail
/**
* @brief Handle of an open file registered with cufile.
*
* In order to utilize cufile and GDS, a file must be registered with cufile.
*/
class FileHandle {
private:
// We use two file descriptors, one opened with the O_DIRECT flag and one without.
int _fd_direct_on{-1};
int _fd_direct_off{-1};
bool _initialized{false};
bool _compat_mode{false};
mutable std::size_t _nbytes{0}; // The size of the underlying file, zero means unknown.
CUfileHandle_t _handle{};
public:
static constexpr mode_t m644 = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH;
FileHandle() noexcept = default;
  /**
   * @brief Construct a file handle from a file path
   *
   * FileHandle opens the file twice and maintains two file descriptors.
   * One file is opened with the specified `flags` and the other file is
   * opened with the `flags` plus the `O_DIRECT` flag. If the latter fails,
   * the handle silently falls back to compatibility mode.
   *
   * @param file_path File path to the file
   * @param flags Open flags (see also `fopen(3)`):
   *   "r" -> "open for reading (default)"
   *   "w" -> "open for writing, truncating the file first"
   *   "a" -> unsupported (the flag parser throws `std::invalid_argument`)
   *   "+" -> "open for updating (reading and writing)"
   * @param mode Access modes (see `open(2)`).
   * @param compat_mode Enable KvikIO's compatibility mode for this file.
   */
  FileHandle(const std::string& file_path,
             const std::string& flags = "r",
             mode_t mode = m644,
             bool compat_mode = defaults::compat_mode())
    : _fd_direct_off{detail::open_fd(file_path, flags, false, mode)},
      _initialized{true},
      _compat_mode{compat_mode}
  {
    try {
      _fd_direct_on = detail::open_fd(file_path, flags, true, mode);
    } catch (const std::system_error&) {
      _compat_mode = true;  // Fall back to compat mode if we cannot open the file with O_DIRECT
    }
    if (_compat_mode) { return; }
#ifdef KVIKIO_CUFILE_FOUND
    CUfileDescr_t desc{};  // It is important to set to zero!
    desc.type = CU_FILE_HANDLE_TYPE_OPAQUE_FD;
    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
    desc.handle.fd = _fd_direct_on;
    CUFILE_TRY(cuFileAPI::instance().HandleRegister(&_handle, &desc));
#endif
  }
  /**
   * @brief FileHandle support move semantic but isn't copyable
   */
  FileHandle(const FileHandle&) = delete;
  FileHandle& operator=(FileHandle const&) = delete;
  // Move construction: steal all resources and leave `o` in a closed state.
  FileHandle(FileHandle&& o) noexcept
    : _fd_direct_on{std::exchange(o._fd_direct_on, -1)},
      _fd_direct_off{std::exchange(o._fd_direct_off, -1)},
      _initialized{std::exchange(o._initialized, false)},
      _compat_mode{std::exchange(o._compat_mode, false)},
      _nbytes{std::exchange(o._nbytes, 0)},
      _handle{std::exchange(o._handle, CUfileHandle_t{})}
  {
  }
FileHandle& operator=(FileHandle&& o) noexcept
{
_fd_direct_on = std::exchange(o._fd_direct_on, -1);
_fd_direct_off = std::exchange(o._fd_direct_off, -1);
_initialized = std::exchange(o._initialized, false);
_compat_mode = std::exchange(o._compat_mode, false);
_nbytes = std::exchange(o._nbytes, 0);
_handle = std::exchange(o._handle, CUfileHandle_t{});
return *this;
}
  ~FileHandle() noexcept { close(); }
  // A default-constructed or moved-from handle is "closed".
  [[nodiscard]] bool closed() const noexcept { return !_initialized; }
  /**
   * @brief Deregister the file and close the two files
   */
  void close() noexcept
  {
    if (closed()) { return; }
    if (!_compat_mode) {
#ifdef KVIKIO_CUFILE_FOUND
      cuFileAPI::instance().HandleDeregister(_handle);
#endif
    }
    ::close(_fd_direct_off);
    // `_fd_direct_on` may be -1 when the O_DIRECT open failed and we fell back to compat mode.
    if (_fd_direct_on != -1) { ::close(_fd_direct_on); }
    _fd_direct_on = -1;
    _fd_direct_off = -1;
    _initialized = false;
  }
  /**
   * @brief Get the underlying cuFile file handle
   *
   * The file handle must be open and not in compatibility mode i.e.
   * both `.closed()` and `.is_compat_mode_on()` must return false.
   *
   * @return cuFile's file handle
   * @throws kvikio::CUfileException If the handle is closed or in compatibility mode.
   */
  [[nodiscard]] CUfileHandle_t handle()
  {
    if (closed()) { throw CUfileException("File handle is closed"); }
    if (_compat_mode) {
      throw CUfileException("The underlying cuFile handle isn't available in compatibility mode");
    }
    return _handle;
  }
  /**
   * @brief Get one of the file descriptors
   *
   * Notice, FileHandle maintains two file descriptors - one opened with the
   * `O_DIRECT` flag and one without. This function returns one of them but
   * it is unspecified which one.
   *
   * @return File descriptor
   */
  [[nodiscard]] int fd() const noexcept { return _fd_direct_off; }
  /**
   * @brief Get the flags of one of the file descriptors (see open(2))
   *
   * Notice, FileHandle maintains two file descriptors - one opened with the
   * `O_DIRECT` flag and one without. This function returns the flags of one of
   * them but it is unspecified which one.
   *
   * @return Open flags of the file descriptor
   */
  [[nodiscard]] int fd_open_flags() const { return detail::open_flags(_fd_direct_off); }
  /**
   * @brief Get the file size
   *
   * The value is cached; `write()` invalidates it and it is recomputed lazily.
   *
   * @return The number of bytes (0 when the handle is closed)
   */
  [[nodiscard]] inline std::size_t nbytes() const
  {
    if (closed()) { return 0; }
    if (_nbytes == 0) { _nbytes = detail::get_file_size(_fd_direct_off); }
    return _nbytes;
  }
  /**
   * @brief Reads specified bytes from the file into the device memory.
   *
   * This API reads the data from the file at a specified offset into the GPU memory
   * by using GDS functionality. The API works correctly for unaligned
   * offset and data sizes, although the performance is not on-par with aligned read.
   * This is a synchronous call and will block until the IO is complete.
   *
   * @note For the `devPtr_offset`, if data will be read starting exactly from the
   * `devPtr_base` that is registered with `buffer_register`, `devPtr_offset` should
   * be set to 0. To read starting from an offset in the registered buffer range,
   * the relative offset should be specified in the `devPtr_offset`, and the
   * `devPtr_base` must remain set to the base address that was used in the
   * `buffer_register` call.
   *
   * @param devPtr_base Base address of buffer in device memory. For registered buffers,
   * `devPtr_base` must remain set to the base address used in the `buffer_register` call.
   * @param size Size in bytes to read.
   * @param file_offset Offset in the file to read from.
   * @param devPtr_offset Offset relative to the `devPtr_base` pointer to read into.
   * This parameter should be used only with registered buffers.
   * @return Size of bytes that were successfully read.
   */
  std::size_t read(void* devPtr_base,
                   std::size_t size,
                   std::size_t file_offset,
                   std::size_t devPtr_offset)
  {
    if (_compat_mode) {
      return posix_device_read(_fd_direct_off, devPtr_base, size, file_offset, devPtr_offset);
    }
#ifdef KVIKIO_CUFILE_FOUND
    ssize_t ret = cuFileAPI::instance().Read(
      _handle, devPtr_base, size, convert_size2off(file_offset), convert_size2off(devPtr_offset));
    if (ret == -1) {
      throw std::system_error(errno, std::generic_category(), "Unable to read file");
    }
    // Values below -1 are cuFile error codes rather than errno values.
    if (ret < -1) {
      throw CUfileException(std::string{"cuFile error at: "} + __FILE__ + ":" +
                            KVIKIO_STRINGIFY(__LINE__) + ": " + CUFILE_ERRSTR(ret));
    }
    return ret;
#else
    throw CUfileException("KvikIO not compiled with cuFile.h");
#endif
  }
  /**
   * @brief Writes specified bytes from the device memory into the file.
   *
   * This API writes the data from the GPU memory to the file at a specified offset
   * and size bytes by using GDS functionality. The API works correctly for unaligned
   * offset and data sizes, although the performance is not on-par with aligned writes.
   * This is a synchronous call and will block until the IO is complete.
   *
   * @note GDS functionality modifies the standard file system metadata in SysMem.
   * However, GDS functionality does not take any special responsibility for writing
   * that metadata back to permanent storage. The data is not guaranteed to be present
   * after a system crash unless the application uses an explicit `fsync(2)` call. If the
   * file is opened with an `O_SYNC` flag, the metadata will be written to the disk before
   * the call is complete.
   * Refer to the note in read for more information about `devPtr_offset`.
   *
   * @param devPtr_base Base address of buffer in device memory. For registered buffers,
   * `devPtr_base` must remain set to the base address used in the `buffer_register` call.
   * @param size Size in bytes to write.
   * @param file_offset Offset in the file to write from.
   * @param devPtr_offset Offset relative to the `devPtr_base` pointer to write from.
   * This parameter should be used only with registered buffers.
   * @return Size of bytes that were successfully written.
   */
  std::size_t write(const void* devPtr_base,
                    std::size_t size,
                    std::size_t file_offset,
                    std::size_t devPtr_offset)
  {
    _nbytes = 0;  // Invalidate the computed file size
    if (_compat_mode) {
      return posix_device_write(_fd_direct_off, devPtr_base, size, file_offset, devPtr_offset);
    }
#ifdef KVIKIO_CUFILE_FOUND
    ssize_t ret = cuFileAPI::instance().Write(
      _handle, devPtr_base, size, convert_size2off(file_offset), convert_size2off(devPtr_offset));
    if (ret == -1) {
      throw std::system_error(errno, std::generic_category(), "Unable to write file");
    }
    // Values below -1 are cuFile error codes rather than errno values.
    if (ret < -1) {
      throw CUfileException(std::string{"cuFile error at: "} + __FILE__ + ":" +
                            KVIKIO_STRINGIFY(__LINE__) + ": " + CUFILE_ERRSTR(ret));
    }
    return ret;
#else
    throw CUfileException("KvikIO not compiled with cuFile.h");
#endif
  }
/**
* @brief Reads specified bytes from the file into the device or host memory in parallel.
*
* This API is a parallel async version of `.read()` that partition the operation
* into tasks of size `task_size` for execution in the default thread pool.
*
* In order to improve performance of small buffers, when `size < gds_threshold` a shortcut
* that circumvent the threadpool and use the POSIX backend directly is used.
*
* @note For cuFile reads, the base address of the allocation `buf` is part of is used.
* This means that when registering buffers, use the base address of the allocation.
* This is what `memory_register` and `memory_deregister` do automatically.
*
* @param buf Address to device or host memory.
* @param size Size in bytes to read.
* @param file_offset Offset in the file to read from.
* @param task_size Size of each task in bytes.
* @param gds_threshold Minimum buffer size to use GDS and the thread pool.
* @return Future that on completion returns the size of bytes that were successfully read.
*/
std::future<std::size_t> pread(void* buf,
std::size_t size,
std::size_t file_offset = 0,
std::size_t task_size = defaults::task_size(),
std::size_t gds_threshold = defaults::gds_threshold())
{
if (is_host_memory(buf)) {
auto op = [this](void* hostPtr_base,
std::size_t size,
std::size_t file_offset,
std::size_t hostPtr_offset) -> std::size_t {
char* buf = static_cast<char*>(hostPtr_base) + hostPtr_offset;
return posix_host_read(_fd_direct_off, buf, size, file_offset, false);
};
return parallel_io(op, buf, size, file_offset, task_size, 0);
}
CUcontext ctx = get_context_from_pointer(buf);
// Shortcut that circumvent the threadpool and use the POSIX backend directly.
if (size < gds_threshold) {
auto task = [this, ctx, buf, size, file_offset]() -> std::size_t {
PushAndPopContext c(ctx);
return posix_device_read(_fd_direct_off, buf, size, file_offset, 0);
};
return std::async(std::launch::deferred, task);
}
// Regular case that use the threadpool and run the tasks in parallel
auto task = [this, ctx](void* devPtr_base,
std::size_t size,
std::size_t file_offset,
std::size_t devPtr_offset) -> std::size_t {
PushAndPopContext c(ctx);
return read(devPtr_base, size, file_offset, devPtr_offset);
};
auto [devPtr_base, base_size, devPtr_offset] = get_alloc_info(buf, &ctx);
return parallel_io(task, devPtr_base, size, file_offset, task_size, devPtr_offset);
}
  /**
   * @brief Writes specified bytes from device or host memory into the file in parallel.
   *
   * This API is a parallel async version of `.write()` that partition the operation
   * into tasks of size `task_size` for execution in the default thread pool.
   *
   * In order to improve performance of small buffers, when `size < gds_threshold` a shortcut
   * that circumvent the threadpool and use the POSIX backend directly is used.
   *
   * @note For cuFile writes, the base address of the allocation `buf` is part of is used.
   * This means that when registering buffers, use the base address of the allocation.
   * This is what `memory_register` and `memory_deregister` do automatically.
   *
   * @param buf Address to device or host memory.
   * @param size Size in bytes to write.
   * @param file_offset Offset in the file to write from.
   * @param task_size Size of each task in bytes.
   * @param gds_threshold Minimum buffer size to use GDS and the thread pool.
   * @return Future that on completion returns the size of bytes that were successfully written.
   */
  std::future<std::size_t> pwrite(const void* buf,
                                  std::size_t size,
                                  std::size_t file_offset   = 0,
                                  std::size_t task_size     = defaults::task_size(),
                                  std::size_t gds_threshold = defaults::gds_threshold())
  {
    // Host memory: no CUDA context required; chunk the POSIX writes in the pool.
    if (is_host_memory(buf)) {
      auto op = [this](const void* hostPtr_base,
                       std::size_t size,
                       std::size_t file_offset,
                       std::size_t hostPtr_offset) -> std::size_t {
        const char* buf = static_cast<const char*>(hostPtr_base) + hostPtr_offset;
        return posix_host_write(_fd_direct_off, buf, size, file_offset, false);
      };
      return parallel_io(op, buf, size, file_offset, task_size, 0);
    }
    CUcontext ctx = get_context_from_pointer(buf);
    // Shortcut that circumvent the threadpool and use the POSIX backend directly.
    if (size < gds_threshold) {
      auto task = [this, ctx, buf, size, file_offset]() -> std::size_t {
        PushAndPopContext c(ctx);
        return posix_device_write(_fd_direct_off, buf, size, file_offset, 0);
      };
      // `deferred`: the write runs lazily on the thread that waits on the future.
      return std::async(std::launch::deferred, task);
    }
    // Regular case that use the threadpool and run the tasks in parallel
    auto op = [this, ctx](const void* devPtr_base,
                          std::size_t size,
                          std::size_t file_offset,
                          std::size_t devPtr_offset) -> std::size_t {
      PushAndPopContext c(ctx);
      return write(devPtr_base, size, file_offset, devPtr_offset);
    };
    auto [devPtr_base, base_size, devPtr_offset] = get_alloc_info(buf, &ctx);
    return parallel_io(op, devPtr_base, size, file_offset, task_size, devPtr_offset);
  }
  /**
   * @brief Reads specified bytes from the file into the device memory asynchronously.
   *
   * This is an asynchronous version of `.read()`, which will be executed in sequence
   * for the specified stream.
   *
   * When running CUDA v12.1 or older, this function falls back to use `.read()` after
   * `stream` has been synchronized.
   *
   * The arguments have the same meaning as in `.read()` but some of them are deferred.
   * That is, the values pointed to by `size_p`, `file_offset_p` and `devPtr_offset_p`
   * will not be evaluated until execution time. Notice, this behavior can be changed
   * using cuFile's cuFileStreamRegister API.
   *
   * @param devPtr_base Base address of buffer in device memory. For registered buffers,
   * `devPtr_base` must remain set to the base address used in the `buffer_register` call.
   * @param size_p Pointer to size in bytes to read. If the exact size is not known at the time of
   * I/O submission, then you must set it to the maximum possible I/O size for that stream I/O.
   * Later the actual size can be set prior to the stream I/O execution.
   * @param file_offset_p Pointer to offset in the file from which to read. Unless otherwise set
   * using cuFileStreamRegister API, this value will not be evaluated until execution time.
   * @param devPtr_offset_p Pointer to the offset relative to the bufPtr_base at which the read
   * data is written. Unless otherwise set using cuFileStreamRegister API, this value will not be
   * evaluated until execution time.
   * @param bytes_read_p Pointer to the bytes read from file. This pointer should be a non-NULL
   * value and *bytes_read_p set to 0. The bytes_read_p memory should be allocated with
   * cuMemHostAlloc/malloc/mmap or registered with cuMemHostRegister. After successful execution of
   * the operation in the stream, the value *bytes_read_p will contain either:
   * - The number of bytes successfully read.
   * - -1 on IO errors.
   * - All other errors return a negative integer value of the CUfileOpError enum value.
   * @param stream CUDA stream in which to enqueue the operation. If NULL, make this operation
   * synchronous.
   */
  void read_async(void* devPtr_base,
                  std::size_t* size_p,
                  off_t* file_offset_p,
                  off_t* devPtr_offset_p,
                  ssize_t* bytes_read_p,
                  CUstream stream)
  {
#ifdef KVIKIO_CUFILE_STREAM_API_FOUND
    // Use cuFile's native stream API when available at runtime and compat mode is off.
    if (kvikio::is_batch_and_stream_available() && !_compat_mode) {
      CUFILE_TRY(cuFileAPI::instance().ReadAsync(
        _handle, devPtr_base, size_p, file_offset_p, devPtr_offset_p, bytes_read_p, stream));
      return;
    }
#endif
    // Fallback: synchronize the stream and perform a blocking `.read()`.
    CUDA_DRIVER_TRY(cudaAPI::instance().StreamSynchronize(stream));
    *bytes_read_p =
      static_cast<ssize_t>(read(devPtr_base, *size_p, *file_offset_p, *devPtr_offset_p));
  }
/**
* @brief Reads specified bytes from the file into the device memory asynchronously.
*
* This is an asynchronous version of `.read()`, which will be executed in sequence
* for the specified stream.
*
* When running CUDA v12.1 or older, this function falls back to use `.read()` after
* `stream` has been synchronized.
*
* The arguments have the same meaning as in `.read()` but returns a `StreamFuture` object
* that the caller must keep alive until all data has been read from disk. One way to do this,
* is by calling `StreamFuture.check_bytes_done()`, which will synchronize the associated stream
* and return the number of bytes read.
*
* @param devPtr_base Base address of buffer in device memory. For registered buffers,
* `devPtr_base` must remain set to the base address used in the `buffer_register` call.
* @param size Size in bytes to read.
* @param file_offset Offset in the file to read from.
* @param devPtr_offset Offset relative to the `devPtr_base` pointer to read into.
* This parameter should be used only with registered buffers.
* @param stream CUDA stream in which to enqueue the operation. If NULL, make this operation
* synchronous.
* @return A future object that must be kept alive until all data has been read to disk e.g.
* by synchronizing `stream`.
*/
[[nodiscard]] StreamFuture read_async(void* devPtr_base,
std::size_t size,
off_t file_offset = 0,
off_t devPtr_offset = 0,
CUstream stream = nullptr)
{
StreamFuture ret(devPtr_base, size, file_offset, devPtr_offset, stream);
auto [devPtr_base_, size_p, file_offset_p, devPtr_offset_p, bytes_read_p, stream_] =
ret.get_args();
read_async(devPtr_base_, size_p, file_offset_p, devPtr_offset_p, bytes_read_p, stream_);
return ret;
}
  /**
   * @brief Writes specified bytes from the device memory into the file asynchronously.
   *
   * This is an asynchronous version of `.write()`, which will be executed in sequence
   * for the specified stream.
   *
   * When running CUDA v12.1 or older, this function falls back to use `.write()` after
   * `stream` has been synchronized.
   *
   * The arguments have the same meaning as in `.write()` but some of them are deferred.
   * That is, the values pointed to by `size_p`, `file_offset_p` and `devPtr_offset_p`
   * will not be evaluated until execution time. Notice, this behavior can be changed
   * using cuFile's cuFileStreamRegister API.
   *
   * @param devPtr_base Base address of buffer in device memory. For registered buffers,
   * `devPtr_base` must remain set to the base address used in the `buffer_register` call.
   * @param size_p Pointer to size in bytes to write. If the exact size is not known at the time
   * of I/O submission, then you must set it to the maximum possible I/O size for that stream I/O.
   * Later the actual size can be set prior to the stream I/O execution.
   * @param file_offset_p Pointer to offset in the file to which to write. Unless otherwise set
   * using cuFileStreamRegister API, this value will not be evaluated until execution time.
   * @param devPtr_offset_p Pointer to the offset relative to the bufPtr_base from which to read.
   * Unless otherwise set using cuFileStreamRegister API, this value will not be evaluated until
   * execution time.
   * @param bytes_written_p Pointer to the bytes written to the file. This pointer should be a
   * non-NULL value and *bytes_written_p set to 0. The bytes_written_p memory should be allocated
   * with cuMemHostAlloc/malloc/mmap or registered with cuMemHostRegister.
   * After successful execution of the operation in the stream, the value *bytes_written_p will
   * contain either:
   * - The number of bytes successfully written.
   * - -1 on IO errors.
   * - All other errors return a negative integer value of the CUfileOpError enum value.
   * @param stream CUDA stream in which to enqueue the operation. If NULL, make this operation
   * synchronous.
   */
  void write_async(void* devPtr_base,
                   std::size_t* size_p,
                   off_t* file_offset_p,
                   off_t* devPtr_offset_p,
                   ssize_t* bytes_written_p,
                   CUstream stream)
  {
#ifdef KVIKIO_CUFILE_STREAM_API_FOUND
    // Use cuFile's native stream API when available at runtime and compat mode is off.
    if (kvikio::is_batch_and_stream_available() && !_compat_mode) {
      CUFILE_TRY(cuFileAPI::instance().WriteAsync(
        _handle, devPtr_base, size_p, file_offset_p, devPtr_offset_p, bytes_written_p, stream));
      return;
    }
#endif
    // Fallback: synchronize the stream and perform a blocking `.write()`.
    CUDA_DRIVER_TRY(cudaAPI::instance().StreamSynchronize(stream));
    *bytes_written_p =
      static_cast<ssize_t>(write(devPtr_base, *size_p, *file_offset_p, *devPtr_offset_p));
  }
  /**
   * @brief Writes specified bytes from the device memory into the file asynchronously.
   *
   * This is an asynchronous version of `.write()`, which will be executed in sequence
   * for the specified stream.
   *
   * When running CUDA v12.1 or older, this function falls back to use `.write()` after
   * `stream` has been synchronized.
   *
   * The arguments have the same meaning as in `.write()` but returns a `StreamFuture` object
   * that the caller must keep alive until all data has been written to disk. One way to do this,
   * is by calling `StreamFuture.check_bytes_done()`, which will synchronize the associated stream
   * and return the number of bytes written.
   *
   * @param devPtr_base Base address of buffer in device memory. For registered buffers,
   * `devPtr_base` must remain set to the base address used in the `buffer_register` call.
   * @param size Size in bytes to write.
   * @param file_offset Offset in the file to write from.
   * @param devPtr_offset Offset relative to the `devPtr_base` pointer to write from.
   * This parameter should be used only with registered buffers.
   * @param stream CUDA stream in which to enqueue the operation. If NULL, make this operation
   * synchronous.
   * @return A future object that must be kept alive until all data has been written to disk e.g.
   * by synchronizing `stream`.
   */
  [[nodiscard]] StreamFuture write_async(void* devPtr_base,
                                         std::size_t size,
                                         off_t file_offset   = 0,
                                         off_t devPtr_offset = 0,
                                         CUstream stream     = nullptr)
  {
    // The StreamFuture owns the deferred argument storage that the pointer-based
    // overload reads at execution time.
    StreamFuture ret(devPtr_base, size, file_offset, devPtr_offset, stream);
    auto [devPtr_base_, size_p, file_offset_p, devPtr_offset_p, bytes_written_p, stream_] =
      ret.get_args();
    write_async(devPtr_base_, size_p, file_offset_p, devPtr_offset_p, bytes_written_p, stream_);
    return ret;
  }
  /**
   * @brief Returns `true` if the compatibility mode has been enabled for this file.
   *
   * Compatibility mode can be explicitly enabled in object creation. The mode is also enabled
   * automatically, if file cannot be opened with the `O_DIRECT` flag. In this mode, all I/O
   * goes through the POSIX backend instead of cuFile.
   *
   * @return compatibility mode state for the object
   */
  [[nodiscard]] bool is_compat_mode_on() const noexcept { return _compat_mode; }
};
} // namespace kvikio
| 0 |
rapidsai_public_repos/kvikio/cpp/include/kvikio
|
rapidsai_public_repos/kvikio/cpp/include/kvikio/shim/cuda.hpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda.h>
#include <kvikio/shim/utils.hpp>
namespace kvikio {
/**
* @brief Shim layer of the cuda C-API
*
* This is a singleton class that use `dlopen` on construction to load the C-API of cuda.
*
* For example, `cudaAPI::instance().MemHostAlloc()` corresponds to calling `cuMemHostAlloc()`
*/
class cudaAPI {
public:
decltype(cuInit)* Init{nullptr};
decltype(cuMemHostAlloc)* MemHostAlloc{nullptr};
decltype(cuMemFreeHost)* MemFreeHost{nullptr};
decltype(cuMemcpyHtoD)* MemcpyHtoD{nullptr};
decltype(cuMemcpyDtoH)* MemcpyDtoH{nullptr};
decltype(cuPointerGetAttribute)* PointerGetAttribute{nullptr};
decltype(cuPointerGetAttributes)* PointerGetAttributes{nullptr};
decltype(cuCtxPushCurrent)* CtxPushCurrent{nullptr};
decltype(cuCtxPopCurrent)* CtxPopCurrent{nullptr};
decltype(cuCtxGetCurrent)* CtxGetCurrent{nullptr};
decltype(cuMemGetAddressRange)* MemGetAddressRange{nullptr};
decltype(cuGetErrorName)* GetErrorName{nullptr};
decltype(cuGetErrorString)* GetErrorString{nullptr};
decltype(cuDeviceGet)* DeviceGet{nullptr};
decltype(cuDevicePrimaryCtxRetain)* DevicePrimaryCtxRetain{nullptr};
decltype(cuDevicePrimaryCtxRelease)* DevicePrimaryCtxRelease{nullptr};
decltype(cuStreamSynchronize)* StreamSynchronize{nullptr};
private:
cudaAPI()
{
void* lib = load_library("libcuda.so.1");
// Notice, the API version loaded must match the version used downstream. That is,
// if a project uses the `_v2` CUDA Driver API or the newest Runtime API, the symbols
// loaded should also be the `_v2` symbols. Thus, we use KVIKIO_STRINGIFY() to get
// the name of the symbol through cude.h.
get_symbol(MemHostAlloc, lib, KVIKIO_STRINGIFY(cuMemHostAlloc));
get_symbol(MemFreeHost, lib, KVIKIO_STRINGIFY(cuMemFreeHost));
get_symbol(MemcpyHtoD, lib, KVIKIO_STRINGIFY(cuMemcpyHtoD));
get_symbol(MemcpyDtoH, lib, KVIKIO_STRINGIFY(cuMemcpyDtoH));
get_symbol(PointerGetAttribute, lib, KVIKIO_STRINGIFY(cuPointerGetAttribute));
get_symbol(PointerGetAttributes, lib, KVIKIO_STRINGIFY(cuPointerGetAttributes));
get_symbol(CtxPushCurrent, lib, KVIKIO_STRINGIFY(cuCtxPushCurrent));
get_symbol(CtxPopCurrent, lib, KVIKIO_STRINGIFY(cuCtxPopCurrent));
get_symbol(CtxGetCurrent, lib, KVIKIO_STRINGIFY(cuCtxGetCurrent));
get_symbol(MemGetAddressRange, lib, KVIKIO_STRINGIFY(cuMemGetAddressRange));
get_symbol(GetErrorName, lib, KVIKIO_STRINGIFY(cuGetErrorName));
get_symbol(GetErrorString, lib, KVIKIO_STRINGIFY(cuGetErrorString));
get_symbol(DeviceGet, lib, KVIKIO_STRINGIFY(cuDeviceGet));
get_symbol(DevicePrimaryCtxRetain, lib, KVIKIO_STRINGIFY(cuDevicePrimaryCtxRetain));
get_symbol(DevicePrimaryCtxRelease, lib, KVIKIO_STRINGIFY(cuDevicePrimaryCtxRelease));
get_symbol(StreamSynchronize, lib, KVIKIO_STRINGIFY(cuStreamSynchronize));
}
public:
cudaAPI(cudaAPI const&) = delete;
void operator=(cudaAPI const&) = delete;
static cudaAPI& instance()
{
static cudaAPI _instance;
return _instance;
}
};
} // namespace kvikio
| 0 |
rapidsai_public_repos/kvikio/cpp/include/kvikio
|
rapidsai_public_repos/kvikio/cpp/include/kvikio/shim/utils.hpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <dlfcn.h>
#include <sys/utsname.h>

#include <filesystem>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>
namespace kvikio {
#define KVIKIO_STRINGIFY_DETAIL(x) #x
#define KVIKIO_STRINGIFY(x) KVIKIO_STRINGIFY_DETAIL(x)
/**
 * @brief Open a shared library via `dlopen`.
 *
 * @param name Filename of the library to open.
 * @param mode Flags forwarded to `dlopen`.
 * @return The library handle.
 * @throws std::runtime_error if the library cannot be opened.
 */
inline void* load_library(const char* name, int mode = RTLD_LAZY | RTLD_LOCAL | RTLD_NODELETE)
{
  ::dlerror();  // Reset any stale error state so a later dlerror() reflects this call.
  if (void* handle = ::dlopen(name, mode)) { return handle; }
  throw std::runtime_error(::dlerror());
}
/**
 * @brief Load shared library
 *
 * Tries each candidate name in order and returns the handle of the first one
 * that loads successfully.
 *
 * @param names Vector of names to try when loading shared library.
 * @param mode Flags forwarded to `dlopen`.
 * @return The library handle.
 * @throws std::runtime_error if none of the candidates could be opened.
 */
inline void* load_library(const std::vector<const char*>& names,
                          int mode = RTLD_LAZY | RTLD_LOCAL | RTLD_NODELETE)
{
  std::stringstream ss;  // Accumulates the tried names for the error message.
  for (const char* name : names) {
    ss << name << " ";
    try {
      return load_library(name, mode);
    } catch (const std::runtime_error&) {
      // This candidate failed; fall through to the next one.
    }
  }
  throw std::runtime_error("cannot open shared object file, tried: " + ss.str());
}
/**
 * @brief Resolve a symbol from an open library via `dlsym`.
 *
 * @tparam T The type of the function pointer.
 * @param handle The function pointer (output).
 * @param lib The library handle returned by `dlopen`.
 * @param name Name of the symbol/function to load.
 * @throws std::runtime_error if the symbol cannot be resolved.
 */
template <typename T>
void get_symbol(T& handle, void* lib, const char* name)
{
  ::dlerror();  // Reset stale error state so the post-call check is meaningful.
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
  handle = reinterpret_cast<T>(::dlsym(lib, name));
  if (const char* err = ::dlerror(); err != nullptr) { throw std::runtime_error(err); }
}
/**
 * @brief Try to detect if running in Windows Subsystem for Linux (WSL)
 *
 * Inspects the kernel release string reported by `uname(2)`.
 * When unable to determine environment, `false` is returned.
 *
 * @return The boolean answer
 */
[[nodiscard]] inline bool is_running_in_wsl()
{
  struct utsname sysinfo {};
  if (::uname(&sysinfo) != 0) { return false; }
  // 'Microsoft' for WSL1 and 'microsoft' for WSL2 — match the case-insensitive tail.
  const std::string release{static_cast<char*>(sysinfo.release)};
  return release.find("icrosoft") != std::string::npos;
}
/**
 * @brief Check if `/run/udev` is readable
 *
 * cuFile fails with `internal error` when `/run/udev` isn't readable.
 * This typically happens when running inside a docker image not launched
 * with `--volume /run/udev:/run/udev:ro`.
 *
 * @return The boolean answer
 */
[[nodiscard]] inline bool run_udev_readable()
{
  try {
    return std::filesystem::is_directory("/run/udev");
  } catch (const std::filesystem::filesystem_error&) {
    // E.g. permission denied while probing the path — treat as not readable.
    return false;
  }
}
} // namespace kvikio
| 0 |
rapidsai_public_repos/kvikio/cpp/include/kvikio
|
rapidsai_public_repos/kvikio/cpp/include/kvikio/shim/cufile.hpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <stdexcept>
#include <iostream>
#include <kvikio/shim/cufile_h_wrapper.hpp>
#include <kvikio/shim/utils.hpp>
namespace kvikio {
#ifdef KVIKIO_CUFILE_FOUND
/**
 * @brief Shim layer of the cuFile C-API
 *
 * This is a singleton class that use `dlopen` on construction to load the C-API of cuFile.
 *
 * For example, `cuFileAPI::instance().FileRead()` corresponds to calling `cuFileRead()`
 */
class cuFileAPI {
 public:
  decltype(cuFileHandleRegister)* HandleRegister{nullptr};
  decltype(cuFileHandleDeregister)* HandleDeregister{nullptr};
  decltype(cuFileRead)* Read{nullptr};
  decltype(cuFileWrite)* Write{nullptr};
  decltype(cuFileBufRegister)* BufRegister{nullptr};
  decltype(cuFileBufDeregister)* BufDeregister{nullptr};
  decltype(cuFileDriverOpen)* DriverOpen{nullptr};
  decltype(cuFileDriverClose)* DriverClose{nullptr};
  decltype(cuFileDriverGetProperties)* DriverGetProperties{nullptr};
  decltype(cuFileDriverSetPollMode)* DriverSetPollMode{nullptr};
  decltype(cuFileDriverSetMaxCacheSize)* DriverSetMaxCacheSize{nullptr};
  decltype(cuFileDriverSetMaxPinnedMemSize)* DriverSetMaxPinnedMemSize{nullptr};
#ifdef KVIKIO_CUFILE_BATCH_API_FOUND
  decltype(cuFileBatchIOSetUp)* BatchIOSetUp{nullptr};
  decltype(cuFileBatchIOSubmit)* BatchIOSubmit{nullptr};
  decltype(cuFileBatchIOGetStatus)* BatchIOGetStatus{nullptr};
  decltype(cuFileBatchIOCancel)* BatchIOCancel{nullptr};
  decltype(cuFileBatchIODestroy)* BatchIODestroy{nullptr};
#endif
#ifdef KVIKIO_CUFILE_STREAM_API_FOUND
  decltype(cuFileReadAsync)* ReadAsync{nullptr};
  decltype(cuFileWriteAsync)* WriteAsync{nullptr};
  decltype(cuFileStreamRegister)* StreamRegister{nullptr};
  decltype(cuFileStreamDeregister)* StreamDeregister{nullptr};
#endif
  // True when the runtime libcufile exports the stream API (cuFileReadAsync etc.).
  bool stream_available = false;

 private:
  cuFileAPI()
  {
    // CUDA versions before CUDA 11.7.1 did not ship libcufile.so.0, so this is
    // a workaround that adds support for all prior versions of libcufile.
    void* lib = load_library({"libcufile.so.0",
                              "libcufile.so.1.3.0" /* 11.7.0 */,
                              "libcufile.so.1.2.1" /* 11.6.2, 11.6.1 */,
                              "libcufile.so.1.2.0" /* 11.6.0 */,
                              "libcufile.so.1.1.1" /* 11.5.1 */,
                              "libcufile.so.1.1.0" /* 11.5.0 */,
                              "libcufile.so.1.0.2" /* 11.4.4, 11.4.3, 11.4.2 */,
                              "libcufile.so.1.0.1" /* 11.4.1 */,
                              "libcufile.so.1.0.0" /* 11.4.0 */});
    get_symbol(HandleRegister, lib, KVIKIO_STRINGIFY(cuFileHandleRegister));
    get_symbol(HandleDeregister, lib, KVIKIO_STRINGIFY(cuFileHandleDeregister));
    get_symbol(Read, lib, KVIKIO_STRINGIFY(cuFileRead));
    get_symbol(Write, lib, KVIKIO_STRINGIFY(cuFileWrite));
    get_symbol(BufRegister, lib, KVIKIO_STRINGIFY(cuFileBufRegister));
    get_symbol(BufDeregister, lib, KVIKIO_STRINGIFY(cuFileBufDeregister));
    get_symbol(DriverOpen, lib, KVIKIO_STRINGIFY(cuFileDriverOpen));
    get_symbol(DriverClose, lib, KVIKIO_STRINGIFY(cuFileDriverClose));
    get_symbol(DriverGetProperties, lib, KVIKIO_STRINGIFY(cuFileDriverGetProperties));
    get_symbol(DriverSetPollMode, lib, KVIKIO_STRINGIFY(cuFileDriverSetPollMode));
    get_symbol(DriverSetMaxCacheSize, lib, KVIKIO_STRINGIFY(cuFileDriverSetMaxCacheSize));
    get_symbol(DriverSetMaxPinnedMemSize, lib, KVIKIO_STRINGIFY(cuFileDriverSetMaxPinnedMemSize));
#ifdef KVIKIO_CUFILE_BATCH_API_FOUND
    get_symbol(BatchIOSetUp, lib, KVIKIO_STRINGIFY(cuFileBatchIOSetUp));
    get_symbol(BatchIOSubmit, lib, KVIKIO_STRINGIFY(cuFileBatchIOSubmit));
    get_symbol(BatchIOGetStatus, lib, KVIKIO_STRINGIFY(cuFileBatchIOGetStatus));
    get_symbol(BatchIOCancel, lib, KVIKIO_STRINGIFY(cuFileBatchIOCancel));
    get_symbol(BatchIODestroy, lib, KVIKIO_STRINGIFY(cuFileBatchIODestroy));
#endif
#ifdef KVIKIO_CUFILE_STREAM_API_FOUND
    get_symbol(ReadAsync, lib, KVIKIO_STRINGIFY(cuFileReadAsync));
    get_symbol(WriteAsync, lib, KVIKIO_STRINGIFY(cuFileWriteAsync));
    get_symbol(StreamRegister, lib, KVIKIO_STRINGIFY(cuFileStreamRegister));
    get_symbol(StreamDeregister, lib, KVIKIO_STRINGIFY(cuFileStreamDeregister));
    // NOTE(review): the get_symbol(ReadAsync, ...) above already throws when the
    // symbol is missing, so this probe appears to always succeed when reached —
    // confirm whether the try/catch is intentional (belt-and-braces) or stale.
    try {
      void* s{};
      get_symbol(s, lib, "cuFileReadAsync");
      stream_available = true;
    } catch (const std::runtime_error&) {
    }
#endif
    // cuFile is supposed to open and close the driver automatically but because of a bug in
    // CUDA 11.8, it sometimes segfault. See <https://github.com/rapidsai/kvikio/issues/159>.
    CUfileError_t const error = DriverOpen();
    if (error.err != CU_FILE_SUCCESS) {
      throw std::runtime_error(std::string{"cuFile error at: "} + __FILE__ + ":" +
                               KVIKIO_STRINGIFY(__LINE__) + ": " +
                               cufileop_status_error(error.err));
    }
  }
  ~cuFileAPI()
  {
    // Destructors must not throw: report a close failure on stderr instead.
    CUfileError_t const error = DriverClose();
    if (error.err != CU_FILE_SUCCESS) {
      std::cerr << "Unable to close GDS file driver: " << cufileop_status_error(error.err)
                << std::endl;
    }
  }

 public:
  cuFileAPI(cuFileAPI const&) = delete;
  void operator=(cuFileAPI const&) = delete;

  /**
   * @brief Get the singleton instance, loading libcufile on first use.
   *
   * @throws std::runtime_error if the library, a symbol, or the driver open fails.
   */
  static cuFileAPI& instance()
  {
    static cuFileAPI _instance;
    return _instance;
  }
};
#endif
/**
 * @brief Check if the cuFile library is available
 *
 * Notice, this doesn't check if the runtime environment supports cuFile.
 *
 * @return The boolean answer
 */
#ifdef KVIKIO_CUFILE_FOUND
inline bool is_cufile_library_available()
{
  // Constructing the singleton attempts to dlopen libcufile and open the driver;
  // any failure surfaces as std::runtime_error.
  try {
    cuFileAPI::instance();
  } catch (const std::runtime_error&) {
    return false;
  }
  return true;
}
#else
// Compiled without cufile.h: the library can never be available.
constexpr bool is_cufile_library_available() { return false; }
#endif
/**
 * @brief Check if the cuFile is available and expected to work
 *
 * Besides checking if the cuFile library is available, this also checks the
 * runtime environment.
 *
 * @return The boolean answer
 */
inline bool is_cufile_available()
{
  // All three must hold: the library loads, /run/udev is readable
  // (cuFile fails with an internal error otherwise), and we are not in WSL.
  if (!is_cufile_library_available()) { return false; }
  if (!run_udev_readable()) { return false; }
  return !is_running_in_wsl();
}
/**
 * @brief Check if cuFile's batch and stream API is available
 *
 * Technically, the batch API is available in CUDA 12.1 but since there is no good
 * way to check CUDA version using the driver API, we check for the existing of the
 * `cuFileReadAsync` symbol, which is defined in CUDA 12.2+.
 *
 * @return The boolean answer
 */
// Fix: the condition previously tested KVIKIO_CUFILE_STREAM_API_FOUND twice;
// per the function's contract, both the batch AND the stream API must be present.
#if defined(KVIKIO_CUFILE_BATCH_API_FOUND) && defined(KVIKIO_CUFILE_STREAM_API_FOUND)
inline bool is_batch_and_stream_available()
{
  try {
    return is_cufile_available() && cuFileAPI::instance().stream_available;
  } catch (const std::runtime_error&) {
    return false;
  }
}
#else
// Compiled without the batch and/or stream headers: never available.
constexpr bool is_batch_and_stream_available() { return false; }
#endif
} // namespace kvikio
| 0 |
rapidsai_public_repos/kvikio/cpp/include/kvikio
|
rapidsai_public_repos/kvikio/cpp/include/kvikio/shim/cufile_h_wrapper.hpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
/**
* In order to support compilation when `cufile.h` isn't available, we
* wrap all use of cufile in a `#ifdef KVIKIO_CUFILE_FOUND` guard.
*
* The motivation here is to make KvikIO work in all circumstances so
* that libraries doesn't have to implement there own fallback solutions.
*/
#ifdef KVIKIO_CUFILE_FOUND
#include <cufile.h>
#else
// Minimal stand-ins so that KvikIO headers compile when cufile.h is absent.
using CUfileDriverControlFlags_t = enum CUfileDriverControlFlags {
  CU_FILE_USE_POLL_MODE     = 0, /*!< use POLL mode. properties.use_poll_mode*/
  CU_FILE_ALLOW_COMPAT_MODE = 1  /*!< allow COMPATIBILITY mode. properties.allow_compat_mode*/
};
using CUfileHandle_t = void*;
#endif

// If the Batch API isn't defined, we define some of the data types here.
// Notice, this doesn't need to be ABI compatible with the cufile definitions.
#ifndef KVIKIO_CUFILE_BATCH_API_FOUND
typedef enum CUfileOpcode { CUFILE_READ = 0, CUFILE_WRITE } CUfileOpcode_t;

typedef enum CUFILEStatus_enum {
  CUFILE_WAITING  = 0x000001,  /* required value prior to submission */
  CUFILE_PENDING  = 0x000002,  /* once enqueued */
  CUFILE_INVALID  = 0x000004,  /* request was ill-formed or could not be enqueued */
  CUFILE_CANCELED = 0x000008,  /* request successfully canceled */
  CUFILE_COMPLETE = 0x0000010, /* request successfully completed */
  CUFILE_TIMEOUT  = 0x0000020, /* request timed out */
  CUFILE_FAILED   = 0x0000040  /* unable to complete */
} CUfileStatus_t;

typedef struct CUfileIOEvents {
  void* cookie;          // Caller-supplied tag identifying the request.
  CUfileStatus_t status; /* status of the operation */
  size_t ret;            /* -ve error or amount of I/O done. */
} CUfileIOEvents_t;
#endif
| 0 |
rapidsai_public_repos/kvikio/cpp
|
rapidsai_public_repos/kvikio/cpp/scripts/run-cmake-format.sh
|
#!/bin/bash
# Copyright (c) 2021-2023, NVIDIA CORPORATION.

# This script is a wrapper for cmakelang that may be used with pre-commit. The
# wrapping is necessary because RAPIDS libraries split configuration for
# cmakelang linters between a local config file and a second config file that's
# shared across all of RAPIDS via rapids-cmake. In order to keep it up to date
# this file is only maintained in one place (the rapids-cmake repo) and
# pulled down during builds. We need a way to invoke CMake linting commands
# without causing pre-commit failures (which could block local commits or CI),
# while also being sufficiently flexible to allow users to maintain the config
# file independently of a build directory.
#
# This script provides the minimal functionality to enable those use cases. It
# searches in a number of predefined locations for the rapids-cmake config file
# and exits gracefully if the file is not found. If a user wishes to specify a
# config file at a nonstandard location, they may do so by setting the
# environment variable RAPIDS_CMAKE_FORMAT_FILE.
#
# This script can be invoked directly anywhere within the project repository.
# Alternatively, it may be invoked as a pre-commit hook via
# `pre-commit run (cmake-format)|(cmake-lint)`.
#
# Usage:
# bash run-cmake-format.sh {cmake-format,cmake-lint} infile [infile ...]

status=0
if [ -z "${KVIKIO_ROOT:+PLACEHOLDER}" ]; then
  # Capture stderr too so the "not a git repository" message can be detected below.
  KVIKIO_BUILD_DIR=$(git rev-parse --show-toplevel 2>&1)/cpp/build
  status=$?
else
  KVIKIO_BUILD_DIR=${KVIKIO_ROOT}
fi

if ! [ ${status} -eq 0 ]; then
  if [[ ${KVIKIO_BUILD_DIR} == *"not a git repository"* ]]; then
    echo "This script must be run inside the kvikio repository, or the KVIKIO_ROOT environment variable must be set."
  else
    echo "Script failed with unknown error attempting to determine project root:"
    echo "${KVIKIO_BUILD_DIR}"
  fi
  exit 1
fi

# Candidate locations of the rapids-cmake-provided cmake-format config.
DEFAULT_FORMAT_FILE_LOCATIONS=(
  "${KVIKIO_BUILD_DIR:-${HOME}}/_deps/rapids-cmake-src/cmake-format-rapids-cmake.json"
)

if [ -z "${RAPIDS_CMAKE_FORMAT_FILE:+PLACEHOLDER}" ]; then
  # Quote the array expansion so paths containing spaces stay intact.
  for file_path in "${DEFAULT_FORMAT_FILE_LOCATIONS[@]}"; do
    if [ -f "${file_path}" ]; then
      RAPIDS_CMAKE_FORMAT_FILE=${file_path}
      break
    fi
  done
fi

if [ -z "${RAPIDS_CMAKE_FORMAT_FILE:+PLACEHOLDER}" ]; then
  # Exit 0 (not an error) so a missing config doesn't block commits or CI.
  echo "The rapids-cmake cmake-format configuration file was not found at any of the default search locations: "
  echo ""
  ( IFS=$'\n'; echo "${DEFAULT_FORMAT_FILE_LOCATIONS[*]}" )
  echo ""
  echo "Try setting the environment variable RAPIDS_CMAKE_FORMAT_FILE to the path to the config file."
  exit 0
else
  echo "Using format file ${RAPIDS_CMAKE_FORMAT_FILE}"
fi

if [[ $1 == "cmake-format" ]]; then
  # Quote "${@:2}" so filenames with spaces are passed through correctly.
  cmake-format -i --config-files cpp/cmake/config.json "${RAPIDS_CMAKE_FORMAT_FILE}" -- "${@:2}"
elif [[ $1 == "cmake-lint" ]]; then
  # Since the pre-commit hook is verbose, we have to be careful to only
  # present cmake-lint's output (which is quite verbose) if we actually
  # observe a failure.
  OUTPUT=$(cmake-lint --config-files cpp/cmake/config.json "${RAPIDS_CMAKE_FORMAT_FILE}" -- "${@:2}")
  status=$?
  if ! [ ${status} -eq 0 ]; then
    echo "${OUTPUT}"
  fi
  exit ${status}
fi
| 0 |
rapidsai_public_repos/kvikio/cpp
|
rapidsai_public_repos/kvikio/cpp/cmake/config.json
|
{
"parse": {
"additional_commands": {
"CPMFindPackage": {
"kwargs": {
"NAME": 1,
"GITHUB_REPOSITORY": "?",
"GIT_TAG": "?",
"VERSION": "?",
"GIT_SHALLOW": "?",
"OPTIONS": "*",
"FIND_PACKAGE_ARGUMENTS": "*"
}
},
"ConfigureTest": {
"flags": ["TEST_NAME", "TEST_SRC"]
},
"ConfigureBench": {
"flags": ["BENCH_NAME", "BENCH_SRC"]
}
}
},
"format": {
"line_width": 100,
"tab_size": 2,
"command_case": "unchanged",
"max_lines_hwrap": 1,
"max_pargs_hwrap": 999,
"dangle_parens": true
},
"lint": {
"disabled_codes": ["C0301", "C0112"],
"function_pattern": "[0-9A-z_]+",
"macro_pattern": "[0-9A-z_]+",
"global_var_pattern": "[A-z][0-9A-z_]+",
"internal_var_pattern": "_[A-z][0-9A-z_]+",
"local_var_pattern": "[A-z][A-z0-9_]+",
"private_var_pattern": "_[0-9A-z_]+",
"public_var_pattern": "[A-z][0-9A-z_]+",
"argument_var_pattern": "[A-z][A-z0-9_]+",
"keyword_pattern": "[A-z][0-9A-z_]+"
}
}
| 0 |
rapidsai_public_repos/kvikio/cpp
|
rapidsai_public_repos/kvikio/cpp/cmake/fetch_rapids.cmake
|
# =============================================================================
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Fetch the rapids-cmake entry point once and cache it in the build directory
# so that subsequent CMake re-runs do not require network access.
# NOTE(review): file(DOWNLOAD) status is not checked; a failed or partial
# download would leave a broken cached file behind -- consider using the
# STATUS option and removing the file on failure. TODO confirm.
if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/KVIKIO_RAPIDS.cmake)
file(DOWNLOAD https://raw.githubusercontent.com/rapidsai/rapids-cmake/branch-23.12/RAPIDS.cmake
${CMAKE_CURRENT_BINARY_DIR}/KVIKIO_RAPIDS.cmake
)
endif()
# Load rapids-cmake, which provides the rapids_* CMake functions used elsewhere.
include(${CMAKE_CURRENT_BINARY_DIR}/KVIKIO_RAPIDS.cmake)
| 0 |
rapidsai_public_repos/kvikio/cpp/cmake
|
rapidsai_public_repos/kvikio/cpp/cmake/Modules/FindcuFile.cmake
|
# =============================================================================
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
#[=======================================================================[.rst:
FindcuFile
----------
Find cuFile headers and libraries.
Imported Targets
^^^^^^^^^^^^^^^^
``cufile::cuFile``
The cuFile library, if found.
``cufile::cuFileRDMA``
The cuFile RDMA library, if found.
Result Variables
^^^^^^^^^^^^^^^^
This will define the following variables in your project:
``cuFile_FOUND``
true if (the requested version of) cuFile is available.
``cuFile_VERSION``
the version of cuFile.
``cuFile_LIBRARIES``
the libraries to link against to use cuFile.
``cuFileRDMA_LIBRARIES``
the libraries to link against to use cuFile RDMA.
``cuFile_INCLUDE_DIRS``
where to find the cuFile headers.
``cuFile_COMPILE_OPTIONS``
this should be passed to target_compile_options(), if the
target is not used for linking
#]=======================================================================]
# use pkg-config to get the directories and then use these values in the FIND_PATH() and
# FIND_LIBRARY() calls
find_package(PkgConfig QUIET)
pkg_check_modules(PKG_cuFile QUIET cuFile)
set(cuFile_COMPILE_OPTIONS ${PKG_cuFile_CFLAGS_OTHER})
set(cuFile_VERSION ${PKG_cuFile_VERSION})
# Find the location of the CUDA Toolkit
find_package(CUDAToolkit QUIET)
# Locate the cuFile header, preferring pkg-config hints and falling back to
# the CUDA Toolkit include directory.
find_path(
cuFile_INCLUDE_DIR
NAMES cufile.h
HINTS ${PKG_cuFile_INCLUDE_DIRS} ${CUDAToolkit_INCLUDE_DIRS}
)
# The main cuFile library.
find_library(
cuFile_LIBRARY
NAMES cufile
HINTS ${PKG_cuFile_LIBRARY_DIRS} ${CUDAToolkit_LIBRARY_DIR}
)
# The companion RDMA library.
find_library(
cuFileRDMA_LIBRARY
NAMES cufile_rdma
HINTS ${PKG_cuFile_LIBRARY_DIRS} ${CUDAToolkit_LIBRARY_DIR}
)
# Set cuFile_FOUND only when the header AND both libraries were located,
# and handle the standard QUIET/REQUIRED find_package arguments.
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(
cuFile
FOUND_VAR cuFile_FOUND
REQUIRED_VARS cuFile_LIBRARY cuFileRDMA_LIBRARY cuFile_INCLUDE_DIR
VERSION_VAR cuFile_VERSION
)
# Header-only interface target: created whenever the header was found, even if
# the libraries were not. It also defines CUFILE_FOUND for consumers.
if(cuFile_INCLUDE_DIR AND NOT TARGET cufile::cuFile_interface)
add_library(cufile::cuFile_interface INTERFACE IMPORTED GLOBAL)
target_include_directories(
cufile::cuFile_interface INTERFACE "$<BUILD_INTERFACE:${cuFile_INCLUDE_DIR}>"
)
target_compile_options(cufile::cuFile_interface INTERFACE "${cuFile_COMPILE_OPTIONS}")
target_compile_definitions(cufile::cuFile_interface INTERFACE CUFILE_FOUND)
endif()
# Imported target for linking against the main cuFile library.
if(cuFile_FOUND AND NOT TARGET cufile::cuFile)
add_library(cufile::cuFile UNKNOWN IMPORTED GLOBAL)
set_target_properties(
cufile::cuFile
PROPERTIES IMPORTED_LOCATION "${cuFile_LIBRARY}"
INTERFACE_COMPILE_OPTIONS "${cuFile_COMPILE_OPTIONS}"
INTERFACE_INCLUDE_DIRECTORIES "${cuFile_INCLUDE_DIR}"
)
endif()
# Imported target for linking against the cuFile RDMA library.
if(cuFile_FOUND AND NOT TARGET cufile::cuFileRDMA)
add_library(cufile::cuFileRDMA UNKNOWN IMPORTED GLOBAL)
set_target_properties(
cufile::cuFileRDMA
PROPERTIES IMPORTED_LOCATION "${cuFileRDMA_LIBRARY}"
INTERFACE_COMPILE_OPTIONS "${cuFile_COMPILE_OPTIONS}"
INTERFACE_INCLUDE_DIRECTORIES "${cuFile_INCLUDE_DIR}"
)
endif()
mark_as_advanced(cuFile_LIBRARY cuFileRDMA_LIBRARY cuFile_INCLUDE_DIR)
# Populate the documented result variables for consumers that use variables
# rather than the imported targets.
if(cuFile_FOUND)
set(cuFile_LIBRARIES ${cuFile_LIBRARY})
set(cuFileRDMA_LIBRARIES ${cuFileRDMA_LIBRARY})
set(cuFile_INCLUDE_DIRS ${cuFile_INCLUDE_DIR})
endif()
| 0 |
rapidsai_public_repos/kvikio/cpp/cmake
|
rapidsai_public_repos/kvikio/cpp/cmake/Modules/ConfigureCUDA.cmake
|
# =============================================================================
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Accumulates C++ and CUDA compiler flags into list variables that are applied
# to targets elsewhere in the build.
# NOTE(review): the variables are prefixed CUDF_ (and the debug message says
# "CUDF:") even though this file lives in the kvikio repo -- it appears to be
# copied from cudf. Renaming would require updating every consumer of
# CUDF_CXX_FLAGS/CUDF_CUDA_FLAGS; confirm before changing.
# Host compiler warnings (warnings-as-errors) for GCC.
if(CMAKE_COMPILER_IS_GNUCXX)
list(APPEND CUDF_CXX_FLAGS -Wall -Werror -Wno-unknown-pragmas -Wno-error=deprecated-declarations)
endif()
list(APPEND CUDF_CUDA_FLAGS --expt-extended-lambda --expt-relaxed-constexpr)
# set warnings as errors
list(APPEND CUDF_CUDA_FLAGS -Werror=cross-execution-space-call)
list(APPEND CUDF_CUDA_FLAGS -Xcompiler=-Wall,-Werror,-Wno-error=deprecated-declarations)
# Optionally silence deprecation warnings in both host and device compilation.
if(DISABLE_DEPRECATION_WARNING)
list(APPEND CUDF_CXX_FLAGS -Wno-deprecated-declarations)
list(APPEND CUDF_CUDA_FLAGS -Xcompiler=-Wno-deprecated-declarations)
endif()
# make sure we produce smallest binary size
list(APPEND CUDF_CUDA_FLAGS -Xfatbin=-compress-all)
# Option to enable line info in CUDA device compilation to allow introspection when profiling /
# memchecking
if(CUDA_ENABLE_LINEINFO)
list(APPEND CUDF_CUDA_FLAGS -lineinfo)
endif()
# Debug options
if(CMAKE_BUILD_TYPE MATCHES Debug)
message(VERBOSE "CUDF: Building with debugging flags")
list(APPEND CUDF_CUDA_FLAGS -Xcompiler=-rdynamic)
endif()
| 0 |
rapidsai_public_repos/kvikio/cpp/cmake
|
rapidsai_public_repos/kvikio/cpp/cmake/thirdparty/get_gtest.cmake
|
# =============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# This function finds gtest and sets any additional necessary environment variables.
function(find_and_configure_gtest)
# Use the rapids-cmake CPM wrapper to find an installed GoogleTest or fetch it.
include(${rapids-cmake-dir}/cpm/gtest.cmake)
# Find or install GoogleTest
rapids_cpm_gtest(
BUILD_EXPORT_SET kvikio-testing-exports INSTALL_EXPORT_SET kvikio-testing-exports
)
# GTest_ADDED is set when GoogleTest was fetched/built rather than found; in
# that case export its targets so consumers of the build tree can use them.
if(GTest_ADDED)
rapids_export(
BUILD GTest
VERSION ${GTest_VERSION}
EXPORT_SET GTestTargets
GLOBAL_TARGETS gtest gmock gtest_main gmock_main
NAMESPACE GTest::
)
# Record where the build-tree GTest package lives so that downstream
# find_package(GTest) calls resolve to it.
include("${rapids-cmake-dir}/export/find_package_root.cmake")
rapids_export_find_package_root(
BUILD GTest [=[${CMAKE_CURRENT_LIST_DIR}]=] EXPORT_SET kvikio-testing-exports
)
endif()
endfunction()
find_and_configure_gtest()
| 0 |
rapidsai_public_repos/kvikio/cpp
|
rapidsai_public_repos/kvikio/cpp/doxygen/main_page.md
|
# Welcome to KvikIO's C++ documentation!
KvikIO is a Python and C++ library for high performance file IO. It provides C++ and Python
bindings to [cuFile](https://docs.nvidia.com/gpudirect-storage/api-reference-guide/index.html)
which enables [GPUDirect Storage (GDS)](https://developer.nvidia.com/blog/gpudirect-storage/).
KvikIO also works efficiently when GDS isn't available and can read/write both host and device data seamlessly.
KvikIO C++ is a header-only library that is part of the [RAPIDS](https://rapids.ai/) suite of open-source software libraries for GPU-accelerated data science.
---
**Notice** this is the documentation for the C++ library. For the Python documentation, see under [kvikio](https://docs.rapids.ai/api/kvikio/nightly/).
---
## Features
* Object Oriented API.
* Exception handling.
* Concurrent reads and writes using an internal thread pool.
* Non-blocking API.
* Handle both host and device IO seamlessly.
## Installation
KvikIO is a header-only library and as such doesn't need installation.
However, for convenience we release Conda packages that make it easy
to include KvikIO in your CMake projects.
### Conda/Mamba
We strongly recommend using [mamba](https://github.com/mamba-org/mamba) in place of conda, which we will do throughout the documentation.
Install the **stable release** from the ``rapidsai`` channel with the following:
```sh
# Install in existing environment
mamba install -c rapidsai -c conda-forge libkvikio
# Create new environment (CUDA 11.8)
mamba create -n libkvikio-env -c rapidsai -c conda-forge cuda-version=11.8 libkvikio
# Create new environment (CUDA 12.0)
mamba create -n libkvikio-env -c rapidsai -c conda-forge cuda-version=12.0 libkvikio
```
Install the **nightly release** from the ``rapidsai-nightly`` channel with the following:
```sh
# Install in existing environment
mamba install -c rapidsai-nightly -c conda-forge libkvikio
# Create new environment (CUDA 11.8)
mamba create -n libkvikio-env -c rapidsai-nightly -c conda-forge python=3.10 cuda-version=11.8 libkvikio
# Create new environment (CUDA 12.0)
mamba create -n libkvikio-env -c rapidsai-nightly -c conda-forge python=3.10 cuda-version=12.0 libkvikio
```
---
**Notice** if the nightly install doesn't work, set ``channel_priority: flexible`` in your ``.condarc``.
---
### Include KvikIO in a CMake project
An example of how to include KvikIO in an existing CMake project can be found here: <https://github.com/rapidsai/kvikio/blob/HEAD/cpp/examples/downstream/>.
### Build from source
To build the C++ example run:
```
./build.sh libkvikio
```
Then run the example:
```
./examples/basic_io
```
## Runtime Settings
#### Compatibility Mode (KVIKIO_COMPAT_MODE)
When KvikIO is running in compatibility mode, it doesn't load `libcufile.so`. Instead, reads and writes are done using POSIX. Notice, this is not the same as the compatibility mode in cuFile: that is, cuFile can run in compatibility mode while KvikIO does not.
Set the environment variable `KVIKIO_COMPAT_MODE` to enable/disable compatibility mode. By default, compatibility mode is enabled:
- when `libcufile.so` cannot be found.
- when running in Windows Subsystem for Linux (WSL).
- when `/run/udev` isn't readable, which typically happens when running inside a docker image not launched with `--volume /run/udev:/run/udev:ro`.
#### Thread Pool (KVIKIO_NTHREADS)
KvikIO can use multiple threads for IO automatically. Set the environment variable `KVIKIO_NTHREADS` to the number of threads in the thread pool. If not set, the default value is 1.
#### Task Size (KVIKIO_TASK_SIZE)
KvikIO splits parallel IO operations into multiple tasks. Set the environment variable `KVIKIO_TASK_SIZE` to the maximum task size (in bytes). If not set, the default value is 4194304 (4 MiB).
#### GDS Threshold (KVIKIO_GDS_THRESHOLD)
In order to improve performance of small IO, `.pread()` and `.pwrite()` implement a shortcut that circumvent the threadpool and use the POSIX backend directly. Set the environment variable `KVIKIO_GDS_THRESHOLD` to the minimum size (in bytes) to use GDS. If not set, the default value is 1048576 (1 MiB).
## Example
```cpp
#include <cstddef>
#include <cuda_runtime.h>
#include <kvikio/file_handle.hpp>
using namespace std;
int main()
{
// Create two arrays `a` and `b`
constexpr std::size_t size = 100;
void *a = nullptr;
void *b = nullptr;
cudaMalloc(&a, size);
cudaMalloc(&b, size);
// Write `a` to file
kvikio::FileHandle fw("test-file", "w");
size_t written = fw.write(a, size);
fw.close();
// Read file into `b`
kvikio::FileHandle fr("test-file", "r");
size_t read = fr.read(b, size);
fr.close();
// Read file into `b` in parallel using 16 threads
kvikio::default_thread_pool::reset(16);
{
kvikio::FileHandle f("test-file", "r");
future<size_t> future = f.pread(b, size, 0); // Non-blocking
size_t read = future.get(); // Blocking
// Notice, `f` closes automatically on destruction.
}
}
```
For a full runnable example see <https://github.com/rapidsai/kvikio/blob/HEAD/cpp/examples/basic_io.cpp>.
| 0 |
rapidsai_public_repos/kvikio/cpp
|
rapidsai_public_repos/kvikio/cpp/doxygen/Doxyfile
|
# Doxyfile 1.8.18
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
#
# All text after a double hash (##) is considered a comment and is placed in
# front of the TAG it is preceding.
#
# All text after a single hash (#) is considered a comment and will be ignored.
# The format is:
# TAG = value [value, ...]
# For lists, items can also be appended using:
# TAG += value [value, ...]
# Values that contain spaces should be placed between quotes (\" \").
#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------
# This tag specifies the encoding used for all characters in the configuration
# file that follow. The default is UTF-8 which is also the encoding used for all
# text before the first occurrence of this tag. Doxygen uses libiconv (or the
# iconv built into libc) for the transcoding. See
# https://www.gnu.org/software/libiconv/ for the list of possible encodings.
# The default value is: UTF-8.
DOXYFILE_ENCODING = UTF-8
# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
# double-quotes, unless you are using Doxywizard) that should identify the
# project for which the documentation is generated. This name is used in the
# title of most generated pages and in a few other places.
# The default value is: My Project.
PROJECT_NAME = "libkvikio"
# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
# could be handy for archiving the generated documentation or if some version
# control system is used.
PROJECT_NUMBER = 23.12.00
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a
# quick idea about the purpose of the project. Keep the description short.
PROJECT_BRIEF =
# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
# the logo to the output directory.
PROJECT_LOGO =
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
# into which the generated documentation will be written. If a relative path is
# entered, it will be relative to the location where doxygen was started. If
# left blank the current directory will be used.
OUTPUT_DIRECTORY =
# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
# directories (in 2 levels) under the output directory of each output format and
# will distribute the generated files over these directories. Enabling this
# option can be useful when feeding doxygen a huge amount of source files, where
# putting all generated files in the same directory would otherwise causes
# performance problems for the file system.
# The default value is: NO.
CREATE_SUBDIRS = NO
# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
# characters to appear in the names of generated files. If set to NO, non-ASCII
# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
# U+3044.
# The default value is: NO.
ALLOW_UNICODE_NAMES = NO
# The OUTPUT_LANGUAGE tag is used to specify the language in which all
# documentation generated by doxygen is written. Doxygen will use this
# information to generate all constant output in the proper language.
# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
# Ukrainian and Vietnamese.
# The default value is: English.
OUTPUT_LANGUAGE = English
# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all
# documentation generated by doxygen is written. Doxygen will use this
# information to generate all generated output in the proper direction.
# Possible values are: None, LTR, RTL and Context.
# The default value is: None.
OUTPUT_TEXT_DIRECTION = None
# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
# descriptions after the members that are listed in the file and class
# documentation (similar to Javadoc). Set to NO to disable this.
# The default value is: YES.
BRIEF_MEMBER_DESC = YES
# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
# description of a member or function before the detailed description
#
# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
# brief descriptions will be completely suppressed.
# The default value is: YES.
REPEAT_BRIEF = YES
# This tag implements a quasi-intelligent brief description abbreviator that is
# used to form the text in various listings. Each string in this list, if found
# as the leading text of the brief description, will be stripped from the text
# and the result, after processing the whole list, is used as the annotated
# text. Otherwise, the brief description is used as-is. If left blank, the
# following values are used ($name is automatically replaced with the name of
# the entity):The $name class, The $name widget, The $name file, is, provides,
# specifies, contains, represents, a, an and the.
ABBREVIATE_BRIEF =
# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
# doxygen will generate a detailed section even if there is only a brief
# description.
# The default value is: NO.
ALWAYS_DETAILED_SEC = NO
# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
# inherited members of a class in the documentation of that class as if those
# members were ordinary class members. Constructors, destructors and assignment
# operators of the base classes will not be shown.
# The default value is: NO.
INLINE_INHERITED_MEMB = NO
# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
# before files name in the file list and in the header files. If set to NO the
# shortest path that makes the file name unique will be used
# The default value is: YES.
FULL_PATH_NAMES = NO
# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
# Stripping is only done if one of the specified strings matches the left-hand
# part of the path. The tag can be used to show relative paths in the file list.
# If left blank the directory from which doxygen is run is used as the path to
# strip.
#
# Note that you can specify absolute paths here, but also relative paths, which
# will be relative from the directory where doxygen is started.
# This tag requires that the tag FULL_PATH_NAMES is set to YES.
STRIP_FROM_PATH =
# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
# path mentioned in the documentation of a class, which tells the reader which
# header file to include in order to use a class. If left blank only the name of
# the header file containing the class definition is used. Otherwise one should
# specify the list of include paths that are normally passed to the compiler
# using the -I flag.
STRIP_FROM_INC_PATH =
# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
# less readable) file names. This can be useful is your file systems doesn't
# support long names like on DOS, Mac, or CD-ROM.
# The default value is: NO.
SHORT_NAMES = NO
# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
# first line (until the first dot) of a Javadoc-style comment as the brief
# description. If set to NO, the Javadoc-style will behave just like regular Qt-
# style comments (thus requiring an explicit @brief command for a brief
# description.)
# The default value is: NO.
JAVADOC_AUTOBRIEF = NO
# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line
# such as
# /***************
# as being the beginning of a Javadoc-style comment "banner". If set to NO, the
# Javadoc-style will behave just like regular comments and it will not be
# interpreted by doxygen.
# The default value is: NO.
JAVADOC_BANNER = NO
# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
# line (until the first dot) of a Qt-style comment as the brief description. If
# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
# requiring an explicit \brief command for a brief description.)
# The default value is: NO.
QT_AUTOBRIEF = NO
# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
# a brief description. This used to be the default behavior. The new default is
# to treat a multi-line C++ comment block as a detailed description. Set this
# tag to YES if you prefer the old behavior instead.
#
# Note that setting this tag to YES also means that rational rose comments are
# not recognized any more.
# The default value is: NO.
MULTILINE_CPP_IS_BRIEF = NO
# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
# documentation from any documented member that it re-implements.
# The default value is: YES.
INHERIT_DOCS = YES
# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
# page for each member. If set to NO, the documentation of a member will be part
# of the file/class/namespace that contains it.
# The default value is: NO.
SEPARATE_MEMBER_PAGES = NO
# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
# uses this value to replace tabs by spaces in code fragments.
# Minimum value: 1, maximum value: 16, default value: 4.
TAB_SIZE = 4
# This tag can be used to specify a number of aliases that act as commands in
# the documentation. An alias has the form:
# name=value
# For example adding
# "sideeffect=@par Side Effects:\n"
# will allow you to put the command \sideeffect (or @sideeffect) in the
# documentation, which will result in a user-defined paragraph with heading
# "Side Effects:". You can put \n's in the value part of an alias to insert
# newlines (in the resulting output). You can put ^^ in the value part of an
# alias to insert a newline as if a physical newline was in the original file.
# When you need a literal { or } or , in the value part of an alias you have to
# escape them by means of a backslash (\), this can lead to conflicts with the
# commands \{ and \} for these it is advised to use the version @{ and @} or use
# a double escape (\\{ and \\})
ALIASES =
# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
# only. Doxygen will then generate output that is more tailored for C. For
# instance, some of the names that are used will be different. The list of all
# members will be omitted, etc.
# The default value is: NO.
OPTIMIZE_OUTPUT_FOR_C = NO
# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
# Python sources only. Doxygen will then generate output that is more tailored
# for that language. For instance, namespaces will be presented as packages,
# qualified scopes will look different, etc.
# The default value is: NO.
OPTIMIZE_OUTPUT_JAVA = NO
# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
# sources. Doxygen will then generate output that is tailored for Fortran.
# The default value is: NO.
OPTIMIZE_FOR_FORTRAN = NO
# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
# sources. Doxygen will then generate output that is tailored for VHDL.
# The default value is: NO.
OPTIMIZE_OUTPUT_VHDL = NO
# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice
# sources only. Doxygen will then generate output that is more tailored for that
# language. For instance, namespaces will be presented as modules, types will be
# separated into more groups, etc.
# The default value is: NO.
OPTIMIZE_OUTPUT_SLICE = NO
# Doxygen selects the parser to use depending on the extension of the files it
# parses. With this tag you can assign which parser to use for a given
# extension. Doxygen has a built-in mapping, but you can override or extend it
# using this tag. The format is ext=language, where ext is a file extension, and
# language is one of the parsers supported by doxygen: IDL, Java, JavaScript,
# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL,
# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser
# tries to guess whether the code is fixed or free formatted code, this is the
# default for Fortran type files). For instance to make doxygen treat .inc files
# as Fortran files (default is PHP), and .f files as C (default is Fortran),
# use: inc=Fortran f=C.
#
# Note: For files without extension you can use no_extension as a placeholder.
#
# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
# the files are not read by doxygen.
EXTENSION_MAPPING = cu=C++ \
cuh=C++
# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
# according to the Markdown format, which allows for more readable
# documentation. See https://daringfireball.net/projects/markdown/ for details.
# The output of markdown processing is further processed by doxygen, so you can
# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
# case of backward compatibilities issues.
# The default value is: YES.
MARKDOWN_SUPPORT = YES
# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
# to that level are automatically included in the table of contents, even if
# they do not have an id attribute.
# Note: This feature currently applies only to Markdown headings.
# Minimum value: 0, maximum value: 99, default value: 5.
# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
TOC_INCLUDE_HEADINGS = 5
# When enabled doxygen tries to link words that correspond to documented
# classes, or namespaces to their corresponding documentation. Such a link can
# be prevented in individual cases by putting a % sign in front of the word or
# globally by setting AUTOLINK_SUPPORT to NO.
# The default value is: YES.
AUTOLINK_SUPPORT = YES
# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
# to include (a tag file for) the STL sources as input, then you should set this
# tag to YES in order to let doxygen match functions declarations and
# definitions whose arguments contain STL classes (e.g. func(std::string);
# versus func(std::string) {}). This also makes the inheritance and collaboration
# diagrams that involve STL classes more complete and accurate.
# The default value is: NO.
BUILTIN_STL_SUPPORT = NO
# If you use Microsoft's C++/CLI language, you should set this option to YES to
# enable parsing support.
# The default value is: NO.
CPP_CLI_SUPPORT = NO
# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen
# will parse them like normal C++ but will assume all classes use public instead
# of private inheritance when no explicit protection keyword is present.
# The default value is: NO.
SIP_SUPPORT = NO
# For Microsoft's IDL there are propget and propput attributes to indicate
# getter and setter methods for a property. Setting this option to YES will make
# doxygen replace the get and set methods by a property in the documentation.
# This will only work if the methods are indeed getting or setting a simple
# type. If this is not the case, or you want to show the methods anyway, you
# should set this option to NO.
# The default value is: YES.
IDL_PROPERTY_SUPPORT = YES
# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
# tag is set to YES then doxygen will reuse the documentation of the first
# member in the group (if any) for the other members of the group. By default
# all members of a group must be documented explicitly.
# The default value is: NO.
DISTRIBUTE_GROUP_DOC = NO
# If one adds a struct or class to a group and this option is enabled, then also
# any nested class or struct is added to the same group. By default this option
# is disabled and one has to add nested compounds explicitly via \ingroup.
# The default value is: NO.
GROUP_NESTED_COMPOUNDS = NO
# Set the SUBGROUPING tag to YES to allow class member groups of the same type
# (for instance a group of public functions) to be put as a subgroup of that
# type (e.g. under the Public Functions section). Set it to NO to prevent
# subgrouping. Alternatively, this can be done per class using the
# \nosubgrouping command.
# The default value is: YES.
SUBGROUPING = YES
# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
# are shown inside the group in which they are included (e.g. using \ingroup)
# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
# and RTF).
#
# Note that this feature does not work in combination with
# SEPARATE_MEMBER_PAGES.
# The default value is: NO.
INLINE_GROUPED_CLASSES = NO
# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
# with only public data fields or simple typedef fields will be shown inline in
# the documentation of the scope in which they are defined (i.e. file,
# namespace, or group documentation), provided this scope is documented. If set
# to NO, structs, classes, and unions are shown on a separate page (for HTML and
# Man pages) or section (for LaTeX and RTF).
# The default value is: NO.
INLINE_SIMPLE_STRUCTS = NO
# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
# enum is documented as struct, union, or enum with the name of the typedef. So
# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
# with name TypeT. When disabled the typedef will appear as a member of a file,
# namespace, or class. And the struct will be named TypeS. This can typically be
# useful for C code in case the coding convention dictates that all compound
# types are typedef'ed and only the typedef is referenced, never the tag name.
# The default value is: NO.
TYPEDEF_HIDES_STRUCT = NO
# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
# cache is used to resolve symbols given their name and scope. Since this can be
# an expensive process and often the same symbol appears multiple times in the
# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
# doxygen will become slower. If the cache is too large, memory is wasted. The
# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
# symbols. At the end of a run doxygen will report the cache usage and suggest
# the optimal cache size from a speed point of view.
# Minimum value: 0, maximum value: 9, default value: 0.
LOOKUP_CACHE_SIZE = 0
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------
# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
# documentation are documented, even if no documentation was available. Private
# class members and static file members will be hidden unless the
# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
# Note: This will also disable the warnings about undocumented members that are
# normally produced when WARNINGS is set to YES.
# The default value is: NO.
EXTRACT_ALL = NO
# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
# be included in the documentation.
# The default value is: NO.
EXTRACT_PRIVATE = NO
# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual
# methods of a class will be included in the documentation.
# The default value is: NO.
EXTRACT_PRIV_VIRTUAL = NO
# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
# scope will be included in the documentation.
# The default value is: NO.
EXTRACT_PACKAGE = NO
# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
# included in the documentation.
# The default value is: NO.
EXTRACT_STATIC = NO
# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
# locally in source files will be included in the documentation. If set to NO,
# only classes defined in header files are included. Does not have any effect
# for Java sources.
# The default value is: YES.
EXTRACT_LOCAL_CLASSES = YES
# This flag is only useful for Objective-C code. If set to YES, local methods,
# which are defined in the implementation section but not in the interface are
# included in the documentation. If set to NO, only methods in the interface are
# included.
# The default value is: NO.
EXTRACT_LOCAL_METHODS = NO
# If this flag is set to YES, the members of anonymous namespaces will be
# extracted and appear in the documentation as a namespace called
# 'anonymous_namespace{file}', where file will be replaced with the base name of
# the file that contains the anonymous namespace. By default anonymous namespace
# are hidden.
# The default value is: NO.
EXTRACT_ANON_NSPACES = NO
# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
# undocumented members inside documented classes or files. If set to NO these
# members will be included in the various overviews, but no documentation
# section is generated. This option has no effect if EXTRACT_ALL is enabled.
# The default value is: NO.
HIDE_UNDOC_MEMBERS = NO
# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
# undocumented classes that are normally visible in the class hierarchy. If set
# to NO, these classes will be included in the various overviews. This option
# has no effect if EXTRACT_ALL is enabled.
# The default value is: NO.
HIDE_UNDOC_CLASSES = NO
# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
# declarations. If set to NO, these declarations will be included in the
# documentation.
# The default value is: NO.
HIDE_FRIEND_COMPOUNDS = NO
# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
# documentation blocks found inside the body of a function. If set to NO, these
# blocks will be appended to the function's detailed documentation block.
# The default value is: NO.
HIDE_IN_BODY_DOCS = NO
# The INTERNAL_DOCS tag determines if documentation that is typed after a
# \internal command is included. If the tag is set to NO then the documentation
# will be excluded. Set it to YES to include the internal documentation.
# The default value is: NO.
INTERNAL_DOCS = NO
# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
# names in lower-case letters. If set to YES, upper-case letters are also
# allowed. This is useful if you have classes or files whose names only differ
# in case and if your file system supports case sensitive file names. Windows
# (including Cygwin) and Mac users are advised to set this option to NO.
# The default value is: system dependent.
CASE_SENSE_NAMES = YES
# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
# their full class and namespace scopes in the documentation. If set to YES, the
# scope will be hidden.
# The default value is: NO.
HIDE_SCOPE_NAMES = NO
# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
# append additional text to a page's title, such as Class Reference. If set to
# YES the compound reference will be hidden.
# The default value is: NO.
HIDE_COMPOUND_REFERENCE= NO
# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
# the files that are included by a file in the documentation of that file.
# The default value is: YES.
SHOW_INCLUDE_FILES = YES
# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
# grouped member an include statement to the documentation, telling the reader
# which file to include in order to use the member.
# The default value is: NO.
SHOW_GROUPED_MEMB_INC = NO
# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
# files with double quotes in the documentation rather than with sharp brackets.
# The default value is: NO.
FORCE_LOCAL_INCLUDES = NO
# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
# documentation for inline members.
# The default value is: YES.
INLINE_INFO = YES
# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
# (detailed) documentation of file and class members alphabetically by member
# name. If set to NO, the members will appear in declaration order.
# The default value is: YES.
SORT_MEMBER_DOCS = YES
# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
# descriptions of file, namespace and class members alphabetically by member
# name. If set to NO, the members will appear in declaration order. Note that
# this will also influence the order of the classes in the class list.
# The default value is: NO.
SORT_BRIEF_DOCS = NO
# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
# (brief and detailed) documentation of class members so that constructors and
# destructors are listed first. If set to NO the constructors will appear in the
# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
# member documentation.
# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
# detailed member documentation.
# The default value is: NO.
SORT_MEMBERS_CTORS_1ST = NO
# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
# of group names into alphabetical order. If set to NO the group names will
# appear in their defined order.
# The default value is: NO.
SORT_GROUP_NAMES = NO
# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
# fully-qualified names, including namespaces. If set to NO, the class list will
# be sorted only by class name, not including the namespace part.
# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
# Note: This option applies only to the class list, not to the alphabetical
# list.
# The default value is: NO.
SORT_BY_SCOPE_NAME = NO
# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
# type resolution of all parameters of a function it will reject a match between
# the prototype and the implementation of a member function even if there is
# only one candidate or it is obvious which candidate to choose by doing a
# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
# accept a match between prototype and implementation in such cases.
# The default value is: NO.
STRICT_PROTO_MATCHING = NO
# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
# list. This list is created by putting \todo commands in the documentation.
# The default value is: YES.
GENERATE_TODOLIST = YES
# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
# list. This list is created by putting \test commands in the documentation.
# The default value is: YES.
GENERATE_TESTLIST = YES
# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
# list. This list is created by putting \bug commands in the documentation.
# The default value is: YES.
GENERATE_BUGLIST = YES
# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
# the deprecated list. This list is created by putting \deprecated commands in
# the documentation.
# The default value is: YES.
GENERATE_DEPRECATEDLIST= YES
# The ENABLED_SECTIONS tag can be used to enable conditional documentation
# sections, marked by \if <section_label> ... \endif and \cond <section_label>
# ... \endcond blocks.
ENABLED_SECTIONS =
# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
# initial value of a variable or macro / define can have for it to appear in the
# documentation. If the initializer consists of more lines than specified here
# it will be hidden. Use a value of 0 to hide initializers completely. The
# appearance of the value of individual variables and macros / defines can be
# controlled using \showinitializer or \hideinitializer command in the
# documentation regardless of this setting.
# Minimum value: 0, maximum value: 10000, default value: 30.
MAX_INITIALIZER_LINES = 30
# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
# the bottom of the documentation of classes and structs. If set to YES, the
# list will mention the files that were used to generate the documentation.
# The default value is: YES.
SHOW_USED_FILES = YES
# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
# will remove the Files entry from the Quick Index and from the Folder Tree View
# (if specified).
# The default value is: YES.
SHOW_FILES = YES
# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
# page. This will remove the Namespaces entry from the Quick Index and from the
# Folder Tree View (if specified).
# The default value is: YES.
SHOW_NAMESPACES = YES
# The FILE_VERSION_FILTER tag can be used to specify a program or script that
# doxygen should invoke to get the current version for each file (typically from
# the version control system). Doxygen will invoke the program by executing (via
# popen()) the command command input-file, where command is the value of the
# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
# by doxygen. Whatever the program writes to standard output is used as the file
# version. For an example see the documentation.
FILE_VERSION_FILTER =
# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
# by doxygen. The layout file controls the global structure of the generated
# output files in an output format independent way. To create the layout file
# that represents doxygen's defaults, run doxygen with the -l option. You can
# optionally specify a file name after the option, if omitted DoxygenLayout.xml
# will be used as the name of the layout file.
#
# Note that if you run doxygen from a directory containing a file called
# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
# tag is left empty.
LAYOUT_FILE =
# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
# the reference definitions. This must be a list of .bib files. The .bib
# extension is automatically appended if omitted. This requires the bibtex tool
# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info.
# For LaTeX the style of the bibliography can be controlled using
# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
# search path. See also \cite for info how to create references.
CITE_BIB_FILES =
#---------------------------------------------------------------------------
# Configuration options related to warning and progress messages
#---------------------------------------------------------------------------
# The QUIET tag can be used to turn on/off the messages that are generated to
# standard output by doxygen. If QUIET is set to YES this implies that the
# messages are off.
# The default value is: NO.
QUIET = NO
# The WARNINGS tag can be used to turn on/off the warning messages that are
# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
# this implies that the warnings are on.
#
# Tip: Turn warnings on while writing the documentation.
# The default value is: YES.
WARNINGS = YES
# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
# will automatically be disabled.
# The default value is: YES.
WARN_IF_UNDOCUMENTED = YES
# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
# potential errors in the documentation, such as not documenting some parameters
# in a documented function, or documenting parameters that don't exist or using
# markup commands wrongly.
# The default value is: YES.
WARN_IF_DOC_ERROR = YES
# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
# are documented, but have no documentation for their parameters or return
# value. If set to NO, doxygen will only warn about wrong or incomplete
# parameter documentation, but not about the absence of documentation. If
# EXTRACT_ALL is set to YES then this flag will automatically be disabled.
# The default value is: NO.
WARN_NO_PARAMDOC = YES
# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
# a warning is encountered.
# The default value is: NO.
WARN_AS_ERROR = NO
# The WARN_FORMAT tag determines the format of the warning messages that doxygen
# can produce. The string should contain the $file, $line, and $text tags, which
# will be replaced by the file and line number from which the warning originated
# and the warning text. Optionally the format may contain $version, which will
# be replaced by the version of the file (if it could be obtained via
# FILE_VERSION_FILTER)
# The default value is: $file:$line: $text.
WARN_FORMAT = "$file:$line: $text"
# The WARN_LOGFILE tag can be used to specify a file to which warning and error
# messages should be written. If left blank the output is written to standard
# error (stderr).
WARN_LOGFILE =
#---------------------------------------------------------------------------
# Configuration options related to the input files
#---------------------------------------------------------------------------
# The INPUT tag is used to specify the files and/or directories that contain
# documented source files. You may enter file names like myfile.cpp or
# directories like /usr/src/myproject. Separate the files or directories with
# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
# Note: If this tag is empty the current directory is searched.
INPUT = main_page.md \
../include
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
# documentation (see: https://www.gnu.org/software/libiconv/) for the list of
# possible encodings.
# The default value is: UTF-8.
INPUT_ENCODING = UTF-8
# If the value of the INPUT tag contains directories, you can use the
# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
# *.h) to filter out the source-files in the directories.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# read by doxygen.
#
# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp,
# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment),
# *.doc (to be provided as doxygen C comment), *.txt (to be provided as doxygen
# C comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd,
# *.vhdl, *.ucf, *.qsf and *.ice.
FILE_PATTERNS = *.cpp \
*.hpp \
*.h \
*.c \
*.cu \
*.cuh
# The RECURSIVE tag can be used to specify whether or not subdirectories should
# be searched for input files as well.
# The default value is: NO.
RECURSIVE = YES
# The EXCLUDE tag can be used to specify files and/or directories that should be
# excluded from the INPUT source files. This way you can easily exclude a
# subdirectory from a directory tree whose root is specified with the INPUT tag.
#
# Note that relative paths are relative to the directory from which doxygen is
# run.
EXCLUDE =
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
# from the input.
# The default value is: NO.
EXCLUDE_SYMLINKS = NO
# If the value of the INPUT tag contains directories, you can use the
# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
# certain files from those directories.
#
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories for example use the pattern */test/*
EXCLUDE_PATTERNS =
# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the
# output. The symbol name can be a fully qualified name, a word, or if the
# wildcard * is used, a substring. Examples: ANamespace, AClass,
# AClass::ANamespace, ANamespace::*Test
#
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories use the pattern */test/*
EXCLUDE_SYMBOLS =
# The EXAMPLE_PATH tag can be used to specify one or more files or directories
# that contain example code fragments that are included (see the \include
# command).
EXAMPLE_PATH =
# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
# *.h) to filter out the source-files in the directories. If left blank all
# files are included.
EXAMPLE_PATTERNS =
# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
# searched for input files to be used with the \include or \dontinclude commands
# irrespective of the value of the RECURSIVE tag.
# The default value is: NO.
EXAMPLE_RECURSIVE = NO
# The IMAGE_PATH tag can be used to specify one or more files or directories
# that contain images that are to be included in the documentation (see the
# \image command).
IMAGE_PATH =
# The INPUT_FILTER tag can be used to specify a program that doxygen should
# invoke to filter for each input file. Doxygen will invoke the filter program
# by executing (via popen()) the command:
#
# <filter> <input-file>
#
# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
# name of an input file. Doxygen will then use the output that the filter
# program writes to standard output. If FILTER_PATTERNS is specified, this tag
# will be ignored.
#
# Note that the filter must not add or remove lines; it is applied before the
# code is scanned, but not when the output code is generated. If lines are added
# or removed, the anchors will not be placed correctly.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# properly processed by doxygen.
INPUT_FILTER =
# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
# basis. Doxygen will compare the file name with each pattern and apply the
# filter if there is a match. The filters are a list of the form: pattern=filter
# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
# patterns match the file name, INPUT_FILTER is applied.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# properly processed by doxygen.
FILTER_PATTERNS =
# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
# INPUT_FILTER) will also be used to filter the input files that are used for
# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
# The default value is: NO.
FILTER_SOURCE_FILES = NO
# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
# it is also possible to disable source filtering for a specific pattern using
# *.ext= (so without naming a filter).
# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
FILTER_SOURCE_PATTERNS =
# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
# is part of the input, its contents will be placed on the main page
# (index.html). This can be useful if you have a project on for instance GitHub
# and want to reuse the introduction page also for the doxygen output.
USE_MDFILE_AS_MAINPAGE = main_page.md
#---------------------------------------------------------------------------
# Configuration options related to source browsing
#---------------------------------------------------------------------------
# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
# generated. Documented entities will be cross-referenced with these sources.
#
# Note: To get rid of all source code in the generated output, make sure that
# also VERBATIM_HEADERS is set to NO.
# The default value is: NO.
SOURCE_BROWSER = YES
# Setting the INLINE_SOURCES tag to YES will include the body of functions,
# classes and enums directly into the documentation.
# The default value is: NO.
INLINE_SOURCES = NO
# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
# special comment blocks from generated source code fragments. Normal C, C++ and
# Fortran comments will always remain visible.
# The default value is: YES.
STRIP_CODE_COMMENTS = YES
# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
# entity all documented functions referencing it will be listed.
# The default value is: NO.
REFERENCED_BY_RELATION = NO
# If the REFERENCES_RELATION tag is set to YES then for each documented function
# all documented entities called/used by that function will be listed.
# The default value is: NO.
REFERENCES_RELATION = NO
# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
# to YES then the hyperlinks from functions in REFERENCES_RELATION and
# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
# link to the documentation.
# The default value is: YES.
REFERENCES_LINK_SOURCE = YES
# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
# source code will show a tooltip with additional information such as prototype,
# brief description and links to the definition and documentation. Since this
# will make the HTML file larger and loading of large files a bit slower, you
# can opt to disable this feature.
# The default value is: YES.
# This tag requires that the tag SOURCE_BROWSER is set to YES.
SOURCE_TOOLTIPS = YES
# If the USE_HTAGS tag is set to YES then the references to source code will
# point to the HTML generated by the htags(1) tool instead of doxygen built-in
# source browser. The htags tool is part of GNU's global source tagging system
# (see https://www.gnu.org/software/global/global.html). You will need version
# 4.8.6 or higher.
#
# To use it do the following:
# - Install the latest version of global
# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file
# - Make sure the INPUT points to the root of the source tree
# - Run doxygen as normal
#
# Doxygen will invoke htags (and that will in turn invoke gtags), so these
# tools must be available from the command line (i.e. in the search path).
#
# The result: instead of the source browser generated by doxygen, the links to
# source code will now point to the output of htags.
# The default value is: NO.
# This tag requires that the tag SOURCE_BROWSER is set to YES.
USE_HTAGS = NO
# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
# verbatim copy of the header file for each class for which an include is
# specified. Set to NO to disable this.
# See also: Section \class.
# The default value is: YES.
VERBATIM_HEADERS = YES
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
# compounds will be generated. Enable this if the project contains a lot of
# classes, structs, unions or interfaces.
# The default value is: YES.
ALPHABETICAL_INDEX = YES
# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
# which the alphabetical index list will be split.
# Minimum value: 1, maximum value: 20, default value: 5.
# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
COLS_IN_ALPHA_INDEX = 5
# In case all classes in a project start with a common prefix, all classes will
# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
# can be used to specify a prefix (or a list of prefixes) that should be ignored
# while generating the index headers.
# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
IGNORE_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the HTML output
#---------------------------------------------------------------------------
# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
# The default value is: YES.
GENERATE_HTML = YES
# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: html.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_OUTPUT = html
# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
# generated HTML page (for example: .htm, .php, .asp).
# The default value is: .html.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FILE_EXTENSION = .html
# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
# each generated HTML page. If the tag is left blank doxygen will generate a
# standard header.
#
# To get valid HTML, the header file must include any scripts and style sheets
# that doxygen needs, which depends on the configuration options used (e.g. the
# setting GENERATE_TREEVIEW). It is highly recommended to start with a default
# header using
# doxygen -w html new_header.html new_footer.html new_stylesheet.css
# YourConfigFile
# and then modify the file new_header.html. See also section "Doxygen usage"
# for information on how to generate the default header that doxygen normally
# uses.
# Note: The header is subject to change so you typically have to regenerate the
# default header when upgrading to a newer version of doxygen. For a description
# of the possible markers and block names see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_HEADER = header.html
# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
# generated HTML page. If the tag is left blank doxygen will generate a standard
# footer. See HTML_HEADER for more information on how to generate a default
# footer and what special commands can be used inside the footer. See also
# section "Doxygen usage" for information on how to generate the default footer
# that doxygen normally uses.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FOOTER =
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
# sheet that is used by each HTML page. It can be used to fine-tune the look of
# the HTML output. If left blank doxygen will generate a default style sheet.
# See also section "Doxygen usage" for information on how to generate the style
# sheet that doxygen normally uses.
# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
# it is more robust and this tag (HTML_STYLESHEET) will in the future become
# obsolete.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_STYLESHEET =
# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
# cascading style sheets that are included after the standard style sheets
# created by doxygen. Using this option one can overrule certain style aspects.
# This is preferred over using HTML_STYLESHEET since it does not replace the
# standard style sheet and is therefore more robust against future updates.
# Doxygen will copy the style sheet files to the output directory.
# Note: The order of the extra style sheet files is of importance (e.g. the last
# style sheet in the list overrules the setting of the previous ones in the
# list). For an example see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_EXTRA_STYLESHEET =
# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the HTML output directory. Note
# that these files will be copied to the base HTML output directory. Use the
# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
# files will be copied as-is; there are no commands or markers available.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_EXTRA_FILES =
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
# will adjust the colors in the style sheet and background images according to
# this color. Hue is specified as an angle on a colorwheel, see
# https://en.wikipedia.org/wiki/Hue for more information. For instance the value
# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
# purple, and 360 is red again.
# Minimum value: 0, maximum value: 359, default value: 220.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_HUE = 266
# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
# in the HTML output. For a value of 0 the output will use grayscales only. A
# value of 255 will produce the most vivid colors.
# Minimum value: 0, maximum value: 255, default value: 100.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_SAT = 255
# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
# luminance component of the colors in the HTML output. Values below 100
# gradually make the output lighter, whereas values above 100 make the output
# darker. The value divided by 100 is the actual gamma applied, so 80 represents
# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
# change the gamma.
# Minimum value: 40, maximum value: 240, default value: 80.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_GAMMA = 52
# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
# page will contain the date and time when the page was generated. Setting this
# to YES can help to show when doxygen was last run and thus if the
# documentation is up to date.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_TIMESTAMP = NO
# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML
# documentation will contain a main index with vertical navigation menus that
# are dynamically created via JavaScript. If disabled, the navigation index will
# consists of multiple levels of tabs that are statically embedded in every HTML
# page. Disable this option to support browsers that do not have JavaScript,
# like the Qt help browser.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_DYNAMIC_MENUS = YES
# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
# documentation will contain sections that can be hidden and shown after the
# page has loaded.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_DYNAMIC_SECTIONS = NO
# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
# shown in the various tree structured indices initially; the user can expand
# and collapse entries dynamically later on. Doxygen will expand the tree to
# such a level that at most the specified number of entries are visible (unless
# a fully collapsed tree already exceeds this amount). So setting the number of
# entries 1 will produce a full collapsed tree by default. 0 is a special value
# representing an infinite number of entries and will result in a full expanded
# tree by default.
# Minimum value: 0, maximum value: 9999, default value: 100.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_INDEX_NUM_ENTRIES = 100
# If the GENERATE_DOCSET tag is set to YES, additional index files will be
# generated that can be used as input for Apple's Xcode 3 integrated development
# environment (see: https://developer.apple.com/xcode/), introduced with OSX
# 10.5 (Leopard). To create a documentation set, doxygen will generate a
# Makefile in the HTML output directory. Running make will produce the docset in
# that directory and running make install will install the docset in
# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy
# genXcode/_index.html for more information.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_DOCSET = NO
# This tag determines the name of the docset feed. A documentation feed provides
# an umbrella under which multiple documentation sets from a single provider
# (such as a company or product suite) can be grouped.
# The default value is: Doxygen generated docs.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_FEEDNAME = "Doxygen generated docs"
# This tag specifies a string that should uniquely identify the documentation
# set bundle. This should be a reverse domain-name style string, e.g.
# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_BUNDLE_ID = org.doxygen.Project
# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
# the documentation publisher. This should be a reverse domain-name style
# string, e.g. com.mycompany.MyDocSet.documentation.
# The default value is: org.doxygen.Publisher.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_PUBLISHER_ID = org.doxygen.Publisher
# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
# The default value is: Publisher.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_PUBLISHER_NAME = Publisher
# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
# (see: https://www.microsoft.com/en-us/download/details.aspx?id=21138) on
# Windows.
#
# The HTML Help Workshop contains a compiler that can convert all HTML output
# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
# files are now used as the Windows 98 help format, and will replace the old
# Windows help format (.hlp) on all Windows platforms in the future. Compressed
# HTML files also contain an index, a table of contents, and you can search for
# words in the documentation. The HTML workshop also contains a viewer for
# compressed HTML files.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_HTMLHELP = NO
# The CHM_FILE tag can be used to specify the file name of the resulting .chm
# file. You can add a path in front of the file if the result should not be
# written to the html output directory.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_FILE =
# The HHC_LOCATION tag can be used to specify the location (absolute path
# including file name) of the HTML help compiler (hhc.exe). If non-empty,
# doxygen will try to run the HTML help compiler on the generated index.hhp.
# The file has to be specified with full path.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
HHC_LOCATION =
# The GENERATE_CHI flag controls if a separate .chi index file is generated
# (YES) or that it should be included in the master .chm file (NO).
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
GENERATE_CHI = NO
# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
# and project file content.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_INDEX_ENCODING =
# The BINARY_TOC flag controls whether a binary table of contents is generated
# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
# enables the Previous and Next buttons.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
BINARY_TOC = NO
# The TOC_EXPAND flag can be set to YES to add extra items for group members to
# the table of contents of the HTML help documentation and to the tree view.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
TOC_EXPAND = NO
# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
# (.qch) of the generated HTML documentation.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_QHP = NO
# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
# the file name of the resulting .qch file. The path specified is relative to
# the HTML output folder.
# This tag requires that the tag GENERATE_QHP is set to YES.
QCH_FILE =
# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
# Project output. For more information please see Qt Help Project / Namespace
# (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace).
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_NAMESPACE = org.doxygen.Project
# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
# Help Project output. For more information please see Qt Help Project / Virtual
# Folders (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-
# folders).
# The default value is: doc.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_VIRTUAL_FOLDER = doc
# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
# filter to add. For more information please see Qt Help Project / Custom
# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-
# filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_NAME =
# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
# custom filter to add. For more information please see Qt Help Project / Custom
# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-
# filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_ATTRS =
# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
# project's filter section matches. Qt Help Project / Filter Attributes (see:
# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_SECT_FILTER_ATTRS =
# The QHG_LOCATION tag can be used to specify the location of Qt's
# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
# generated .qhp file.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHG_LOCATION =
# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
# generated, together with the HTML files, they form an Eclipse help plugin. To
# install this plugin and make it available under the help contents menu in
# Eclipse, the contents of the directory containing the HTML and XML files needs
# to be copied into the plugins directory of eclipse. The name of the directory
# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
# After copying Eclipse needs to be restarted before the help appears.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_ECLIPSEHELP = NO
# A unique identifier for the Eclipse help plugin. When installing the plugin
# the directory name containing the HTML and XML files should also have this
# name. Each documentation set should have its own identifier.
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
ECLIPSE_DOC_ID = org.doxygen.Project
# If you want full control over the layout of the generated HTML pages it might
# be necessary to disable the index and replace it with your own. The
# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
# of each HTML page. A value of NO enables the index and the value YES disables
# it. Since the tabs in the index contain the same information as the navigation
# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
DISABLE_INDEX = NO
# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
# structure should be generated to display hierarchical information. If the tag
# value is set to YES, a side panel will be generated containing a tree-like
# index structure (just like the one that is generated for HTML Help). For this
# to work a browser that supports JavaScript, DHTML, CSS and frames is required
# (i.e. any modern browser). Windows users are probably better off using the
# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
# further fine-tune the look of the index. As an example, the default style
# sheet generated by doxygen has an example that shows how to put an image at
# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
# the same information as the tab index, you could consider setting
# DISABLE_INDEX to YES when enabling this option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_TREEVIEW = NO
# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
# doxygen will group on one line in the generated HTML documentation.
#
# Note that a value of 0 will completely suppress the enum values from appearing
# in the overview section.
# Minimum value: 0, maximum value: 20, default value: 4.
# This tag requires that the tag GENERATE_HTML is set to YES.
ENUM_VALUES_PER_LINE = 4
# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
# to set the initial width (in pixels) of the frame in which the tree is shown.
# Minimum value: 0, maximum value: 1500, default value: 250.
# This tag requires that the tag GENERATE_HTML is set to YES.
TREEVIEW_WIDTH = 250
# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
# external symbols imported via tag files in a separate window.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
EXT_LINKS_IN_WINDOW = NO
# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg
# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see
# https://inkscape.org) to generate formulas as SVG images instead of PNGs for
# the HTML output. These images will generally look nicer at scaled resolutions.
# Possible values are: png (the default) and svg (looks nicer but requires the
# pdf2svg tool).
# The default value is: png.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FORMULA_FORMAT = png
# Use this tag to change the font size of LaTeX formulas included as images in
# the HTML documentation. When you change the font size after a successful
# doxygen run you need to manually remove any form_*.png images from the HTML
# output directory to force them to be regenerated.
# Minimum value: 8, maximum value: 50, default value: 10.
# This tag requires that the tag GENERATE_HTML is set to YES.
FORMULA_FONTSIZE = 10
# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
# generated for formulas are transparent PNGs. Transparent PNGs are not
# supported properly for IE 6.0, but are supported on all modern browsers.
#
# Note that when changing this option you need to delete any form_*.png files in
# the HTML output directory before the changes have effect.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
FORMULA_TRANSPARENT = YES
# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands
# to create new LaTeX commands to be used in formulas as building blocks. See
# the section "Including formulas" for details.
FORMULA_MACROFILE =
# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
# https://www.mathjax.org) which uses client side JavaScript for the rendering
# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
# installed or if you want the formulas to look prettier in the HTML output. When
# enabled you may also need to install MathJax separately and configure the path
# to it using the MATHJAX_RELPATH option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
USE_MATHJAX = NO
# When MathJax is enabled you can set the default output format to be used for
# the MathJax output. See the MathJax site (see:
# http://docs.mathjax.org/en/latest/output.html) for more details.
# Possible values are: HTML-CSS (which is slower, but has the best
# compatibility), NativeMML (i.e. MathML) and SVG.
# The default value is: HTML-CSS.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_FORMAT = HTML-CSS
# When MathJax is enabled you need to specify the location relative to the HTML
# output directory using the MATHJAX_RELPATH option. The destination directory
# should contain the MathJax.js script. For instance, if the mathjax directory
# is located at the same level as the HTML output directory, then
# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
# Content Delivery Network so you can quickly see the result without installing
# MathJax. However, it is strongly recommended to install a local copy of
# MathJax from https://www.mathjax.org before deployment.
# The default value is: https://cdn.jsdelivr.net/npm/mathjax@2.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_RELPATH        = https://cdn.jsdelivr.net/npm/mathjax@2
# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
# extension names that should be enabled during MathJax rendering. For example
# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_EXTENSIONS =
# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
# of code that will be used on startup of the MathJax code. See the MathJax site
# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
# example see the documentation.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_CODEFILE =
# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
# the HTML output. The underlying search engine uses javascript and DHTML and
# should work on any modern browser. Note that when using HTML help
# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
# there is already a search function so this one should typically be disabled.
# For large projects the javascript based search engine can be slow, then
# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
# search using the keyboard; to jump to the search box use <access key> + S
# (what the <access key> is depends on the OS and browser, but it is typically
# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
# key> to jump into the search results window, the results can be navigated
# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
# the search. The filter options can be selected when the cursor is inside the
# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
# to select a filter and <Enter> or <escape> to activate or cancel the filter
# option.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
SEARCHENGINE = YES
# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
# implemented using a web server instead of a web client using JavaScript. There
# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
# setting. When disabled, doxygen will generate a PHP script for searching and
# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
# and searching needs to be provided by external tools. See the section
# "External Indexing and Searching" for details.
# The default value is: NO.
# This tag requires that the tag SEARCHENGINE is set to YES.
SERVER_BASED_SEARCH = NO
# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
# script for searching. Instead the search results are written to an XML file
# which needs to be processed by an external indexer. Doxygen will invoke an
# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
# search results.
#
# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see: https://xapian.org/).
#
# See the section "External Indexing and Searching" for details.
# The default value is: NO.
# This tag requires that the tag SEARCHENGINE is set to YES.
EXTERNAL_SEARCH = NO
# The SEARCHENGINE_URL should point to a search engine hosted by a web server
# which will return the search results when EXTERNAL_SEARCH is enabled.
#
# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see: https://xapian.org/). See the section "External Indexing and
# Searching" for details.
# This tag requires that the tag SEARCHENGINE is set to YES.
SEARCHENGINE_URL =
# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
# search data is written to a file for indexing by an external tool. With the
# SEARCHDATA_FILE tag the name of this file can be specified.
# The default file is: searchdata.xml.
# This tag requires that the tag SEARCHENGINE is set to YES.
SEARCHDATA_FILE = searchdata.xml
# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
# projects and redirect the results back to the right project.
# This tag requires that the tag SEARCHENGINE is set to YES.
EXTERNAL_SEARCH_ID =
# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
# projects other than the one defined by this configuration file, but that are
# all added to the same external search index. Each project needs to have a
# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of
# to a relative location where the documentation can be found. The format is:
# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
# This tag requires that the tag SEARCHENGINE is set to YES.
EXTRA_SEARCH_MAPPINGS =
#---------------------------------------------------------------------------
# Configuration options related to the LaTeX output
#---------------------------------------------------------------------------
# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
# The default value is: YES.
GENERATE_LATEX = NO
# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: latex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_OUTPUT = latex
# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
# invoked.
#
# Note that when not enabling USE_PDFLATEX the default is latex when enabling
# USE_PDFLATEX the default is pdflatex and when in the later case latex is
# chosen this is overwritten by pdflatex. For specific output languages the
# default can have been set differently, this depends on the implementation of
# the output language.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_CMD_NAME = latex
# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
# index for LaTeX.
# Note: This tag is used in the Makefile / make.bat.
# See also: LATEX_MAKEINDEX_CMD for the part in the generated output file
# (.tex).
# The default file is: makeindex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
MAKEINDEX_CMD_NAME = makeindex
# The LATEX_MAKEINDEX_CMD tag can be used to specify the command name to
# generate index for LaTeX. In case there is no backslash (\) as first character
# it will be automatically added in the LaTeX code.
# Note: This tag is used in the generated output file (.tex).
# See also: MAKEINDEX_CMD_NAME for the part in the Makefile / make.bat.
# The default value is: makeindex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_MAKEINDEX_CMD = makeindex
# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
# documents. This may be useful for small projects and may help to save some
# trees in general.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
COMPACT_LATEX = NO
# The PAPER_TYPE tag can be used to set the paper type that is used by the
# printer.
# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
# 14 inches) and executive (7.25 x 10.5 inches).
# The default value is: a4.
# This tag requires that the tag GENERATE_LATEX is set to YES.
PAPER_TYPE = a4
# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
# that should be included in the LaTeX output. The package can be specified just
# by its name or with the correct syntax as to be used with the LaTeX
# \usepackage command. To get the times font for instance you can specify :
# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
# To use the option intlimits with the amsmath package you can specify:
# EXTRA_PACKAGES=[intlimits]{amsmath}
# If left blank no extra packages will be included.
# This tag requires that the tag GENERATE_LATEX is set to YES.
EXTRA_PACKAGES =
# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
# generated LaTeX document. The header should contain everything until the first
# chapter. If it is left blank doxygen will generate a standard header. See
# section "Doxygen usage" for information on how to let doxygen write the
# default header to a separate file.
#
# Note: Only use a user-defined header if you know what you are doing! The
# following commands have a special meaning inside the header: $title,
# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
# string, for the replacement values of the other commands the user is referred
# to HTML_HEADER.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_HEADER =
# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
# generated LaTeX document. The footer should contain everything after the last
# chapter. If it is left blank doxygen will generate a standard footer. See
# LATEX_HEADER for more information on how to generate a default footer and what
# special commands can be used inside the footer.
#
# Note: Only use a user-defined footer if you know what you are doing!
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_FOOTER =
# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
# LaTeX style sheets that are included after the standard style sheets created
# by doxygen. Using this option one can overrule certain style aspects. Doxygen
# will copy the style sheet files to the output directory.
# Note: The order of the extra style sheet files is of importance (e.g. the last
# style sheet in the list overrules the setting of the previous ones in the
# list).
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_EXTRA_STYLESHEET =
# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the LATEX_OUTPUT output
# directory. Note that the files will be copied as-is; there are no commands or
# markers available.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_EXTRA_FILES =
# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
# contain links (just like the HTML output) instead of page references. This
# makes the output suitable for online browsing using a PDF viewer.
# The default value is: YES.
# This tag requires that the tag GENERATE_LATEX is set to YES.
PDF_HYPERLINKS = YES
# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
# the PDF file directly from the LaTeX files. Set this option to YES, to get a
# higher quality PDF documentation.
# The default value is: YES.
# This tag requires that the tag GENERATE_LATEX is set to YES.
USE_PDFLATEX = YES
# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
# command to the generated LaTeX files. This will instruct LaTeX to keep running
# if errors occur, instead of asking the user for help. This option is also used
# when generating formulas in HTML.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_BATCHMODE = NO
# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
# index chapters (such as File Index, Compound Index, etc.) in the output.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_HIDE_INDICES = NO
# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
# code with syntax highlighting in the LaTeX output.
#
# Note that which sources are shown also depends on other settings such as
# SOURCE_BROWSER.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_SOURCE_CODE = NO
# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
# bibliography, e.g. plainnat, or ieeetr. See
# https://en.wikipedia.org/wiki/BibTeX and \cite for more info.
# The default value is: plain.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_BIB_STYLE = plain
# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
# page will contain the date and time when the page was generated. Setting this
# to NO can help when comparing the output of multiple runs.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_TIMESTAMP = NO
# The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute)
# path from which the emoji images will be read. If a relative path is entered,
# it will be relative to the LATEX_OUTPUT directory. If left blank the
# LATEX_OUTPUT directory will be used.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_EMOJI_DIRECTORY =
#---------------------------------------------------------------------------
# Configuration options related to the RTF output
#---------------------------------------------------------------------------
# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
# RTF output is optimized for Word 97 and may not look too pretty with other RTF
# readers/editors.
# The default value is: NO.
GENERATE_RTF = NO
# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: rtf.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_OUTPUT = rtf
# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
# documents. This may be useful for small projects and may help to save some
# trees in general.
# The default value is: NO.
# This tag requires that the tag GENERATE_RTF is set to YES.
COMPACT_RTF = NO
# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
# contain hyperlink fields. The RTF file will contain links (just like the HTML
# output) instead of page references. This makes the output suitable for online
# browsing using Word or some other Word compatible readers that support those
# fields.
#
# Note: WordPad (write) and others do not support links.
# The default value is: NO.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_HYPERLINKS = NO
# Load stylesheet definitions from file. Syntax is similar to doxygen's
# configuration file, i.e. a series of assignments. You only have to provide
# replacements, missing definitions are set to their default value.
#
# See also section "Doxygen usage" for information on how to generate the
# default style sheet that doxygen normally uses.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_STYLESHEET_FILE =
# Set optional variables used in the generation of an RTF document. Syntax is
# similar to doxygen's configuration file. A template extensions file can be
# generated using doxygen -e rtf extensionFile.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_EXTENSIONS_FILE =
# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
# with syntax highlighting in the RTF output.
#
# Note that which sources are shown also depends on other settings such as
# SOURCE_BROWSER.
# The default value is: NO.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_SOURCE_CODE = NO
#---------------------------------------------------------------------------
# Configuration options related to the man page output
#---------------------------------------------------------------------------
# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
# classes and files.
# The default value is: NO.
GENERATE_MAN = NO
# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it. A directory man3 will be created inside the directory specified by
# MAN_OUTPUT.
# The default directory is: man.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_OUTPUT = man
# The MAN_EXTENSION tag determines the extension that is added to the generated
# man pages. In case the manual section does not start with a number, the number
# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
# optional.
# The default value is: .3.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_EXTENSION = .3
# The MAN_SUBDIR tag determines the name of the directory created within
# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
# MAN_EXTENSION with the initial . removed.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_SUBDIR =
# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
# will generate one additional man file for each entity documented in the real
# man page(s). These additional files only source the real man page, but without
# them the man command would be unable to find the correct page.
# The default value is: NO.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_LINKS = NO
#---------------------------------------------------------------------------
# Configuration options related to the XML output
#---------------------------------------------------------------------------
# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
# captures the structure of the code including all documentation.
# The default value is: NO.
GENERATE_XML = NO
# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: xml.
# This tag requires that the tag GENERATE_XML is set to YES.
XML_OUTPUT = xml
# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
# listings (including syntax highlighting and cross-referencing information) to
# the XML output. Note that enabling this will significantly increase the size
# of the XML output.
# The default value is: YES.
# This tag requires that the tag GENERATE_XML is set to YES.
XML_PROGRAMLISTING = YES
# If the XML_NS_MEMB_FILE_SCOPE tag is set to YES, doxygen will include
# namespace members in file scope as well, matching the HTML output.
# The default value is: NO.
# This tag requires that the tag GENERATE_XML is set to YES.
XML_NS_MEMB_FILE_SCOPE = NO
#---------------------------------------------------------------------------
# Configuration options related to the DOCBOOK output
#---------------------------------------------------------------------------
# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
# that can be used to generate PDF.
# The default value is: NO.
GENERATE_DOCBOOK = NO
# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
# front of it.
# The default directory is: docbook.
# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
DOCBOOK_OUTPUT = docbook
# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
# program listings (including syntax highlighting and cross-referencing
# information) to the DOCBOOK output. Note that enabling this will significantly
# increase the size of the DOCBOOK output.
# The default value is: NO.
# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
DOCBOOK_PROGRAMLISTING = NO
#---------------------------------------------------------------------------
# Configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
# AutoGen Definitions (see http://autogen.sourceforge.net/) file that captures
# the structure of the code including all documentation. Note that this feature
# is still experimental and incomplete at the moment.
# The default value is: NO.
GENERATE_AUTOGEN_DEF = NO
#---------------------------------------------------------------------------
# Configuration options related to the Perl module output
#---------------------------------------------------------------------------
# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
# file that captures the structure of the code including all documentation.
#
# Note that this feature is still experimental and incomplete at the moment.
# The default value is: NO.
GENERATE_PERLMOD = NO
# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
# output from the Perl module output.
# The default value is: NO.
# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_LATEX = NO
# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
# formatted so it can be parsed by a human reader. This is useful if you want to
# understand what is going on. On the other hand, if this tag is set to NO, the
# size of the Perl module output will be much smaller and Perl will parse it
# just the same.
# The default value is: YES.
# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_PRETTY = YES
# The names of the make variables in the generated doxyrules.make file are
# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
# so different doxyrules.make files included by the same Makefile don't
# overwrite each other's variables.
# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_MAKEVAR_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------
# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
# C-preprocessor directives found in the sources and include files.
# The default value is: YES.
ENABLE_PREPROCESSING = YES
# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
# in the source code. If set to NO, only conditional compilation will be
# performed. Macro expansion can be done in a controlled way by setting
# EXPAND_ONLY_PREDEF to YES.
# The default value is: NO.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
MACRO_EXPANSION = YES
# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
# the macro expansion is limited to the macros specified with the PREDEFINED and
# EXPAND_AS_DEFINED tags.
# The default value is: NO.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
EXPAND_ONLY_PREDEF = YES
# If the SEARCH_INCLUDES tag is set to YES, the include files in the
# INCLUDE_PATH will be searched if a #include is found.
# The default value is: YES.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
SEARCH_INCLUDES = YES
# The INCLUDE_PATH tag can be used to specify one or more directories that
# contain include files that are not input files but should be processed by the
# preprocessor.
# This tag requires that the tag SEARCH_INCLUDES is set to YES.
INCLUDE_PATH =
# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
# patterns (like *.h and *.hpp) to filter out the header-files in the
# directories. If left blank, the patterns specified with FILE_PATTERNS will be
# used.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
INCLUDE_FILE_PATTERNS =
# The PREDEFINED tag can be used to specify one or more macro names that are
# defined before the preprocessor is started (similar to the -D option of e.g.
# gcc). The argument of the tag is a list of macros of the form: name or
# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
# is assumed. To prevent a macro definition from being undefined via #undef or
# recursively expanded use the := operator instead of the = operator.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
PREDEFINED = __device__= \
__host__=
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
# tag can be used to specify a list of macro names that should be expanded. The
# macro definition that is found in the sources will be used. Use the PREDEFINED
# tag if you want to use a different macro definition that overrules the
# definition found in the source code.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
EXPAND_AS_DEFINED =
# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
# remove all references to function-like macros that are alone on a line, have
# an all uppercase name, and do not end with a semicolon. Such function macros
# are typically used for boiler-plate code, and will confuse the parser if not
# removed.
# The default value is: YES.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
SKIP_FUNCTION_MACROS = YES
#---------------------------------------------------------------------------
# Configuration options related to external references
#---------------------------------------------------------------------------
# The TAGFILES tag can be used to specify one or more tag files. For each tag
# file the location of the external documentation should be added. The format of
# a tag file without this location is as follows:
# TAGFILES = file1 file2 ...
# Adding location for the tag files is done as follows:
# TAGFILES = file1=loc1 "file2 = loc2" ...
# where loc1 and loc2 can be relative or absolute paths or URLs. See the
# section "Linking to external documentation" for more information about the use
# of tag files.
# Note: Each tag file must have a unique name (where the name does NOT include
# the path). If a tag file is not located in the directory in which doxygen is
# run, you must also specify the path to the tagfile here.
TAGFILES =
# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
# tag file that is based on the input files it reads. See section "Linking to
# external documentation" for more information about the usage of tag files.
GENERATE_TAGFILE =
# If the ALLEXTERNALS tag is set to YES, all external class will be listed in
# the class index. If set to NO, only the inherited external classes will be
# listed.
# The default value is: NO.
ALLEXTERNALS = NO
# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
# in the modules index. If set to NO, only the current project's groups will be
# listed.
# The default value is: YES.
EXTERNAL_GROUPS = YES
# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
# the related pages index. If set to NO, only the current project's pages will
# be listed.
# The default value is: YES.
EXTERNAL_PAGES = YES
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram
# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
# NO turns the diagrams off. Note that this option also works with HAVE_DOT
# disabled, but it is recommended to install and use dot, since it yields more
# powerful graphs.
# The default value is: YES.
CLASS_DIAGRAMS = YES
# You can include diagrams made with dia in doxygen documentation. Doxygen will
# then run dia to produce the diagram and insert it in the documentation. The
# DIA_PATH tag allows you to specify the directory where the dia binary resides.
# If left empty dia is assumed to be found in the default search path.
DIA_PATH =
# If set to YES the inheritance and collaboration graphs will hide inheritance
# and usage relations if the target is undocumented or is not a class.
# The default value is: YES.
HIDE_UNDOC_RELATIONS = YES
# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
# available from the path. This tool is part of Graphviz (see:
# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
# Bell Labs. The other options in this section have no effect if this option is
# set to NO
# The default value is: NO.
HAVE_DOT = NO
# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
# to run in parallel. When set to 0 doxygen will base this on the number of
# processors available in the system. You can set it explicitly to a value
# larger than 0 to get control over the balance between CPU load and processing
# speed.
# Minimum value: 0, maximum value: 32, default value: 0.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_NUM_THREADS = 0
# When you want a differently looking font in the dot files that doxygen
# generates you can specify the font name using DOT_FONTNAME. You need to make
# sure dot is able to find the font, which can be done by putting it in a
# standard location or by setting the DOTFONTPATH environment variable or by
# setting DOT_FONTPATH to the directory containing the font.
# The default value is: Helvetica.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTNAME = Helvetica
# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
# dot graphs.
# Minimum value: 4, maximum value: 24, default value: 10.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTSIZE = 10
# By default doxygen will tell dot to use the default font as specified with
# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
# the path where dot can find it using this tag.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTPATH =
# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
# each documented class showing the direct and indirect inheritance relations.
# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
CLASS_GRAPH = YES
# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
# graph for each documented class showing the direct and indirect implementation
# dependencies (inheritance, containment, and class references variables) of the
# class with other documented classes.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
COLLABORATION_GRAPH = YES
# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
# groups, showing the direct groups dependencies.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
GROUP_GRAPHS = YES
# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and
# collaboration diagrams in a style similar to the OMG's Unified Modeling
# Language.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
UML_LOOK = NO
# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
# class node. If there are many fields or methods and many nodes the graph may
# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
# number of items for each type to make the size more manageable. Set this to 0
# for no limit. Note that the threshold may be exceeded by 50% before the limit
# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
# but if the number exceeds 15, the total amount of fields shown is limited to
# 10.
# Minimum value: 0, maximum value: 100, default value: 10.
# This tag requires that the tag HAVE_DOT is set to YES.
UML_LIMIT_NUM_FIELDS = 10
# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
# collaboration graphs will show the relations between templates and their
# instances.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
TEMPLATE_RELATIONS = NO
# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
# YES then doxygen will generate a graph for each documented file showing the
# direct and indirect include dependencies of the file with other documented
# files.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
INCLUDE_GRAPH = YES
# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
# set to YES then doxygen will generate a graph for each documented file showing
# the direct and indirect include dependencies of the file with other documented
# files.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
INCLUDED_BY_GRAPH = YES
# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
# dependency graph for every global function or class method.
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable call graphs for selected
# functions only using the \callgraph command. Disabling a call graph can be
# accomplished by means of the command \hidecallgraph.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
CALL_GRAPH = NO
# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
# dependency graph for every global function or class method.
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable caller graphs for selected
# functions only using the \callergraph command. Disabling a caller graph can be
# accomplished by means of the command \hidecallergraph.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
CALLER_GRAPH = NO
# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show the
# graphical hierarchy of all classes instead of a textual one.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
GRAPHICAL_HIERARCHY = YES
# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
# dependencies a directory has on other directories in a graphical way. The
# dependency relations are determined by the #include relations between the
# files in the directories.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
DIRECTORY_GRAPH = YES
# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
# generated by dot. For an explanation of the image formats see the section
# output formats in the documentation of the dot tool (Graphviz (see:
# http://www.graphviz.org/)).
# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
# to make the SVG files visible in IE 9+ (other browsers do not have this
# requirement).
# Possible values are: png, jpg, gif, svg, png:gd, png:gd:gd, png:cairo,
# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
# png:gdiplus:gdiplus.
# The default value is: png.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_IMAGE_FORMAT = png
# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
# enable generation of interactive SVG images that allow zooming and panning.
#
# Note that this requires a modern browser other than Internet Explorer. Tested
# and working are Firefox, Chrome, Safari, and Opera.
# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
# the SVG files visible. Older versions of IE do not have SVG support.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
INTERACTIVE_SVG = NO
# The DOT_PATH tag can be used to specify the path where the dot tool can be
# found. If left blank, it is assumed the dot tool can be found in the path.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_PATH =
# The DOTFILE_DIRS tag can be used to specify one or more directories that
# contain dot files that are included in the documentation (see the \dotfile
# command).
# This tag requires that the tag HAVE_DOT is set to YES.
DOTFILE_DIRS =
# The MSCFILE_DIRS tag can be used to specify one or more directories that
# contain msc files that are included in the documentation (see the \mscfile
# command).
MSCFILE_DIRS =
# The DIAFILE_DIRS tag can be used to specify one or more directories that
# contain dia files that are included in the documentation (see the \diafile
# command).
DIAFILE_DIRS =
# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
# path where java can find the plantuml.jar file. If left blank, it is assumed
# PlantUML is not used or called during a preprocessing step. Doxygen will
# generate a warning when it encounters a \startuml command in this case and
# will not generate output for the diagram.
PLANTUML_JAR_PATH =
# When using plantuml, the PLANTUML_CFG_FILE tag can be used to specify a
# configuration file for plantuml.
PLANTUML_CFG_FILE =
# When using plantuml, the specified paths are searched for files specified by
# the !include statement in a plantuml block.
PLANTUML_INCLUDE_PATH =
# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
# that will be shown in the graph. If the number of nodes in a graph becomes
# larger than this value, doxygen will truncate the graph, which is visualized
# by representing a node as a red box. Note that if the number of direct
# children of the root node in a graph is already larger than
# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
# Minimum value: 0, maximum value: 10000, default value: 50.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_GRAPH_MAX_NODES = 50
# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
# generated by dot. A depth value of 3 means that only nodes reachable from the
# root by following a path via at most 3 edges will be shown. Nodes that lay
# further from the root node will be omitted. Note that setting this option to 1
# or 2 may greatly reduce the computation time needed for large code bases. Also
# note that the size of a graph can be further restricted by
# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
# Minimum value: 0, maximum value: 1000, default value: 0.
# This tag requires that the tag HAVE_DOT is set to YES.
MAX_DOT_GRAPH_DEPTH = 0
# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
# background. This is disabled by default, because dot on Windows does not seem
# to support this out of the box.
#
# Warning: Depending on the platform used, enabling this option may lead to
# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
# read).
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_TRANSPARENT = NO
# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
# files in one run (i.e. multiple -o and -T options on the command line). This
# makes dot run faster, but since only newer versions of dot (>1.8.10) support
# this, this feature is disabled by default.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_MULTI_TARGETS = NO
# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
# explaining the meaning of the various boxes and arrows in the dot generated
# graphs.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
GENERATE_LEGEND = YES
# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot
# files that are used to generate the various graphs.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_CLEANUP = YES
| 0 |
rapidsai_public_repos/kvikio/cpp
|
rapidsai_public_repos/kvikio/cpp/doxygen/header.html
|
<!-- HTML header for doxygen 1.8.20-->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "https://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen $doxygenversion"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
<link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="$relpath^jquery.js"></script>
<script type="text/javascript" src="$relpath^dynsections.js"></script>
$treeview
$search
$mathjax
<link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" />
$extrastylesheet
<!-- RAPIDS CUSTOM JS & CSS: START, Please add these two lines back after every version upgrade -->
<script defer src="https://docs.rapids.ai/assets/js/custom.js"></script>
<link rel="stylesheet" href="https://docs.rapids.ai/assets/css/custom.css">
<!-- RAPIDS CUSTOM JS & CSS: END -->
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<!--BEGIN TITLEAREA-->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
<tbody>
<tr style="height: 56px;">
<!--BEGIN PROJECT_LOGO-->
<td id="projectlogo"><img alt="Logo" src="$relpath^$projectlogo"/></td>
<!--END PROJECT_LOGO-->
<!--BEGIN PROJECT_NAME-->
<td id="projectalign" style="padding-left: 0.5em;">
<div id="projectname">$projectname
<!--BEGIN PROJECT_NUMBER--> <span id="projectnumber">$projectnumber</span><!--END PROJECT_NUMBER-->
</div>
<!--BEGIN PROJECT_BRIEF--><div id="projectbrief">$projectbrief</div><!--END PROJECT_BRIEF-->
</td>
<!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME-->
<!--BEGIN PROJECT_BRIEF-->
<td style="padding-left: 0.5em;">
<div id="projectbrief">$projectbrief</div>
</td>
<!--END PROJECT_BRIEF-->
<!--END !PROJECT_NAME-->
<!--BEGIN DISABLE_INDEX-->
<!--BEGIN SEARCHENGINE-->
<td>$searchbox</td>
<!--END SEARCHENGINE-->
<!--END DISABLE_INDEX-->
</tr>
</tbody>
</table>
</div>
<!--END TITLEAREA-->
<!-- end header part -->
| 0 |
rapidsai_public_repos/kvikio/cpp
|
rapidsai_public_repos/kvikio/cpp/examples/CMakeLists.txt
|
# =============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================

# Install location (relative to CMAKE_INSTALL_PREFIX) for the example/test binaries.
set(TEST_INSTALL_PATH bin/tests/libkvikio)
set(TEST_NAME BASIC_IO_TEST)

# Basic I/O example: exercises the KvikIO C++ API against the CUDA runtime.
# NOTE: use ${TEST_NAME} consistently so renaming the target requires a single edit.
add_executable(${TEST_NAME} basic_io.cpp)

# Let the installed binary locate libkvikio relative to its own location.
set_target_properties(${TEST_NAME} PROPERTIES INSTALL_RPATH "\$ORIGIN/../../lib")

target_include_directories(${TEST_NAME} PRIVATE ../include ${cuFile_INCLUDE_DIRS})
target_link_libraries(${TEST_NAME} PRIVATE kvikio CUDA::cudart)

if(CMAKE_COMPILER_IS_GNUCXX)
  # Strict warnings for GCC; unknown pragmas are tolerated (e.g. NVCC-only pragmas).
  set(KVIKIO_CXX_FLAGS "-Wall;-Werror;-Wno-unknown-pragmas")
  target_compile_options(${TEST_NAME} PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:${KVIKIO_CXX_FLAGS}>")
endif()

# Installed only when the "testing" component is requested; excluded from the default install.
install(
  TARGETS ${TEST_NAME}
  COMPONENT testing
  DESTINATION ${TEST_INSTALL_PATH}
  EXCLUDE_FROM_ALL
)
| 0 |
rapidsai_public_repos/kvikio/cpp
|
rapidsai_public_repos/kvikio/cpp/examples/basic_io.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdlib>
#include <iostream>

#include <cuda_runtime_api.h>

#include <kvikio/batch.hpp>
#include <kvikio/buffer.hpp>
#include <kvikio/defaults.hpp>
#include <kvikio/driver.hpp>
#include <kvikio/error.hpp>
#include <kvikio/file_handle.hpp>
using namespace std;
/**
 * @brief Terminate the example with a failure status if `condition` is false.
 *
 * Fixes: error text now goes to stderr instead of stdout, and the process
 * exits with the portable EXIT_FAILURE instead of `exit(-1)` (whose status
 * is implementation-defined after wrapping).
 */
void check(bool condition)
{
  if (!condition) {
    std::cerr << "Error" << std::endl;
    std::exit(EXIT_FAILURE);
  }
}
constexpr int NELEM      = 1024;                 // Number of elements used throughout the test
constexpr int SIZE       = NELEM * sizeof(int);  // Size of the memory allocations (in bytes)
constexpr int LARGE_SIZE = 8 * SIZE;             // LARGE SIZE to test partial submit (in bytes)
int main()
{
std::size_t io_size = SIZE;
check(cudaSetDevice(0) == cudaSuccess);
cout << "KvikIO defaults: " << endl;
if (kvikio::defaults::compat_mode()) {
cout << " Compatibility mode: enabled" << endl;
} else {
kvikio::DriverInitializer manual_init_driver;
cout << " Compatibility mode: disabled" << endl;
kvikio::DriverProperties props;
cout << "DriverProperties: " << endl;
cout << " nvfs version: " << props.get_nvfs_major_version() << "."
<< props.get_nvfs_minor_version() << endl;
cout << " Allow compatibility mode: " << std::boolalpha << props.get_nvfs_allow_compat_mode()
<< endl;
cout << " Pool mode - enabled: " << std::boolalpha << props.get_nvfs_poll_mode()
<< ", threshold: " << props.get_nvfs_poll_thresh_size() << " kb" << endl;
cout << " Max pinned memory: " << props.get_max_pinned_memory_size() << " kb" << endl;
cout << " Max batch IO size: " << props.get_max_batch_io_size() << endl;
}
int* a{};
check(cudaHostAlloc((void**)&a, SIZE, cudaHostAllocDefault) == cudaSuccess);
for (int i = 0; i < NELEM; ++i) {
a[i] = i;
}
int* b = (int*)malloc(SIZE);
void* a_dev = nullptr;
void* b_dev = nullptr;
void* c_dev = nullptr;
check(cudaMalloc(&a_dev, SIZE) == cudaSuccess);
check(cudaMalloc(&b_dev, SIZE) == cudaSuccess);
check(cudaMalloc(&c_dev, SIZE) == cudaSuccess);
check(kvikio::is_host_memory(a) == true);
check(kvikio::is_host_memory(b) == true);
check(kvikio::is_host_memory(a_dev) == false);
check(kvikio::is_host_memory(b_dev) == false);
check(kvikio::is_host_memory(c_dev) == false);
{
kvikio::FileHandle f("/tmp/test-file", "w");
check(cudaMemcpy(a_dev, a, SIZE, cudaMemcpyHostToDevice) == cudaSuccess);
size_t written = f.pwrite(a_dev, SIZE, 0, 1).get();
check(written == SIZE);
check(written == f.nbytes());
cout << "Write: " << written << endl;
}
{
kvikio::FileHandle f("/tmp/test-file", "r");
size_t read = f.pread(b_dev, SIZE, 0, 1).get();
check(read == SIZE);
check(read == f.nbytes());
cout << "Read: " << read << endl;
check(cudaMemcpy(b, b_dev, SIZE, cudaMemcpyDeviceToHost) == cudaSuccess);
for (int i = 0; i < NELEM; ++i) {
check(a[i] == b[i]);
}
}
kvikio::defaults::thread_pool_nthreads_reset(16);
{
kvikio::FileHandle f("/tmp/test-file", "w");
size_t written = f.pwrite(a_dev, SIZE).get();
check(written == SIZE);
check(written == f.nbytes());
cout << "Parallel write (" << kvikio::defaults::thread_pool_nthreads()
<< " threads): " << written << endl;
}
{
kvikio::FileHandle f("/tmp/test-file", "r");
size_t read = f.pread(b_dev, SIZE, 0).get();
cout << "Parallel write (" << kvikio::defaults::thread_pool_nthreads() << " threads): " << read
<< endl;
check(cudaMemcpy(b, b_dev, SIZE, cudaMemcpyDeviceToHost) == cudaSuccess);
for (int i = 0; i < NELEM; ++i) {
check(a[i] == b[i]);
}
}
{
kvikio::FileHandle f("/tmp/test-file", "r+", kvikio::FileHandle::m644);
kvikio::buffer_register(c_dev, SIZE);
size_t read = f.pread(c_dev, SIZE).get();
check(read == SIZE);
check(read == f.nbytes());
kvikio::buffer_deregister(c_dev);
cout << "Read buffer registered data: " << read << endl;
}
{
kvikio::FileHandle f("/tmp/test-file", "w");
size_t written = f.pwrite(a, SIZE).get();
check(written == SIZE);
check(written == f.nbytes());
cout << "Parallel POSIX write (" << kvikio::defaults::thread_pool_nthreads()
<< " threads): " << written << endl;
}
{
kvikio::FileHandle f("/tmp/test-file", "r");
size_t read = f.pread(b, SIZE).get();
check(read == SIZE);
check(read == f.nbytes());
for (int i = 0; i < NELEM; ++i) {
check(a[i] == b[i]);
}
cout << "Parallel POSIX read (" << kvikio::defaults::thread_pool_nthreads()
<< " threads): " << read << endl;
}
if (kvikio::is_batch_and_stream_available() && !kvikio::defaults::compat_mode()) {
// Here we use the batch API to read "/tmp/test-file" into `b_dev` by
// submitting 4 batch operations.
constexpr int num_ops_in_batch = 4;
constexpr int batchsize = SIZE / num_ops_in_batch;
kvikio::DriverProperties props;
check(num_ops_in_batch < props.get_max_batch_io_size());
{
// We open the file as usual.
kvikio::FileHandle f("/tmp/test-file", "r");
// Then we create a batch
auto batch = kvikio::BatchHandle(num_ops_in_batch);
// And submit 4 operations each with its own offset
std::vector<kvikio::BatchOp> ops;
for (int i = 0; i < num_ops_in_batch; ++i) {
ops.push_back(kvikio::BatchOp{.file_handle = f,
.devPtr_base = b_dev,
.file_offset = i * batchsize,
.devPtr_offset = i * batchsize,
.size = batchsize,
.opcode = CUFILE_READ});
}
batch.submit(ops);
// Finally, we wait on all 4 operations to be finished and check the result
auto statuses = batch.status(num_ops_in_batch, num_ops_in_batch);
check(statuses.size() == num_ops_in_batch);
size_t total_read = 0;
for (auto status : statuses) {
check(status.status == CUFILE_COMPLETE);
check(status.ret == batchsize);
total_read += status.ret;
}
check(cudaMemcpy(b, b_dev, SIZE, cudaMemcpyDeviceToHost) == cudaSuccess);
for (int i = 0; i < NELEM; ++i) {
check(a[i] == b[i]);
}
cout << "Batch read using 4 operations: " << total_read << endl;
batch.submit(ops);
batch.cancel();
statuses = batch.status(num_ops_in_batch, num_ops_in_batch);
check(statuses.empty());
cout << "Batch canceling of all 4 operations" << endl;
}
} else {
cout << "The batch API isn't available, requires CUDA 12.2+" << endl;
}
{
cout << "Performing async I/O using by-reference arguments" << endl;
off_t f_off{0};
off_t d_off{0};
// Notice, we have to allocate the `bytes_done_p` argument on the heap and set it to 0.
ssize_t* bytes_done_p{};
check(cudaHostAlloc((void**)&bytes_done_p, SIZE, cudaHostAllocDefault) == cudaSuccess);
*bytes_done_p = 0;
// Let's create a new stream and submit an async write
CUstream stream{};
check(cudaStreamCreate(&stream) == cudaSuccess);
kvikio::FileHandle f_handle("/tmp/test-file", "w+");
check(cudaMemcpyAsync(a_dev, a, SIZE, cudaMemcpyHostToDevice, stream) == cudaSuccess);
f_handle.write_async(a_dev, &io_size, &f_off, &d_off, bytes_done_p, stream);
// After synchronizing `stream`, we can read the number of bytes written
check(cudaStreamSynchronize(stream) == cudaSuccess);
// Note, `*bytes_done_p` might be negative, which indicate an IO error thus we
// use `CUFILE_CHECK_STREAM_IO` to check for errors.
CUFILE_CHECK_STREAM_IO(bytes_done_p);
check(*bytes_done_p == SIZE);
cout << "File async write: " << *bytes_done_p << endl;
// Let's async read the data back into device memory
*bytes_done_p = 0;
f_handle.read_async(c_dev, &io_size, &f_off, &d_off, bytes_done_p, stream);
check(cudaStreamSynchronize(stream) == cudaSuccess);
CUFILE_CHECK_STREAM_IO(bytes_done_p);
check(*bytes_done_p == SIZE);
cout << "File async read: " << *bytes_done_p << endl;
check(cudaFreeHost((void*)bytes_done_p) == cudaSuccess);
}
{
cout << "Performing async I/O using by-value arguments" << endl;
// Let's create a new stream and submit an async write
CUstream stream{};
check(cudaStreamCreate(&stream) == cudaSuccess);
kvikio::FileHandle f_handle("/tmp/test-file", "w+");
check(cudaMemcpyAsync(a_dev, a, SIZE, cudaMemcpyHostToDevice, stream) == cudaSuccess);
// Notice, we get a handle `res`, which will synchronize the CUDA stream on destruction
kvikio::StreamFuture res = f_handle.write_async(a_dev, SIZE, 0, 0, stream);
// But we can also trigger the synchronization and get the bytes written by calling
// `check_bytes_done()`.
check(res.check_bytes_done() == SIZE);
cout << "File async write: " << res.check_bytes_done() << endl;
// Let's async read the data back into device memory
res = f_handle.read_async(c_dev, SIZE, 0, 0, stream);
check(res.check_bytes_done() == SIZE);
cout << "File async read: " << res.check_bytes_done() << endl;
}
}
| 0 |
rapidsai_public_repos/kvikio/cpp/examples
|
rapidsai_public_repos/kvikio/cpp/examples/downstream/downstream_example.cpp
|
#include <iostream>
#include <kvikio/defaults.hpp>
#include <kvikio/driver.hpp>
using namespace std;
/// Abort the example (printing "Error") when `condition` does not hold.
void check(bool condition)
{
  if (condition) { return; }
  std::cout << "Error" << std::endl;
  exit(-1);
}
/**
 * @brief Print the KvikIO/cuFile configuration of the current system.
 *
 * In compatibility mode no driver is touched; otherwise the cuFile driver is
 * initialized (and closed when the scope ends) and its properties printed.
 */
int main()
{
  cout << "KvikIO defaults: " << endl;
  if (kvikio::defaults::compat_mode()) {
    cout << " Compatibility mode: enabled" << endl;
  } else {
    // Opens the cuFile driver; closed again when this scope exits.
    kvikio::DriverInitializer manual_init_driver;
    cout << " Compatibility mode: disabled" << endl;
    kvikio::DriverProperties props;
    cout << "DriverProperties: " << endl;
    cout << " Version: " << props.get_nvfs_major_version() << "." << props.get_nvfs_minor_version()
         << endl;
    cout << " Allow compatibility mode: " << std::boolalpha << props.get_nvfs_allow_compat_mode()
         << endl;
    cout << " Pool mode - enabled: " << std::boolalpha << props.get_nvfs_poll_mode()
         << ", threshold: " << props.get_nvfs_poll_thresh_size() << " kb" << endl;
    cout << " Max pinned memory: " << props.get_max_pinned_memory_size() << " kb" << endl;
  }
}
| 0 |
rapidsai_public_repos/kvikio/cpp/examples
|
rapidsai_public_repos/kvikio/cpp/examples/downstream/CMakeLists.txt
|
# =============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Minimal example of consuming KvikIO from a downstream CMake project via CPM.
cmake_minimum_required(VERSION 3.26.4 FATAL_ERROR)

project(
  KvikIODownstreamExample
  VERSION 1.00
  LANGUAGES CXX
)

# Get CPM see <https://github.com/cpm-cmake/CPM.cmake>
include(cmake/get_cpm.cmake)

# Get KvikIO see <https://github.com/rapidsai/kvikio>
include(cmake/get_kvikio.cmake)

add_executable(downstream_example downstream_example.cpp)

# Notice, even though KvikIO is a header-only library, we link to it here. Linking to
# `kvikio::kvikio` makes CMake include the headers of KvikIO when building.
target_link_libraries(downstream_example PRIVATE kvikio::kvikio)
| 0 |
rapidsai_public_repos/kvikio/cpp/examples/downstream
|
rapidsai_public_repos/kvikio/cpp/examples/downstream/cmake/get_cpm.cmake
|
# Bootstrap CPM.cmake: reuse a cached copy when possible, otherwise download it.
set(CPM_DOWNLOAD_VERSION 0.35.5)

if(CPM_SOURCE_CACHE)
  # Expand relative path. This is important if the provided path contains a tilde (~)
  get_filename_component(CPM_SOURCE_CACHE ${CPM_SOURCE_CACHE} ABSOLUTE)
  set(CPM_DOWNLOAD_LOCATION "${CPM_SOURCE_CACHE}/cpm/CPM_${CPM_DOWNLOAD_VERSION}.cmake")
elseif(DEFINED ENV{CPM_SOURCE_CACHE})
  set(CPM_DOWNLOAD_LOCATION "$ENV{CPM_SOURCE_CACHE}/cpm/CPM_${CPM_DOWNLOAD_VERSION}.cmake")
else()
  set(CPM_DOWNLOAD_LOCATION "${CMAKE_BINARY_DIR}/cmake/CPM_${CPM_DOWNLOAD_VERSION}.cmake")
endif()

if(NOT (EXISTS ${CPM_DOWNLOAD_LOCATION}))
  message(STATUS "Downloading CPM.cmake to ${CPM_DOWNLOAD_LOCATION}")
  # Security fix: verify the TLS certificate of the download instead of
  # accepting any HTTPS endpoint silently.
  file(
    DOWNLOAD
    https://github.com/cpm-cmake/CPM.cmake/releases/download/v${CPM_DOWNLOAD_VERSION}/CPM.cmake
    ${CPM_DOWNLOAD_LOCATION}
    TLS_VERIFY ON
  )
endif()

include(${CPM_DOWNLOAD_LOCATION})
| 0 |
rapidsai_public_repos/kvikio/cpp/examples/downstream
|
rapidsai_public_repos/kvikio/cpp/examples/downstream/cmake/get_kvikio.cmake
|
# =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Use CPM to fetch KvikIO, which makes `kvikio::kvikio` available for `target_link_libraries`
# Fetch KvikIO via CPM. CPMFindPackage() first tries a regular
# find_package(KvikIO ${MIN_VERSION}); only if that fails does it clone the
# `branch-${MIN_VERSION}` branch (shallow) and build from the `cpp` subdir.
# Examples are disabled to avoid recursively building this very example.
function(find_and_configure_kvikio MIN_VERSION)
  CPMFindPackage(
    NAME KvikIO
    VERSION ${MIN_VERSION}
    GIT_REPOSITORY
    https://github.com/rapidsai/kvikio.git
    GIT_TAG branch-${MIN_VERSION}
    GIT_SHALLOW
    TRUE
    SOURCE_SUBDIR
    cpp
    OPTIONS "KvikIO_BUILD_EXAMPLES OFF"
  )
endfunction()

# NOTE(review): pinned to 22.10 while other files in this repo reference
# 23.12 -- confirm whether this example should track the current branch.
find_and_configure_kvikio("22.10")
| 0 |
rapidsai_public_repos/kvikio
|
rapidsai_public_repos/kvikio/legate/pyproject.toml
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
[build-system]
build-backend = "setuptools.build_meta"
requires = [
"cmake>=3.26.4",
"cython>=3.0.0",
"ninja",
"scikit-build>=0.13.1",
"setuptools",
"wheel",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../dependencies.yaml and run `rapids-dependency-file-generator`.
[project]
name = "legate_kvikio"
version = "23.12.00"
description = "KvikIO - GPUDirect Storage"
readme = { file = "README.md", content-type = "text/markdown" }
authors = [
{ name = "NVIDIA Corporation" },
]
license = { text = "Apache 2.0" }
requires-python = ">=3.9"
dependencies = [
"cupy-cuda11x>=12.0.0",
"numcodecs <0.12.0",
"numpy>=1.21",
"packaging",
"zarr",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../dependencies.yaml and run `rapids-dependency-file-generator`.
classifiers = [
"Intended Audience :: Developers",
"Topic :: Database",
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
]
[project.optional-dependencies]
test = [
"cuda-python>=11.7.1,<12.0a0",
"dask>=2022.05.2",
"distributed>=2022.05.2",
"pytest",
"pytest-cov",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../dependencies.yaml and run `rapids-dependency-file-generator`.
[project.urls]
Homepage = "https://github.com/rapidsai/kvikio"
[tool.black]
line-length = 88
target-version = ["py39"]
include = '\.py?$'
exclude = '''
/(
thirdparty |
\.eggs |
\.git |
\.hg |
\.mypy_cache |
\.tox |
\.venv |
_build |
buck-out |
build |
dist |
_skbuild
)/
'''
[tool.isort]
line_length = 88
multi_line_output = 3
include_trailing_comma = true
force_grid_wrap = 0
combine_as_imports = true
order_by_type = true
known_first_party = [
"kvikio",
"legate_kvikio",
]
default_section = "THIRDPARTY"
sections = [
"FUTURE",
"STDLIB",
"THIRDPARTY",
"FIRSTPARTY",
"LOCALFOLDER",
]
skip = [
"thirdparty",
".eggs",
".git",
".hg",
".mypy_cache",
".tox",
".venv",
"_build",
"buck-out",
"build",
"dist",
"__init__.py",
]
[tool.mypy]
ignore_missing_imports = true
| 0 |
rapidsai_public_repos/kvikio
|
rapidsai_public_repos/kvikio/legate/CMakeLists.txt
|
# =============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
cmake_minimum_required(VERSION 3.26.4 FATAL_ERROR)

project(
  LegateKvikIO
  # Consistency fix: keep in sync with `version = "23.12.00"` in
  # legate/pyproject.toml (this was lagging behind at 23.04.00).
  VERSION 23.12.00
  LANGUAGES C CXX
)

# This is for convenience only when doing editable builds to avoid setting the flag
if(NOT LegateKvikIO_ROOT)
  set(LegateKvikIO_ROOT ${CMAKE_SOURCE_DIR}/build)
endif()

set(BUILD_SHARED_LIBS ON)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

# We always want to build our bindings, so we trick the legate wrappers to never search for an
# installed version which is currently deduced by seeing if it is invoked via scikit-build. See
# <https://github.com/nv-legate/legate.core/pull/645>
find_package(legate_core REQUIRED)
set(SKBUILD OFF)
legate_add_cpp_subdirectory(cpp TARGET legate_kvikio EXPORT legate_kvikio-export)
legate_default_python_install(legate_kvikio EXPORT legate_kvikio-export)

# Generates `install_info.py`
legate_add_cffi(${CMAKE_SOURCE_DIR}/cpp/task_opcodes.hpp TARGET legate_kvikio)
| 0 |
rapidsai_public_repos/kvikio
|
rapidsai_public_repos/kvikio/legate/setup.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import os
from pathlib import Path

from setuptools import find_packages
from skbuild import setup

import legate.install_info as lg_install_info

# Locate the installed legate.core so CMake can find its package config.
legate_dir = Path(lg_install_info.libpath).parent.as_posix()

cmake_flags = [
    f"-Dlegate_core_ROOT:STRING={legate_dir}",
]

# Bugfix: append to any configure options the user already exported instead of
# silently clobbering them.
_existing = os.environ.get("SKBUILD_CONFIGURE_OPTIONS", "")
os.environ["SKBUILD_CONFIGURE_OPTIONS"] = " ".join([_existing, *cmake_flags]).strip()

setup(
    packages=find_packages(exclude=["tests*"]),
    include_package_data=True,
    zip_safe=False,
)
| 0 |
rapidsai_public_repos/kvikio/legate
|
rapidsai_public_repos/kvikio/legate/tests/test_kerchunk.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import math
import numpy as np
import pytest
from numpy.testing import assert_array_equal
hdf5_read = pytest.importorskip("legate_kvikio.kerchunk").hdf5_read
num = pytest.importorskip("cunumeric")
# Shared parametrization: (array shape, HDF5 chunk shape) pairs, including
# shapes that are not whole multiples of the chunk shape.
shape_chunks = (
    "shape,chunks",
    [
        ((2,), (2,)),
        ((5,), (2,)),
        ((4, 2), (2, 2)),
        ((2, 4), (2, 2)),
        ((2, 3), (2, 2)),
        ((5, 4, 3, 2), (2, 2, 2, 2)),
    ],
)


@pytest.mark.parametrize(*shape_chunks)
@pytest.mark.parametrize("dtype", ["u1", "u8", "f8"])
def test_hdf5_read_array(tmp_path, shape, chunks, dtype):
    """Write a chunked HDF5 dataset with h5py and read it back via hdf5_read."""
    h5py = pytest.importorskip("h5py")
    filename = tmp_path / "test-file.hdf5"

    expected = np.arange(math.prod(shape), dtype=dtype).reshape(shape)
    with h5py.File(filename, "w") as h5f:
        h5f.create_dataset("mydataset", chunks=chunks, data=expected)

    actual = hdf5_read(filename, dataset_name="mydataset")
    assert_array_equal(expected, actual)
| 0 |
rapidsai_public_repos/kvikio/legate
|
rapidsai_public_repos/kvikio/legate/tests/test_basic_io.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import math
import pytest
from legate.core import get_legate_runtime
from legate_kvikio import CuFile
read_tiles = pytest.importorskip("legate_kvikio.tile").read_tiles
write_tiles = pytest.importorskip("legate_kvikio.tile").write_tiles
num = pytest.importorskip("cunumeric")
def fence(*, block: bool):
    """Shorthand for a Legate fence"""
    get_legate_runtime().issue_execution_fence(block=block)


@pytest.mark.parametrize("size", [1, 10, 100, 1000, 1024, 4096, 4096 * 10])
def test_read_write(tmp_path, size):
    """Test basic read/write"""
    filename = tmp_path / "test-file"
    a = num.arange(size)
    f = CuFile(filename, "w")
    f.write(a)
    assert not f.closed
    # Block until the deferred write tasks have executed before touching the file.
    fence(block=True)

    # Try to read file opened in write-only mode
    with pytest.raises(ValueError, match="Cannot read a file opened with flags"):
        f.read(a)

    # Close file
    f.close()
    assert f.closed

    # Read file into a new array and compare
    b = num.empty_like(a)
    f = CuFile(filename, "r")
    f.read(b)
    assert all(a == b)


def test_file_handle_context(tmp_path):
    """Open a CuFile in a context"""
    filename = tmp_path / "test-file"
    a = num.arange(200)
    b = num.empty_like(a)
    with CuFile(filename, "w+") as f:
        assert not f.closed
        f.write(a)
        # A non-blocking fence suffices: it orders the write before the read
        # without stalling the Python program.
        fence(block=False)
        f.read(b)
        assert all(a == b)
    assert f.closed


@pytest.mark.parametrize(
    "start,end",
    [
        (0, 10),
        (1, 10),
        (0, 10 * 4096),
        (1, int(1.3 * 4096)),
        (int(2.1 * 4096), int(5.6 * 4096)),
    ],
)
def test_read_write_slices(tmp_path, start, end):
    """Read and write different slices"""
    filename = tmp_path / "test-file"
    a = num.arange(10 * 4096)  # 10 page-sizes
    b = a.copy()
    a[start:end] = 42
    with CuFile(filename, "w") as f:
        f.write(a[start:end])
    fence(block=True)
    with CuFile(filename, "r") as f:
        f.read(b[start:end])
    assert all(a == b)


@pytest.mark.parametrize(
    "shape,tile_shape", [((2,), (3,)), ((2, 2), (3, 2)), ((2, 3), (2, 2))]
)
def test_read_write_tiles_error(tmp_path, shape, tile_shape):
    # Array shapes that are not whole multiples of the tile shape are rejected.
    with pytest.raises(ValueError, match="must be divisible"):
        write_tiles(ary=num.ones(shape), dirpath=tmp_path, tile_shape=tile_shape)
    with pytest.raises(ValueError, match="must be divisible"):
        read_tiles(ary=num.ones(shape), dirpath=tmp_path, tile_shape=tile_shape)


@pytest.mark.parametrize(
    "shape,tile_shape,tile_start",
    [
        ((2,), (2,), (1,)),
        ((4,), (2,), (0,)),
        ((4, 2), (2, 2), (1, 2)),
        ((2, 4), (2, 2), (2, 1)),
    ],
)
def test_read_write_tiles(tmp_path, shape, tile_shape, tile_start):
    # Roundtrip an array stored as one file per tile and compare the result.
    a = num.arange(math.prod(shape)).reshape(shape)
    write_tiles(ary=a, dirpath=tmp_path, tile_shape=tile_shape, tile_start=tile_start)
    fence(block=True)
    b = num.empty_like(a)
    read_tiles(ary=b, dirpath=tmp_path, tile_shape=tile_shape, tile_start=tile_start)
    assert (a == b).all()
| 0 |
rapidsai_public_repos/kvikio/legate
|
rapidsai_public_repos/kvikio/legate/tests/test_zarr.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import math
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from legate.core import get_legate_runtime
read_array = pytest.importorskip("legate_kvikio.zarr").read_array
write_array = pytest.importorskip("legate_kvikio.zarr").write_array
num = pytest.importorskip("cunumeric")
zarr = pytest.importorskip("zarr")
# Shared parametrization: (array shape, Zarr chunk shape) pairs.
shape_chunks = (
    "shape,chunks",
    [
        ((2,), (2,)),
        ((5,), (2,)),
        ((4, 2), (2, 2)),
        ((2, 4), (2, 2)),
        ((2, 3), (3, 2)),
        ((4, 3, 2, 1), (1, 2, 3, 4)),
    ],
)


@pytest.mark.parametrize(*shape_chunks)
@pytest.mark.parametrize("dtype", ["u1", "u8", "f8"])
def test_write_array(tmp_path, shape, chunks, dtype):
    """Write a cunumeric array through legate-kvikio, verify with plain zarr."""
    expected = num.arange(math.prod(shape), dtype=dtype).reshape(shape)
    write_array(ary=expected, dirpath=tmp_path, chunks=chunks)
    # Wait for the deferred write tasks before opening the store directly.
    get_legate_runtime().issue_execution_fence(block=True)

    stored = zarr.open_array(tmp_path, mode="r")
    assert_array_equal(expected, stored)


@pytest.mark.parametrize(*shape_chunks)
@pytest.mark.parametrize("dtype", ["u1", "u8", "f8"])
def test_read_array(tmp_path, shape, chunks, dtype):
    """Write an uncompressed array with plain zarr, read via legate-kvikio."""
    expected = np.arange(math.prod(shape), dtype=dtype).reshape(shape)
    store = zarr.open_array(
        tmp_path, mode="w", shape=shape, chunks=chunks, compressor=None
    )
    store[...] = expected
    result = read_array(dirpath=tmp_path)
    assert_array_equal(expected, result)
| 0 |
rapidsai_public_repos/kvikio/legate
|
rapidsai_public_repos/kvikio/legate/benchmarks/hdf5_read.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import argparse
import contextlib
import pathlib
import tempfile
from time import perf_counter as clock
from typing import ContextManager
import h5py
import numpy as np
DATASET = "dataset-hdf5-read"
def try_open_hdf5_array(filepath, shape, chunks, dtype):
    """Return the existing dataset if it matches (shape, chunks, dtype), else None.

    Note: the returned h5py dataset belongs to a file that is closed when this
    function returns, so callers should only use the result as a truthiness
    check (which is how create_hdf5_array() uses it).
    """
    try:
        with h5py.File(filepath, "r") as f:
            a = f[DATASET]
            if a.shape == shape and a.chunks == chunks and a.dtype == dtype:
                return a
    except Exception:
        # Missing/corrupt file or absent dataset -> treat as "not available".
        # Bugfix: catch Exception rather than BaseException so that
        # KeyboardInterrupt and SystemExit still propagate.
        pass
    return None
def create_hdf5_array(filepath: pathlib.Path, shape, chunks, dtype=np.float64) -> None:
    """Ensure an HDF5 dataset with the given geometry exists at `filepath`.

    A matching existing file is reused; otherwise the file is replaced with a
    random-filled dataset of the requested shape/chunks/dtype.
    """
    ret = try_open_hdf5_array(filepath, shape, chunks, dtype)
    if ret is None:
        filepath.unlink(missing_ok=True)
        # Write array using h5py.
        # Bugfix: cast to the requested dtype -- np.random.random() always
        # returns float64, which previously ignored the `dtype` argument.
        with h5py.File(filepath, "w") as f:
            f.create_dataset(
                DATASET, chunks=chunks, data=np.random.random(shape).astype(dtype)
            )
    print(f"HDF5 '{filepath}': shape: {shape}, " f"chunks: {chunks}, dtype: {dtype}")
@contextlib.contextmanager
def dask_h5py(args):
    """Context manager yielding a timed run function using Dask + h5py.

    Spins up a LocalCUDACluster with ``args.n_workers`` workers. The yielded
    function reads both HDF5 inputs as dask arrays, moves the chunks to GPU
    via CuPy, applies ``args.op`` and reduces, returning elapsed wall time.
    """
    import cupy
    import h5py
    from dask import array as da
    from dask_cuda import LocalCUDACluster
    from distributed import Client

    def f():
        t0 = clock()
        with h5py.File(args.dir / "A", "r") as af:
            with h5py.File(args.dir / "B", "r") as bf:
                a = da.from_array(af[DATASET], chunks=af[DATASET].chunks)
                b = da.from_array(bf[DATASET], chunks=bf[DATASET].chunks)
                a = a.map_blocks(cupy.asarray)
                b = b.map_blocks(cupy.asarray)
                c = args.op(da, a, b)
                # int() forces computation of the reduction before timing stops.
                int(c.sum().compute())
        t1 = clock()
        return t1 - t0

    with LocalCUDACluster(n_workers=args.n_workers) as cluster:
        with Client(cluster):
            yield f
@contextlib.contextmanager
def run_legate(args):
    """Context manager yielding a timed run function using legate-kvikio.

    The yielded function reads both HDF5 inputs via the kerchunk-based
    hdf5_read, applies ``args.op`` with cunumeric and reduces, returning
    elapsed wall time.
    """
    import cunumeric as num
    from legate.core import get_legate_runtime
    from legate_kvikio.kerchunk import hdf5_read

    def f():
        # Fence first so previously queued Legate work is not timed.
        get_legate_runtime().issue_execution_fence(block=True)
        t0 = clock()
        a = hdf5_read(args.dir / "A", dataset_name=DATASET)
        b = hdf5_read(args.dir / "B", dataset_name=DATASET)
        c = args.op(num, a, b)
        # int() blocks until the reduction has actually been computed.
        int(c.sum())
        t1 = clock()
        return t1 - t0

    yield f


# Maps --api choices to benchmark context managers.
API = {
    "dask-h5py": dask_h5py,
    "legate": run_legate,
}

# Maps --op choices to binary array operations; `xp` is the array module.
OP = {"add": lambda xp, a, b: a + b, "matmul": lambda xp, a, b: xp.matmul(a, b)}
def main(args):
    """Create the two input matrices, then time `args.nruns` executions of the op."""
    for name in ("A", "B"):
        create_hdf5_array(
            args.dir / name, chunks=(args.c, args.c), shape=(args.m, args.m)
        )

    timings = []
    with API[args.api](args) as run_once:
        for _ in range(args.n_warmup_runs):
            elapsed = run_once()
            print("elapsed[warmup]: ", elapsed)
        for i in range(args.nruns):
            elapsed = run_once()
            print(f"elapsed[run #{i}]: ", elapsed)
            timings.append(elapsed)
    print(f"elapsed mean: {np.mean(timings):.5}s (std: {np.std(timings):.5}s)")
if __name__ == "__main__":

    def parse_directory(x):
        """argparse type: an existing directory path, or None."""
        if x is None:
            return x
        else:
            p = pathlib.Path(x)
            if not p.is_dir():
                raise argparse.ArgumentTypeError("Must be a directory")
            return p

    # Bugfix: this benchmark operates on HDF5 files, not Zarr (the description
    # had been copy/pasted from the Zarr benchmark).
    parser = argparse.ArgumentParser(description="Matrix operation on two HDF5 files")
    parser.add_argument(
        "-m",
        default=100,
        type=int,
        help="Dimension of the two square input matrices (MxM) (default: %(default)s).",
    )
    parser.add_argument(
        "-c",
        default=None,
        type=int,
        help="Dimension of the square chunks (CxC) (default: M//10).",
    )
    parser.add_argument(
        "-d",
        "--dir",
        metavar="PATH",
        default=None,
        type=parse_directory,
        help="Path to the directory to r/w from (default: tempfile.TemporaryDirectory)",
    )
    parser.add_argument(
        "--nruns",
        metavar="RUNS",
        default=1,
        type=int,
        help="Number of runs (default: %(default)s).",
    )
    parser.add_argument(
        "--api",
        metavar="API",
        default=tuple(API.keys())[0],
        choices=tuple(API.keys()),
        help="API to use {%(choices)s}",
    )
    parser.add_argument(
        "--n-workers",
        default=1,
        type=int,
        help="Number of workers (default: %(default)s).",
    )
    parser.add_argument(
        "--op",
        metavar="OP",
        default=tuple(OP.keys())[0],
        choices=tuple(OP.keys()),
        help="Operation to run {%(choices)s}",
    )
    parser.add_argument(
        "--n-warmup-runs",
        default=1,
        type=int,
        help="Number of warmup runs (default: %(default)s).",
    )

    args = parser.parse_args()
    args.op = OP[args.op]  # Parse the operation argument
    if args.c is None:
        args.c = args.m // 10

    # Create a temporary directory if user didn't specify a directory
    temp_dir: tempfile.TemporaryDirectory | ContextManager
    if args.dir is None:
        temp_dir = tempfile.TemporaryDirectory()
        args.dir = pathlib.Path(temp_dir.name)
    else:
        temp_dir = contextlib.nullcontext()
    with temp_dir:
        main(args)
| 0 |
rapidsai_public_repos/kvikio/legate
|
rapidsai_public_repos/kvikio/legate/benchmarks/single-node-io.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import argparse
import contextlib
import os
import os.path
import pathlib
import statistics
import tempfile
from time import perf_counter as clock
from typing import ContextManager, Union
import cunumeric as num
from dask.utils import format_bytes, parse_bytes
import kvikio
import kvikio.defaults
import legate.core
from legate_kvikio import CuFile
runtime = legate.core.get_legate_runtime()
def run_cufile(args):
    """Single file and array"""
    file_path = args.dir / "kvikio-single-file"
    src = num.arange(args.nbytes, dtype="uint8")
    dst = num.empty_like(src)
    # Fence so array allocation/initialization is not included in the timings.
    runtime.issue_execution_fence(block=True)

    # Write
    f = CuFile(file_path, flags="w")
    t0 = clock()
    f.write(src)
    f.close()
    # Block until all outstanding Legate tasks (the write) have finished.
    runtime.issue_execution_fence(block=True)
    write_time = clock() - t0

    # Read
    f = CuFile(file_path, flags="r")
    t0 = clock()
    f.read(dst)
    f.close()
    runtime.issue_execution_fence(block=True)
    read_time = clock() - t0

    # Verify the roundtrip before reporting timings.
    assert (src == dst).all()
    return read_time, write_time


# Maps --api choices to benchmark functions.
API = {
    "cufile": run_cufile,
}
def main(args):
    """Print GPU/GDS configuration, then run and report each requested API."""
    props = kvikio.DriverProperties()
    try:
        import pynvml.smi

        nvsmi = pynvml.smi.nvidia_smi.getInstance()
    except ImportError:
        # pynvml is optional -- fall back to placeholder strings.
        gpu_name = "Unknown (install pynvml)"
        mem_total = gpu_name
        bar1_total = gpu_name
    else:
        info = nvsmi.DeviceQuery()["gpu"][0]
        gpu_name = f"{info['product_name']} (dev #0)"
        mem_total = format_bytes(
            parse_bytes(
                str(info["fb_memory_usage"]["total"]) + info["fb_memory_usage"]["unit"]
            )
        )
        bar1_total = format_bytes(
            parse_bytes(
                str(info["bar1_memory_usage"]["total"])
                + info["bar1_memory_usage"]["unit"]
            )
        )
    gds_version = "N/A (Compatibility Mode)"
    if props.is_gds_available:
        gds_version = f"v{props.major_version}.{props.minor_version}"
    gds_config_json_path = os.path.realpath(
        os.getenv("CUFILE_ENV_PATH_JSON", "/etc/cufile.json")
    )

    print("Roundtrip benchmark")
    print("----------------------------------")
    if kvikio.defaults.compat_mode():
        print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
        print("   WARNING - KvikIO compat mode   ")
        print("      libcufile.so not used       ")
        print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
    elif not props.is_gds_available:
        print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
        print("   WARNING - cuFile compat mode   ")
        print("         GDS not enabled          ")
        print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
    print(f"GPU               | {gpu_name}")
    print(f"GPU Memory Total  | {mem_total}")
    print(f"BAR1 Memory Total | {bar1_total}")
    print(f"GDS driver        | {gds_version}")
    print(f"GDS config.json   | {gds_config_json_path}")
    print("----------------------------------")
    print(f"nbytes            | {args.nbytes} bytes ({format_bytes(args.nbytes)})")
    print(f"4K aligned        | {args.nbytes % 4096 == 0}")
    print(f"directory         | {args.dir}")
    # NOTE(review): nthreads is printed but not visibly applied anywhere in
    # this file -- confirm it is consumed elsewhere (e.g. kvikio defaults).
    print(f"nthreads          | {args.nthreads}")
    print(f"nruns             | {args.nruns}")
    print(f"#CPUs             | {runtime.num_cpus}")
    print(f"#GPUs             | {runtime.num_gpus}")
    print("==================================")

    # Run each benchmark using the requested APIs
    for api in args.api:
        rs = []
        ws = []
        for _ in range(args.nruns):
            read, write = API[api](args)
            # Convert elapsed seconds to throughput (bytes per second).
            rs.append(args.nbytes / read)
            ws.append(args.nbytes / write)

        def pprint_api_res(name, samples):
            # Format mean throughput plus relative stddev and the raw samples.
            mean = statistics.mean(samples) if len(samples) > 1 else samples[0]
            ret = f"{api} {name}".ljust(18)
            ret += f"| {format_bytes(mean).rjust(10)}/s".ljust(14)
            if len(samples) > 1:
                stdev = statistics.stdev(samples) / mean * 100
                ret += " ± %5.2f %%" % stdev
            ret += " ("
            for sample in samples:
                ret += f"{format_bytes(sample)}/s, "
            ret = ret[:-2] + ")"  # Replace trailing comma
            return ret

        print(pprint_api_res("read", rs))
        print(pprint_api_res("write", ws))
if __name__ == "__main__":

    def parse_directory(x):
        """argparse type: an existing directory path, or None."""
        if x is None:
            return x
        else:
            p = pathlib.Path(x)
            if not p.is_dir():
                raise argparse.ArgumentTypeError("Must be a directory")
            return p

    parser = argparse.ArgumentParser(description="Roundtrip benchmark")
    parser.add_argument(
        "-n",
        "--nbytes",
        metavar="BYTES",
        default="10 MiB",
        type=parse_bytes,
        # NOTE(review): the multiple-of-8 requirement stated here is not
        # enforced by any check in this file -- confirm where it is validated.
        help="Message size, which must be a multiple of 8 (default: %(default)s).",
    )
    parser.add_argument(
        "-d",
        "--dir",
        metavar="PATH",
        default=None,
        type=parse_directory,
        help="Path to the directory to r/w from (default: tempfile.TemporaryDirectory)",
    )
    parser.add_argument(
        "--nruns",
        metavar="RUNS",
        default=1,
        type=int,
        help="Number of runs per API (default: %(default)s).",
    )
    parser.add_argument(
        "-t",
        "--nthreads",
        metavar="THREADS",
        default=1,
        type=int,
        help="Number of threads to use (default: %(default)s).",
    )
    parser.add_argument(
        "--api",
        metavar="API",
        default=("cufile",),
        nargs="+",
        choices=tuple(API.keys()) + ("all",),
        help="List of APIs to use {%(choices)s}",
    )
    args = parser.parse_args()
    # "all" expands to every registered benchmark API.
    if "all" in args.api:
        args.api = tuple(API.keys())

    # Create a temporary directory if user didn't specify a directory
    temp_dir: Union[tempfile.TemporaryDirectory, ContextManager]
    if args.dir is None:
        temp_dir = tempfile.TemporaryDirectory()
        args.dir = pathlib.Path(temp_dir.name)
    else:
        temp_dir = contextlib.nullcontext()
    with temp_dir:
        main(args)
| 0 |
rapidsai_public_repos/kvikio/legate
|
rapidsai_public_repos/kvikio/legate/benchmarks/zarr_read.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import argparse
import contextlib
import functools
import pathlib
import tempfile
from time import perf_counter as clock
from typing import ContextManager
import numpy as np
import zarr
from zarr.errors import ArrayNotFoundError
from kvikio.zarr import GDSStore
def try_open_zarr_array(dirpath, shape, chunks, dtype):
    """Open an existing Zarr array at `dirpath` if it matches the given spec.

    Returns the opened (read-only) array when its shape, chunks, and dtype all
    match; returns None when the array does not exist or does not match.
    A falsy `chunks` means "accept whatever chunking the array has".
    """
    try:
        existing = zarr.open_array(dirpath, mode="r")
    except ArrayNotFoundError:
        return None
    expected_chunks = chunks if chunks else existing.chunks
    matches = (
        existing.shape == shape
        and existing.chunks == expected_chunks
        and existing.dtype == dtype
    )
    return existing if matches else None
def create_zarr_array(dirpath, shape, chunks=None, dtype=np.float64) -> None:
    """Create a Zarr array of random data at `dirpath`, reusing a matching one.

    If an array with the requested shape/chunks/dtype already exists it is
    left untouched; otherwise a new uncompressed array is written and filled
    with uniform random values. Prints a one-line summary either way.
    """
    arr = try_open_zarr_array(dirpath, shape, chunks, dtype)
    if arr is None:
        arr = zarr.open_array(
            dirpath,
            shape=shape,
            dtype=dtype,
            mode="w",
            chunks=chunks,
            compressor=None,
        )
        arr[:] = np.random.random(shape)

    print(
        f"Zarr '{arr.store.path}': shape: {arr.shape}, "
        f"chunks: {arr.chunks}, dtype: {arr.dtype}"
    )
@contextlib.contextmanager
def run_dask(args, *, use_cupy):
    """Context manager yielding a zero-argument function that times one run.

    Spins up a LocalCUDACluster + client for the duration of the context.
    The yielded function reads the "A" and "B" Zarr arrays (through GDSStore
    with cupy when `use_cupy` is set, otherwise via plain paths), applies
    `args.op`, forces the computation, and returns the elapsed seconds.
    """
    from dask import array as da
    from dask_cuda import LocalCUDACluster
    from distributed import Client

    def timed_run():
        start = clock()
        if use_cupy:
            import cupy

            src_a = zarr.open_array(GDSStore(args.dir / "A"), meta_array=cupy.empty(()))
            src_b = zarr.open_array(GDSStore(args.dir / "B"), meta_array=cupy.empty(()))
        else:
            src_a = args.dir / "A"
            src_b = args.dir / "B"
        lhs = da.from_zarr(src_a)
        rhs = da.from_zarr(src_b)
        result = args.op(da, lhs, rhs)
        int(result.sum().compute())  # force execution before stopping the clock
        return clock() - start

    with LocalCUDACluster(n_workers=args.n_workers) as cluster:
        with Client(cluster):
            yield timed_run
@contextlib.contextmanager
def run_legate(args):
    """Context manager yielding a zero-argument function that times one run.

    The yielded function issues a blocking execution fence so previously
    launched work does not pollute the measurement, reads the "A" and "B"
    Zarr arrays via legate_kvikio, applies `args.op` with cunumeric, forces
    the result, and returns the elapsed seconds.
    """
    import cunumeric as num
    from legate.core import get_legate_runtime
    from legate_kvikio.zarr import read_array

    def timed_run():
        get_legate_runtime().issue_execution_fence(block=True)
        start = clock()
        lhs = read_array(args.dir / "A")
        rhs = read_array(args.dir / "B")
        result = args.op(num, lhs, rhs)
        int(result.sum())  # force execution before stopping the clock
        return clock() - start

    yield timed_run
# Maps the --api command-line choice to a context-manager factory; each
# factory yields a zero-argument timing function (see run_dask/run_legate).
API = {
    "dask-cpu": functools.partial(run_dask, use_cupy=False),
    "dask-gpu": functools.partial(run_dask, use_cupy=True),
    "legate": run_legate,
}
OP = {"add": lambda xp, a, b: a + b, "matmul": lambda xp, a, b: xp.matmul(a, b)}
def main(args):
    """Create the two input Zarr arrays and time the selected API.

    Runs `args.n_warmup_runs` untimed-for-statistics warmup iterations,
    then `args.nruns` measured iterations, printing each elapsed time and
    a final mean/std summary.
    """
    create_zarr_array(args.dir / "A", shape=(args.m, args.m))
    create_zarr_array(args.dir / "B", shape=(args.m, args.m))

    measurements = []
    with API[args.api](args) as run_once:
        for _ in range(args.n_warmup_runs):
            print("elapsed[warmup]: ", run_once())
        for i in range(args.nruns):
            elapsed = run_once()
            print(f"elapsed[run #{i}]: ", elapsed)
            measurements.append(elapsed)

    print(f"elapsed mean: {np.mean(measurements):.5}s (std: {np.std(measurements):.5}s)")
if __name__ == "__main__":
    # Union is used below instead of `X | Y` because this module-level
    # annotation is evaluated at runtime and `X | Y` requires Python >= 3.10;
    # this also matches the sibling benchmark scripts.
    from typing import Union

    def parse_directory(x):
        """argparse type: pass through None, otherwise require an existing directory."""
        if x is None:
            return x
        p = pathlib.Path(x)
        if not p.is_dir():
            raise argparse.ArgumentTypeError("Must be a directory")
        return p

    parser = argparse.ArgumentParser(description="Matrix operation on two Zarr files")
    parser.add_argument(
        "-m",
        default=100,
        type=int,
        help="Dimension of the two square input matrices (MxM) (default: %(default)s).",
    )
    parser.add_argument(
        "-d",
        "--dir",
        metavar="PATH",
        default=None,
        type=parse_directory,
        help="Path to the directory to r/w from (default: tempfile.TemporaryDirectory)",
    )
    parser.add_argument(
        "--nruns",
        metavar="RUNS",
        default=1,
        type=int,
        help="Number of runs (default: %(default)s).",
    )
    parser.add_argument(
        "--api",
        metavar="API",
        default=tuple(API.keys())[0],
        choices=tuple(API.keys()),
        help="API to use {%(choices)s}",
    )
    parser.add_argument(
        "--n-workers",
        default=1,
        type=int,
        help="Number of workers (default: %(default)s).",
    )
    parser.add_argument(
        "--op",
        metavar="OP",
        default=tuple(OP.keys())[0],
        choices=tuple(OP.keys()),
        help="Operation to run {%(choices)s}",
    )
    parser.add_argument(
        "--n-warmup-runs",
        default=1,
        type=int,
        help="Number of warmup runs (default: %(default)s).",
    )

    args = parser.parse_args()
    args.op = OP[args.op]  # Parse the operation argument

    # Create a temporary directory if user didn't specify a directory
    temp_dir: Union[tempfile.TemporaryDirectory, ContextManager]
    if args.dir is None:
        temp_dir = tempfile.TemporaryDirectory()
        args.dir = pathlib.Path(temp_dir.name)
    else:
        temp_dir = contextlib.nullcontext()

    with temp_dir:
        main(args)
| 0 |
rapidsai_public_repos/kvikio/legate
|
rapidsai_public_repos/kvikio/legate/cpp/task_opcodes.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Opcodes for the tasks provided by the legate_kvikio library. Each opcode's
// value doubles as the task's Legate task ID (see the Task template in
// legate_mapping.hpp), so OP_NUM_TASK_IDS must remain the last enumerator:
// it is used as the library's max_tasks resource count.
enum TaskOpCode {
  OP_WRITE                = 0,  // write a whole store to a single file
  OP_READ                 = 1,  // read a whole store from a single file
  OP_TILE_WRITE           = 2,  // write one tile per task
  OP_TILE_READ            = 3,  // read one tile per task
  OP_TILE_READ_BY_OFFSETS = 4,  // read tiles addressed by explicit offsets
  OP_NUM_TASK_IDS         = 5,  // must be last
};
| 0 |
rapidsai_public_repos/kvikio/legate
|
rapidsai_public_repos/kvikio/legate/cpp/CMakeLists.txt
|
# =============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Emit compile_commands.json so clang-tidy/IDEs can see the exact flags.
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

find_package(CUDAToolkit REQUIRED)

# NOTE(review): file(GLOB) is discouraged by CMake because newly added
# sources do not trigger a re-configure; listing sources explicitly is the
# documented recommendation — confirm before changing, as the current glob
# also sweeps in headers.
file(GLOB SOURCES "*.cpp" "*.hpp")

add_library(legate_kvikio ${SOURCES})

# Private: this directory plus the KvikIO C++ headers from the main project.
# Interface: installed headers only.
target_include_directories(
  legate_kvikio
  PRIVATE $<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}> ${CMAKE_SOURCE_DIR}/../cpp/include
  INTERFACE $<INSTALL_INTERFACE:include>
)

target_link_libraries(legate_kvikio PRIVATE legate::core CUDA::cudart)
| 0 |
rapidsai_public_repos/kvikio/legate
|
rapidsai_public_repos/kvikio/legate/cpp/legate_mapping.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "legate.h"
namespace legate_kvikio {

/**
 * @brief Central task registrar for the legate_kvikio library.
 *
 * Tasks record themselves here (through the Task base class below) and are
 * later registered in bulk via register_all_tasks() (see legate_mapping.cpp).
 */
struct Registry {
  static legate::TaskRegistrar& get_registrar();
};

/**
 * @brief CRTP base for all legate_kvikio tasks.
 *
 * @tparam T   The concrete task type (CRTP).
 * @tparam ID  The task's opcode/ID (see task_opcodes.hpp).
 */
template <typename T, int ID>
struct Task : public legate::LegateTask<T> {
  using Registrar              = Registry;
  static constexpr int TASK_ID = ID;
};

}  // namespace legate_kvikio
| 0 |
rapidsai_public_repos/kvikio/legate
|
rapidsai_public_repos/kvikio/legate/cpp/legate_mapping.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include "core/mapping/mapping.h"
#include "legate_mapping.hpp"
#include "task_opcodes.hpp"
namespace legate_kvikio {
/**
 * @brief Legate mapper for the legate_kvikio library.
 *
 * Non-copyable. Keeps the default mapping behavior except that every input
 * and output store is mapped with an exact instance policy.
 */
class Mapper : public legate::mapping::Mapper {
 public:
  Mapper() {}

  Mapper(const Mapper& rhs)            = delete;
  Mapper& operator=(const Mapper& rhs) = delete;

  // Legate mapping functions

  void set_machine(const legate::mapping::MachineQueryInterface* machine) override
  {
    machine_ = machine;
  }

  /// Pick the runtime's first (highest-priority) target offered.
  legate::mapping::TaskTarget task_target(
    const legate::mapping::Task& task,
    const std::vector<legate::mapping::TaskTarget>& options) override
  {
    return options.front();  // Choose first priority
  }

  /// Map every input and output store with the default mapping on the first
  /// offered target, forcing an exact instance so task bodies can rely on
  /// tight, contiguous layouts.
  std::vector<legate::mapping::StoreMapping> store_mappings(
    const legate::mapping::Task& task,
    const std::vector<legate::mapping::StoreTarget>& options) override
  {
    using legate::mapping::StoreMapping;
    std::vector<StoreMapping> mappings;
    const auto& inputs  = task.inputs();
    const auto& outputs = task.outputs();
    for (const auto& input : inputs) {
      mappings.push_back(StoreMapping::default_mapping(input, options.front()));
      mappings.back().policy.exact = true;
    }
    for (const auto& output : outputs) {
      mappings.push_back(StoreMapping::default_mapping(output, options.front()));
      mappings.back().policy.exact = true;
    }
    // Return by value: `return std::move(mappings)` would inhibit NRVO and
    // pessimize the copy/move elision.
    return mappings;
  }

  /// No tunables are defined for this library.
  legate::Scalar tunable_value(legate::TunableID tunable_id) override { return 0; }

 private:
  const legate::mapping::MachineQueryInterface* machine_{nullptr};  // non-owning
};
// Name under which this library is registered with the Legate runtime.
static const char* const library_name = "legate_kvikio";

// Logger shared by the tasks of this library.
Legion::Logger log_legate_kvikio(library_name);

// Meyers singleton: tasks record themselves in this registrar (via the Task
// base class in legate_mapping.hpp) before registration_callback() runs.
/*static*/ legate::TaskRegistrar& Registry::get_registrar()
{
  static legate::TaskRegistrar registrar;
  return registrar;
}
/**
 * @brief Registration callback invoked by the Legate runtime.
 *
 * Creates the "legate_kvikio" library with enough task IDs for all opcodes
 * (OP_NUM_TASK_IDS, see task_opcodes.hpp) and this library's Mapper, then
 * registers every task recorded in the Registry.
 */
void registration_callback()
{
  legate::ResourceConfig config = {.max_tasks = OP_NUM_TASK_IDS};
  auto context                  = legate::Runtime::get_runtime()->create_library(
    library_name, config, std::make_unique<Mapper>());

  Registry::get_registrar().register_all_tasks(context);
}
} // namespace legate_kvikio
extern "C" {

/**
 * @brief C entry point (called e.g. from the Python bindings) that hooks
 * the library's registration into the Legate runtime.
 */
void legate_kvikio_perform_registration(void)
{
  // Tell the runtime about our registration callback so we hook it
  // in before the runtime starts and make it global so that we know
  // that this call back is invoked everywhere across all nodes
  legate::Core::perform_registration<legate_kvikio::registration_callback>();
}
}
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.