repo_id | file_path | content | __index_level_0__
---|---|---|---|
rapidsai_public_repos/node/modules/rmm/test
|
rapidsai_public_repos/node/modules/rmm/test/memory_resource/memory-resource-tests.ts
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {expect} from '@jest/globals';
import {Uint8Buffer} from '@rapidsai/cuda';
import {DeviceBuffer} from '@rapidsai/rmm';
import {sizes} from '../utils';
import {memoryResourceTestConfigs} from './utils';
describe.each(memoryResourceTestConfigs)(`%s`, (_, testConfig) => {
// eslint-disable-next-line @typescript-eslint/unbound-method
const {comparable, supportsStreams, supportsGetMemInfo, createMemoryResource} = testConfig;
test(`MemoryResource Constructor`, () => {
let mr = createMemoryResource();
expect(mr.supportsStreams).toEqual(supportsStreams);
expect(mr.supportsGetMemInfo).toEqual(supportsGetMemInfo);
mr = <any>null;
});
test(`MemoryResource.prototype.getMemInfo`, () => {
let mr = createMemoryResource();
const memoryInfo = mr.getMemInfo();
expect(Array.isArray(memoryInfo)).toBe(true);
expect(memoryInfo.length).toBe(2);
memoryInfo.forEach((v) => expect(typeof v).toBe('number'));
mr = <any>null;
});
test(`MemoryResource.prototype.isEqual`, () => {
let mr1 = createMemoryResource();
let mr2 = createMemoryResource();
expect(mr1.isEqual(mr1)).toEqual(true);
expect(mr1.isEqual(mr2)).toEqual(comparable);
expect(mr2.isEqual(mr1)).toEqual(comparable);
expect(mr2.isEqual(mr2)).toEqual(true);
mr1 = <any>null;
mr2 = <any>null;
});
test(`works with DeviceBuffer`, () => {
let mr = createMemoryResource();
const [freeStart, totalStart] = (mr.supportsGetMemInfo ? mr.getMemInfo() : [0, 0]);
let dbuf = new DeviceBuffer(sizes['2_MiB'], mr);
// Fill the buffer with 1s because managed memory is only allocated when it's actually used.
new Uint8Buffer(dbuf).fill(1, 0, dbuf.byteLength);
new Uint8Buffer(dbuf)[dbuf.byteLength - 1];
const [freeEnd, totalEnd] = (mr.supportsGetMemInfo ? mr.getMemInfo() : [0, 0]);
expect(totalStart).toEqual(totalEnd);
if (mr.supportsGetMemInfo) { expect(freeStart - freeEnd).not.toEqual(0); }
mr = <any>null;
dbuf = <any>null;
});
});
| 0 |
rapidsai_public_repos/node/modules/rmm/test
|
rapidsai_public_repos/node/modules/rmm/test/memory_resource/per-device-resource-tests.ts
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {expect} from '@jest/globals';
import {devices} from '@rapidsai/cuda';
import {
DeviceBuffer,
getPerDeviceResource,
MemoryResource,
setPerDeviceResource
} from '@rapidsai/rmm';
import {sizes} from '../utils';
import {memoryResourceTestConfigs} from './utils';
describe.each(memoryResourceTestConfigs)(`%s`, (_, {createMemoryResource}) => {
test(`set/get per-device resource`, () => {
const device = devices[0];
let prev: MemoryResource|null = null;
try {
const mr = createMemoryResource();
prev = setPerDeviceResource(device.id, mr);
expect(getPerDeviceResource(device.id)).toBe(mr);
new DeviceBuffer(sizes['2_MiB'], mr);
} finally {
if (prev != null) { setPerDeviceResource(device.id, prev); }
}
});
});
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/io/package.json
|
{
"name": "@rapidsai/io",
"version": "22.12.2",
"description": "cuIO - NVIDIA RAPIDS I/O Library",
"license": "Apache-2.0",
"main": "index.js",
"types": "build/js",
"author": "NVIDIA, Inc. (https://nvidia.com/)",
"maintainers": [
"Paul Taylor <[email protected]>"
],
"homepage": "https://github.com/rapidsai/node/tree/main/modules/io#readme",
"bugs": {
"url": "https://github.com/rapidsai/node/issues"
},
"repository": {
"type": "git",
"url": "git+https://github.com/rapidsai/node.git"
},
"scripts": {
"install": "npx rapidsai-install-native-module",
"clean": "rimraf build doc compile_commands.json",
"doc": "rimraf doc && typedoc --options typedoc.js",
"test": "node -r dotenv/config node_modules/.bin/jest -c jest.config.js",
"build": "yarn tsc:build && yarn cpp:build",
"build:debug": "yarn tsc:build && yarn cpp:build:debug",
"compile": "yarn tsc:build && yarn cpp:compile",
"compile:debug": "yarn tsc:build && yarn cpp:compile:debug",
"rebuild": "yarn tsc:build && yarn cpp:rebuild",
"rebuild:debug": "yarn tsc:build && yarn cpp:rebuild:debug",
"cpp:clean": "npx cmake-js clean -O build/Release",
"cpp:clean:debug": "npx cmake-js clean -O build/Debug",
"cpp:build": "npx cmake-js build -g -O build/Release",
"cpp:build:debug": "npx cmake-js build -g -D -O build/Debug",
"cpp:compile": "npx cmake-js compile -g -O build/Release",
"postcpp:compile": "npx rapidsai-merge-compile-commands",
"cpp:compile:debug": "npx cmake-js compile -g -D -O build/Debug",
"postcpp:compile:debug": "npx rapidsai-merge-compile-commands",
"cpp:configure": "npx cmake-js configure -g -O build/Release",
"postcpp:configure": "npx rapidsai-merge-compile-commands",
"cpp:configure:debug": "npx cmake-js configure -g -D -O build/Debug",
"postcpp:configure:debug": "npx rapidsai-merge-compile-commands",
"cpp:rebuild": "npx cmake-js rebuild -g -O build/Release",
"postcpp:rebuild": "npx rapidsai-merge-compile-commands",
"cpp:rebuild:debug": "npx cmake-js rebuild -g -D -O build/Debug",
"postcpp:rebuild:debug": "npx rapidsai-merge-compile-commands",
"cpp:reconfigure": "npx cmake-js reconfigure -g -O build/Release",
"postcpp:reconfigure": "npx rapidsai-merge-compile-commands",
"cpp:reconfigure:debug": "npx cmake-js reconfigure -g -D -O build/Debug",
"postcpp:reconfigure:debug": "npx rapidsai-merge-compile-commands",
"tsc:clean": "rimraf build/js",
"tsc:build": "yarn tsc:clean && tsc -p ./tsconfig.json",
"tsc:watch": "yarn tsc:clean && tsc -p ./tsconfig.json -w",
"dev:cpack:enabled": "echo $npm_package_name"
},
"dependencies": {
"@rapidsai/cudf": "~22.12.2"
},
"files": [
"LICENSE",
"README.md",
"index.js",
"package.json",
"CMakeLists.txt",
"build/js"
]
}
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/io/index.js
|
// Copyright (c) 2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
module.exports = require('./build/js/index');
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/io/jest.config.js
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
try {
require('dotenv').config();
} catch (e) {}
module.exports = {
'verbose': true,
'testEnvironment': 'node',
'maxWorkers': process.env.PARALLEL_LEVEL || 1,
'globals': {'ts-jest': {'diagnostics': false, 'tsconfig': 'test/tsconfig.json'}},
'rootDir': './',
'roots': ['<rootDir>/test/'],
'moduleFileExtensions': ['js', 'ts', 'tsx'],
'coverageReporters': ['lcov'],
'coveragePathIgnorePatterns': ['test\\/.*\\.(ts|tsx|js)$', '/node_modules/'],
'transform': {'^.+\\.jsx?$': 'ts-jest', '^.+\\.tsx?$': 'ts-jest'},
'transformIgnorePatterns':
['/build/(js|Debug|Release)/*$', '/node_modules/(?!web-stream-tools).+\\.js$'],
'testRegex': '(.*(-|\\.)(test|spec)s?)\\.(ts|tsx|js)$',
'preset': 'ts-jest',
'testMatch': null,
'moduleNameMapper': {
'^@rapidsai\/io(.*)': '<rootDir>/src/$1',
'^\.\.\/(Debug|Release)\/(rapidsai_io.node)$': '<rootDir>/build/$1/$2',
}
};
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/io/CMakeLists.txt
|
#=============================================================================
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
cmake_minimum_required(VERSION 3.24.1 FATAL_ERROR)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
unset(CMAKE_LIBRARY_OUTPUT_DIRECTORY)
unset(CMAKE_LIBRARY_OUTPUT_DIRECTORY CACHE)
option(NODE_RAPIDS_USE_SCCACHE "Enable caching compilation results with sccache" ON)
###################################################################################################
# - cmake modules ---------------------------------------------------------------------------------
execute_process(COMMAND node -p
"require('@rapidsai/core').cmake_modules_path"
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
OUTPUT_VARIABLE NODE_RAPIDS_CMAKE_MODULES_PATH
OUTPUT_STRIP_TRAILING_WHITESPACE)
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/cmake_policies.cmake")
project(rapidsai_io VERSION $ENV{npm_package_version} LANGUAGES C CXX)
execute_process(COMMAND node -p
"require('path').dirname(require.resolve('@rapidsai/core'))"
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
OUTPUT_VARIABLE NODE_RAPIDS_CORE_MODULE_PATH
OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process(COMMAND node -p
"require('path').dirname(require.resolve('@rapidsai/cuda'))"
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
OUTPUT_VARIABLE NODE_RAPIDS_CUDA_MODULE_PATH
OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process(COMMAND node -p
"require('path').dirname(require.resolve('@rapidsai/rmm'))"
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
OUTPUT_VARIABLE NODE_RAPIDS_RMM_MODULE_PATH
OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process(COMMAND node -p
"require('path').dirname(require.resolve('@rapidsai/cudf'))"
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
OUTPUT_VARIABLE NODE_RAPIDS_CUDF_MODULE_PATH
OUTPUT_STRIP_TRAILING_WHITESPACE)
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureCXX.cmake")
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureCUDA.cmake")
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureNapi.cmake")
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureCUDF.cmake")
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/install_utils.cmake")
###################################################################################################
# - rapidsai_io target ----------------------------------------------------------------------------
file(GLOB_RECURSE NODE_IO_CPP_FILES "${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp")
file(GLOB_RECURSE NODE_IO_CUDA_FILES "${CMAKE_CURRENT_SOURCE_DIR}/src/*.cu")
list(APPEND NODE_IO_SRC_FILES ${NODE_IO_CPP_FILES})
list(APPEND NODE_IO_SRC_FILES ${NODE_IO_CUDA_FILES})
add_library(${PROJECT_NAME} SHARED ${NODE_IO_SRC_FILES} ${CMAKE_JS_SRC})
set_target_properties(${PROJECT_NAME}
PROPERTIES PREFIX ""
SUFFIX ".node"
BUILD_RPATH "\$ORIGIN"
INSTALL_RPATH "\$ORIGIN"
CXX_STANDARD 17
CXX_STANDARD_REQUIRED ON
CUDA_STANDARD 17
CUDA_STANDARD_REQUIRED ON
NO_SYSTEM_FROM_IMPORTED ON
POSITION_INDEPENDENT_CODE ON
INTERFACE_POSITION_INDEPENDENT_CODE ON
)
target_compile_options(${PROJECT_NAME}
PRIVATE "$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:C>:${NODE_RAPIDS_CMAKE_C_FLAGS}>>"
"$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:CXX>:${NODE_RAPIDS_CMAKE_CXX_FLAGS}>>"
"$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:CUDA>:${NODE_RAPIDS_CMAKE_CUDA_FLAGS}>>"
)
target_compile_definitions(${PROJECT_NAME}
PUBLIC "$<$<COMPILE_LANGUAGE:CXX>:CUDA_API_PER_THREAD_DEFAULT_STREAM>"
"$<$<COMPILE_LANGUAGE:CUDA>:CUDA_API_PER_THREAD_DEFAULT_STREAM>"
)
target_include_directories(${PROJECT_NAME}
PUBLIC "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src>"
"$<BUILD_INTERFACE:${NODE_RAPIDS_CUDF_MODULE_PATH}/src>"
"$<BUILD_INTERFACE:${NODE_RAPIDS_RMM_MODULE_PATH}/src>"
"$<BUILD_INTERFACE:${NODE_RAPIDS_CUDA_MODULE_PATH}/src>"
"$<BUILD_INTERFACE:${RAPIDS_CORE_INCLUDE_DIR}>"
"$<BUILD_INTERFACE:${NAPI_INCLUDE_DIRS}>"
)
target_link_libraries(${PROJECT_NAME}
PUBLIC ${CMAKE_JS_LIB}
cudf::cudf
"${NODE_RAPIDS_CUDF_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_cudf.node"
"${NODE_RAPIDS_RMM_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_rmm.node"
"${NODE_RAPIDS_CUDA_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_cuda.node"
"${NODE_RAPIDS_CORE_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_core.node")
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/cuda_arch_helpers.cmake")
generate_arch_specific_custom_targets(
NAME ${PROJECT_NAME}
DEPENDENCIES "cudf::cudf"
)
generate_install_rules(
NAME ${PROJECT_NAME}
CUDA_ARCHITECTURES ${CMAKE_CUDA_ARCHITECTURES})
# Create a symlink to compile_commands.json for the llvm-vs-code-extensions.vscode-clangd plugin
execute_process(COMMAND
${CMAKE_COMMAND} -E create_symlink
${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json)
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/io/README.md
|
# <div align="left"><img src="https://rapids.ai/assets/images/rapids_logo.png" width="90px"/> node-rapids io - GPU-Accelerated IO</div>
### Installation
`npm install @rapidsai/io`
### About
JS bindings providing GPU-accelerated I/O primitives.
For the detailed IO API, see our [API Documentation](https://rapidsai.github.io/node/modules/io_src.html).
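### Usage
A minimal sketch of reading a LAS point cloud into a GPU-backed `DataFrame` (the `points.las` path and the `x` column below are illustrative; the columns available depend on the point data format stored in the file):

```ts
import {IO} from '@rapidsai/io';

// Read the LAS point records into a cuDF DataFrame on the GPU.
const df = IO.readLas('./points.las');

// List the columns parsed from the point records (e.g. x, y, z, intensity, ...).
console.log(df.names);

// Pull a single column back to host memory for inspection.
console.log([...df.get('x')]);
```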
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/io/tsconfig.json
|
{
"include": ["src"],
"exclude": ["node_modules"],
"compilerOptions": {
"baseUrl": "./",
"paths": {
"@rapidsai/io": ["src/index"],
"@rapidsai/io/*": ["src/*"]
},
"target": "ESNEXT",
"module": "commonjs",
"outDir": "./build/js",
/* Decorators */
"experimentalDecorators": false,
/* Basic stuff */
"moduleResolution": "node",
"skipLibCheck": true,
"skipDefaultLibCheck": true,
"lib": ["dom", "esnext", "esnext.asynciterable"],
/* Control what is emitted */
"declaration": true,
"declarationMap": true,
"noEmitOnError": true,
"removeComments": false,
"downlevelIteration": true,
/* Create inline sourcemaps with sources */
"sourceMap": false,
"inlineSources": true,
"inlineSourceMap": true,
/* The most restrictive settings possible */
"strict": true,
"importHelpers": true,
"noEmitHelpers": true,
"noImplicitAny": true,
"noUnusedLocals": true,
"noImplicitReturns": true,
"allowUnusedLabels": false,
"noUnusedParameters": true,
"allowUnreachableCode": false,
"noFallthroughCasesInSwitch": true,
"forceConsistentCasingInFileNames": true
}
}
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/io/LICENSE
|
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------
include/visit_struct/visit_struct.hpp (modified): BSL 1.0
Boost Software License - Version 1.0 - August 17th, 2003
Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:
The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/io/typedoc.js
|
module.exports = {
entryPoints: ['src/index.ts'],
out: 'doc',
name: '@rapidsai/io',
tsconfig: 'tsconfig.json',
excludePrivate: true,
excludeProtected: true,
excludeExternals: true,
};
| 0 |
rapidsai_public_repos/node/modules/io
|
rapidsai_public_repos/node/modules/io/.vscode/launch.json
|
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"compounds": [
{
"name": "Debug Tests (TS and C++)",
"configurations": [
"Debug Tests (launch gdb)",
// "Debug Tests (launch lldb)",
"Debug Tests (attach node)",
]
}
],
"configurations": [
{
"name": "Debug Tests (TS only)",
"type": "node",
"request": "launch",
"cwd": "${workspaceFolder}",
"console": "integratedTerminal",
"internalConsoleOptions": "neverOpen",
"program": "${workspaceFolder}/node_modules/.bin/jest",
"skipFiles": [
"<node_internals>/**",
"${workspaceFolder}/node_modules/**"
],
"env": {
"NODE_NO_WARNINGS": "1",
"NODE_ENV": "production",
"READABLE_STREAM": "disable",
},
"args": [
"--verbose",
"--runInBand",
"-c", "jest.config.js",
"${input:TEST_FILE}"
]
},
// {
// "name": "Debug Tests (launch lldb)",
// // hide the individual configurations from the debug dropdown list
// "presentation": { "hidden": true },
// "type": "lldb",
// "request": "launch",
// "stdio": null,
// "cwd": "${workspaceFolder}",
// "preLaunchTask": "cpp:ensure:debug:build",
// "env": {
// "NODE_DEBUG": "1",
// "NODE_NO_WARNINGS": "1",
// "NODE_ENV": "production",
// "READABLE_STREAM": "disable",
// },
// "stopOnEntry": false,
// "terminal": "console",
// "program": "${input:NODE_BINARY}",
// "initCommands": [
// "settings set target.disable-aslr false",
// ],
// "sourceLanguages": ["cpp", "cuda", "javascript"],
// "args": [
// "--inspect=9229",
// "--expose-internals",
// "${workspaceFolder}/node_modules/.bin/jest",
// "--verbose",
// "--runInBand",
// "-c",
// "jest.config.js",
// "${input:TEST_FILE}"
// ],
// },
{
"name": "Debug Tests (launch gdb)",
// hide the individual configurations from the debug dropdown list
"presentation": { "hidden": true },
"type": "cppdbg",
"request": "launch",
"stopAtEntry": false,
"externalConsole": false,
"cwd": "${workspaceFolder}",
"envFile": "${workspaceFolder}/.env",
"MIMode": "gdb",
"miDebuggerPath": "/usr/bin/gdb",
"setupCommands": [
{
"description": "Enable pretty-printing for gdb",
"text": "-enable-pretty-printing",
"ignoreFailures": true
}
],
"program": "${input:NODE_BINARY}",
"environment": [
{ "name": "NODE_DEBUG", "value": "1" },
{ "name": "NODE_NO_WARNINGS", "value": "1" },
{ "name": "NODE_ENV", "value": "production" },
{ "name": "READABLE_STREAM", "value": "disable" },
],
"args": [
"--inspect=9229",
"--expose-internals",
"${workspaceFolder}/node_modules/.bin/jest",
"--verbose",
"--runInBand",
"-c",
"jest.config.js",
"${input:TEST_FILE}"
],
},
{
"name": "Debug Tests (attach node)",
"type": "node",
"request": "attach",
// hide the individual configurations from the debug dropdown list
"presentation": { "hidden": true },
"port": 9229,
"timeout": 60000,
"cwd": "${workspaceFolder}",
"skipFiles": [
"<node_internals>/**",
"${workspaceFolder}/node_modules/**"
],
},
],
"inputs": [
{
"type": "command",
"id": "NODE_BINARY",
"command": "shellCommand.execute",
"args": {
"description": "path to node",
"command": "which node",
"useFirstResult": true,
}
},
{
"type": "command",
"id": "TEST_FILE",
"command": "shellCommand.execute",
"args": {
"cwd": "${workspaceFolder}/modules/io",
"description": "Select a file to debug",
"command": "./node_modules/.bin/jest --listTests | sed -r \"s@$PWD/test/@@g\"",
}
},
],
}
| 0 |
rapidsai_public_repos/node/modules/io
|
rapidsai_public_repos/node/modules/io/.vscode/tasks.json
|
{
"version": "2.0.0",
"tasks": [
{
"type": "shell",
"label": "Rebuild node_cuio TS and C++ (slow)",
"group": { "kind": "build", "isDefault": true, },
"command": "if [[ \"${input:CMAKE_BUILD_TYPE}\" == \"Release\" ]]; then yarn rebuild; else yarn rebuild:debug; fi",
"problemMatcher": [
"$tsc",
{
"owner": "cuda",
"fileLocation": ["relative", "${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}"],
"pattern": {
"file": 1,
"line": 2,
"severity": 3,
"message": 4,
"regexp": "^(.*)\\((\\d+)\\):\\s+(error|warning|note|info):\\s+(.*)$"
}
},
{
"owner": "cpp",
"fileLocation": ["relative", "${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}"],
"pattern": {
"file": 1,
"line": 2,
"severity": 4,
"message": 5,
"regexp": "^(.*):(\\d+):(\\d+):\\s+(error|warning|note|info):\\s+(.*)$"
}
},
],
},
{
"type": "npm",
"group": "build",
"label": "Recompile node_cuio TS (fast)",
"script": "tsc:build",
"detail": "yarn tsc:build",
"problemMatcher": ["$tsc"],
},
{
"type": "shell",
"group": "build",
"label": "Recompile node_cuio C++ (fast)",
"command": "ninja -C ${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}",
"problemMatcher": [
{
"owner": "cuda",
"fileLocation": ["relative", "${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}"],
"pattern": {
"file": 1,
"line": 2,
"severity": 3,
"message": 4,
"regexp": "^(.*)\\((\\d+)\\):\\s+(error|warning|note|info):\\s+(.*)$"
}
},
{
"owner": "cpp",
"fileLocation": ["relative", "${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}"],
"pattern": {
"file": 1,
"line": 2,
"severity": 4,
"message": 5,
"regexp": "^(.*):(\\d+):(\\d+):\\s+(error|warning|note|info):\\s+(.*)$"
}
},
],
},
],
"inputs": [
{
"type": "pickString",
"default": "Release",
"id": "CMAKE_BUILD_TYPE",
"options": ["Release", "Debug"],
"description": "C++ Build Type",
}
]
}
| 0 |
rapidsai_public_repos/node/modules/io
|
rapidsai_public_repos/node/modules/io/src/index.ts
|
// Copyright (c) 2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
export {IO} from './las';
| 0 |
rapidsai_public_repos/node/modules/io
|
rapidsai_public_repos/node/modules/io/src/las.hpp
|
// Copyright (c) 2021-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cudf/io/datasource.hpp>
#include <cudf/table/table.hpp>
#include <rmm/device_buffer.hpp>
namespace nv {
struct LasHeader {
char file_signature[4];
uint16_t file_source_id;
uint16_t global_encoding;
char version_major, version_minor;
char system_identifier[32];
char generating_software[32];
uint16_t header_size;
uint32_t point_data_offset, variable_length_records_count;
char point_data_format_id;
uint16_t point_data_size;
uint32_t point_record_count;
uint32_t points_by_return_count[5];
double x_scale, y_scale, z_scale;
double x_offset, y_offset, z_offset;
double max_x, min_x;
double max_y, min_y;
double max_z, min_z;
};
const std::vector<std::string> PointDataFormatZeroColumnNames = {"x",
"y",
"z",
"intensity",
"bit_data",
"classification",
"scan_angle",
"user_data",
"point_source_id"};
struct PointDataFormatZero {
int32_t x, y, z;
uint16_t intensity;
uint8_t bit_data, classification;
char scan_angle;
uint8_t user_data;
uint16_t point_source_id;
};
const std::vector<std::string> PointDataFormatOneColumnNames = {"x",
"y",
"z",
"intensity",
"bit_data",
"classification",
"scan_angle",
"user_data",
"point_source_id",
"gps_time"};
struct PointDataFormatOne {
int32_t x, y, z;
uint16_t intensity;
uint8_t bit_data, classification;
char scan_angle;
uint8_t user_data;
uint16_t point_source_id;
double gps_time;
};
const std::vector<std::string> PointDataFormatTwoColumnNames = {"x",
"y",
"z",
"intensity",
"bit_data",
"classification",
"scan_angle",
"user_data",
"point_source_id",
"red",
"green",
"blue"};
struct PointDataFormatTwo {
int32_t x, y, z;
uint16_t intensity;
uint8_t bit_data, classification;
char scan_angle;
uint8_t user_data;
uint16_t point_source_id;
uint16_t red;
uint16_t green;
uint16_t blue;
};
const std::vector<std::string> PointDataFormatThreeColumnNames = {"x",
"y",
"z",
"intensity",
"bit_data",
"classification",
"scan_angle",
"user_data",
"point_source_id",
"gps_time"};
struct PointDataFormatThree {
int32_t x, y, z;
uint16_t intensity;
uint8_t bit_data, classification;
char scan_angle;
uint8_t user_data;
uint16_t point_source_id;
double gps_time;
uint16_t red;
uint16_t green;
uint16_t blue;
};
std::tuple<std::vector<std::string>, std::unique_ptr<cudf::table>> read_las(
const std::unique_ptr<cudf::io::datasource>& datasource,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource(),
rmm::cuda_stream_view stream = rmm::cuda_stream_default);
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/io
|
rapidsai_public_repos/node/modules/io/src/rapidsai_io.ts
|
// Copyright (c) 2021-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {Table} from '@rapidsai/cudf';
import {MemoryResource} from '@rapidsai/rmm';
/** @ignore */
export declare const _cpp_exports: any;
export declare function readLasTable(
input: string, memoryResource?: MemoryResource): {names: string[], table: Table};
| 0 |
rapidsai_public_repos/node/modules/io
|
rapidsai_public_repos/node/modules/io/src/las.ts
|
import {DataFrame, Series} from '@rapidsai/cudf';
import {readLasTable} from './addon';
export class IO {
public static readLas(path: string) {
const {names, table} = readLasTable(path);
return new DataFrame(names.reduce(
(cols, name, i) => ({...cols, [name]: Series.new(table.getColumnByIndex(i))}), {}));
}
}
| 0 |
rapidsai_public_repos/node/modules/io
|
rapidsai_public_repos/node/modules/io/src/addon.cpp
|
// Copyright (c) 2021-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "las.hpp"
#include <node_cudf/table.hpp>
#include <nv_node/utilities/args.hpp>
#include <cudf/io/datasource.hpp>
struct rapidsai_io : public nv::EnvLocalAddon, public Napi::Addon<rapidsai_io> {
rapidsai_io(Napi::Env env, Napi::Object exports) : nv::EnvLocalAddon(env, exports) {
DefineAddon(exports,
{InstanceMethod("init", &rapidsai_io::InitAddon),
InstanceValue("_cpp_exports", _cpp_exports.Value()),
InstanceMethod<&rapidsai_io::read_las>("readLasTable")});
}
private:
Napi::Value read_las(Napi::CallbackInfo const& info) {
nv::CallbackArgs args{info};
auto env = info.Env();
std::string path = args[0];
rmm::mr::device_memory_resource* mr = args[1];
auto [names, table] = nv::read_las(cudf::io::datasource::create(path), mr);
auto result = Napi::Object::New(env);
auto table_names = Napi::Array::New(env, names.size());
for (size_t i = 0; i < names.size(); ++i) {
table_names.Set(i, Napi::String::New(env, names[i]));
}
result.Set("names", table_names);
result.Set("table", nv::Table::New(env, std::move(table)));
return result;
}
};
NODE_API_ADDON(rapidsai_io);
| 0 |
rapidsai_public_repos/node/modules/io
|
rapidsai_public_repos/node/modules/io/src/las.cu
|
// Copyright (c) 2021-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "las.hpp"
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/io/datasource.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>
#include <iostream>
namespace nv {
namespace {
const int HEADER_BYTE_SIZE = 227;
#define LAS_UINT16(data, offset) \
(uint16_t)(static_cast<uint32_t>(data[offset]) | (static_cast<uint32_t>(data[offset + 1]) << 8))
#define LAS_INT32(data, offset) \
(int32_t)(static_cast<uint32_t>(data[offset]) | (static_cast<uint32_t>(data[offset + 1]) << 8) | \
(static_cast<uint32_t>(data[offset + 2]) << 16) | \
(static_cast<uint32_t>(data[offset + 3]) << 24))
#define LAS_UINT32(data, offset) \
(uint32_t)(static_cast<uint32_t>(data[offset]) | \
(static_cast<uint32_t>(data[offset + 1]) << 8) | \
(static_cast<uint32_t>(data[offset + 2]) << 16) | \
(static_cast<uint32_t>(data[offset + 3]) << 24))
#define LAS_DOUBLE(data, offset) \
(double)(static_cast<uint64_t>(data[offset]) | (static_cast<uint64_t>(data[offset + 1]) << 8) | \
(static_cast<uint64_t>(data[offset + 2]) << 16) | \
(static_cast<uint64_t>(data[offset + 3]) << 24) | \
(static_cast<uint64_t>(data[offset + 4]) << 32) | \
(static_cast<uint64_t>(data[offset + 5]) << 40) | \
(static_cast<uint64_t>(data[offset + 6]) << 48) | \
(static_cast<uint64_t>(data[offset + 7]) << 56))
__global__ void parse_header(uint8_t const* las_header_data, LasHeader* result) {
size_t byte_offset = 0;
// File signature (4 bytes)
for (int i = 0; i < 4; ++i) { result->file_signature[i] = *(las_header_data + i); }
byte_offset += 4;
// File source id (2 bytes)
result->file_source_id = LAS_UINT16(las_header_data, byte_offset);
byte_offset += 2;
// Global encoding (2 bytes)
result->global_encoding = LAS_UINT16(las_header_data, byte_offset);
byte_offset += 2;
// Project ID (16 bytes)
// not required
byte_offset += 16;
// Version major (1 byte)
result->version_major = *(las_header_data + byte_offset);
byte_offset += 1;
// Version minor (1 byte)
result->version_minor = *(las_header_data + byte_offset);
byte_offset += 1;
// System identifier (32 bytes)
for (int i = 0; i < 32; ++i) {
result->system_identifier[i] = *(las_header_data + byte_offset + i);
}
byte_offset += 32;
// Generating software (32 bytes)
for (int i = 0; i < 32; ++i) {
result->generating_software[i] = *(las_header_data + byte_offset + i);
}
byte_offset += 32;
// File creation day of year (2 bytes)
// not required
byte_offset += 2;
// File creation year (2 bytes)
// not required
byte_offset += 2;
// Header size (2 bytes)
result->header_size = LAS_UINT16(las_header_data, byte_offset);
byte_offset += 2;
// Offset to point data (4 bytes)
result->point_data_offset = LAS_UINT32(las_header_data, byte_offset);
byte_offset += 4;
// Number of variable length records (4 bytes)
result->variable_length_records_count = LAS_UINT32(las_header_data, byte_offset);
byte_offset += 4;
// Point data format id (1 byte)
result->point_data_format_id = *(las_header_data + byte_offset);
if (result->point_data_format_id & 128 || result->point_data_format_id & 64) {
result->point_data_format_id &= 127;
}
byte_offset += 1;
// Point data record length (2 bytes)
result->point_data_size = LAS_UINT16(las_header_data, byte_offset);
byte_offset += 2;
// Number of point records (4 bytes)
result->point_record_count = LAS_UINT32(las_header_data, byte_offset);
byte_offset += 4;
// Number of points by return (20 bytes)
for (int i = 0; i < 5; ++i) {
result->points_by_return_count[i] = LAS_UINT32(las_header_data, byte_offset);
byte_offset += 4;
}
// X scale factor (8 bytes)
result->x_scale = LAS_DOUBLE(las_header_data, byte_offset);
byte_offset += 8;
// Y scale factor (8 bytes)
result->y_scale = LAS_DOUBLE(las_header_data, byte_offset);
byte_offset += 8;
// Z scale factor (8 bytes)
result->z_scale = LAS_DOUBLE(las_header_data, byte_offset);
byte_offset += 8;
// X offset (8 bytes)
result->x_offset = LAS_DOUBLE(las_header_data, byte_offset);
byte_offset += 8;
// Y offset (8 bytes)
result->y_offset = LAS_DOUBLE(las_header_data, byte_offset);
byte_offset += 8;
// Z offset (8 bytes)
result->z_offset = LAS_DOUBLE(las_header_data, byte_offset);
byte_offset += 8;
// Max X (8 bytes)
result->max_x = LAS_DOUBLE(las_header_data, byte_offset);
byte_offset += 8;
// Min X (8 bytes)
result->min_x = LAS_DOUBLE(las_header_data, byte_offset);
byte_offset += 8;
// Max Y (8 bytes)
result->max_y = LAS_DOUBLE(las_header_data, byte_offset);
byte_offset += 8;
// Min Y (8 bytes)
result->min_y = LAS_DOUBLE(las_header_data, byte_offset);
byte_offset += 8;
// Max Z (8 bytes)
result->max_z = LAS_DOUBLE(las_header_data, byte_offset);
byte_offset += 8;
// Min Z (8 bytes)
result->min_z = LAS_DOUBLE(las_header_data, byte_offset);
byte_offset += 8;
}
std::unique_ptr<cudf::io::datasource::buffer> read(
const std::unique_ptr<cudf::io::datasource>& datasource,
size_t offset,
size_t size,
rmm::cuda_stream_view stream) {
if (datasource->supports_device_read()) { return datasource->device_read(offset, size, stream); }
auto device_buffer = rmm::device_buffer(size, stream);
CUDF_CUDA_TRY(cudaMemcpyAsync(device_buffer.data(),
datasource->host_read(offset, size)->data(),
size,
cudaMemcpyHostToDevice,
stream.value()));
return cudf::io::datasource::buffer::create(std::move(device_buffer));
}
std::unique_ptr<cudf::table> get_point_cloud_records(
const std::unique_ptr<cudf::io::datasource>& datasource,
LasHeader const& header,
rmm::mr::device_memory_resource* mr,
rmm::cuda_stream_view stream) {
auto const& point_record_count = header.point_record_count;
auto const& point_data_offset = header.point_data_offset;
auto const& point_data_size = header.point_data_size;
auto point_data =
read(datasource, point_data_offset, point_data_size * point_record_count, stream);
auto data = point_data->data();
auto idxs = thrust::make_counting_iterator(0);
std::vector<std::unique_ptr<cudf::column>> cols;
switch (header.point_data_format_id) {
// POINT
// FORMAT
// ZERO
case 0: {
cols.resize(PointDataFormatZeroColumnNames.size());
std::vector<cudf::type_id> ids{{
cudf::type_id::INT32, // x
cudf::type_id::INT32, // y
cudf::type_id::INT32, // z
cudf::type_id::INT16, // intensity
cudf::type_id::INT8, // bit_data
cudf::type_id::INT8, // classification
cudf::type_id::INT8, // scan angle
cudf::type_id::INT8, // user data
cudf::type_id::INT16, // point source id
}};
std::transform(ids.begin(), ids.end(), cols.begin(), [&](auto const& type_id) {
return cudf::make_numeric_column(
cudf::data_type{type_id}, point_record_count, cudf::mask_state::UNALLOCATED, stream, mr);
});
auto iter = thrust::make_transform_iterator(idxs, [=] __host__ __device__(int const& i) {
auto ptr = data + (i * (point_data_size));
PointDataFormatZero point_data;
point_data.x = LAS_INT32(ptr, 0);
point_data.y = LAS_INT32(ptr, 4);
point_data.z = LAS_INT32(ptr, 8);
point_data.intensity = LAS_UINT16(ptr, 12);
point_data.bit_data = ptr[14];
point_data.classification = ptr[15];
point_data.scan_angle = ptr[16];
point_data.user_data = ptr[17];
point_data.point_source_id = LAS_UINT16(ptr, 18);
return thrust::make_tuple(point_data.x,
point_data.y,
point_data.z,
point_data.intensity,
point_data.bit_data,
point_data.classification,
point_data.scan_angle,
point_data.user_data,
point_data.point_source_id);
});
thrust::copy(
rmm::exec_policy(stream),
iter,
iter + point_record_count,
thrust::make_zip_iterator(cols[0]->mutable_view().begin<int32_t>(), // x
cols[1]->mutable_view().begin<int32_t>(), // y
cols[2]->mutable_view().begin<int32_t>(), // z
cols[3]->mutable_view().begin<int16_t>(), // intensity
cols[4]->mutable_view().begin<int8_t>(), // bits
cols[5]->mutable_view().begin<int8_t>(), // classification
cols[6]->mutable_view().begin<int8_t>(), // scan angle
cols[7]->mutable_view().begin<int8_t>(), // user data
cols[8]->mutable_view().begin<int16_t>())); // point source id
break;
}
// POINT
// FORMAT
// ONE
case 1: {
cols.resize(PointDataFormatOneColumnNames.size());
std::vector<cudf::type_id> ids{{
cudf::type_id::INT32, // x
cudf::type_id::INT32, // y
cudf::type_id::INT32, // z
cudf::type_id::INT16, // intensity
cudf::type_id::INT8, // bit_data
cudf::type_id::INT8, // classification
cudf::type_id::INT8, // scan angle
cudf::type_id::INT8, // user data
cudf::type_id::INT16, // point source id
cudf::type_id::FLOAT64, // gps time
}};
std::transform(ids.begin(), ids.end(), cols.begin(), [&](auto const& type_id) {
return cudf::make_numeric_column(
cudf::data_type{type_id}, point_record_count, cudf::mask_state::UNALLOCATED, stream, mr);
});
auto iter = thrust::make_transform_iterator(idxs, [=] __host__ __device__(int const& i) {
auto ptr = data + (i * (point_data_size));
PointDataFormatOne point_data;
point_data.x = LAS_INT32(ptr, 0);
point_data.y = LAS_INT32(ptr, 4);
point_data.z = LAS_INT32(ptr, 8);
point_data.intensity = LAS_UINT16(ptr, 12);
point_data.bit_data = ptr[14];
point_data.classification = ptr[15];
point_data.scan_angle = ptr[16];
point_data.user_data = ptr[17];
point_data.point_source_id = LAS_UINT16(ptr, 18);
point_data.gps_time = LAS_DOUBLE(ptr, 20);
return thrust::make_tuple(point_data.x,
point_data.y,
point_data.z,
point_data.intensity,
point_data.bit_data,
point_data.classification,
point_data.scan_angle,
point_data.user_data,
point_data.point_source_id,
point_data.gps_time);
});
thrust::copy(
rmm::exec_policy(stream),
iter,
iter + point_record_count,
thrust::make_zip_iterator(cols[0]->mutable_view().begin<int32_t>(), // x
cols[1]->mutable_view().begin<int32_t>(), // y
cols[2]->mutable_view().begin<int32_t>(), // z
cols[3]->mutable_view().begin<int16_t>(), // intensity
cols[4]->mutable_view().begin<int8_t>(), // bits
cols[5]->mutable_view().begin<int8_t>(), // classification
cols[6]->mutable_view().begin<int8_t>(), // scan angle
cols[7]->mutable_view().begin<int8_t>(), // user data
cols[8]->mutable_view().begin<int16_t>(), // point source id
cols[9]->mutable_view().begin<double_t>())); // gps time
break;
}
// POINT
// FORMAT
// TWO
// TODO: Missing colours
case 2: {
cols.resize(PointDataFormatTwoColumnNames.size());
std::vector<cudf::type_id> ids{{
cudf::type_id::INT32, // x
cudf::type_id::INT32, // y
cudf::type_id::INT32, // z
cudf::type_id::INT16, // intensity
cudf::type_id::INT8, // bit_data
cudf::type_id::INT8, // classification
cudf::type_id::INT8, // scan angle
cudf::type_id::INT8, // user data
cudf::type_id::INT16, // point source id
}};
std::transform(ids.begin(), ids.end(), cols.begin(), [&](auto const& type_id) {
return cudf::make_numeric_column(
cudf::data_type{type_id}, point_record_count, cudf::mask_state::UNALLOCATED, stream, mr);
});
auto iter = thrust::make_transform_iterator(idxs, [=] __host__ __device__(int const& i) {
auto ptr = data + (i * (point_data_size));
PointDataFormatTwo point_data;
point_data.x = LAS_INT32(ptr, 0);
point_data.y = LAS_INT32(ptr, 4);
point_data.z = LAS_INT32(ptr, 8);
point_data.intensity = LAS_UINT16(ptr, 12);
point_data.bit_data = ptr[14];
point_data.classification = ptr[15];
point_data.scan_angle = ptr[16];
point_data.user_data = ptr[17];
point_data.point_source_id = LAS_UINT16(ptr, 18);
return thrust::make_tuple(point_data.x,
point_data.y,
point_data.z,
point_data.intensity,
point_data.bit_data,
point_data.classification,
point_data.scan_angle,
point_data.user_data,
point_data.point_source_id);
});
thrust::copy(
rmm::exec_policy(stream),
iter,
iter + point_record_count,
thrust::make_zip_iterator(cols[0]->mutable_view().begin<int32_t>(), // x
cols[1]->mutable_view().begin<int32_t>(), // y
cols[2]->mutable_view().begin<int32_t>(), // z
cols[3]->mutable_view().begin<int16_t>(), // intensity
cols[4]->mutable_view().begin<int8_t>(), // bits
cols[5]->mutable_view().begin<int8_t>(), // classification
cols[6]->mutable_view().begin<int8_t>(), // scan angle
cols[7]->mutable_view().begin<int8_t>(), // user data
cols[8]->mutable_view().begin<int16_t>())); // point source id
break;
}
// POINT
// FORMAT
// THREE
// TODO: Missing colours
case 3: {
cols.resize(PointDataFormatThreeColumnNames.size());
std::vector<cudf::type_id> ids{{
cudf::type_id::INT32, // x
cudf::type_id::INT32, // y
cudf::type_id::INT32, // z
cudf::type_id::INT16, // intensity
cudf::type_id::INT8, // bit_data
cudf::type_id::INT8, // classification
cudf::type_id::INT8, // scan angle
cudf::type_id::INT8, // user data
cudf::type_id::INT16, // point source id
cudf::type_id::FLOAT64, // gps time
}};
std::transform(ids.begin(), ids.end(), cols.begin(), [&](auto const& type_id) {
return cudf::make_numeric_column(
cudf::data_type{type_id}, point_record_count, cudf::mask_state::UNALLOCATED, stream, mr);
});
auto iter = thrust::make_transform_iterator(idxs, [=] __host__ __device__(int const& i) {
auto ptr = data + (i * (point_data_size));
PointDataFormatThree point_data;
point_data.x = LAS_INT32(ptr, 0);
point_data.y = LAS_INT32(ptr, 4);
point_data.z = LAS_INT32(ptr, 8);
point_data.intensity = LAS_UINT16(ptr, 12);
point_data.bit_data = ptr[14];
point_data.classification = ptr[15];
point_data.scan_angle = ptr[16];
point_data.user_data = ptr[17];
point_data.point_source_id = LAS_UINT16(ptr, 18);
point_data.gps_time = LAS_DOUBLE(ptr, 20);
return thrust::make_tuple(point_data.x,
point_data.y,
point_data.z,
point_data.intensity,
point_data.bit_data,
point_data.classification,
point_data.scan_angle,
point_data.user_data,
point_data.point_source_id,
point_data.gps_time);
});
thrust::copy(
rmm::exec_policy(stream),
iter,
iter + point_record_count,
thrust::make_zip_iterator(cols[0]->mutable_view().begin<int32_t>(), // x
cols[1]->mutable_view().begin<int32_t>(), // y
cols[2]->mutable_view().begin<int32_t>(), // z
cols[3]->mutable_view().begin<int16_t>(), // intensity
cols[4]->mutable_view().begin<int8_t>(), // bits
cols[5]->mutable_view().begin<int8_t>(), // classification
cols[6]->mutable_view().begin<int8_t>(), // scan angle
cols[7]->mutable_view().begin<int8_t>(), // user data
cols[8]->mutable_view().begin<int16_t>(), // point source id
cols[9]->mutable_view().begin<double_t>())); // gps time
break;
}
}
return std::make_unique<cudf::table>(std::move(cols));
}
#undef LAS_UINT16
#undef LAS_INT32
#undef LAS_UINT32
#undef LAS_DOUBLE
} // namespace
std::tuple<std::vector<std::string>, std::unique_ptr<cudf::table>> read_las(
const std::unique_ptr<cudf::io::datasource>& datasource,
rmm::mr::device_memory_resource* mr,
rmm::cuda_stream_view stream) {
auto header = [&]() {
LasHeader* d_header;
LasHeader* h_header;
cudaMalloc(&d_header, sizeof(LasHeader));
auto data = read(datasource, 0, HEADER_BYTE_SIZE, stream);
parse_header<<<1, 1>>>(data->data(), d_header);
h_header = static_cast<LasHeader*>(malloc(sizeof(LasHeader)));
cudaMemcpy(h_header, d_header, sizeof(LasHeader), cudaMemcpyDefault);
// Copy the parsed header to the stack and release the temporary buffers.
auto result = *h_header;
cudaFree(d_header);
free(h_header);
return result;
}();
auto table = get_point_cloud_records(datasource, header, mr, stream);
std::vector<std::string> names;
switch (header.point_data_format_id) {
case 0: {
names = PointDataFormatZeroColumnNames;
break;
}
case 1: {
names = PointDataFormatOneColumnNames;
break;
}
case 2: {
names = PointDataFormatTwoColumnNames;
break;
}
case 3: {
names = PointDataFormatThreeColumnNames;
break;
}
}
return std::make_tuple(names, std::move(table));
}
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/io
|
rapidsai_public_repos/node/modules/io/src/addon.ts
|
// Copyright (c) 2021-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* eslint-disable @typescript-eslint/no-redeclare */
import {addon as CORE} from '@rapidsai/core';
import {addon as CUDA} from '@rapidsai/cuda';
import {addon as CUDF} from '@rapidsai/cudf';
import {addon as RMM} from '@rapidsai/rmm';
export const {
_cpp_exports,
readLasTable,
} = require('bindings')('rapidsai_io.node').init(CORE, CUDA, RMM, CUDF) as
typeof import('./rapidsai_io');
export default {_cpp_exports, readLasTable};
| 0 |
rapidsai_public_repos/node/modules/io
|
rapidsai_public_repos/node/modules/io/test/test.las
|
version https://git-lfs.github.com/spec/v1
oid sha256:9c1a4c313621fd7c3cdd8a8b72187e992e26d0e4a89b401cb49662a224a96bdc
size 27473655
| 0 |
rapidsai_public_repos/node/modules/io
|
rapidsai_public_repos/node/modules/io/test/tsconfig.json
|
{
"extends": "../tsconfig.json",
"include": [
"../src/**/*.ts",
"../test/**/*.ts"
],
"compilerOptions": {
"target": "esnext",
"module": "commonjs",
"allowJs": true,
"importHelpers": false,
"noEmitHelpers": false,
"noEmitOnError": false,
"sourceMap": false,
"inlineSources": false,
"inlineSourceMap": false,
"downlevelIteration": false,
"baseUrl": "../",
"paths": {
"@rapidsai/io": ["src/index"],
"@rapidsai/io/*": ["src/*"]
}
}
}
| 0 |
rapidsai_public_repos/node/modules/io
|
rapidsai_public_repos/node/modules/io/test/las-test.ts
|
// Copyright (c) 2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {IO} from '@rapidsai/io';
test('import las', () => {
const df = IO.readLas(`${__dirname}/test.las`);
df.names.forEach((name) => { console.log([...df.get(name)]); });
});
| 0 |
rapidsai_public_repos/node/dev/dockerfiles
|
rapidsai_public_repos/node/dev/dockerfiles/devel/notebook.Dockerfile
|
ARG FROM_IMAGE
FROM ${FROM_IMAGE}
ARG TARGETARCH
ADD --chown=rapids:rapids \
https://raw.githubusercontent.com/n-riesco/ijavascript/8637a3e18b89270121f49733d03af0e3e6e0a17a/images/nodejs/js-green-32x32.png \
/opt/rapids/.local/share/jupyter/kernels/javascript/logo-32x32.png
ADD --chown=rapids:rapids \
https://raw.githubusercontent.com/n-riesco/ijavascript/8637a3e18b89270121f49733d03af0e3e6e0a17a/images/nodejs/js-green-64x64.png \
/opt/rapids/.local/share/jupyter/kernels/javascript/logo-64x64.png
ADD --chown=root:root \
https://github.com/jupyterlab/jupyterlab-desktop/releases/download/v3.3.2-1/JupyterLab-Setup-Debian.deb \
/tmp/JupyterLab-Setup-Debian.deb
USER root
# Manually install jupyter-kernelspec binary (for ijavascript)
RUN bash -c "echo -e '#!/usr/bin/python3\n\
import re\n\
import sys\n\
from jupyter_client.kernelspecapp import KernelSpecApp\n\
if __name__ == \"__main__\":\n\
sys.argv[0] = re.sub(r\"(-script\\.pyw?|\\.exe)?\$\", \"\", sys.argv[0])\n\
sys.exit(KernelSpecApp.launch_instance())\n\
' > /usr/bin/jupyter-kernelspec" \
&& chmod +x /usr/bin/jupyter-kernelspec \
# Install ijavascript kernel
&& bash -c "echo -e '{\n\
\"argv\": [\n\
\"ijskernel\",\n\
\"--hide-undefined\",\n\
\"{connection_file}\",\n\
\"--protocol=5.0\",\n\
\"--session-working-dir=/opt/rapids/node\"\n\
],\n\
\"name\": \"javascript\",\n\
\"language\": \"javascript\",\n\
\"display_name\": \"Javascript (Node.js)\"\n\
}' > /opt/rapids/.local/share/jupyter/kernels/javascript/kernel.json" \
&& chmod 0644 /opt/rapids/.local/share/jupyter/kernels/javascript/logo-{32x32,64x64}.png \
&& ln -s /opt/rapids/node/node_modules /opt/rapids/node_modules \
&& mkdir -p /opt/rapids/.jupyter \
&& mkdir -p /opt/rapids/.config/jupyterlab-desktop/lab/user-settings/@jupyterlab/apputils-extension \
&& bash -c "echo -e '{\n\
\"theme\": \"JupyterLab Dark\"\n\
}' > /opt/rapids/.config/jupyterlab-desktop/lab/user-settings/@jupyterlab/apputils-extension/themes.jupyterlab-settings" \
&& chown -R rapids:rapids /opt/rapids \
# Install Jupyter Desktop
&& apt update \
&& DEBIAN_FRONTEND=noninteractive \
apt install -y --no-install-recommends \
python3-minimal libasound2 jupyter-notebook /tmp/JupyterLab-Setup-Debian.deb \
# Remove python3 kernelspec
&& jupyter kernelspec remove -f python3 \
# Install ijavascript
&& npm install --location=global --unsafe-perm --no-audit --no-fund --no-update-notifier ijavascript \
&& ijsinstall --install=global --spec-path=full \
\
# Clean up
&& apt autoremove -y && apt clean \
&& rm -rf \
/tmp/* \
/var/tmp/* \
/var/lib/apt/lists/* \
/var/cache/apt/archives/*
USER rapids
WORKDIR /opt/rapids/node
SHELL ["/bin/bash", "-l"]
CMD ["jlab"]
| 0 |
rapidsai_public_repos/node/dev/dockerfiles
|
rapidsai_public_repos/node/dev/dockerfiles/devel/main.Dockerfile
|
# syntax=docker/dockerfile:1.3
ARG AMD64_BASE
ARG ARM64_BASE
ARG NODE_VERSION=16.15.1
FROM node:$NODE_VERSION-bullseye-slim as node
FROM ${AMD64_BASE} as base-amd64
FROM ${ARM64_BASE} as base-arm64
ONBUILD RUN \
 if [ -d /usr/local/cuda/lib64 ] && [ ! -f /usr/local/cuda/lib64/libcudart.so ]; then \
minor="$(nvcc --version | head -n4 | tail -n1 | cut -d' ' -f5 | cut -d',' -f1)"; \
major="$(nvcc --version | head -n4 | tail -n1 | cut -d' ' -f5 | cut -d',' -f1 | cut -d'.' -f1)"; \
ln -s /usr/local/cuda/lib64/libcudart.so.$minor /usr/local/cuda/lib64/libcudart.so.$major; \
ln -s /usr/local/cuda/lib64/libcudart.so.$major /usr/local/cuda/lib64/libcudart.so; \
rm /etc/ld.so.cache && ldconfig; \
fi
FROM base-${TARGETARCH} as compilers
SHELL ["/bin/bash", "-c"]
ENV CUDA_HOME="/usr/local/cuda"
ENV PATH="$PATH:\
${CUDA_HOME}/bin:\
${CUDA_HOME}/nvvm/bin"
ENV LD_LIBRARY_PATH="\
/usr/lib/aarch64-linux-gnu:\
/usr/lib/x86_64-linux-gnu:\
/usr/lib/i386-linux-gnu:\
${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}\
${CUDA_HOME}/lib64:\
${CUDA_HOME}/nvvm/lib64:\
${CUDA_HOME}/lib64/stubs"
ARG GCC_VERSION=9
ARG CMAKE_VERSION=3.26.0-rc2
ARG SCCACHE_VERSION=0.2.15
ARG LINUX_VERSION=ubuntu20.04
ARG NODE_VERSION=16.15.1
ENV NODE_VERSION=$NODE_VERSION
# Install node
COPY --from=node /usr/local/bin/node /usr/local/bin/node
COPY --from=node /usr/local/include/node /usr/local/include/node
COPY --from=node /usr/local/lib/node_modules /usr/local/lib/node_modules
# Install yarn
COPY --from=node /opt/yarn-v*/bin/* /usr/local/bin/
COPY --from=node /opt/yarn-v*/lib/* /usr/local/lib/
# Copy entrypoint
COPY --from=node /usr/local/bin/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
ADD --chown=root:root https://gitlab.com/nvidia/container-images/opengl/-/raw/5191cf205d3e4bb1150091f9464499b076104354/glvnd/runtime/10_nvidia.json /usr/share/glvnd/egl_vendor.d/10_nvidia.json
# Install compilers
RUN export DEBIAN_FRONTEND=noninteractive \
&& apt update \
&& apt install --no-install-recommends -y \
gpg wget software-properties-common lsb-release \
&& add-apt-repository --no-update -y ppa:git-core/ppa \
&& add-apt-repository --no-update -y ppa:ubuntu-toolchain-r/test \
# Install kitware cmake apt repository
&& wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null \
| gpg --dearmor - | tee /usr/share/keyrings/kitware-archive-keyring.gpg >/dev/null \
&& bash -c 'echo -e "\
deb [signed-by=/usr/share/keyrings/kitware-archive-keyring.gpg] https://apt.kitware.com/ubuntu/ $(lsb_release -cs) main\n\
" | tee /etc/apt/sources.list.d/kitware.list >/dev/null' \
\
&& apt update \
&& apt install --no-install-recommends -y \
git \
# Needed for CMake to find static `liblapack.a`
gfortran \
ninja-build \
gcc-${GCC_VERSION} g++-${GCC_VERSION} gdb \
curl libssl-dev libcurl4-openssl-dev xz-utils zlib1g-dev liblz4-dev \
# From opengl/glvnd:devel
pkg-config \
libxau6 libxdmcp6 libxcb1 libxext6 libx11-6 \
libglvnd-dev libgl1-mesa-dev libegl1-mesa-dev libgles2-mesa-dev \
\
&& chmod 0644 /usr/share/glvnd/egl_vendor.d/10_nvidia.json \
&& echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf \
&& echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf \
\
# Install cmake
&& wget --no-hsts -q -O /tmp/cmake_${CMAKE_VERSION}.sh \
https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-$(uname -p).sh \
&& bash /tmp/cmake_${CMAKE_VERSION}.sh --skip-license --exclude-subdir --prefix=/usr \
\
# Install sccache
&& curl -SsL "https://github.com/mozilla/sccache/releases/download/v$SCCACHE_VERSION/sccache-v$SCCACHE_VERSION-$(uname -m)-unknown-linux-musl.tar.gz" \
| tar -C /usr/bin -zf - --wildcards --strip-components=1 -x */sccache \
&& chmod +x /usr/bin/sccache \
\
# Install npm
&& bash -c 'echo -e "\
fund=false\n\
audit=false\n\
save-prefix=\n\
--omit=optional\n\
save-exact=true\n\
package-lock=false\n\
update-notifier=false\n\
scripts-prepend-node-path=true\n\
registry=https://registry.npmjs.org/\n\
" | tee /root/.npmrc >/dev/null' \
&& ln -s /usr/local/bin/node /usr/local/bin/nodejs \
&& ln -s /usr/local/lib/node_modules/npm/bin/npm-cli.js /usr/local/bin/npm \
&& ln -s /usr/local/lib/node_modules/npm/bin/npx-cli.js /usr/local/bin/npx \
# Smoke tests
&& echo "node version: $(node --version)" \
&& echo " npm version: $(npm --version)" \
&& echo "yarn version: $(yarn --version)" \
\
# Clean up
&& add-apt-repository --remove -y ppa:git-core/ppa \
&& add-apt-repository --remove -y ppa:ubuntu-toolchain-r/test \
&& apt autoremove -y && apt clean \
&& rm -rf \
/tmp/* \
/var/tmp/* \
/var/cache/apt/* \
/var/lib/apt/lists/*
ENTRYPOINT ["docker-entrypoint.sh"]
WORKDIR /
FROM compilers as main-arm64
ONBUILD ARG ADDITIONAL_GROUPS="--groups sudo,video"
FROM compilers as main-amd64
ONBUILD ARG LLDB_VERSION=17
ONBUILD ARG CLANGD_VERSION=17
ONBUILD ARG CLANG_FORMAT_VERSION=17
# Install dependencies and dev tools (llnode etc.)
ONBUILD RUN export DEBIAN_FRONTEND=noninteractive \
# Install LLVM apt sources
&& wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - \
&& bash -c 'echo -e "\
deb http://apt.llvm.org/$(lsb_release -cs)/ llvm-toolchain-$(lsb_release -cs)-${LLDB_VERSION} main\n\
deb-src http://apt.llvm.org/$(lsb_release -cs)/ llvm-toolchain-$(lsb_release -cs)-${LLDB_VERSION} main\n\
" | tee /etc/apt/sources.list.d/llvm-${LLDB_VERSION}.list >/dev/null' \
&& bash -c 'echo -e "\
deb http://apt.llvm.org/$(lsb_release -cs)/ llvm-toolchain-$(lsb_release -cs)-${CLANGD_VERSION} main\n\
deb-src http://apt.llvm.org/$(lsb_release -cs)/ llvm-toolchain-$(lsb_release -cs)-${CLANGD_VERSION} main\n\
" | tee /etc/apt/sources.list.d/llvm-${CLANGD_VERSION}.list >/dev/null' \
&& bash -c 'echo -e "\
deb http://apt.llvm.org/$(lsb_release -cs)/ llvm-toolchain-$(lsb_release -cs)-${CLANG_FORMAT_VERSION} main\n\
deb-src http://apt.llvm.org/$(lsb_release -cs)/ llvm-toolchain-$(lsb_release -cs)-${CLANG_FORMAT_VERSION} main\n\
" | tee /etc/apt/sources.list.d/llvm-${CLANG_FORMAT_VERSION}.list >/dev/null' \
\
&& apt update \
&& apt install --no-install-recommends -y \
# lldb (for llnode)
lldb-${LLDB_VERSION} libllvm${LLDB_VERSION} \
# clangd for C++ intellisense and debugging
clangd-${CLANGD_VERSION} \
# clang-format for automatically formatting C++ and TS/JS
clang-format-${CLANG_FORMAT_VERSION} \
\
# Set alternatives for clangd
&& (update-alternatives --remove-all clangd >/dev/null 2>&1 || true) \
&& update-alternatives --install /usr/bin/clangd clangd /usr/bin/clangd-${CLANGD_VERSION} 100 \
# Set clangd-${CLANGD_VERSION} as the default clangd
&& update-alternatives --set clangd /usr/bin/clangd-${CLANGD_VERSION} \
# Set alternatives for clang-format
&& (update-alternatives --remove-all clang-format >/dev/null 2>&1 || true) \
&& update-alternatives --install /usr/bin/clang-format clang-format /usr/bin/clang-format-${CLANG_FORMAT_VERSION} 100 \
# Set clang-format-${CLANG_FORMAT_VERSION} as the default clang-format
&& update-alternatives --set clang-format /usr/bin/clang-format-${CLANG_FORMAT_VERSION} \
# Set alternatives for lldb and llvm-config so it's in the path for llnode
&& (update-alternatives --remove-all lldb >/dev/null 2>&1 || true) \
&& (update-alternatives --remove-all llvm-config >/dev/null 2>&1 || true) \
&& update-alternatives \
--install /usr/bin/lldb lldb /usr/bin/lldb-${LLDB_VERSION} 100 \
--slave /usr/bin/llvm-config llvm-config /usr/bin/llvm-config-${LLDB_VERSION} \
# Set lldb-${LLDB_VERSION} as the default lldb, llvm-config-${LLDB_VERSION} as default llvm-config
&& update-alternatives --set lldb /usr/bin/lldb-${LLDB_VERSION} \
\
# Globally install llnode
&& npm install --location global --unsafe-perm --no-audit --no-fund --no-update-notifier llnode \
&& echo "llnode: $(which -a llnode)" \
&& echo "llnode version: $(llnode --version)" \
\
# Clean up
&& apt autoremove -y && apt clean \
&& rm -rf \
/tmp/* \
/var/tmp/* \
/var/cache/apt/* \
/var/lib/apt/lists/* \
/usr/local/lib/llnode \
/etc/apt/sources.list.d/llvm-${LLDB_VERSION}.list \
/etc/apt/sources.list.d/llvm-${CLANGD_VERSION}.list \
/etc/apt/sources.list.d/llvm-${CLANG_FORMAT_VERSION}.list
FROM main-${TARGETARCH}
ENV NVIDIA_DRIVER_CAPABILITIES all
ARG TARGETARCH
ARG ADDITIONAL_GROUPS
ARG UCX_VERSION=1.12.1
ARG FIXUID_VERSION=0.5.1
ARG NODE_WEBRTC_VERSION=0.4.7
# Install dependencies (llnode etc.)
RUN export DEBIAN_FRONTEND=noninteractive \
&& apt update \
&& apt install --no-install-recommends -y \
jq entr ssh vim nano sudo less bash-completion ripgrep fzf \
# X11 dependencies
libxi-dev libxrandr-dev libxinerama-dev libxcursor-dev \
# node-canvas dependencies
libcairo2-dev libpango1.0-dev libjpeg-dev libgif-dev librsvg2-dev \
# GLFW Wayland dependencies
extra-cmake-modules libwayland-dev wayland-protocols libxkbcommon-dev \
# GLEW dependencies
build-essential libxmu-dev libgl1-mesa-dev libegl1-mesa-dev libglu1-mesa-dev \
# cuSpatial dependencies
libgdal-dev \
# SQL dependencies
maven openjdk-8-jdk-headless openjdk-8-jre-headless libboost-regex-dev libboost-system-dev libboost-filesystem-dev \
# UCX runtime dependencies
libibverbs-dev librdmacm-dev libnuma-dev \
\
# Install UCX
&& wget -O /var/cache/apt/archives/ucx-v${UCX_VERSION}-${LINUX_VERSION}-mofed5-cuda11.deb \
https://github.com/openucx/ucx/releases/download/v${UCX_VERSION}/ucx-v${UCX_VERSION}-${LINUX_VERSION}-mofed5-cuda11.deb \
&& dpkg -i /var/cache/apt/archives/ucx-v${UCX_VERSION}-${LINUX_VERSION}-mofed5-cuda11.deb || true && apt --fix-broken install -y \
\
# Install fixuid
&& curl -SsL "https://github.com/boxboat/fixuid/releases/download/v$FIXUID_VERSION/fixuid-$FIXUID_VERSION-linux-${TARGETARCH}.tar.gz" \
| tar -C /usr/bin -xzf - \
&& chown root:root /usr/bin/fixuid && chmod 4755 /usr/bin/fixuid && mkdir -p /etc/fixuid \
&& bash -c 'echo -e "\
user: rapids\n\
group: rapids\n\
paths:\n\
- /opt/rapids\n\
- /opt/rapids/node\n\
" | tee /etc/fixuid/config.yml >/dev/null' \
\
# Add a non-root user
&& useradd \
--uid 1000 --shell /bin/bash \
--user-group ${ADDITIONAL_GROUPS} \
--create-home --home-dir /opt/rapids \
rapids \
&& mkdir -p /opt/rapids/node/.cache \
&& mkdir -p -m 0700 /opt/rapids/.ssh \
\
# Add GitHub's public keys to known_hosts
&& curl -s https://api.github.com/meta | jq -r '.ssh_keys | map("github.com \(.)") | .[]' > /opt/rapids/.ssh/known_hosts \
&& cp /root/.npmrc /opt/rapids/.npmrc \
&& ln -s /opt/rapids/node/.vscode/server /opt/rapids/.vscode-server \
&& ln -s /opt/rapids/node/.vscode/server-insiders /opt/rapids/.vscode-server-insiders \
&& chown -R rapids:rapids /opt/rapids \
&& bash -c 'echo "rapids ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers.d/nopasswd' \
\
# yellow + blue terminal prompt
&& sed -ri "s/32m/33m/g" /opt/rapids/.bashrc \
&& sed -ri "s/34m/36m/g" /opt/rapids/.bashrc \
# Persist infinite bash history on the host
&& bash -c 'echo -e "\
\n\
# Infinite bash history\n\
export HISTSIZE=-1;\n\
export HISTFILESIZE=-1;\n\
export HISTCONTROL=ignoreboth;\n\
\n\
# Change the file location because certain bash sessions truncate .bash_history file upon close.\n\
# http://superuser.com/questions/575479/bash-history-truncated-to-500-lines-on-each-login\n\
export HISTFILE=/opt/rapids/node/.cache/.eternal_bash_history;\n\
\n\
mkdir -p \$(dirname \$HISTFILE) && touch \$HISTFILE;\n\
mkdir -p /opt/rapids/node/.vscode/server{,-insiders}\n\
\n\
# flush commands to .bash_history immediately\n\
export PROMPT_COMMAND=\"history -a; \$PROMPT_COMMAND\";\n\
"' >> /opt/rapids/.bashrc \
\
# Add npm and yarn completions
&& mkdir -p /etc/bash_completion.d \
&& npm completion > /etc/bash_completion.d/npm \
&& curl -fsSL --compressed \
https://raw.githubusercontent.com/dsifford/yarn-completion/5bf2968493a7a76649606595cfca880a77e6ac0e/yarn-completion.bash \
| tee /etc/bash_completion.d/yarn >/dev/null \
\
# Install NVENC-enabled wrtc
&& wget -O /opt/rapids/wrtc-dev.tgz \
https://github.com/trxcllnt/node-webrtc-builds/releases/download/v${NODE_WEBRTC_VERSION}/wrtc-${NODE_WEBRTC_VERSION}-linux-${TARGETARCH}.tgz \
&& npm install --location=global --unsafe-perm --no-audit --no-fund --no-update-notifier /opt/rapids/wrtc-dev.tgz \
# Clean up
&& apt autoremove -y && apt clean \
&& rm -rf \
/tmp/* \
/var/tmp/* \
/var/cache/apt/* \
/var/lib/apt/lists/*
ENV NO_UPDATE_NOTIFIER=1
ENV RAPIDSAI_SKIP_DOWNLOAD=1
ENV npm_config_nodedir=/usr/local
ENV NODE_PATH=/usr/local/lib/node_modules
ENV NODE_OPTIONS="--experimental-vm-modules --trace-uncaught"
USER rapids
WORKDIR /opt/rapids/node
ENTRYPOINT ["fixuid", "-q", "docker-entrypoint.sh"]
CMD ["/bin/bash", "-l"]
| 0 |
rapidsai_public_repos/node/dev/dockerfiles
|
rapidsai_public_repos/node/dev/dockerfiles/devel/package.Dockerfile
|
# syntax=docker/dockerfile:1.3
ARG FROM_IMAGE
FROM ${FROM_IMAGE} as build
WORKDIR /opt/rapids/node
ENV NVIDIA_DRIVER_CAPABILITIES all
ARG CUDAARCHS=ALL
ARG PARALLEL_LEVEL
ARG NVCC_APPEND_FLAGS
ARG RAPIDS_VERSION
ARG SCCACHE_REGION
ARG SCCACHE_BUCKET
ARG SCCACHE_IDLE_TIMEOUT
RUN echo -e "build env:\n$(env)"
COPY --chown=rapids:rapids .npmrc /home/node/.npmrc
COPY --chown=rapids:rapids .npmrc .npmrc
COPY --chown=rapids:rapids .yarnrc .yarnrc
COPY --chown=rapids:rapids .eslintrc.js .eslintrc.js
COPY --chown=rapids:rapids LICENSE LICENSE
COPY --chown=rapids:rapids typedoc.js typedoc.js
COPY --chown=rapids:rapids lerna.json lerna.json
COPY --chown=rapids:rapids tsconfig.json tsconfig.json
COPY --chown=rapids:rapids package.json package.json
COPY --chown=rapids:rapids yarn.lock yarn.lock
COPY --chown=rapids:rapids scripts scripts
COPY --chown=rapids:rapids modules modules
ENV RAPIDSAI_SKIP_DOWNLOAD=1
RUN --mount=type=ssh,uid=1000,gid=1000,required=true \
--mount=type=secret,id=sccache_credentials,uid=1000,gid=1000 \
--mount=type=bind,source=dev/.ssh,target=/opt/rapids/.ssh,rw \
--mount=type=bind,source=dev/.gitconfig,target=/opt/rapids/.gitconfig \
sudo chown -R $(id -u):$(id -g) /opt/rapids; \
if [ -f /run/secrets/sccache_credentials ]; then \
export $(grep -v '^#' /run/secrets/sccache_credentials | xargs -d '\n'); \
fi; \
# Add GitHub's public keys to known_hosts
if [ ! -f /opt/rapids/.ssh/known_hosts ]; then \
curl -s https://api.github.com/meta | jq -r '.ssh_keys | map("github.com \(.)") | .[]' > /opt/rapids/.ssh/known_hosts; \
fi; \
echo -e "build context:\n$(find .)" \
&& bash -c 'echo -e "\
CUDAARCHS=$CUDAARCHS\n\
PARALLEL_LEVEL=$PARALLEL_LEVEL\n\
NVCC_APPEND_FLAGS=$NVCC_APPEND_FLAGS\n\
RAPIDS_VERSION=$RAPIDS_VERSION\n\
SCCACHE_REGION=$SCCACHE_REGION\n\
SCCACHE_BUCKET=$SCCACHE_BUCKET\n\
SCCACHE_IDLE_TIMEOUT=$SCCACHE_IDLE_TIMEOUT\n\
" > .env' \
&& yarn --pure-lockfile --network-timeout 1000000 \
&& yarn build \
&& yarn dev:npm:pack \
&& chown rapids:rapids build/*.{tgz,tar.gz} \
&& mv build/*.tgz ../ && mv build/*.tar.gz ../
FROM alpine:latest
COPY --from=build /opt/rapids/*.tgz /opt/rapids/
COPY --from=build /opt/rapids/*.tar.gz /opt/rapids/
| 0 |
rapidsai_public_repos/node/dev/dockerfiles
|
rapidsai_public_repos/node/dev/dockerfiles/runtime/notebook.Dockerfile
|
ARG FROM_IMAGE
FROM ${FROM_IMAGE}
SHELL ["/bin/bash", "-c"]
ARG TARGETARCH
ADD --chown=node:node \
https://raw.githubusercontent.com/n-riesco/ijavascript/8637a3e18b89270121f49733d03af0e3e6e0a17a/images/nodejs/js-green-32x32.png \
/home/node/.local/share/jupyter/kernels/javascript/logo-32x32.png
ADD --chown=node:node \
https://raw.githubusercontent.com/n-riesco/ijavascript/8637a3e18b89270121f49733d03af0e3e6e0a17a/images/nodejs/js-green-64x64.png \
/home/node/.local/share/jupyter/kernels/javascript/logo-64x64.png
ADD --chown=root:root \
https://github.com/jupyterlab/jupyterlab-desktop/releases/download/v3.3.2-1/JupyterLab-Setup-Debian.deb \
/tmp/JupyterLab-Setup-Debian.deb
USER root
# Manually install jupyter-kernelspec binary (for ijavascript)
RUN bash -c "echo -e '#!/usr/bin/python3\n\
import re\n\
import sys\n\
from jupyter_client.kernelspecapp import KernelSpecApp\n\
if __name__ == \"__main__\":\n\
sys.argv[0] = re.sub(r\"(-script\\.pyw?|\\.exe)?\$\", \"\", sys.argv[0])\n\
sys.exit(KernelSpecApp.launch_instance())\n\
' > /usr/bin/jupyter-kernelspec" \
&& chmod +x /usr/bin/jupyter-kernelspec \
# Install ijavascript kernel
&& bash -c "echo -e '{\n\
\"argv\": [\n\
\"ijskernel\",\n\
\"--hide-undefined\",\n\
\"{connection_file}\",\n\
\"--protocol=5.0\",\n\
\"--session-working-dir=/home/node\"\n\
],\n\
\"name\": \"javascript\",\n\
\"language\": \"javascript\",\n\
\"display_name\": \"Javascript (Node.js)\"\n\
}' > /home/node/.local/share/jupyter/kernels/javascript/kernel.json" \
&& chmod 0644 /home/node/.local/share/jupyter/kernels/javascript/logo-{32x32,64x64}.png \
&& mkdir -p /home/node/.config/jupyterlab-desktop/lab/user-settings/@jupyterlab/apputils-extension \
&& bash -c "echo -e '{\n\
\"theme\": \"JupyterLab Dark\"\n\
}' > /home/node/.config/jupyterlab-desktop/lab/user-settings/@jupyterlab/apputils-extension/themes.jupyterlab-settings" \
\
&& chown -R node:node /home/node/.{local,config} \
# Install Jupyter desktop
&& apt update \
&& DEBIAN_FRONTEND=noninteractive \
apt install -y --no-install-recommends \
build-essential libasound2 jupyter-notebook /tmp/JupyterLab-Setup-Debian.deb \
# Remove python3 kernelspec
&& jupyter kernelspec remove -f python3 \
# Install ijavascript
&& npm install --location=global --unsafe-perm --no-audit --no-fund --no-update-notifier ijavascript \
&& ijsinstall --install=global --spec-path=full \
\
# Clean up
&& apt remove -y build-essential \
&& apt autoremove -y && apt clean \
&& rm -rf \
/tmp/* \
/var/tmp/* \
/var/lib/apt/lists/* \
/var/cache/apt/archives/*
COPY --chown=node:node modules/cudf/notebooks /home/node/cudf
COPY --chown=node:node modules/demo/umap/*.ipynb /home/node/cugraph/
COPY --chown=node:node modules/demo/graph/*.ipynb /home/node/cugraph/
USER node
WORKDIR /home/node
SHELL ["/bin/bash", "-l"]
CMD ["jlab"]
| 0 |
rapidsai_public_repos/node/dev/dockerfiles
|
rapidsai_public_repos/node/dev/dockerfiles/runtime/cugraph.Dockerfile
|
# syntax=docker/dockerfile:1.3
ARG FROM_IMAGE
ARG BUILD_IMAGE
ARG DEVEL_IMAGE
FROM ${BUILD_IMAGE} as build
FROM ${DEVEL_IMAGE} as devel
WORKDIR /home/node
ENV RAPIDSAI_SKIP_DOWNLOAD=1
RUN --mount=type=bind,from=build,source=/opt/rapids/,target=/tmp/rapids/ \
npm install --omit=dev --omit=peer --omit=optional --legacy-peer-deps --force \
/tmp/rapids/rapidsai-core-*.tgz \
/tmp/rapids/rapidsai-cuda-*.tgz \
/tmp/rapids/rapidsai-rmm-*.tgz \
/tmp/rapids/rapidsai-cudf-*.tgz \
/tmp/rapids/rapidsai-cugraph-*.tgz; \
for x in cuda rmm cudf cugraph; do \
mkdir node_modules/@rapidsai/${x}/build/Release; \
tar -C node_modules/@rapidsai/${x}/build/Release \
-f /tmp/rapids/rapidsai_${x}-*-Linux.tar.gz \
--wildcards --strip-components=2 \
-x "**/lib/rapidsai_${x}.node" ; \
done
FROM ${FROM_IMAGE}
SHELL ["/bin/bash", "-c"]
WORKDIR /home/node
COPY --from=devel --chown=node:node /home/node/node_modules node_modules
SHELL ["/bin/bash", "-l"]
CMD ["node"]
| 0 |
rapidsai_public_repos/node/dev/dockerfiles
|
rapidsai_public_repos/node/dev/dockerfiles/runtime/demo.Dockerfile
|
# syntax=docker/dockerfile:1.3
ARG FROM_IMAGE
ARG BUILD_IMAGE
ARG DEVEL_IMAGE
FROM ${BUILD_IMAGE} as build
FROM ${DEVEL_IMAGE} as devel
WORKDIR /home/node
ENV RAPIDSAI_SKIP_DOWNLOAD=1
RUN --mount=type=bind,from=build,source=/opt/rapids/,target=/tmp/rapids/ \
npm install --omit=dev --omit=peer --omit=optional --legacy-peer-deps --force \
/tmp/rapids/wrtc-dev.tgz \
/tmp/rapids/rapidsai-core-*.tgz \
/tmp/rapids/rapidsai-cuda-*.tgz \
/tmp/rapids/rapidsai-glfw-*.tgz \
/tmp/rapids/rapidsai-webgl-*.tgz \
/tmp/rapids/rapidsai-rmm-*.tgz \
/tmp/rapids/rapidsai-cudf-*.tgz \
/tmp/rapids/rapidsai-sql-*.tgz \
/tmp/rapids/rapidsai-cuml-*.tgz \
/tmp/rapids/rapidsai-cugraph-*.tgz \
/tmp/rapids/rapidsai-cuspatial-*.tgz \
/tmp/rapids/rapidsai-io-*.tgz \
/tmp/rapids/rapidsai-deck.gl-*.tgz \
/tmp/rapids/rapidsai-jsdom-*.tgz \
/tmp/rapids/rapidsai-demo-*.tgz; \
for x in cuda rmm cudf cuml cugraph cuspatial sql io; do \
mkdir node_modules/@rapidsai/${x}/build/Release; \
tar -C node_modules/@rapidsai/${x}/build/Release \
-f /tmp/rapids/rapidsai_${x}-*-Linux.tar.gz \
--wildcards --strip-components=2 \
-x "**/lib/rapidsai_${x}.node" ; \
done; \
tar -C node_modules/@rapidsai/sql/build/Release \
-f /tmp/rapids/rapidsai_sql-*.tar.gz \
--wildcards --strip-components=2 \
-x "*/blazingsql-*.jar" ;
FROM scratch as ucx-deb-amd64
ONBUILD ARG UCX_VERSION=1.12.1
ONBUILD ARG LINUX_VERSION=ubuntu20.04
ONBUILD ADD https://github.com/openucx/ucx/releases/download/v${UCX_VERSION}/ucx-v${UCX_VERSION}-${LINUX_VERSION}-mofed5-cuda11.deb /ucx.deb
FROM ucx-deb-${TARGETARCH} as ucx-deb
FROM ${FROM_IMAGE}
SHELL ["/bin/bash", "-c"]
USER root
RUN --mount=type=bind,from=ucx-deb,target=/usr/src/ucx \
# Install dependencies
export DEBIAN_FRONTEND=noninteractive \
&& apt update \
&& apt install -y --no-install-recommends \
# cuSpatial dependencies
libgdal-dev \
# X11 dependencies
libxrandr2 libxinerama1 libxcursor1 \
# Wayland dependencies
wayland-protocols \
libwayland-{bin,egl1,cursor0,client0,server0} \
libxkbcommon0 libxkbcommon-x11-0 \
# GLEW dependencies
libglvnd0 libgl1 libglx0 libegl1 libgles2 libglu1-mesa \
# UCX runtime dependencies
libibverbs1 librdmacm1 libnuma1 numactl \
# node-canvas dependencies
libcairo2 libpango-1.0-0 libpangocairo-1.0-0 libjpeg8 libgif7 librsvg2-2 \
# SQL dependencies
openjdk-8-jre-headless libboost-regex-dev libboost-system-dev libboost-filesystem-dev \
# Install UCX
  && dpkg -i /usr/src/ucx/ucx.deb || true && apt install --fix-broken -y \
# Clean up
&& apt autoremove -y && apt clean \
&& rm -rf \
/tmp/* \
/var/tmp/* \
/var/lib/apt/lists/* \
/var/cache/apt/archives/*
USER node
WORKDIR /home/node
COPY --from=devel --chown=node:node /home/node/node_modules node_modules
SHELL ["/bin/bash", "-l"]
CMD ["node"]
| 0 |
rapidsai_public_repos/node/dev/dockerfiles
|
rapidsai_public_repos/node/dev/dockerfiles/runtime/cuml.Dockerfile
|
# syntax=docker/dockerfile:1.3
ARG FROM_IMAGE
ARG BUILD_IMAGE
ARG DEVEL_IMAGE
FROM ${BUILD_IMAGE} as build
FROM ${DEVEL_IMAGE} as devel
WORKDIR /home/node
ENV RAPIDSAI_SKIP_DOWNLOAD=1
RUN --mount=type=bind,from=build,source=/opt/rapids/,target=/tmp/rapids/ \
npm install --omit=dev --omit=peer --omit=optional --legacy-peer-deps --force \
/tmp/rapids/rapidsai-core-*.tgz \
/tmp/rapids/rapidsai-cuda-*.tgz \
/tmp/rapids/rapidsai-rmm-*.tgz \
/tmp/rapids/rapidsai-cudf-*.tgz \
/tmp/rapids/rapidsai-cuml-*.tgz; \
for x in cuda rmm cudf cuml; do \
mkdir node_modules/@rapidsai/${x}/build/Release; \
tar -C node_modules/@rapidsai/${x}/build/Release \
-f /tmp/rapids/rapidsai_${x}-*-Linux.tar.gz \
--wildcards --strip-components=2 \
-x "**/lib/rapidsai_${x}.node" ; \
done
FROM ${FROM_IMAGE}
SHELL ["/bin/bash", "-c"]
USER root
# Install dependencies
RUN export DEBIAN_FRONTEND=noninteractive \
&& apt update \
&& apt install -y --no-install-recommends \
# cuML dependencies
libblas3 liblapack3 libgomp1 libgfortran5 libquadmath0 \
# Clean up
&& apt autoremove -y && apt clean \
&& rm -rf \
/tmp/* \
/var/tmp/* \
/var/lib/apt/lists/* \
/var/cache/apt/archives/*
USER node
WORKDIR /home/node
COPY --from=devel --chown=node:node /home/node/node_modules node_modules
SHELL ["/bin/bash", "-l"]
CMD ["node"]
| 0 |
rapidsai_public_repos/node/dev/dockerfiles
|
rapidsai_public_repos/node/dev/dockerfiles/runtime/main.Dockerfile
|
# syntax=docker/dockerfile:1.3
ARG FROM_IMAGE
ARG BUILD_IMAGE
ARG DEVEL_IMAGE
FROM ${BUILD_IMAGE} as build
FROM ${DEVEL_IMAGE} as devel
WORKDIR /home/node
ENV RAPIDSAI_SKIP_DOWNLOAD=1
RUN --mount=type=bind,from=build,source=/opt/rapids/,target=/tmp/rapids/ \
npm install --omit=dev --omit=peer --omit=optional --legacy-peer-deps --force \
/tmp/rapids/wrtc-dev.tgz \
/tmp/rapids/rapidsai-core-*.tgz \
/tmp/rapids/rapidsai-cuda-*.tgz \
/tmp/rapids/rapidsai-glfw-*.tgz \
/tmp/rapids/rapidsai-webgl-*.tgz \
/tmp/rapids/rapidsai-rmm-*.tgz \
/tmp/rapids/rapidsai-cudf-*.tgz \
/tmp/rapids/rapidsai-sql-*.tgz \
/tmp/rapids/rapidsai-cuml-*.tgz \
/tmp/rapids/rapidsai-cugraph-*.tgz \
/tmp/rapids/rapidsai-cuspatial-*.tgz \
/tmp/rapids/rapidsai-io-*.tgz \
/tmp/rapids/rapidsai-deck.gl-*.tgz \
/tmp/rapids/rapidsai-jsdom-*.tgz; \
for x in cuda rmm cudf cuml cugraph cuspatial sql io; do \
mkdir node_modules/@rapidsai/${x}/build/Release; \
tar -C node_modules/@rapidsai/${x}/build/Release \
-f /tmp/rapids/rapidsai_${x}-*-Linux.tar.gz \
--wildcards --strip-components=2 \
-x "**/lib/rapidsai_${x}.node" ; \
done; \
tar -C node_modules/@rapidsai/sql/build/Release \
-f /tmp/rapids/rapidsai_sql-*.tar.gz \
--wildcards --strip-components=2 \
-x "*/blazingsql-*.jar" ;
FROM scratch as ucx-deb-amd64
ONBUILD ARG UCX_VERSION=1.12.1
ONBUILD ARG LINUX_VERSION=ubuntu20.04
ONBUILD ADD https://github.com/openucx/ucx/releases/download/v${UCX_VERSION}/ucx-v${UCX_VERSION}-${LINUX_VERSION}-mofed5-cuda11.deb /ucx.deb
FROM ucx-deb-${TARGETARCH} as ucx-deb
FROM ${FROM_IMAGE}
SHELL ["/bin/bash", "-c"]
USER root
RUN --mount=type=bind,from=ucx-deb,target=/usr/src/ucx \
# Install dependencies
export DEBIAN_FRONTEND=noninteractive \
&& apt update \
&& apt install -y --no-install-recommends \
# cuSpatial dependencies
libgdal-dev \
# X11 dependencies
libxrandr2 libxinerama1 libxcursor1 \
# Wayland dependencies
wayland-protocols \
libwayland-{bin,egl1,cursor0,client0,server0} \
libxkbcommon0 libxkbcommon-x11-0 \
# GLEW dependencies
libglvnd0 libgl1 libglx0 libegl1 libgles2 libglu1-mesa \
# UCX runtime dependencies
libibverbs1 librdmacm1 libnuma1 numactl \
# node-canvas dependencies
libcairo2 libpango-1.0-0 libpangocairo-1.0-0 libjpeg8 libgif7 librsvg2-2 \
# SQL dependencies
openjdk-8-jre-headless libboost-regex-dev libboost-system-dev libboost-filesystem-dev \
# Install UCX
  && dpkg -i /usr/src/ucx/ucx.deb || true && apt install --fix-broken -y \
# Clean up
&& apt autoremove -y && apt clean \
&& rm -rf \
/tmp/* \
/var/tmp/* \
/var/lib/apt/lists/* \
/var/cache/apt/archives/*
USER node
WORKDIR /home/node
COPY --from=devel --chown=node:node /home/node/node_modules node_modules
SHELL ["/bin/bash", "-l"]
CMD ["node"]
| 0 |
rapidsai_public_repos/node/dev/dockerfiles
|
rapidsai_public_repos/node/dev/dockerfiles/runtime/sql.Dockerfile
|
# syntax=docker/dockerfile:1.3
ARG FROM_IMAGE
ARG BUILD_IMAGE
ARG DEVEL_IMAGE
FROM ${BUILD_IMAGE} as build
FROM ${DEVEL_IMAGE} as devel
WORKDIR /home/node
ENV RAPIDSAI_SKIP_DOWNLOAD=1
RUN --mount=type=bind,from=build,source=/opt/rapids/,target=/tmp/rapids/ \
npm install --omit=dev --omit=peer --omit=optional --legacy-peer-deps --force \
/tmp/rapids/rapidsai-core-*.tgz \
/tmp/rapids/rapidsai-cuda-*.tgz \
/tmp/rapids/rapidsai-rmm-*.tgz \
/tmp/rapids/rapidsai-cudf-*.tgz \
/tmp/rapids/rapidsai-sql-*.tgz; \
for x in cuda rmm cudf sql; do \
mkdir node_modules/@rapidsai/${x}/build/Release; \
tar -C node_modules/@rapidsai/${x}/build/Release \
-f /tmp/rapids/rapidsai_${x}-*-Linux.tar.gz \
--wildcards --strip-components=2 \
-x "**/lib/rapidsai_${x}.node" ; \
done; \
tar -C node_modules/@rapidsai/sql/build/Release \
-f /tmp/rapids/rapidsai_sql-*.tar.gz \
--wildcards --strip-components=2 \
-x "*/blazingsql-*.jar" ;
FROM scratch as ucx-deb-amd64
ONBUILD ARG UCX_VERSION=1.12.1
ONBUILD ARG LINUX_VERSION=ubuntu20.04
ONBUILD ADD https://github.com/openucx/ucx/releases/download/v${UCX_VERSION}/ucx-v${UCX_VERSION}-${LINUX_VERSION}-mofed5-cuda11.deb /ucx.deb
FROM ucx-deb-${TARGETARCH} as ucx-deb
FROM ${FROM_IMAGE}
SHELL ["/bin/bash", "-c"]
USER root
RUN --mount=type=bind,from=ucx-deb,target=/usr/src/ucx \
# Install dependencies
export DEBIAN_FRONTEND=noninteractive \
&& apt update \
&& apt install -y --no-install-recommends \
# UCX runtime dependencies
libibverbs1 librdmacm1 libnuma1 numactl \
# SQL dependencies
openjdk-8-jre-headless libboost-regex-dev libboost-system-dev libboost-filesystem-dev \
# Install UCX
  && dpkg -i /usr/src/ucx/ucx.deb || true && apt install --fix-broken -y \
# Clean up
&& apt autoremove -y && apt clean \
&& rm -rf \
/tmp/* \
/var/tmp/* \
/var/lib/apt/lists/* \
/var/cache/apt/archives/*
USER node
WORKDIR /home/node
COPY --from=devel --chown=node:node /home/node/node_modules node_modules
SHELL ["/bin/bash", "-l"]
CMD ["node"]
| 0 |
rapidsai_public_repos/node/dev/dockerfiles
|
rapidsai_public_repos/node/dev/dockerfiles/runtime/cuspatial.Dockerfile
|
# syntax=docker/dockerfile:1.3
ARG FROM_IMAGE
ARG BUILD_IMAGE
ARG DEVEL_IMAGE
FROM ${BUILD_IMAGE} as build
FROM ${DEVEL_IMAGE} as devel
WORKDIR /home/node
ENV RAPIDSAI_SKIP_DOWNLOAD=1
RUN --mount=type=bind,from=build,source=/opt/rapids/,target=/tmp/rapids/ \
npm install --omit=dev --omit=peer --omit=optional --legacy-peer-deps --force \
/tmp/rapids/rapidsai-core-*.tgz \
/tmp/rapids/rapidsai-cuda-*.tgz \
/tmp/rapids/rapidsai-rmm-*.tgz \
/tmp/rapids/rapidsai-cudf-*.tgz \
/tmp/rapids/rapidsai-cuspatial-*.tgz; \
for x in cuda rmm cudf cuspatial; do \
mkdir node_modules/@rapidsai/${x}/build/Release; \
tar -C node_modules/@rapidsai/${x}/build/Release \
-f /tmp/rapids/rapidsai_${x}-*-Linux.tar.gz \
--wildcards --strip-components=2 \
-x "**/lib/rapidsai_${x}.node" ; \
done
FROM ${FROM_IMAGE}
SHELL ["/bin/bash", "-c"]
USER root
# Install dependencies
RUN export DEBIAN_FRONTEND=noninteractive \
&& apt update \
&& apt install -y --no-install-recommends \
# cuSpatial dependencies
libgdal-dev \
# Clean up
&& apt autoremove -y && apt clean \
&& rm -rf \
/tmp/* \
/var/tmp/* \
/var/lib/apt/lists/* \
/var/cache/apt/archives/*
USER node
WORKDIR /home/node
COPY --from=devel --chown=node:node /home/node/node_modules node_modules
SHELL ["/bin/bash", "-l"]
CMD ["node"]
| 0 |
rapidsai_public_repos/node/dev/dockerfiles
|
rapidsai_public_repos/node/dev/dockerfiles/runtime/cudf.Dockerfile
|
# syntax=docker/dockerfile:1.3
ARG FROM_IMAGE
ARG BUILD_IMAGE
ARG DEVEL_IMAGE
FROM ${BUILD_IMAGE} as build
FROM ${DEVEL_IMAGE} as devel
WORKDIR /home/node
ENV RAPIDSAI_SKIP_DOWNLOAD=1
RUN --mount=type=bind,from=build,source=/opt/rapids/,target=/tmp/rapids/ \
npm install --omit=dev --omit=peer --omit=optional --legacy-peer-deps --force \
/tmp/rapids/rapidsai-core-*.tgz \
/tmp/rapids/rapidsai-cuda-*.tgz \
/tmp/rapids/rapidsai-rmm-*.tgz \
/tmp/rapids/rapidsai-cudf-*.tgz; \
for x in cuda rmm cudf; do \
mkdir node_modules/@rapidsai/${x}/build/Release; \
tar -C node_modules/@rapidsai/${x}/build/Release \
-f /tmp/rapids/rapidsai_${x}-*-Linux.tar.gz \
--wildcards --strip-components=2 \
-x "**/lib/rapidsai_${x}.node" ; \
done
FROM ${FROM_IMAGE}
SHELL ["/bin/bash", "-c"]
WORKDIR /home/node
COPY --from=devel --chown=node:node /home/node/node_modules node_modules
SHELL ["/bin/bash", "-l"]
CMD ["node"]
| 0 |
rapidsai_public_repos/node/dev/dockerfiles
|
rapidsai_public_repos/node/dev/dockerfiles/runtime/glfw.Dockerfile
|
# syntax=docker/dockerfile:1.3
ARG FROM_IMAGE
ARG BUILD_IMAGE
ARG DEVEL_IMAGE
FROM ${BUILD_IMAGE} as build
FROM ${DEVEL_IMAGE} as devel
WORKDIR /home/node
RUN --mount=type=bind,from=build,source=/opt/rapids/,target=/tmp/rapids/ \
npm install --omit=dev --omit=peer --omit=optional --legacy-peer-deps --force \
/tmp/rapids/wrtc-dev.tgz \
/tmp/rapids/rapidsai-core-*.tgz \
/tmp/rapids/rapidsai-glfw-*.tgz \
/tmp/rapids/rapidsai-webgl-*.tgz \
/tmp/rapids/rapidsai-jsdom-*.tgz ;
FROM ${FROM_IMAGE}
SHELL ["/bin/bash", "-c"]
USER root
RUN export DEBIAN_FRONTEND=noninteractive \
&& apt update \
&& apt install -y --no-install-recommends \
# X11 dependencies
libxrandr2 libxinerama1 libxcursor1 \
# Wayland dependencies
wayland-protocols \
libwayland-{bin,egl1,cursor0,client0,server0} \
libxkbcommon0 libxkbcommon-x11-0 \
# GLEW dependencies
libglvnd0 libgl1 libglx0 libegl1 libgles2 libglu1-mesa \
# node-canvas dependencies
libcairo2 libpango-1.0-0 libpangocairo-1.0-0 libjpeg8 libgif7 librsvg2-2 \
# Clean up
&& apt autoremove -y && apt clean \
&& rm -rf \
/tmp/* \
/var/tmp/* \
/var/lib/apt/lists/* \
/var/cache/apt/archives/*
USER node
WORKDIR /home/node
COPY --from=devel --chown=node:node /home/node/node_modules node_modules
SHELL ["/bin/bash", "-l"]
CMD ["node"]
| 0 |
rapidsai_public_repos/node/dev/dockerfiles
|
rapidsai_public_repos/node/dev/dockerfiles/runtime/base.Dockerfile
|
ARG AMD64_BASE
ARG ARM64_BASE
ARG DEVEL_IMAGE
FROM ${DEVEL_IMAGE} as devel
FROM ${AMD64_BASE} as base-amd64
FROM ${ARM64_BASE} as base-arm64
ONBUILD RUN cd /usr/local/cuda/lib64 \
&& ln -s \
libcudart.so.$(nvcc --version | head -n4 | tail -n1 | cut -d' ' -f5 | cut -d',' -f1) \
libcudart.so.$(nvcc --version | head -n4 | tail -n1 | cut -d' ' -f5 | cut -d',' -f1 | cut -d'.' -f1) \
&& ln -s \
libcudart.so.$(nvcc --version | head -n4 | tail -n1 | cut -d' ' -f5 | cut -d',' -f1 | cut -d'.' -f1) \
libcudart.so \
&& rm /etc/ld.so.cache && ldconfig
ONBUILD ARG ADDITIONAL_GROUPS="--groups video"
FROM base-${TARGETARCH}
SHELL ["/bin/bash", "-c"]
ENV NVIDIA_DRIVER_CAPABILITIES all
ENV CUDA_HOME="/usr/local/cuda"
ENV LD_LIBRARY_PATH="\
/usr/lib/aarch64-linux-gnu:\
/usr/lib/x86_64-linux-gnu:\
/usr/lib/i386-linux-gnu:\
${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}\
${CUDA_HOME}/lib64:\
${CUDA_HOME}/nvvm/lib64:\
${CUDA_HOME}/lib64/stubs"
ADD --chown=root:root https://gitlab.com/nvidia/container-images/opengl/-/raw/5191cf205d3e4bb1150091f9464499b076104354/glvnd/runtime/10_nvidia.json /usr/share/glvnd/egl_vendor.d/10_nvidia.json
# Install gcc-9 toolchain
RUN export DEBIAN_FRONTEND=noninteractive \
# Workaround for https://forums.developer.nvidia.com/t/notice-cuda-linux-repository-key-rotation/212772
&& apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/$( \
. /etc/os-release; echo $NAME$VERSION_ID | tr -d '.' | tr '[:upper:]' '[:lower:]' \
)/$(uname -p)/3bf863cc.pub \
\
&& apt update \
&& apt install --no-install-recommends -y \
software-properties-common \
&& add-apt-repository --no-update -y ppa:ubuntu-toolchain-r/test \
&& apt update \
&& apt install --no-install-recommends -y \
libstdc++6 \
# From opengl/glvnd:runtime
libxau6 libxdmcp6 libxcb1 libxext6 libx11-6 \
libglvnd0 libopengl0 libgl1 libglx0 libegl1 libgles2 \
\
&& chmod 0644 /usr/share/glvnd/egl_vendor.d/10_nvidia.json \
&& echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf \
&& echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf \
# Clean up
&& add-apt-repository --remove -y ppa:ubuntu-toolchain-r/test \
&& apt remove -y software-properties-common \
&& apt autoremove -y && apt clean \
&& rm -rf \
/tmp/* \
/var/tmp/* \
/var/lib/apt/lists/* \
/var/cache/apt/archives/*
# Install node
COPY --from=devel /usr/local/bin/node /usr/local/bin/node
COPY --from=devel /usr/local/include/node /usr/local/include/node
COPY --from=devel /usr/local/lib/node_modules /usr/local/lib/node_modules
# Install yarn
COPY --from=devel /usr/local/bin/yarn /usr/local/bin/yarn
COPY --from=devel /usr/local/bin/yarn.js /usr/local/bin/yarn.js
COPY --from=devel /usr/local/bin/yarn.cmd /usr/local/bin/yarn.cmd
COPY --from=devel /usr/local/bin/yarnpkg /usr/local/bin/yarnpkg
COPY --from=devel /usr/local/bin/yarnpkg.cmd /usr/local/bin/yarnpkg.cmd
COPY --from=devel /usr/local/lib/cli.js /usr/local/lib/cli.js
COPY --from=devel /usr/local/lib/v8-compile-cache.js /usr/local/lib/v8-compile-cache.js
# Copy entrypoint
COPY --from=devel /usr/local/bin/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
# Copy nvrtc libs
COPY --from=devel /usr/local/cuda/lib64/libnvrtc* /usr/local/cuda/lib64/
ARG UID=1000
ARG ADDITIONAL_GROUPS
RUN useradd --uid $UID --user-group ${ADDITIONAL_GROUPS} --shell /bin/bash --create-home node \
&& ln -s /usr/local/bin/node /usr/local/bin/nodejs \
&& ln -s /usr/local/lib/node_modules/npm/bin/npm-cli.js /usr/local/bin/npm \
&& ln -s /usr/local/lib/node_modules/npm/bin/npx-cli.js /usr/local/bin/npx \
# smoke tests
&& node --version && npm --version && yarn --version
ENV npm_config_fund=false
ENV npm_config_update_notifier=false
ENV NODE_OPTIONS="--experimental-vm-modules --trace-uncaught"
WORKDIR /home/node
ENTRYPOINT ["docker-entrypoint.sh"]
SHELL ["/bin/bash", "-l"]
CMD ["node"]
| 0 |
rapidsai_public_repos/node
|
rapidsai_public_repos/node/scripts/exec.js
|
#!/usr/bin/env node
try {
require('dotenv').config();
} catch (e) { }
var name = (() => {
switch (require('os').platform()) {
case 'win32': return 'win32.sh';
default: return 'linux.sh';
}
})();
var Path = require('path');
var rootdir = Path.join(__dirname, '../');
var cmdpath = Path.join(__dirname, process.argv[2]);
var cwd = Path.join(cmdpath, Path.relative(cmdpath, rootdir));
process.exitCode = require('child_process').spawnSync(
Path.join(cmdpath, name),
process.argv.slice(3),
{ stdio: 'inherit', cwd }
).status;
| 0 |
rapidsai_public_repos/node/scripts
|
rapidsai_public_repos/node/scripts/run/linux.sh
|
#!/usr/bin/env bash
set -Eeo pipefail
exec lerna run "$@" --stream --include-dependencies --scope '@rapidsai/*'
| 0 |
rapidsai_public_repos/node/scripts
|
rapidsai_public_repos/node/scripts/npm-pack/linux.sh
|
#!/usr/bin/env bash
set -Eeo pipefail
rm -rf "$PWD/build"
mkdir -p "$PWD/build"
args="--stream --no-sort --parallel";
echo "running npm pack..."
lerna exec ${args} "npm pack --pack-destination $PWD/build \$PWD";
echo "running cpack..."
pkgs="$(lerna run ${args} --no-prefix --scope '@rapidsai/*' dev:cpack:enabled)";
args+=" $(for name in ${pkgs}; do echo "--scope $name"; done)";
lerna exec ${args} "\
cd build/Release \
&& cpack -G TGZ && rm -rf _CPack_Packages \
&& mv ./rapidsai_*-*-*.tar.gz \$LERNA_ROOT_PATH/build/"
| 0 |
rapidsai_public_repos/node/scripts
|
rapidsai_public_repos/node/scripts/demo/linux.sh
|
#!/usr/bin/env bash
set -Eeo pipefail
find node_modules -name .cache -type d -exec rm -rf "{}" +
fuzzy-find() {
(
for p in ${@}; do
path="${p#./}"; # remove leading ./ (if exists)
ext="${p##*.}"; # extract extension (if exists)
if [[ $ext == $p ]];
then echo $(find . -print0 | grep -FzZ $path | tr '\0' '\n');
else echo $(find . -name "*.$ext" -print0 | grep -FzZ $path | tr '\0' '\n');
fi;
done
)
}
DEMO=""
if [[ "$1" =~ "modules/demo" ]]; then
DEMO="$(fuzzy-find "$1/package.json" || echo '')";
DEMO="${DEMO%\/package.json}"
shift;
fi
if [[ "$DEMO" == "" ]]; then
DEMOS="
$(echo modules/demo/{graph,luma,spatial,xterm,client-server,umap,viz-app,deck}/package.json)
$(find modules/demo/{tfjs,ipc,ssr,sql} -maxdepth 2 -type f -name 'package.json')
";
DEMOS="$(echo -e "$DEMOS" | grep -v node_modules | sort -Vr)";
DEMOS=(${DEMOS});
DEMOS=("${DEMOS[@]/%\/package.json}")
echo "Please select a demo to run:"
select DEMO in "${DEMOS[@]}" "Quit"; do
if [[ $REPLY -lt $(( ${#DEMOS[@]}+1 )) ]]; then
break;
elif [[ $REPLY -eq $(( ${#DEMOS[@]}+1 )) ]]; then
exit 0;
else
echo "Invalid option, please select a demo (or quit)"
fi
done;
echo "Run this demo directly via:"
echo "\`yarn demo $DEMO${@:+ ${@:-}}\`"
fi
if [[ "$DEMO" = "modules/demo/deck" ]]; then
DEMOS="$(find modules/demo/deck -maxdepth 2 -type f -name 'package.json')"
DEMOS="$(echo -e "$DEMOS" | grep -v node_modules | sort -Vr)";
DEMOS=(${DEMOS});
DEMOS=("${DEMOS[@]/%\/package.json}")
echo "Please select a deck.gl demo to run:"
select DEMO in "${DEMOS[@]}" "Quit"; do
if [[ $REPLY -lt $(( ${#DEMOS[@]}+1 )) ]]; then
break;
elif [[ $REPLY -eq $(( ${#DEMOS[@]}+1 )) ]]; then
exit 0;
else
echo "Invalid option, please select a demo (or quit)"
fi
done;
echo "Run this demo directly via:"
echo "\`yarn demo $DEMO${@:+ ${@:-}}\`"
fi
ARGS="${@:-}";
if [[ "$DEMO" =~ "modules/demo/luma" && -z "$ARGS" ]]; then
DEMOS="$(find modules/demo/luma/lessons -type f -name 'package.json')"
DEMOS="$(echo -e "$DEMOS" | grep -v node_modules | sort -n)";
DEMOS=(${DEMOS});
DEMOS=("${DEMOS[@]/%\/package.json}")
DEMOS=("${DEMOS[@]/#modules\/demo\/luma\/lessons\/}")
echo "Please enter the luma lesson number to run (01 to 16)";
select ARGS in "${DEMOS[@]}" "Quit"; do
if [[ $REPLY -lt $(( ${#DEMOS[@]}+1 )) ]]; then
break;
elif [[ $REPLY -eq $(( ${#DEMOS[@]}+1 )) ]]; then
exit 0;
else
echo "Invalid option, please select a demo (or quit)"
fi
done;
echo "Run this demo directly via:"
echo "\`yarn demo modules/demo/luma $ARGS\`"
fi
ARGS="${@:-$ARGS}";
if [[ "$DEMO" =~ "modules/demo/ipc/umap" ]]; then ARGS="${@:-tcp://0.0.0.0:6000}";
fi
if [[ "$DEMO" =~ "modules/demo/client-server" ]]; then
NODE_ENV=${NODE_ENV:-production} \
NODE_NO_WARNINGS=${NODE_NO_WARNINGS:-1} \
exec npm --prefix="$DEMO" ${ARGS} start
elif [[ "$DEMO" =~ "modules/demo/deck/playground-ssr" ]]; then
NODE_ENV=${NODE_ENV:-production} \
NODE_NO_WARNINGS=${NODE_NO_WARNINGS:-1} \
exec npm --prefix="$DEMO" ${ARGS} start
else
NODE_ENV=${NODE_ENV:-production} \
NODE_NO_WARNINGS=${NODE_NO_WARNINGS:-1} \
exec npm --prefix="$DEMO" start -- ${ARGS}
# exec node --experimental-vm-modules --trace-uncaught "$DEMO" ${ARGS}
fi
| 0 |
rapidsai_public_repos/node/scripts
|
rapidsai_public_repos/node/scripts/postversion/linux.sh
|
#!/usr/bin/env bash
set -Eeo pipefail
lerna version \
--yes --no-push \
--ignore-scripts \
--force-publish="*" \
--no-git-tag-version \
${npm_package_version:-patch}
# Replace ^ with ~
find modules -type f -name 'package.json' -exec \
sed -i -E -e 's+(@rapidsai/.*)": "\^+\1": "~+g' {} \;
sed -i -E -e "s/$npm_old_version/$npm_package_version/g" USAGE.md
sed -i -E -e "s/$npm_old_version/$npm_package_version/g" docker-compose.devel.yml
sed -i -E -e "s/$npm_old_version/$npm_package_version/g" docker-compose.runtime.yml
sed -i -E -e "s/$npm_old_version/$npm_package_version/g" .github/workflows/release.yml
| 0 |
rapidsai_public_repos/node/scripts
|
rapidsai_public_repos/node/scripts/clean/linux.sh
|
#!/usr/bin/env bash
set -Eeo pipefail
echo "cleaning node-rapids"
if [[ ! -d node_modules || ! -d node_modules/lerna || ! -d node_modules/rimraf ]]; then
yarn --silent --non-interactive --no-node-version-check --ignore-engines;
fi
# clean modules/*/build dirs
lerna run --no-bail clean || true;
lerna clean --loglevel error --yes || true;
rimraf yarn.lock node_modules doc compile_commands.json .cache/{binary,source}
| 0 |
rapidsai_public_repos/node/scripts
|
rapidsai_public_repos/node/scripts/relink-bin-dirs/linux.sh
|
#!/usr/bin/env bash
set -Eeo pipefail
TOP="$(pwd)"
BIN="$(realpath node_modules/.bin)"
DIRS=$(lerna exec --scope "@rapidsai/*" "echo \$PWD")
RAPIDS_CORE_PATH=$(lerna exec --scope "@rapidsai/core" "echo \$PWD" | head -n1)
# ensure the cache dirs exist (clangd index, etc.)
mkdir -p "$TOP"/.cache/{binary,clangd,source}
for DIR in $DIRS; do
# symlink node_modules/.bin dirs to the root node_modules/.bin
mkdir -p "$DIR/node_modules"
if [[ "$BIN" != $DIR/node_modules/.bin ]]; then
rm -rf "$DIR/node_modules/.bin"
ln -sf "$BIN" "$DIR/node_modules/.bin"
# copy the ESLint settings file (for the VSCode ESLint plugin)
# cp ".eslintrc.js" "$DIR/.eslintrc.js"
# remove the local .cache symlink
rm -rf "$DIR/.cache"
# symlink to the shared top-level .cache dir
ln -sf "$(realpath --relative-to="$DIR" "$TOP/.cache")" "$DIR/.cache"
# symlink to the shared .env settings file
touch ".env" && ln -sf "$(realpath --relative-to="$DIR" "$TOP/.env")" "$DIR/.env"
# symlink to the shared .clangd settings file
touch ".clangd" && ln -sf "$(realpath --relative-to="$DIR" "$TOP/.clangd")" "$DIR/.clangd"
# symlink to the shared .eslintrc.js settings file
touch ".eslintrc.js" && ln -sf "$(realpath --relative-to="$DIR" "$TOP/.eslintrc.js")" "$DIR/.eslintrc.js"
fi;
done
# use `which npm` because yarn prepends its own path to /tmp/yarn-XXX/node
NPM_BIN_PATH="${npm_node_execpath:-$(which npm)}"
NAPI_INCLUDE_DIR="$PWD/node_modules/node-addon-api"
NODE_INCLUDE_DIR="${NPM_BIN_PATH%/bin/npm}/include"
# symlink node headers
ln -sf "$NODE_INCLUDE_DIR/node/node_api.h" "$RAPIDS_CORE_PATH/include/node_api.h"
# symlink napi headers
ln -sf "$NAPI_INCLUDE_DIR/napi.h" "$RAPIDS_CORE_PATH/include/napi.h"
ln -sf "$NAPI_INCLUDE_DIR/napi-inl.h" "$RAPIDS_CORE_PATH/include/napi-inl.h"
ln -sf "$NAPI_INCLUDE_DIR/napi-inl.deprecated.h" "$RAPIDS_CORE_PATH/include/napi-inl.deprecated.h"
| 0 |
rapidsai_public_repos/node/scripts
|
rapidsai_public_repos/node/scripts/test/linux.sh
|
#!/usr/bin/env bash
set -Eeo pipefail
exec lerna run --no-bail --scope '@rapidsai/*' --stream --concurrency 1 test
| 0 |
rapidsai_public_repos/node/scripts
|
rapidsai_public_repos/node/scripts/lint/linux.sh
|
#!/usr/bin/env bash
set -Eeo pipefail
args="";
fix_="";
jobs="${JOBS:-${PARALLEL_LEVEL:-$(nproc --ignore=2)}}";
while [[ "$#" -gt 0 ]]; do
case $1 in
-j*)
J="${1#-j}";
if [[ ${J} =~ ^[[:digit:]]+$ ]]; then
jobs="${J}";
else
jobs="$(nproc --ignore=2)";
fi;;
--fix) fix_="$1";;
*) args="${args:+$args }$1";;
esac; shift;
done
tsc_files="";
cpp_files="";
cmd_input="$(tr ' ' '\n' <<< "$args")";
tsc_regex="^(\.\/)?modules\/\w+?\/(src|test)\/.*?\.ts$";
cpp_regex="^(\.\/)?modules\/\w+?\/(src|include)\/.*?\.(h|cc?|cuh?|(c|h)pp)$";
if test -n "$(head -n1 <<< "$cmd_input")"; then
tsc_files="$(grep -Eiox --color=never "$tsc_regex" <<< "$cmd_input" || echo "")";
cpp_files="$(grep -Eiox --color=never "$cpp_regex" <<< "$cmd_input" || echo "")";
else
tsc_files="$(find . -type f -regextype posix-extended -iregex "$tsc_regex" || echo "")";
cpp_files="$(find . -type f -regextype posix-extended -iregex "$cpp_regex" || echo "")";
fi
echo "Running clang-format...";
time \
xargs -d'\n' -t -n1 -I% -P$jobs \
<<< "$(echo -e "$cpp_files\n$tsc_files")" \
clang-format-17 -i %;
echo "";
echo "Running ESLint (on up to $jobs cores)...";
time \
xargs -d'\n' -n1 -I% -P$jobs \
<<< "$tsc_files" \
node_modules/.bin/eslint --ignore-path .gitignore $fix_ %;
echo "";
| 0 |
rapidsai_public_repos/node
|
rapidsai_public_repos/node/docs/develop-on-bare-metal.md
|
# Developing on bare-metal
This document describes how to build and test on a properly configured Ubuntu installation outside docker.
Note: Due to the complexity of installing, updating, and managing native dependencies, we recommend [using the devel containers](https://github.com/rapidsai/node/blob/main/DEVELOP.md) for day-to-day development.
## Quick links
* [Common tools and dependencies](#common-tools-and-dependencies)
* [Additional per-module dependencies](#additional-per-module-dependencies)
* [Command to install most native dependencies](#command-to-install-most-native-dependencies)
* [Troubleshooting](#troubleshooting)
## Common tools and dependencies
The following dependencies are necessary to build any of the `node-rapids` native modules:
* [CUDA Toolkit v11.0+ and compatible driver](https://developer.nvidia.com/cuda-downloads).
* [node, npm](https://github.com/nvm-sh/nvm#installing-and-updating), and [yarn](https://yarnpkg.com/getting-started/install).
* [CMake v3.20.2+](https://cmake.org/) (we recommend either the [apt repository](https://apt.kitware.com/) or the self-installing shell script; an example of the apt route follows this list).
* `gcc-9` toolchain (available in Ubuntu via the official toolchain PPA `ppa:ubuntu-toolchain-r/test`)
* ```txt
ninja-build sccache jq zlib1g-dev liblz4-dev clang-format-17 clangd-17 lldb-17
```
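For example, CMake can be installed from the Kitware apt repository with commands along these lines. This is only a sketch that mirrors what [dev/dockerfiles/devel/main.Dockerfile](https://github.com/rapidsai/node/blob/main/dev/dockerfiles/devel/main.Dockerfile) does; adjust it to your Ubuntu release as needed:
```bash
# Add Kitware's signing key and apt repository, then install CMake (sketch; pin a version if you prefer).
wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null \
  | gpg --dearmor - | sudo tee /usr/share/keyrings/kitware-archive-keyring.gpg >/dev/null
echo "deb [signed-by=/usr/share/keyrings/kitware-archive-keyring.gpg] https://apt.kitware.com/ubuntu/ $(lsb_release -cs) main" \
  | sudo tee /etc/apt/sources.list.d/kitware.list >/dev/null
sudo apt update && sudo apt install -y cmake
```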
### Additional per-module dependencies
* `@rapidsai/cuspatial`
```txt
libgdal-dev
```
* `@rapidsai/sql`
```txt
maven openjdk-8-jdk libboost-regex-dev libboost-system-dev libboost-filesystem-dev
```
(`openjdk-11-jdk` also acceptable)
* [UCX v1.11.x](https://github.com/openucx/ucx.git)
```txt
libibverbs-dev librdmacm-dev libnuma-dev libhwloc-dev
```
* `node-canvas`, `@rapidsai/glfw`, `@rapidsai/webgl`
```txt
libxi-dev libxrandr-dev libxinerama-dev libxcursor-dev libcairo2-dev libpango1.0-dev libjpeg-dev libgif-dev librsvg2-dev extra-cmake-modules libwayland-dev wayland-protocols libxkbcommon-dev build-essential libxmu-dev libxi-dev libgl1-mesa-dev libegl1-mesa-dev libglu1-mesa-dev
```
## Command to install most native dependencies
We include a one-shot command for installing most C++ dependencies (in Ubuntu):
```bash
# Bootstrap a new dev environment -- only necessary to run once.
# Installs VSCode, C++ intellisense plugins, and system libraries.
# Checks whether individual components are already installed,
# and asks permission before installing new components.
yarn dev:install-cpp-dependencies
```
This script does not install GCC or the SQL module's dependencies. You should install and manage those separately (via [`update-alternatives`](http://manpages.ubuntu.com/manpages/trusty/man8/update-alternatives.8.html) or similar). See [dev/dockerfiles/devel/main.Dockerfile](https://github.com/rapidsai/node/blob/main/dev/dockerfiles/devel/main.Dockerfile) for an example of installing gcc-9, the RDMA/Infiniband drivers, and building UCX.
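For gcc-9, a minimal sketch (assuming the `ppa:ubuntu-toolchain-r/test` PPA mentioned above; versions and priorities are illustrative) looks roughly like this:
```bash
# Install gcc-9/g++-9 and register them as the default gcc/g++ via update-alternatives (illustrative priorities).
sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
sudo apt update && sudo apt install -y gcc-9 g++-9
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-9 100 \
                         --slave   /usr/bin/g++ g++ /usr/bin/g++-9
sudo update-alternatives --set gcc /usr/bin/gcc-9
```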
## Troubleshooting
Some remedies for potential error messages you may encounter.
* > unsupported GNU version! gcc versions later than 8 are not supported!
Install a [compatible CUDA host compiler](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#system-requirements) for your CUDA toolkit and OS versions.
* > No CMAKE_CUDA_COMPILER could be found.
This likely means your CUDA toolkit bin directory isn't in your environment's `$PATH`.
Run the following commands to append the CUDA toolkit bin directory to your path,
then reinitialize your current shell environment:
```bash
echo '
export CUDA_HOME="/usr/local/cuda"
export PATH="$PATH:$CUDA_HOME/bin"
' >> ~/.bashrc
source ~/.bashrc
```
* > ninja: error: loading 'build.ninja': No such file or directory
  This means the CMake "configure" step did not succeed. Execute `yarn rebuild` from the top of the repo (to rebuild everything), or from within the specific module that failed to configure.
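  For example (a minimal sketch; the module path below is only an illustration):
  ```bash
  # Rebuild everything from the repository root
  yarn rebuild
  # ...or rebuild only the module whose configure step failed, e.g.
  cd modules/cudf && yarn rebuild
  ```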
| 0 |
rapidsai_public_repos/node/docs
|
rapidsai_public_repos/node/docs/docker/installation.md
|
# Installing docker, docker-compose, and the nvidia-container-runtime
## Quick links
* [Installing docker](#installing-docker)
* [Installing the nvidia-container-toolkit](#installing-the-nvidia-container-toolkit)
* [Installing docker-compose](#installing-docker-compose)
* [Using the nvidia-container-runtime with docker-compose before v1.28.5](#using-the-nvidia-container-runtime-with-docker-compose-before-v1285)
## Installing docker
Follow the [official docker installation instructions](https://docs.docker.com/get-docker/) to install docker for your OS.
<details>
<summary>Click here to see Ubuntu 18.04+ docker-ce installation commands:</summary>
<pre>
# Install docker-ce in one command. Adds your current user to the docker user group.<br/>
release=$(lsb_release -cs) \
&& curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - \
&& sudo add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $release stable" \
&& sudo apt install -y docker-ce \
&& sudo usermod -aG docker $USER
</pre>
</details>
## Installing the nvidia-container-toolkit
Follow the [official nvidia-container-toolkit installation instructions](https://github.com/NVIDIA/nvidia-docker#quickstart) to install the nvidia-container-toolkit for your OS.
<details>
<summary>Click here to see Ubuntu 18.04+ nvidia-container-toolkit installation commands:</summary>
<pre>
# Add nvidia-container-toolkit apt package repositories
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list \
| sudo tee /etc/apt/sources.list.d/nvidia-docker.list<br/>
# Install the nvidia-container-toolkit
sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit<br/>
# Restart the docker service to make the nvidia-container-toolkit available
sudo systemctl restart docker
</pre>
</details>
## Installing docker-compose
Follow the [official docker-compose installation instructions](https://docs.docker.com/compose/install/) to install docker-compose v1.28.5+ for your OS.
<details>
<summary>Click here to see Ubuntu 18.04+ docker-compose installation commands:</summary>
<pre>
# Install docker-compose v1.28.5, or select any newer release in https://github.com/docker/compose/releases
DOCKER_COMPOSE_VERSION=1.28.5<br/>
sudo curl \
-L https://github.com/docker/compose/releases/download/$DOCKER_COMPOSE_VERSION/docker-compose-`uname -s`-`uname -m` \
-o /usr/local/bin/docker-compose && sudo chmod +x /usr/local/bin/docker-compose
</pre>
</details>
Note: If you installed docker-compose v2+, also install [compose-switch](https://github.com/docker/compose-switch).
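## Using the nvidia-container-runtime with docker-compose
With docker-compose v1.28.5+ (or Compose v2), GPUs can be requested directly in the compose file via device reservations. A minimal sketch (the service name and image tag are placeholders):
```yaml
services:
  gpu-test:
    image: nvidia/cuda:11.0-base
    command: nvidia-smi
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
```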
### Using the nvidia-container-runtime with docker-compose before v1.28.5
Prior to docker-compose v1.28.5, using the nvidia-container-runtime with docker-compose [requires](https://github.com/docker/compose/issues/6691) that `nvidia-container-runtime` be set as the default docker runtime. To do this, create or edit the `/etc/docker/daemon.json` file and update the "default-runtime" and "runtimes" settings.
<details>
<summary>Click here to see an example daemon.json:</summary>
<pre>
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "nvidia-container-runtime",
"runtimeArgs": []
}
}
}
</pre>
</details>
If you created or edited the `/etc/docker/daemon.json` file, restart the docker service so the new settings are applied:
```bash
sudo systemctl restart docker
```
If you're unsure whether the changes you made were successful, you can run a quick test to verify `nvidia-container-runtime` is the default docker runtime.
<details>
<summary>Click here to see a successful test of whether NVIDIA devices are available in a docker container:</summary>
<pre>
docker run --rm -it nvidia/cuda nvidia-smi<br/>
> Fri Jul 31 20:39:59 2020
> +-----------------------------------------------------------------------------+
> | NVIDIA-SMI 450.51.06 Driver Version: 450.51.06 CUDA Version: 11.0 |
> |-------------------------------+----------------------+----------------------+
> | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
> | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
> | | | MIG M. |
> |===============================+======================+======================|
> | 0 Quadro RTX 8000 On | 00000000:15:00.0 On | Off |
> | 33% 46C P8 35W / 260W | 1453MiB / 48584MiB | 1% Default |
> | | | N/A |
> +-------------------------------+----------------------+----------------------+
> | 1 Quadro RTX 8000 On | 00000000:99:00.0 Off | Off |
> | 33% 34C P8 14W / 260W | 6MiB / 48601MiB | 0% Default |
> | | | N/A |
> +-------------------------------+----------------------+----------------------+
>
> +-----------------------------------------------------------------------------+
> | Processes: |
> | GPU GI CI PID Type Process name GPU Memory |
> | ID ID Usage |
> |=============================================================================|
> +-----------------------------------------------------------------------------+
</pre>
</details>
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/pre-commit-hooks/.pre-commit-config.yaml
|
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: 'v4.4.0'
hooks:
- id: end-of-file-fixer
- id: trailing-whitespace
- id: check-builtin-literals
- id: check-executables-have-shebangs
- id: check-json
- id: check-yaml
- id: debug-statements
- id: requirements-txt-fixer
- repo: https://github.com/asottile/pyupgrade
rev: 'v3.3.1'
hooks:
- id: pyupgrade
args:
- --py38-plus
- repo: https://github.com/PyCQA/isort
rev: '5.12.0'
hooks:
- id: isort
- repo: https://github.com/psf/black
rev: '23.1.0'
hooks:
- id: black
- repo: https://github.com/PyCQA/flake8
rev: '6.0.0'
hooks:
- id: flake8
args:
- --show-source
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/pre-commit-hooks/.pre-commit-hooks.yaml
|
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/pre-commit-hooks/pyproject.toml
|
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[project]
name = "rapids-pre-commit-hooks"
version = "0.0.0"
authors = [
{ name = "RAPIDS Development Team", email = "[email protected]" }
]
urls = { homepage = "https://github.com/rapidsai/pre-commit-hooks" }
description = "pre-commit hooks for RAPIDS"
readme = { file = "README.md", content-type = "text/markdown" }
license = { file = "LICENSE" }
classifiers = [
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
]
requires-python = ">=3.8"
[tool.setuptools]
packages = { "find" = { where = ["src"] } }
[tool.isort]
profile = "black"
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/pre-commit-hooks/.flake8
|
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
[flake8]
max-line-length = 88
select = E,F,W
ignore = E123,E126,E203,E226,E241,E704,W503,W504
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/pre-commit-hooks/README.md
|
# pre-commit-hooks
This repository contains [pre-commit](https://pre-commit.com) hooks used by RAPIDS projects.
## Using hooks
Copy the following into your repository's `.pre-commit-config.yaml`:
```yaml
- repo: https://github.com/rapidsai/pre-commit-hooks
rev: v0.0.1 # Use the ref you want to point at
hooks:
- id: copyright-checker # Hook names
```
## Included hooks
All hooks are listed in `.pre-commit-hooks.yaml`.
- (No hooks exist yet)
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/pre-commit-hooks/LICENSE
|
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2018 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
rapidsai_public_repos/pre-commit-hooks/src
|
rapidsai_public_repos/pre-commit-hooks/src/rapids_pre_commit_hooks/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/rapids_triton_pca_example/CMakeLists.txt
|
#=============================================================================
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
cmake_minimum_required(VERSION 3.21 FATAL_ERROR)
##############################################################################
# - Target names -------------------------------------------------------------
set(BACKEND_NAME "rapids_pca")
set(BACKEND_TARGET "triton_${BACKEND_NAME}")
##############################################################################
# - Prepare rapids-cmake -----------------------------------------------------
file(DOWNLOAD https://raw.githubusercontent.com/rapidsai/rapids-cmake/branch-21.10/RAPIDS.cmake
${CMAKE_BINARY_DIR}/RAPIDS.cmake)
include(${CMAKE_BINARY_DIR}/RAPIDS.cmake)
include(rapids-cmake)
include(rapids-cpm)
include(rapids-cuda)
include(rapids-export)
include(rapids-find)
rapids_cuda_init_architectures(RAPIDS_TRITON_BACKEND)
project(RAPIDS_TRITON_BACKEND VERSION 21.10.00 LANGUAGES CXX CUDA)
##############################################################################
# - build type ---------------------------------------------------------------
# Set a default build type if none was specified
rapids_cmake_build_type(Release)
# this is needed for clang-tidy runs
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
list(APPEND RAPIDS_TRITON_BACKEND_CUDA_FLAGS --expt-extended-lambda --expt-relaxed-constexpr)
##############################################################################
# - User Options ------------------------------------------------------------
option(BUILD_TESTS "Build RAPIDS_TRITON_BACKEND unit-tests" ON)
option(CUDA_ENABLE_KERNEL_INFO "Enable kernel resource usage info" OFF)
option(CUDA_ENABLE_LINE_INFO "Enable lineinfo in nvcc" OFF)
option(DETECT_CONDA_ENV "Enable detection of conda environment for dependencies" ON)
option(DISABLE_DEPRECATION_WARNINGS "Disable deprecation warnings" ON)
option(NVTX "Enable nvtx markers" OFF)
message(VERBOSE "RAPIDS_TRITON_BACKEND: Enabling detection of conda environment for dependencies: ${DETECT_CONDA_ENV}")
message(VERBOSE "RAPIDS_TRITON_BACKEND: Enabling kernelinfo in nvcc: ${CUDA_ENABLE_KERNEL_INFO}")
message(VERBOSE "RAPIDS_TRITON_BACKEND: Enabling lineinfo in nvcc: ${CUDA_ENABLE_LINE_INFO}")
message(VERBOSE "RAPIDS_TRITON_BACKEND: Enabling nvtx markers: ${NVTX}")
message(VERBOSE "RAPIDS_TRITON_BACKEND: Build RAPIDS_TRITON_BACKEND unit-tests: ${BUILD_TESTS}")
# Set RMM logging level
set(RMM_LOGGING_LEVEL "INFO" CACHE STRING "Choose the logging level.")
set_property(CACHE RMM_LOGGING_LEVEL PROPERTY STRINGS "TRACE" "DEBUG" "INFO" "WARN" "ERROR" "CRITICAL" "OFF")
message(VERBOSE "RAPIDS_TRITON_BACKEND: RMM_LOGGING_LEVEL = '${RMM_LOGGING_LEVEL}'.")
##############################################################################
# - Conda environment detection ----------------------------------------------
if(DETECT_CONDA_ENV)
rapids_cmake_support_conda_env( conda_env MODIFY_PREFIX_PATH )
if (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT AND DEFINED ENV{CONDA_PREFIX})
message(STATUS "RAPIDS_TRITON_BACKEND: No CMAKE_INSTALL_PREFIX argument detected, setting to: $ENV{CONDA_PREFIX}")
set(CMAKE_INSTALL_PREFIX "$ENV{CONDA_PREFIX}")
endif()
endif()
##############################################################################
# - compiler options ---------------------------------------------------------
# * find CUDAToolkit package
# * determine GPU architectures
# * enable the CMake CUDA language
# * set other CUDA compilation flags
rapids_find_package(CUDAToolkit REQUIRED
BUILD_EXPORT_SET ${BACKEND_TARGET}-exports
INSTALL_EXPORT_SET ${BACKEND_TARGET}-exports
)
include(cmake/modules/ConfigureCUDA.cmake)
##############################################################################
# - Requirements -------------------------------------------------------------
# add third party dependencies using CPM
rapids_cpm_init()
include(cmake/thirdparty/get_rapids-triton.cmake)
include(cmake/thirdparty/get_raft.cmake)
if(BUILD_TESTS)
include(cmake/thirdparty/get_gtest.cmake)
endif()
##############################################################################
# - install targets-----------------------------------------------------------
add_library(
${BACKEND_TARGET} SHARED
src/gpu_infer.cu
src/api.cc
)
set_target_properties(${BACKEND_TARGET}
PROPERTIES BUILD_RPATH "\$ORIGIN"
# set target compile options
CXX_STANDARD 17
CXX_STANDARD_REQUIRED ON
CUDA_STANDARD 17
CUDA_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON
INTERFACE_POSITION_INDEPENDENT_CODE ON
)
target_compile_options(${BACKEND_TARGET}
PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:${RAPIDS_TRITON_BACKEND_CXX_FLAGS}>"
"$<$<COMPILE_LANGUAGE:CUDA>:${RAPIDS_TRITON_BACKEND_CUDA_FLAGS}>"
)
target_include_directories(${BACKEND_TARGET}
PRIVATE "$<BUILD_INTERFACE:${RAPIDS_TRITON_BACKEND_SOURCE_DIR}/include>"
"${CMAKE_CURRENT_SOURCE_DIR}/src"
)
target_link_libraries(${BACKEND_TARGET}
PRIVATE
rapids_triton::rapids_triton
triton-core-serverstub
triton-backend-utils
"${TRITONSERVER_LIB}"
$<TARGET_NAME_IF_EXISTS:conda_env>
raft::raft
)
install(
TARGETS ${BACKEND_TARGET}
LIBRARY DESTINATION /opt/tritonserver/backends/${BACKEND_NAME}
)
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/rapids_triton_pca_example/README.md
|
<!--
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-->
[](https://opensource.org/licenses/Apache-2.0)
# The RAPIDS-Triton Backend Template
This template repo offers a starting place for those wishing to create a Triton
backend with
[RAPIDS-Triton](https://github.com/rapidsai/rapids-triton). For an example of
how to use this template with detailed commentary, check out the [Linear
Example](https://github.com/rapidsai/rapids-triton-linear-example) repo.
Throughout the repo, you will find comments labeled `TODO(template)`,
indicating places where you will need to insert your own code or make changes.
Working through each of these should allow you to create a working Triton
backend.
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/rapids_triton_pca_example/Dockerfile
|
###########################################################################################
# Arguments for controlling build details
###########################################################################################
# Version of Triton to use
ARG TRITON_VERSION=21.08
# Base container image
ARG BASE_IMAGE=nvcr.io/nvidia/tritonserver:${TRITON_VERSION}-py3
# Whether or not to build indicated components
ARG BUILD_TESTS=OFF
ARG BUILD_EXAMPLE=ON
FROM ${BASE_IMAGE} as base
ENV PATH="/root/miniconda3/bin:${PATH}"
RUN apt-get update \
&& apt-get install --no-install-recommends -y wget patchelf \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
ENV PYTHONDONTWRITEBYTECODE=true
RUN wget \
https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \
&& mkdir /root/.conda \
&& bash Miniconda3-latest-Linux-x86_64.sh -b \
&& rm -f Miniconda3-latest-Linux-x86_64.sh
COPY ./conda/environments/rapids_triton_dev_cuda11.4.yml /environment.yml
RUN conda env update -f /environment.yml \
&& rm /environment.yml \
&& conda clean -afy \
&& find /root/miniconda3/ -follow -type f -name '*.pyc' -delete \
&& find /root/miniconda3/ -follow -type f -name '*.js.map' -delete
ENV PYTHONDONTWRITEBYTECODE=false
RUN mkdir /rapids_triton
COPY ./src /rapids_triton/src
COPY ./CMakeLists.txt /rapids_triton
COPY ./cmake /rapids_triton/cmake
WORKDIR /rapids_triton
SHELL ["conda", "run", "--no-capture-output", "-n", "rapids_triton_dev", "/bin/bash", "-c"]
FROM base as build-stage
ARG TRITON_VERSION
ENV TRITON_VERSION=$TRITON_VERSION
ARG BUILD_TYPE=Release
ENV BUILD_TYPE=$BUILD_TYPE
ARG BUILD_TESTS
ENV BUILD_TESTS=$BUILD_TESTS
ARG BUILD_EXAMPLE
ENV BUILD_EXAMPLE=$BUILD_EXAMPLE
RUN mkdir /rapids_triton/build /rapids_triton/install
WORKDIR /rapids_triton/build
RUN cmake \
-GNinja \
-DCMAKE_BUILD_TYPE="${BUILD_TYPE}" \
-DBUILD_TESTS="${BUILD_TESTS}" \
-DCMAKE_INSTALL_PREFIX=/rapids_triton/install \
-DTRITON_COMMON_REPO_TAG="r${TRITON_VERSION}" \
-DTRITON_CORE_REPO_TAG="r${TRITON_VERSION}" \
-DTRITON_BACKEND_REPO_TAG="r${TRITON_VERSION}" \
..
RUN ninja install
FROM ${BASE_IMAGE}
ARG BACKEND_NAME
ENV BACKEND_NAME=$BACKEND_NAME
RUN mkdir /models
# Remove existing backend install
RUN if [ -d /opt/tritonserver/backends/${BACKEND_NAME} ]; \
then \
rm -rf /opt/tritonserver/backends/${BACKEND_NAME}/*; \
fi
# TODO(template): If linking against any shared libraries, copy them over as
# well
COPY --from=build-stage \
/opt/tritonserver/backends/$BACKEND_NAME \
/opt/tritonserver/backends/$BACKEND_NAME
ENTRYPOINT ["tritonserver", "--model-repository=/models"]
| 0 |
rapidsai_public_repos/rapids_triton_pca_example/conda
|
rapidsai_public_repos/rapids_triton_pca_example/conda/environments/rapids_triton_dev_cuda11.4.yml
|
---
name: rapids_triton_dev
channels:
- nvidia
- conda-forge
dependencies:
- cmake>=3.21
- cudatoolkit=11.4
- ninja
- rapidjson
# TODO(template): Add any build dependencies for your backend
| 0 |
rapidsai_public_repos/rapids_triton_pca_example/conda
|
rapidsai_public_repos/rapids_triton_pca_example/conda/environments/rapids_triton_test.yml
|
---
name: rapids_triton_test
channels:
- conda-forge
dependencies:
- flake8
- pip
- python
- pytest
- numpy
- pip:
- nvidia-pyindex
# TODO(template): Add any test dependencies for your tests here
| 0 |
rapidsai_public_repos/rapids_triton_pca_example/cmake
|
rapidsai_public_repos/rapids_triton_pca_example/cmake/modules/ConfigureCUDA.cmake
|
#=============================================================================
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
if(DISABLE_DEPRECATION_WARNINGS)
list(APPEND RAPIDS_TRITON_CXX_FLAGS -Wno-deprecated-declarations)
list(APPEND RAPIDS_TRITON_CUDA_FLAGS -Xcompiler=-Wno-deprecated-declarations)
endif()
if(CMAKE_COMPILER_IS_GNUCXX)
list(APPEND RAPIDS_TRITON_CXX_FLAGS -Wall -Werror -Wno-unknown-pragmas -Wno-error=deprecated-declarations)
endif()
list(APPEND RAPIDS_TRITON_CUDA_FLAGS --expt-extended-lambda --expt-relaxed-constexpr)
# set warnings as errors
if(CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL 11.2.0)
list(APPEND RAPIDS_TRITON_CUDA_FLAGS -Werror=all-warnings)
endif()
list(APPEND RAPIDS_TRITON_CUDA_FLAGS -Xcompiler=-Wall,-Werror,-Wno-error=deprecated-declarations)
# Option to enable line info in CUDA device compilation to allow introspection when profiling / memchecking
if(CUDA_ENABLE_LINEINFO)
list(APPEND RAPIDS_TRITON_CUDA_FLAGS -lineinfo)
endif()
# Debug options
if(CMAKE_BUILD_TYPE MATCHES Debug)
message(VERBOSE "RAPIDS_TRITON: Building with debugging flags")
list(APPEND RAPIDS_TRITON_CUDA_FLAGS -G -Xcompiler=-rdynamic)
endif()
| 0 |
rapidsai_public_repos/rapids_triton_pca_example/cmake
|
rapidsai_public_repos/rapids_triton_pca_example/cmake/thirdparty/get_raft.cmake
|
#=============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
function(find_and_configure_raft)
set(oneValueArgs VERSION FORK PINNED_TAG)
cmake_parse_arguments(PKG "${options}" "${oneValueArgs}"
"${multiValueArgs}" ${ARGN} )
if(DEFINED CPM_raft_SOURCE OR NOT DISABLE_FORCE_CLONE_RAFT)
set(CPM_DL_ALL_CACHE ${CPM_DOWNLOAD_ALL})
set(CPM_DOWNLOAD_ALL ON)
endif()
rapids_cpm_find(raft ${PKG_VERSION}
GLOBAL_TARGETS raft::raft
BUILD_EXPORT_SET ${BACKEND_TARGET}-exports
INSTALL_EXPORT_SET ${BACKEND_TARGET}-exports
CPM_ARGS
GIT_REPOSITORY https://github.com/${PKG_FORK}/raft.git
GIT_TAG ${PKG_PINNED_TAG}
SOURCE_SUBDIR cpp
OPTIONS
"BUILD_TESTS OFF"
)
if(raft_ADDED)
message(VERBOSE "RAPIDS_TRITON_BACKEND: Using RAFT located in ${raft_SOURCE_DIR}")
else()
message(VERBOSE "RAPIDS_TRITON_BACKEND: Using RAFT located in ${raft_DIR}")
endif()
if(DEFINED CPM_raft_SOURCE OR NOT DISABLE_FORCE_CLONE_RAFT)
set(CPM_DOWNLOAD_ALL ${CPM_DL_ALL_CACHE})
endif()
endfunction()
set(RAFT_MIN_VERSION "21.10.00")
set(RAFT_BRANCH_VERSION "21.10")
# Change pinned tag here to test a commit in CI
# To use a different RAFT locally, set the CMake variable
# CPM_raft_SOURCE=/path/to/local/raft
find_and_configure_raft(VERSION ${RAFT_MIN_VERSION}
FORK rapidsai
PINNED_TAG branch-${RAFT_BRANCH_VERSION}
)
| 0 |
rapidsai_public_repos/rapids_triton_pca_example/cmake
|
rapidsai_public_repos/rapids_triton_pca_example/cmake/thirdparty/get_gtest.cmake
|
#=============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
function(find_and_configure_gtest)
include(${rapids-cmake-dir}/cpm/gtest.cmake)
rapids_cpm_gtest()
endfunction()
find_and_configure_gtest()
| 0 |
rapidsai_public_repos/rapids_triton_pca_example/cmake
|
rapidsai_public_repos/rapids_triton_pca_example/cmake/thirdparty/get_rapids-triton.cmake
|
#=============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
function(find_and_configure_rapids_triton)
set(oneValueArgs VERSION FORK PINNED_TAG)
cmake_parse_arguments(PKG "${options}" "${oneValueArgs}"
"${multiValueArgs}" ${ARGN} )
rapids_cpm_find(rapids_triton ${PKG_VERSION}
GLOBAL_TARGETS rapids_triton::rapids_triton
BUILD_EXPORT_SET ${BACKEND_TARGET}-exports
INSTALL_EXPORT_SET ${BACKEND_TARGET}-exports
CPM_ARGS
GIT_REPOSITORY https://github.com/${PKG_FORK}/rapids-triton.git
GIT_TAG ${PKG_PINNED_TAG}
SOURCE_SUBDIR cpp
OPTIONS
"BUILD_TESTS OFF"
"BUILD_EXAMPLE OFF"
)
message(VERBOSE "${BACKEND_TARGET}: Using RAPIDS-Triton located in ${rapids_triton_SOURCE_DIR}")
endfunction()
# Change pinned tag here to test a commit in CI
# To use a different RAPIDS-Triton locally, set the CMake variable
# CPM_rapids_triton_SOURCE=/path/to/local/rapids-triton
find_and_configure_rapids_triton(VERSION 21.10
FORK rapidsai
PINNED_TAG branch-21.10
)
| 0 |
rapidsai_public_repos/rapids_triton_pca_example
|
rapidsai_public_repos/rapids_triton_pca_example/src/gpu_infer.h
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <names.h>
#include <cstddef>
#include <rapids_triton/batch/batch.hpp>
#include <rapids_triton/tensor/tensor.hpp>
namespace triton {
namespace backend {
namespace NAMESPACE {
void gpu_infer(const float* X_input, float* X_transformed, const float* mu, const float* components, float* X_workplace,
std::size_t n_components, std::size_t n_cols, std::size_t n_rows, cudaStream_t stream);
}
} // namespace backend
} // namespace triton
| 0 |
rapidsai_public_repos/rapids_triton_pca_example
|
rapidsai_public_repos/rapids_triton_pca_example/src/api.cc
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <model.h>
#include <names.h>
#include <shared_state.h>
#include <stdint.h>
#include <triton/backend/backend_common.h>
#include <triton/backend/backend_model.h>
#include <triton/backend/backend_model_instance.h>
#include <rapids_triton/triton/api/execute.hpp>
#include <rapids_triton/triton/api/initialize.hpp>
#include <rapids_triton/triton/api/instance_finalize.hpp>
#include <rapids_triton/triton/api/instance_initialize.hpp>
#include <rapids_triton/triton/api/model_finalize.hpp>
#include <rapids_triton/triton/api/model_initialize.hpp>
#include <rapids_triton/triton/model_instance_state.hpp>
#include <rapids_triton/triton/model_state.hpp>
namespace triton {
namespace backend {
namespace NAMESPACE {
using ModelState = rapids::TritonModelState<RapidsSharedState>;
using ModelInstanceState =
rapids::ModelInstanceState<RapidsModel, RapidsSharedState>;
extern "C" {
/** Confirm that backend is compatible with Triton's backend API version
*/
TRITONSERVER_Error* TRITONBACKEND_Initialize(TRITONBACKEND_Backend* backend) {
return rapids::triton_api::initialize(backend);
}
TRITONSERVER_Error* TRITONBACKEND_ModelInitialize(TRITONBACKEND_Model* model) {
return rapids::triton_api::model_initialize<ModelState>(model);
}
TRITONSERVER_Error* TRITONBACKEND_ModelFinalize(TRITONBACKEND_Model* model) {
return rapids::triton_api::model_finalize<ModelState>(model);
}
TRITONSERVER_Error* TRITONBACKEND_ModelInstanceInitialize(
TRITONBACKEND_ModelInstance* instance) {
return rapids::triton_api::instance_initialize<ModelState,
ModelInstanceState>(instance);
}
TRITONSERVER_Error* TRITONBACKEND_ModelInstanceFinalize(
TRITONBACKEND_ModelInstance* instance) {
return rapids::triton_api::instance_finalize<ModelInstanceState>(instance);
}
TRITONSERVER_Error* TRITONBACKEND_ModelInstanceExecute(
TRITONBACKEND_ModelInstance* instance, TRITONBACKEND_Request** raw_requests,
uint32_t const request_count) {
return rapids::triton_api::execute<ModelState, ModelInstanceState>(
instance, raw_requests, static_cast<std::size_t>(request_count));
}
} // extern "C"
} // namespace NAMESPACE
} // namespace backend
} // namespace triton
| 0 |
rapidsai_public_repos/rapids_triton_pca_example
|
rapidsai_public_repos/rapids_triton_pca_example/src/shared_state.h
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <names.h>
#include <memory>
#include <rapids_triton/model/shared_state.hpp>
#include <rapids_triton/triton/logging.hpp>
namespace triton {
namespace backend {
namespace NAMESPACE {
struct RapidsSharedState : rapids::SharedModelState {
RapidsSharedState(std::unique_ptr<common::TritonJson::Value>&& config)
: rapids::SharedModelState{std::move(config)} {}
void load() {
n_components = get_config_param<std::size_t>("n_components");
n_cols = get_config_param<std::size_t>("n_cols");
}
void unload() {
rapids::log_info(__FILE__, __LINE__) << "Unloading shared state...";
}
std::size_t n_cols = 0;
std::size_t n_components = 0;
};
} // namespace NAMESPACE
} // namespace backend
} // namespace triton
| 0 |
rapidsai_public_repos/rapids_triton_pca_example
|
rapidsai_public_repos/rapids_triton_pca_example/src/gpu_infer.cu
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <names.h>
#include <shared_state.h>
#include <gpu_infer.h>
#include <cstddef>
#include <raft/handle.hpp>
#include <raft/linalg/gemm.cuh>
#include <raft/linalg/transpose.h>
#include <raft/stats/mean_center.cuh>
#include <rapids_triton/batch/batch.hpp>
#include <rapids_triton/tensor/tensor.hpp>
namespace triton { namespace backend { namespace NAMESPACE {
void gpu_infer(const float* X_input, float* X_transformed, const float* mu, const float* components, float* X_workplace,
std::size_t n_components, std::size_t n_cols, std::size_t n_rows, cudaStream_t stream) {
raft::stats::meanCenter(X_workplace, X_input, mu, n_cols, n_rows, true, true, stream);
float alpha = 1;
float beta = 0;
auto handle = raft::handle_t(1);
handle.set_stream(stream);
raft::linalg::gemm(handle,
X_workplace,
static_cast<int>(n_cols),
static_cast<int>(n_rows),
components,
X_transformed,
static_cast<int>(n_rows),
static_cast<int>(n_components),
CUBLAS_OP_T,
CUBLAS_OP_T,
alpha,
beta,
stream);
raft::linalg::transpose(handle, X_transformed, X_workplace,
static_cast<int>(n_rows), static_cast<int>(n_components), stream);
}
}}}
| 0 |
rapidsai_public_repos/rapids_triton_pca_example
|
rapidsai_public_repos/rapids_triton_pca_example/src/model.h
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <names.h>
#include <shared_state.h>
#include <gpu_infer.h>
#include <cstddef>
#include <filesystem>
#include <fstream>
#include <stdexcept>
#include <rapids_triton/batch/batch.hpp> // rapids::Batch
#include <rapids_triton/model/model.hpp> // rapids::Model
#include <rapids_triton/triton/deployment.hpp> // rapids::DeploymentType
#include <rapids_triton/triton/device.hpp> // rapids::device_id_t
#include <rapids_triton/triton/logging.hpp>
#include <rapids_triton/memory/buffer.hpp>
namespace triton {
namespace backend {
namespace NAMESPACE {
struct RapidsModel : rapids::Model<RapidsSharedState> {
RapidsModel(std::shared_ptr<RapidsSharedState> shared_state,
rapids::device_id_t device_id, cudaStream_t default_stream,
rapids::DeploymentType deployment_type,
std::string const& filepath)
: rapids::Model<RapidsSharedState>(shared_state, device_id,
default_stream, deployment_type,
filepath) {}
void cpu_infer(const float* X_input, float* X_transformed, const float* mu, const float* components, float* X_workplace,
std::size_t n_components, std::size_t n_cols, std::size_t n_rows) const
{
// Mean center
for (std::size_t i = 0; i < n_rows; ++i) {
for (std::size_t j = 0; j < n_cols; ++j) {
X_workplace[i * n_cols + j] = X_input[i * n_cols + j] - mu[j];
}
}
    // Project the mean-centered data onto the principal components.
    // X_transformed is accumulated into, so zero it first in case the output
    // buffer is not pre-initialized.
    for (std::size_t i = 0; i < n_rows * n_components; ++i) { X_transformed[i] = 0.0f; }
    for (std::size_t i = 0; i < n_rows; i++)
      for (std::size_t j = 0; j < n_cols; j++)
        for (std::size_t k = 0; k < n_components; k++)
          X_transformed[i * n_components + k] +=
            X_workplace[i * n_cols + j] * components[j * n_components + k];
}
void predict(rapids::Batch& batch) const {
auto X_input = get_input<float>(batch, "X_input");
auto X_transformed = get_output<float>(batch, "X_transformed");
auto n_components = get_shared_state()->n_components;
auto n_cols = get_shared_state()->n_cols;
auto n_rows = X_input.shape()[0];
auto memory_type = X_input.mem_type();
auto X_workplace = rapids::Buffer<float>(n_cols * n_rows, memory_type, get_device_id(), get_stream());
if (memory_type == rapids::DeviceMemory) {
gpu_infer(X_input.data(), X_transformed.data(), mu.data(), components.data(), X_workplace.data(),
n_components, n_cols, n_rows, get_stream());
rapids::copy(X_transformed.buffer(), X_workplace, 0, X_transformed.size());
}
else {
cpu_infer(X_input.data(), X_transformed.data(), mu.data(), components.data(), X_workplace.data(),
n_components, n_cols, n_rows);
}
X_transformed.finalize();
}
auto load_file(const std::string& file_path, std::size_t expected_size, const rapids::MemoryType& memory_type) {
std::ifstream data_file(file_path, std::ios::binary);
std::vector<unsigned char> data_vector(std::istreambuf_iterator<char>(data_file), {});
if (data_vector.size() != expected_size) {
throw "Invalid size. Expected " + std::to_string(expected_size) + " but got " + std::to_string(data_vector.size());
}
auto result = rapids::Buffer<float>(data_vector.size() / sizeof (float), memory_type, get_device_id());
rapids::copy(result, rapids::Buffer<float>(reinterpret_cast<float*>(data_vector.data()),
data_vector.size() / sizeof (float),
rapids::HostMemory));
return result;
}
void load() {
rapids::log_info(__FILE__, __LINE__) << "Starting loading ...";
auto n_components = get_shared_state()->n_components;
auto n_cols = get_shared_state()->n_cols;
auto memory_type = rapids::MemoryType{};
if (get_deployment_type() == rapids::GPUDeployment) {
memory_type = rapids::DeviceMemory;
} else {
memory_type = rapids::HostMemory;
}
auto path = std::filesystem::path(get_filepath());
/* If the config file does not specify a filepath for the model,
* get_filepath returns the directory where the serialized model should be
* found. It is generally good practice to provide logic to allow the use
* of a default filename so that model configurations do not always have to
* specify a path to their model */
if (!std::filesystem::is_directory(path)) {
throw std::exception();
}
rapids::log_info(__FILE__, __LINE__) << "Loading components vector";
components = load_file(get_filepath() + "/components.bin",
n_components * n_cols * sizeof(float),
memory_type);
rapids::log_info(__FILE__, __LINE__) << "Loading mu vector";
mu = load_file(get_filepath() + "/mu.bin",
n_cols * sizeof(float),
memory_type);
}
void unload() {}
private:
rapids::Buffer<float> components{};
rapids::Buffer<float> mu{};
};
} // namespace NAMESPACE
} // namespace backend
} // namespace triton
| 0 |
rapidsai_public_repos/rapids_triton_pca_example
|
rapidsai_public_repos/rapids_triton_pca_example/src/names.h
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#define NAMESPACE rapids_pca
| 0 |
rapidsai_public_repos/rapids_triton_pca_example/qa
|
rapidsai_public_repos/rapids_triton_pca_example/qa/L0_e2e/test_model.py
|
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from rapids_triton import Client
from rapids_triton.testing import arrays_close, get_random_seed
N_ROWS = 8192
N_COLS = 25
N_COMPONENTS = 5
@pytest.fixture
def model_inputs():
np.random.seed(get_random_seed())
return {
"X_input": np.random.rand(N_ROWS, N_COLS).astype('float32')
}
@pytest.fixture
def model_output_sizes():
return {"X_transformed": N_ROWS * N_COMPONENTS * np.dtype('float32').itemsize}
def get_ground_truth(inputs):
x = inputs['X_input']
x_c = x - MU
return {'X_transformed': x_c.dot(COMPONENTS)}
@pytest.mark.parametrize(
"model_name", ['pca_example']
)
def test_model(model_name, model_inputs, model_output_sizes):
client = Client()
result = client.predict(model_name, model_inputs, model_output_sizes)
shm_result = client.predict(
model_name, model_inputs, model_output_sizes, shared_mem='cuda'
)
ground_truth = get_ground_truth(model_inputs)
for output_name in sorted(ground_truth.keys()):
arrays_close(
result[output_name],
ground_truth[output_name],
atol=1e-5,
assert_close=True
)
arrays_close(
shm_result[output_name],
ground_truth[output_name],
atol=1e-5,
assert_close=True
)
MU = np.array([0.3054301 , 0.53497523, 0.02903529, 0.23445411, 0.41508475,
0.73335785, 0.89488304, 0.31067532, 0.9334298 , 0.02269967,
0.75677216, 0.32904336, 0.63879555, 0.75856906, 0.93770117,
0.80694044, 0.14879903, 0.8788233 , 0.36914352, 0.89124376,
0.76835155, 0.01684399, 0.1580411 , 0.35072792, 0.38621086],
dtype=np.float32)
COMPONENTS = np.array([[0.1077859 , 0.0152536 , 0.14996086, 0.27519643, 0.5466197 ],
[0.47137365, 0.7524288 , 0.16581082, 0.6583814 , 0.6733525 ],
[0.5419624 , 0.53981566, 0.4943707 , 0.60533386, 0.9173961 ],
[0.49503392, 0.4416264 , 0.12268677, 0.26787782, 0.910786 ],
[0.71058154, 0.3931972 , 0.78567946, 0.8114448 , 0.28378612],
[0.76400083, 0.710263 , 0.9714428 , 0.59266746, 0.63176847],
[0.47967914, 0.7907602 , 0.14844431, 0.17678756, 0.9410757 ],
[0.13820966, 0.3714162 , 0.19777128, 0.9384368 , 0.69669586],
[0.46815118, 0.20329583, 0.3123208 , 0.6186174 , 0.2085056 ],
[0.4300877 , 0.84767324, 0.42783308, 0.1778231 , 0.3636397 ],
[0.1769452 , 0.5860459 , 0.37256172, 0.71824384, 0.9448562 ],
[0.49792168, 0.42727843, 0.8448393 , 0.77229506, 0.09547652],
[0.33963397, 0.85927695, 0.31496638, 0.35328254, 0.10459802],
[0.39113268, 0.91155696, 0.73254997, 0.26312187, 0.777164 ],
[0.07265835, 0.09515466, 0.13576192, 0.26306516, 0.38162884],
[0.8208812 , 0.33372718, 0.6603761 , 0.14251982, 0.63563746],
[0.6512604 , 0.41092023, 0.7265426 , 0.9646286 , 0.21258278],
[0.4980957 , 0.38877907, 0.8429187 , 0.09256837, 0.811749 ],
[0.13165434, 0.22899932, 0.50088805, 0.9763909 , 0.50195044],
[0.9490048 , 0.60583454, 0.03239321, 0.04777756, 0.51496094],
[0.6111744 , 0.35173875, 0.6366924 , 0.56868726, 0.6552913 ],
[0.41361338, 0.59937996, 0.41819212, 0.52223563, 0.6873631 ],
[0.07992661, 0.5735988 , 0.49894568, 0.07927666, 0.5696119 ],
[0.7249317 , 0.25087562, 0.42774037, 0.2647722 , 0.5418794 ],
[0.19648804, 0.9403854 , 0.25328928, 0.76671 , 0.5263434 ]],
dtype=np.float32)
| 0 |
rapidsai_public_repos/rapids_triton_pca_example/qa/L0_e2e/model_repository
|
rapidsai_public_repos/rapids_triton_pca_example/qa/L0_e2e/model_repository/pca_example/config.pbtxt
|
name: "pca_example"
backend: "rapids_pca"
max_batch_size: 32768
input [
{
name: "X_input"
data_type: TYPE_FP32
dims: [ 25 ]
}
]
output [
{
name: "X_transformed"
data_type: TYPE_FP32
dims: [ 5 ]
}
]
instance_group [{ kind: KIND_GPU }]
parameters [
{
key: "n_components"
value: { string_value: "5"}
},
{
key: "n_cols"
value: { string_value: "25"}
}
]
dynamic_batching {
max_queue_delay_microseconds: 100
}
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/deployment/.pre-commit-config.yaml
|
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
- repo: https://github.com/psf/black
rev: 23.1.0
hooks:
- id: black-jupyter
# It is recommended to specify the latest version of Python
# supported by your project here, or alternatively use
# pre-commit's default_language_version, see
# https://pre-commit.com/#top_level-default_language_version
language_version: python3.9
- repo: https://github.com/adamchainz/blacken-docs
rev: 1.13.0 # replace with latest tag on GitHub
hooks:
- id: blacken-docs
additional_dependencies:
- black==23.1.0
args: [--skip-errors]
- repo: https://github.com/pre-commit/mirrors-prettier
rev: "v2.5.1" # Use the sha or tag you want to point at
hooks:
- id: prettier
- repo: https://github.com/igorshubovych/markdownlint-cli
rev: v0.31.1
hooks:
- id: markdownlint
args: [--ignore-path=.markdownlintignore]
- repo: https://github.com/charliermarsh/ruff-pre-commit
# Ruff version.
rev: "v0.0.231"
hooks:
- id: ruff
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/deployment/pyproject.toml
|
[tool.ruff]
line-length = 120
select = ["E", "F", "I", "UP", "B"]
fix = true
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/deployment/.markdownlint.json
|
{
"default": true,
"MD013": false,
"MD014": false,
"MD033": false,
"MD041": false
}
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/deployment/README.md
|
# RAPIDS Deployment Documentation
## Building
To build the documentation, first install the dependencies needed to build the deployment docs from source.
```bash
conda env create -f conda/environments/deployment_docs.yml
```
We recommend building with [sphinx-autobuild](https://github.com/executablebooks/sphinx-autobuild).
This tool will build your docs and host them on a local web server.
It will watch for file changes, rebuild automatically and tell the browser page to reload. Magic!
```bash
$ sphinx-autobuild source build/html
[sphinx-autobuild] > sphinx-build ./source ./build/html
Running Sphinx v4.5.0
...
build succeeded.
The HTML pages are in build.
[I 220413 12:13:40 server:335] Serving on http://127.0.0.1:8000
```
Alternatively you can build the static site into `build/html` with `sphinx`.
```bash
$ make dirhtml
```
## Writing
Content in these docs is written in markdown using the [MyST Sphinx extension](https://myst-parser.readthedocs.io/en/v0.15.1/syntax/syntax.html).
### Custom admonitions
This Sphinx site has some custom admonitions to help when writing.
#### Docref
You can link to another documentation page with a `docref` admonition.
````markdown
```{docref} /cloud/gcp/gke
For more detailed information on launching GPU powered Kubernetes clusters on Google Cloud see the documentation.
```
````
Renders as:

> **Note**
> The `Visit the documentation >>` link is added automatically in the bottom right based on the page that is referenced in the directive argument.
### Notebooks
The `examples` section of these docs are written in Jupyter Notebooks and built with [MyST-NB](https://myst-nb.readthedocs.io/en/latest/).
There is also a custom Sphinx extension which shows the examples in a gallery with helpful cross linking throughout the docs. This does mean there
are a few assumptions about how notebooks should be written.
#### Adding examples
1. Create a new directory inside `source/examples`.
2. Create a new notebook file in that directory and give it a name like `notebook.ipynb`.
- The first cell of your notebook should be a markdown cell and contain at least a top level header.
- You can add tags to your notebook by adding [cell metadata tags to the first cell](https://jupyterbook.org/en/stable/content/metadata.html).
3. Place any supporting files such as scripts, Dockerfiles, etc in the same directory. These files will be discovered and listed on the rendered notebook page.
4. Update the `notebookgallerytoctree` section in `source/examples/index.md` with the relative path to your new notebook.
#### Tags
The notebook gallery extension uses cell tags to organize and cross-reference files.

Tags are hierarchical and use slashes to separate their namespaces. For example, if your notebook uses AWS Sagemaker, you should add the tag `cloud/aws/sagemaker`. This aligns with the Sphinx doc path to the RAPIDS Sagemaker documentation page, which you can find in `source/cloud/aws/sagemaker.md`.
The extension will use this information to ensure the notebook is linked from the Sagemaker page under the "Related Examples" section.
The example gallery will also allow you to filter based on these tags. The root of the tag namespace is used to create the filtering categories. So in the above example, the `cloud/aws/sagemaker` tag would create a filter for `cloud` with an option of `aws/sagemaker`. You can create new filter sections simply by creating new tags with unique root namespaces, but be mindful that keeping the number of filtering sections to a minimum provides the best user experience, and there may already be a suitable root namespace for the tag you want to create.
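As a concrete illustration, the minimal sketch below (the notebook path and the second tag are hypothetical) uses `nbformat` to set hierarchical tags on the first cell programmatically; the same result can be achieved by editing the cell metadata directly in Jupyter.
```python
import nbformat

# Hypothetical path to an example notebook; adjust to your own example directory.
path = "source/examples/my-example/notebook.ipynb"

nb = nbformat.read(path, as_version=4)

# Tags live in the metadata of the first (markdown) cell and use "/" to separate namespaces.
nb.cells[0].setdefault("metadata", {})["tags"] = [
    "cloud/aws/sagemaker",  # creates a "cloud" filter with an "aws/sagemaker" option
    "library/cudf",  # hypothetical tag showing a second root namespace
]

nbformat.write(nb, path)
```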
##### Styling
By default, tags are styled with a RAPIDS purple background and white text. They also have a `0.5em` left-hand border that acts as an accent; it is also purple by default but can be styled separately for a two-tone effect.
<div style="width: 100%; text-align: center;">
<img alt="Diagram showing the tag and css side-by-side with arrows to show color sets the text, background-color sets the background and border-left sets the accent" src="source/images/theme-tag-style.png" style="max-width: 450px;" />
</div>
This can be overridden for each tag by adding a new class with the format `.tag-{name}` to `source/_static/css/custom.css`. For example the Scikit-Learn logo is orange and blue with grey text, so the custom CSS sets an orange background with a blue accent and grey text.
```css
.tag-scikit-learn {
color: #030200;
background-color: #f09436;
border-left: 0.5em #3194c7 solid;
}
```
Tag styling can be added at any domain level, for example the `cloud/aws/sagemaker` tag uses the `.tag-cloud`, `.tag-aws` and `.tag-sagemaker` classes. They will be applied in that order too so we can set a default AWS style which can be overridden on a service-by-service basis.
## Linting
This project uses [prettier](https://prettier.io/) and [markdownlint](https://github.com/DavidAnson/markdownlint) to enforce automatic formatting and consistent style as well as identify rendering issues early.
It is recommended to run this automatically on each commit using [pre-commit](https://pre-commit.com/); linting rules are enforced via CI checks, so linting locally will save time in code review.
```console
$ pre-commit install
pre-commit installed at .git/hooks/pre-commit
$ git commit -am "My awesome commit"
prettier.................................................................Passed
markdownlint.............................................................Passed
```
## Releasing
This repository is continuously deployed to the [nightly docs at docs.rapids.ai](https://docs.rapids.ai/deployment/nightly/) via the [build-and-deploy](https://github.com/rapidsai/deployment/blob/main/.github/workflows/build-and-deploy.yml) workflow. All commits to main are built to static HTML and pushed to the [`deployment/nightly` subdirectory in the rapidsai/docs repo](https://github.com/rapidsai/docs/tree/gh-pages/deployment) which in turn is published to GitHub Pages.
We can also update the [stable documentation at docs.rapids.ai](https://docs.rapids.ai/deployment/stable/) by creating and pushing a tag which will cause the `build-and-deploy` workflow to push to the [`deployment/stable` subdirectory](https://github.com/rapidsai/docs/tree/gh-pages/deployment) instead.
The RAPIDS versions for things like container images and install instructions are templated into the documentation pages and are stored in `source/conf.py`.
```python
versions = {
"stable": {
"rapids_container": "nvcr.io/nvidia/rapidsai/rapidsai-core:23.02-cuda11.8-runtime-ubuntu22.04-py3.10",
},
"nightly": {
"rapids_container": "rapidsai/rapidsai-core-nightly:23.04-cuda11.8-runtime-ubuntu22.04-py3.10",
},
}
```
You can then use the value in any documentation page or notebook like this.
```markdown
# My doc page
The latest container image is {{ rapids_container }}.
```
All builds will use the nightly section by default, which allows you to test with the latest and greatest containers when developing locally or previewing nightly docs builds. To build the docs using the stable images, you need to set the environment variable `DEPLOYMENT_DOCS_BUILD_STABLE` to `true`. This is done automatically when building from a tag in CI.
Before you publish a new version for a release, ensure that the latest container images are available, then update the `stable` config to use the new release version and `nightly` to use the next upcoming nightly.
Then you can push a tag to release.
```bash
# Set next version number
# See https://docs.rapids.ai/resources/versions/ and past releases for version scheme
export RELEASE=x.x.x
# Create tags
git commit --allow-empty -m "Release $RELEASE"
git tag -a $RELEASE -m "Version $RELEASE"
# Push
git push upstream --tags
```
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/deployment/make.bat
|
@ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=source
set BUILDDIR=build
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.https://www.sphinx-doc.org/
exit /b 1
)
if "%1" == "" goto help
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
:end
popd
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/deployment/Makefile
|
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = source
BUILDDIR = build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/deployment/Dockerfile
|
# Maybe have to transfer from Debian
ARG RAPIDS_IMAGE
FROM $RAPIDS_IMAGE as rapids
RUN conda list -n rapids --explicit > /rapids/rapids-spec.txt
FROM gcr.io/deeplearning-platform-release/rapids-gpu.21-12
COPY --from=rapids /rapids/rapids-spec.txt /tmp/spec.txt
RUN conda create --name rapids --file /tmp/spec.txt && \
rm -f /tmp/spec.txt
ENV CONDA_DEFAULT_ENV=rapids
#CMD python -c "import platform; print(platform.python_version())"
# 3.7.12
#CMD python -c "import platform; print(platform.platform())"
# Linux-5.10.109-0-virt-x86_64-with-debian-bullseye-sid
#CMD printenv
# GDAL_DATA=/opt/conda/share/gdal
# NV_LIBNPP_DEV_VERSION=11.1.0.245-1
# CUDA_PATH=/opt/conda
# NVIDIA_VISIBLE_DEVICES=all
# NCCL_VERSION=2.12.10-1
# CONDA_PREFIX=/opt/conda
# GSETTINGS_SCHEMA_DIR_CONDA_BACKUP=
# CONDA_EXE=/opt/conda/bin/conda
# JAVA_HOME=/opt/conda
# PWD=/
# JAVA_LD_LIBRARY_PATH_BACKUP=
# PROJ_LIB=/opt/conda/share/proj
# LC_ALL=C.UTF-8
# NV_CUDNN_VERSION=8.0.5.39
# NV_NVTX_VERSION=11.0.167-1
# NV_LIBNPP_VERSION=11.1.0.245-1
# NV_LIBNCCL_DEV_PACKAGE=libnccl-dev=2.12.10-1+cuda11.0
# CONDA_DEFAULT_ENV=base
# SHELL=/bin/bash
# NV_LIBCUBLAS_DEV_PACKAGE=libcublas-dev-11-0=11.2.0.252-1
# GSETTINGS_SCHEMA_DIR=/opt/conda/share/glib-2.0/schemas
# CONDA_PYTHON_EXE=/opt/conda/bin/python
# NV_CUDA_CUDART_DEV_VERSION=11.0.221-1
# CONTAINER_NAME=rapids-gpu/21-12+cu110
# LANG=C.UTF-8
# NV_LIBNCCL_DEV_PACKAGE_NAME=libnccl-dev
# NV_LIBCUSPARSE_DEV_VERSION=11.1.1.245-1
# NV_LIBNCCL_PACKAGE=libnccl2=2.12.10-1+cuda11.0
# DL_ANACONDA_HOME=/opt/conda
# NV_LIBCUBLAS_DEV_PACKAGE_NAME=libcublas-dev-11-0
# NV_LIBCUBLAS_PACKAGE=libcublas-11-0=11.2.0.252-1
# NVARCH=x86_64
# PROJ_NETWORK=ON
# PATH=/opt/conda/bin:/opt/conda/condabin:/opt/conda/bin:/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
# NV_CUDA_CUDART_VERSION=11.0.221-1
# ANACONDA_PYTHON_VERSION=3.7
# _CE_CONDA=
# NV_CUDNN_PACKAGE_DEV=libcudnn8-dev=8.0.5.39-1+cuda11.0
# NV_NVML_DEV_VERSION=11.0.167-1
# NV_LIBNPP_DEV_PACKAGE=libnpp-dev-11-0=11.1.0.245-1
# CONTAINER_URL=us-docker.pkg.dev/deeplearning-platform-release/gcr.io/rapids-gpu.21-12:nightly-2022-05-26
# TERM=xterm
# NV_LIBCUSPARSE_VERSION=11.1.1.245-1
# NVIDIA_DRIVER_CAPABILITIES=compute,utility
# NV_CUDA_LIB_VERSION=11.0.3-1
# NV_LIBNCCL_PACKAGE_NAME=libnccl2
# NVIDIA_REQUIRE_CUDA=cuda>=11.0 brand=tesla,driver>=418,driver<419
# NV_LIBCUBLAS_PACKAGE_NAME=libcublas-11-0
# NV_NVPROF_VERSION=11.0.221-1
# NV_CUDNN_PACKAGE=libcudnn8=8.0.5.39-1+cuda11.0
# CUDA_VERSION=11.0.3
# _CE_M=
# NV_LIBNPP_PACKAGE=libnpp-11-0=11.1.0.245-1
# NV_LIBNCCL_DEV_PACKAGE_VERSION=2.12.10-1
# CPL_ZIP_ENCODING=UTF-8
# NV_CUDNN_PACKAGE_NAME=libcudnn8
# JAVA_LD_LIBRARY_PATH=/opt/conda/lib/server
# NV_LIBCUBLAS_DEV_VERSION=11.2.0.252-1
# HOME=/root
# CONDA_SHLVL=1
# SHLVL=0
# LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/cuda/lib:/usr/local/lib/x86_64-linux-gnu:/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/usr/local/nvidia/lib:/usr/local/nvidia/lib64
# NV_LIBNCCL_PACKAGE_VERSION=2.12.10-1
# HOSTNAME=6b8a39109ad6
# JAVA_HOME_CONDA_BACKUP=
# NV_LIBCUBLAS_VERSION=11.2.0.252-1
# NV_NVPROF_DEV_PACKAGE=cuda-nvprof-11-0=11.0.221-1
# CONDA_PROMPT_MODIFIER=(base)
# NV_CUDA_COMPAT_PACKAGE=cuda-compat-11-0
# LIBRARY_PATH=/usr/local/cuda/lib64/stubs
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/deployment/.readthedocs.yaml
|
version: 2
build:
os: "ubuntu-20.04"
tools:
python: "mambaforge-4.10"
conda:
environment: conda/environments/deployment_docs.yml
sphinx:
builder: dirhtml
| 0 |
rapidsai_public_repos/deployment
|
rapidsai_public_repos/deployment/extensions/rapids_grid_toctree.py
|
from functools import partial
from docutils import nodes
from sphinx.application import Sphinx
from sphinx.directives.other import TocTree
from sphinx_design.grids import GridDirective
def find_linked_documents(node):
"""Find all referenced documents in a node tree.
Walks the nodes and yield the reftarget attribute for any that have it set.
"""
for child in node.traverse():
try:
if child.attributes["reftarget"]:
yield child.attributes["reftarget"]
except (AttributeError, KeyError):
pass
class CardGridTocTree(GridDirective):
"""An extension of sphinx_design.grids.GridDirective that also add referenced docs to the toctree.
For any element within the grid which links to another page with the ``link-type`` ``doc`` the
doc gets added to the toctree of that page.
"""
def run(self) -> list[nodes.Node]:
output = nodes.container()
# Generate the card grid
grid = nodes.section(ids=["toctreegrid"])
grid += super().run()[0]
output += grid
# Update the content with the document names referenced in the card grid ready for toctree generation
self.content.data = [doc for doc in find_linked_documents(grid)]
# Generate the actual toctree but ensure it is hidden
self.options["hidden"] = True
self.parse_content = partial(TocTree.parse_content, self)
toctree = TocTree.run(self)[0]
output += toctree
return [output]
def setup(app: Sphinx) -> dict:
app.add_directive("gridtoctree", CardGridTocTree)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
| 0 |
rapidsai_public_repos/deployment
|
rapidsai_public_repos/deployment/extensions/rapids_version_templating.py
|
import re
from docutils import nodes
class TextNodeVisitor(nodes.SparseNodeVisitor):
def __init__(self, app, *args, **kwargs):
self.app = app
super().__init__(*args, **kwargs)
def visit_Text(self, node):
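        # Render each "{{ ... }}" placeholder with Jinja2; the negative lookbehind in the
        # regex skips "${{ ... }}" so expressions using that syntax are left untouched.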
new_node = nodes.Text(
re.sub(r"(?<!\$)\{\{.*?\}\}", self.template_func, node.astext())
)
node.parent.replace(node, new_node)
def template_func(self, match):
return self.app.builder.templates.render_string(
match.group(), self.app.config.rapids_version
)
def version_template(app, doctree, docname):
"""Substitute versions into each page.
    This allows documentation pages and notebooks to substitute in values like
the latest container image using jinja2 syntax.
E.g
# My doc page
The latest container image is {{ rapids_container }}.
"""
doctree.walk(TextNodeVisitor(app, doctree))
def setup(app):
app.add_config_value("rapids_version", {}, "html")
app.connect("doctree-resolved", version_template)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
| 0 |
rapidsai_public_repos/deployment
|
rapidsai_public_repos/deployment/extensions/rapids_related_examples.py
|
from functools import cache
import nbformat
from docutils import nodes
from docutils.parsers.rst.states import RSTState
from docutils.statemachine import ViewList
from markdown_it import MarkdownIt
from sphinx.application import Sphinx
from sphinx.directives.other import TocTree
from sphinx.environment import BuildEnvironment
from sphinx.util.docutils import SphinxDirective
from sphinx.util.nodes import nested_parse_with_titles
@cache
def read_notebook_tags(path: str) -> list[str]:
"""Read metadata tags from first cell of a notebook file."""
notebook = nbformat.read(path, as_version=4)
try:
return notebook.cells[0]["metadata"]["tags"]
except KeyError:
return []
def generate_notebook_grid_myst(
notebooks: list[str], env: BuildEnvironment
) -> list[str]:
"""Generate sphinx-design grid of notebooks in MyST markdown.
Take a list of notebook documents and render out some MyST markdown displaying those
documents in a grid of cards.
"""
md = []
md.append("`````{grid} 1 2 2 3")
md.append(":gutter: 2 2 2 2")
md.append("")
for notebook in notebooks:
md.append("````{grid-item-card}")
md.append(":link: /" + notebook)
md.append(":link-type: doc")
try:
md.append(get_title_for_notebook(env.doc2path(notebook)))
except ValueError:
md.append(notebook)
md.append("^" * len(notebook))
md.append("")
for tag in read_notebook_tags(env.doc2path(notebook)):
md.append("{bdg}`" + tag + "`")
md.append("````")
md.append("")
md.append("`````")
return md
def parse_markdown(markdown: list[str], state: RSTState) -> list[nodes.Node]:
"""Render markdown into nodes."""
node = nodes.section()
node.document = state.document
vl = ViewList(markdown, "fakefile.md")
nested_parse_with_titles(state, vl, node)
return node.children
def get_title_for_notebook(path: str) -> str:
"""Read a notebook file and find the top-level heading."""
notebook = nbformat.read(path, as_version=4)
for cell in notebook.cells:
if cell["cell_type"] == "markdown":
cell_source = MarkdownIt().parse(cell["source"])
for i, token in enumerate(cell_source):
if i == len(cell_source) - 1: # no next_token
continue
next_token = cell_source[i + 1]
if (
token.type == "heading_open"
and token.tag == "h1"
and next_token.type == "inline"
):
return next_token.content
raise ValueError("No top-level heading found")
class RelatedExamples(SphinxDirective):
def run(self) -> list[nodes.Node]:
output = nodes.section(ids=["relatedexamples"])
if self.env.docname in self.env.notebook_tag_map:
output += nodes.title("Related Examples", "Related Examples")
grid_markdown = generate_notebook_grid_myst(
notebooks=self.env.notebook_tag_map[self.env.docname],
env=self.env,
)
for node in parse_markdown(
markdown=grid_markdown,
state=self.state,
):
output += node
return [output]
return []
def build_tag_map(app: Sphinx, env: BuildEnvironment, docnames: list[str]):
"""Walk notebooks and update tag map.
Once Sphinx has decided which pages to build, iterate over the notebooks
and build the ``env.notebook_tag_map`` based on the tags of the first cell.
If any notebooks have been updated as part of this build then add all of the
pages with related tags to the build to ensure they are up to date.
"""
env.notebook_tag_map = {}
# Build notebook tag map
for doc in env.found_docs:
path = app.env.doc2path(doc)
if path.endswith("ipynb"):
for tag in read_notebook_tags(path):
try:
env.notebook_tag_map[tag].append(doc)
except KeyError:
env.notebook_tag_map[tag] = [doc]
# If notebooks have been modified add all docnames from env.found_docs that match the tags to docnames
if any([app.env.doc2path(doc).endswith("ipynb") for doc in docnames]):
for tag in env.notebook_tag_map.keys():
if tag in env.found_docs:
# FIXME This doesn't seem to be working correctly as the pages aren't being rebuilt
                docnames.append(tag)
def add_notebook_tag_map_to_context(app, pagename, templatename, context, doctree):
context["sorted"] = sorted
context["notebook_tag_map"] = app.env.notebook_tag_map
tag_tree = {}
for tag in app.env.notebook_tag_map:
root, suffix = tag.split("/", 1)
try:
tag_tree[root].append(suffix)
except KeyError:
tag_tree[root] = [suffix]
context["notebook_tag_tree"] = tag_tree
context["notebook_tags"] = [
tag for tag, pages in app.env.notebook_tag_map.items() if pagename in pages
]
class NotebookGalleryTocTree(TocTree):
def run(self) -> list[nodes.Node]:
output = nodes.container()
gallery = nodes.section(ids=["examplegallery"])
# Generate the actual toctree but ensure it is hidden
self.options["hidden"] = True
toctree = super().run()
output += toctree
# Generate the card grid for all items in the toctree
notebooks = [
notebook for _, notebook in toctree[0].children[0].attributes["entries"]
]
grid_markdown = generate_notebook_grid_myst(notebooks=notebooks, env=self.env)
for node in parse_markdown(markdown=grid_markdown, state=self.state):
gallery += node
output += gallery
return [output]
def setup(app: Sphinx) -> dict:
app.connect("env-before-read-docs", build_tag_map)
app.connect("html-page-context", add_notebook_tag_map_to_context)
app.add_directive("relatedexamples", RelatedExamples)
app.add_directive("notebookgallerytoctree", NotebookGalleryTocTree)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
| 0 |
rapidsai_public_repos/deployment
|
rapidsai_public_repos/deployment/extensions/rapids_notebook_files.py
|
import contextlib
import os
import pathlib
import re
import shutil
import tempfile
from functools import partial
def template_func(app, match):
return app.builder.templates.render_string(match.group(), app.config.rapids_version)
def walk_files(app, dir, outdir):
outdir.mkdir(parents=True, exist_ok=True)
related_notebook_files = {}
for page in dir.glob("*"):
if page.is_dir():
related_notebook_files[page.name] = walk_files(
app, page, outdir / page.name
)
else:
with contextlib.suppress(OSError):
os.remove(str(outdir / page.name))
if "ipynb" in page.name:
with open(str(page)) as reader:
notebook = reader.read()
with open(str(outdir / page.name), "w") as writer:
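                    # Apply the same "{{ ... }}" version templating to the notebook source,
                    # again skipping "${{ ... }}" via the negative lookbehind.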
writer.write(
re.sub(
r"(?<!\$)\{\{.*?\}\}",
partial(template_func, app),
notebook,
)
)
else:
shutil.copy(str(page), str(outdir / page.name))
related_notebook_files[page.name] = page.name
return related_notebook_files
def find_notebook_related_files(app, pagename, templatename, context, doctree):
"""Find related files for Jupyter Notebooks in the examples section.
Example notebooks should be placed in /source/examples in their own directories.
This extension walks through the directory when each notebook is rendered and generates
a list of all the other files in the directory.
The goal is to set a list of GitHub URLs in the template context so we can render
them in the sidebar. To get the GitHub url we use the ``rapids_deployment_notebooks_base_url`` config
option which shows the base url for where the source files are on GitHub.
"""
if "examples/" in pagename and context["page_source_suffix"] == ".ipynb":
source_root = pathlib.Path(__file__).parent / ".." / "source"
output_root = pathlib.Path(app.builder.outdir)
rel_page_parent = pathlib.Path(pagename).parent
path_to_page_parent = source_root / rel_page_parent
path_to_output_parent = output_root / rel_page_parent
# Copy all related files to output and apply templating
related_notebook_files = walk_files(
app, path_to_page_parent, path_to_output_parent
)
# Make archive of related files
if related_notebook_files and len(related_notebook_files) > 1:
archive_path = path_to_output_parent / "all_files.zip"
with contextlib.suppress(OSError):
os.remove(str(archive_path))
with tempfile.NamedTemporaryFile() as tmpf:
shutil.make_archive(
tmpf.name,
"zip",
str(path_to_output_parent.parent),
str(path_to_output_parent.name),
)
shutil.move(tmpf.name + ".zip", str(archive_path))
context["related_notebook_files_archive"] = archive_path.name
context["related_notebook_files"] = related_notebook_files
def setup(app):
app.add_config_value("rapids_deployment_notebooks_base_url", "", "html")
app.connect("html-page-context", find_notebook_related_files)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
| 0 |
rapidsai_public_repos/deployment
|
rapidsai_public_repos/deployment/extensions/rapids_admonitions.py
|
from docutils.nodes import Text, admonition, inline, paragraph
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
from sphinx.addnodes import pending_xref
from sphinx.application import Sphinx
from sphinx.util.docutils import SphinxDirective
class Docref(BaseAdmonition, SphinxDirective):
node_class = admonition
required_arguments = 1
def run(self):
doc = self.arguments[0]
self.arguments = ["See Documentation"]
self.options["classes"] = ["docref"]
nodes = super().run()
custom_xref = pending_xref(
reftype="myst",
refdomain="std",
refexplicit=True,
reftarget=doc,
refdoc=self.env.docname,
refwarn=True,
)
text_wrapper = inline()
text_wrapper += Text("Visit the documentation >>")
custom_xref += text_wrapper
wrapper = paragraph()
wrapper["classes"] = ["visit-link"]
wrapper += custom_xref
nodes[0] += wrapper
return nodes
def setup(app: Sphinx) -> dict:
app.add_directive("docref", Docref)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
| 0 |
rapidsai_public_repos/deployment/conda
|
rapidsai_public_repos/deployment/conda/environments/deployment_docs.yml
|
name: deployment-docs-dev
channels:
- conda-forge
- defaults
dependencies:
- myst-nb
- myst-parser
- nbsphinx
- numpydoc
- pydata-sphinx-theme>=0.12.0
- python=3.9
- pre-commit
- sphinx
- sphinx-autobuild
- sphinx-copybutton
- sphinx-design
- sphinxcontrib-mermaid
| 0 |
rapidsai_public_repos/deployment
|
rapidsai_public_repos/deployment/scripts/gen_release_checklist_issue.sh
|
#!/bin/bash
# Run this script to generate the release issue checklist for easy pasting into GitHub
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
SCRIPT_NAME=$(basename $SCRIPT_DIR)/$(basename "${BASH_SOURCE[0]}")
cd $SCRIPT_DIR/..
cat << EOF
# Release checklist
For the upcoming release we need to verify our documentation. This is a best efforts activity
so please refer to the checklist from the previous release and focus on pages that were not
verified last time.
## Verify pages
- Look at the nightly build of each page listed below
- Check page renders correctly
- Check for spelling/grammar problems
- Check that the instructions work as expected
- Ensure legacy pages with out of date instructions have a content warning
- If page needs updating convert the task to an issue and open a PR that closes the issue
\`\`\`[tasklist]
### Pages
EOF
find source -type f -name "*.ipynb" -o -name "*.md" \
| grep -v "_includes" \
| sed -e "s/^source/- [ ] https\:\/\/docs.rapids.ai\/deployment\/nightly/" \
| sed -e "s/index.md//" \
| sed -e "s/.md/\//"\
| sed -e "s/.ipynb/\//" \
| sort -u
cat << EOF
\`\`\`
_Issue text generated by \`${SCRIPT_NAME#./}\`._
EOF
| 0 |
rapidsai_public_repos/deployment
|
rapidsai_public_repos/deployment/source/hpc.md
|
# HPC
RAPIDS works extremely well in traditional HPC (High Performance Computing) environments where GPUs are often co-located with accelerated networking hardware such as InfiniBand. Deploying on HPC often means using queue management systems such as SLURM, LSF, PBS, etc.
## SLURM
```{warning}
This is a legacy page and may contain outdated information. We are working hard to update our documentation with the latest and greatest information, thank you for bearing with us.
```
If you are unfamiliar with SLURM or need a refresher, we recommend the [quickstart guide](https://slurm.schedmd.com/quickstart.html).
Depending on how your nodes are configured, additional settings may be required, such as defining the number of GPUs desired (`--gpus`) or the number of GPUs per node (`--gpus-per-node`).
In the following example, we assume each allocation runs on a DGX1 with access to all eight GPUs.
### Start Scheduler
First, start the scheduler with the following SLURM script. This and the following scripts can be deployed with `salloc` for interactive usage or `sbatch` for batch runs.
```bash
#!/usr/bin/env bash
#SBATCH -J dask-scheduler
#SBATCH -n 1
#SBATCH -t 00:10:00
module load cuda/11.0.3
CONDA_ROOT=/nfs-mount/user/miniconda3
source $CONDA_ROOT/etc/profile.d/conda.sh
conda activate rapids
LOCAL_DIRECTORY=/nfs-mount/dask-local-directory
mkdir $LOCAL_DIRECTORY
CUDA_VISIBLE_DEVICES=0 dask-scheduler \
--protocol tcp \
--scheduler-file "$LOCAL_DIRECTORY/dask-scheduler.json" &
dask-cuda-worker \
--rmm-pool-size 14GB \
--scheduler-file "$LOCAL_DIRECTORY/dask-scheduler.json"
```
Notice that we configure the scheduler to write a `scheduler-file` to an NFS-accessible location. This file contains metadata about the scheduler, including its IP address and port. The file serves as input to the workers, informing them of the address and port to connect to.
The scheduler doesn't need the whole node to itself, so we can also start a worker on this node to fill out the unused resources.
### Start Dask CUDA Workers
Next start the other [dask-cuda workers](https://dask-cuda.readthedocs.io/). Dask-CUDA extends the traditional Dask `Worker` class with specific options and enhancements for GPU environments. Unlike the scheduler and client, the workers script should be _scalable_ and allow the users to tune how many workers are created.
For example, we can scale the number of nodes to 3: `sbatch/salloc -N3 dask-cuda-worker.script`. In this case, because we have 8 GPUs per node and 3 nodes, our job will have 24 workers.
```bash
#!/usr/bin/env bash
#SBATCH -J dask-cuda-workers
#SBATCH -t 00:10:00
module load cuda/11.0.3
CONDA_ROOT=/nfs-mount/miniconda3
source $CONDA_ROOT/etc/profile.d/conda.sh
conda activate rapids
LOCAL_DIRECTORY=/nfs-mount/dask-local-directory
mkdir $LOCAL_DIRECTORY
dask-cuda-worker \
--rmm-pool-size 14GB \
--scheduler-file "$LOCAL_DIRECTORY/dask-scheduler.json"
```
### cuDF Example Workflow
Lastly, we can now run a job on the established Dask Cluster.
```bash
#!/usr/bin/env bash
#SBATCH -J dask-client
#SBATCH -n 1
#SBATCH -t 00:10:00
module load cuda/11.0.3
CONDA_ROOT=/nfs-mount/miniconda3
source $CONDA_ROOT/etc/profile.d/conda.sh
conda activate rapids
LOCAL_DIRECTORY=/nfs-mount/dask-local-directory
cat <<EOF >>/tmp/dask-cudf-example.py
import cudf
import dask.dataframe as dd
from dask.distributed import Client
client = Client(scheduler_file="$LOCAL_DIRECTORY/dask-scheduler.json")
cdf = cudf.datasets.timeseries()
ddf = dd.from_pandas(cdf, npartitions=10)
res = ddf.groupby(['id', 'name']).agg(['mean', 'sum', 'count']).compute()
print(res)
EOF
python /tmp/dask-cudf-example.py
```
### Confirm Output
Putting the above together will result in the following output:
```bash
x y
mean sum count mean sum count
id name
1077 Laura 0.028305 1.868120 66 -0.098905 -6.527731 66
1026 Frank 0.001536 1.414839 921 -0.017223 -15.862306 921
1082 Patricia 0.072045 3.602228 50 0.081853 4.092667 50
1007 Wendy 0.009837 11.676199 1187 0.022978 27.275216 1187
976 Wendy -0.003663 -3.267674 892 0.008262 7.369577 892
... ... ... ... ... ... ...
912 Michael 0.012409 0.459119 37 0.002528 0.093520 37
1103 Ingrid -0.132714 -1.327142 10 0.108364 1.083638 10
998 Tim 0.000587 0.747745 1273 0.001777 2.262094 1273
941 Yvonne 0.050258 11.358393 226 0.080584 18.212019 226
900 Michael -0.134216 -1.073729 8 0.008701 0.069610 8
[6449 rows x 6 columns]
```
<br/><br/>
| 0 |
rapidsai_public_repos/deployment
|
rapidsai_public_repos/deployment/source/local.md
|
---
html_theme.sidebar_secondary.remove: true
---
# Local
## Conda
Installation instructions for conda are hosted at the [RAPIDS Conda Installation Docs Page](https://docs.rapids.ai/install#conda).
## Docker
Installation instructions for Docker are hosted at the [RAPIDS Docker Installation Docs Page](https://docs.rapids.ai/install#docker).
## pip
RAPIDS packages can be installed with pip. See [RAPIDS pip Installation Docs Page](https://docs.rapids.ai/install#pip) for installation instructions and requirements.
## WSL2
RAPIDS can be installed on Windows using Windows Subsystem for Linux version 2 (WSL2). See [RAPIDS WSL2 Installation Docs Page](https://docs.rapids.ai/install#wsl2) for installation instructions and requirements.
| 0 |
rapidsai_public_repos/deployment
|
rapidsai_public_repos/deployment/source/index.md
|
---
html_theme.sidebar_secondary.remove: true
---
# Deploying RAPIDS
Deployment documentation to get you up and running with RAPIDS anywhere.
`````{gridtoctree} 1 2 2 3
:gutter: 2 2 2 2
````{grid-item-card}
:link: local
:link-type: doc
{fas}`desktop;sd-text-primary` Local Machine
^^^
Use RAPIDS on your local workstation or server.
{bdg}`docker`
{bdg}`conda`
{bdg}`pip`
{bdg}`WSL2`
````
````{grid-item-card}
:link: cloud/index
:link-type: doc
{fas}`cloud;sd-text-primary` Cloud
^^^
Use RAPIDS on the cloud.
{bdg}`Amazon Web Services`
{bdg}`Google Cloud Platform`
{bdg}`Microsoft Azure`
{bdg}`IBM Cloud`
````
````{grid-item-card}
:link: hpc
:link-type: doc
{fas}`server;sd-text-primary` HPC
^^^
Use RAPIDS on high performance computers and supercomputers.
{bdg}`SLURM`
````
````{grid-item-card}
:link: platforms/index
:link-type: doc
{fas}`network-wired;sd-text-primary` Platforms
^^^
Use RAPIDS on compute platforms.
{bdg}`Kubernetes`
{bdg}`Kubeflow`
{bdg}`Coiled`
{bdg}`Databricks`
{bdg}`Google Colab`
````
````{grid-item-card}
:link: tools/index
:link-type: doc
{fas}`hammer;sd-text-primary` Tools
^^^
There are many tools to deploy RAPIDS.
{bdg}`containers`
{bdg}`dask-kubernetes`
{bdg}`dask-operator`
{bdg}`dask-helm-chart`
{bdg}`dask-gateway`
````
````{grid-item-card}
:link: examples/index
:link-type: doc
{fas}`book;sd-text-primary` Workflow examples
^^^
For inspiration see our example notebooks with opinionated deployments of RAPIDS to boost machine learning workflows.
{bdg}`xgboost`
{bdg}`optuna`
{bdg}`mlflow`
{bdg}`ray tune`
````
````{grid-item-card}
:link: guides/index
:link-type: doc
{fas}`book;sd-text-primary` Guides
^^^
Detailed guides on how to deploy and optimize RAPIDS.
{bdg}`Microsoft Azure`
{bdg}`Infiniband`
{bdg}`MIG`
````
`````
| 0 |
rapidsai_public_repos/deployment
|
rapidsai_public_repos/deployment/source/conf.py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import datetime
import os
import sys
# -- Project information -----------------------------------------------------
project = "RAPIDS Deployment Documentation"
copyright = f"{datetime.date.today().year}, NVIDIA"
author = "NVIDIA"
# Single modifiable version for all of the docs - easier for future updates
stable_version = "23.10"
nightly_version = "23.12"
versions = {
"stable": {
"rapids_version": stable_version,
"rapids_container": f"nvcr.io/nvidia/rapidsai/base:{stable_version}-cuda11.8-py3.10",
"rapids_notebooks_container": f"nvcr.io/nvidia/rapidsai/notebooks:{stable_version}-cuda11.8-py3.10",
"rapids_conda_channels": "-c rapidsai -c conda-forge -c nvidia",
"rapids_conda_packages": f"rapids={stable_version} python=3.10 cudatoolkit=11.8",
},
"nightly": {
"rapids_version": f"{nightly_version}-nightly",
"rapids_container": f"rapidsai/base:{nightly_version + 'a'}-cuda11.8-py3.10",
"rapids_notebooks_container": f"rapidsai/notebooks:{nightly_version + 'a'}-cuda11.8-py3.10",
"rapids_conda_channels": "-c rapidsai-nightly -c conda-forge -c nvidia",
"rapids_conda_packages": f"rapids={nightly_version} python=3.10 cudatoolkit=11.8",
},
}
rapids_version = (
versions["stable"]
if os.environ.get("DEPLOYMENT_DOCS_BUILD_STABLE", "false") == "true"
else versions["nightly"]
)
rapids_version["rapids_conda_channels_list"] = [
channel
for channel in rapids_version["rapids_conda_channels"].split(" ")
if channel != "-c"
]
rapids_version["rapids_conda_packages_list"] = rapids_version[
"rapids_conda_packages"
].split(" ")
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
sys.path.insert(0, os.path.abspath("../extensions"))
extensions = [
"IPython.sphinxext.ipython_console_highlighting",
"sphinx.ext.intersphinx",
"myst_nb",
"sphinxcontrib.mermaid",
"sphinx_design",
"sphinx_copybutton",
"rapids_notebook_files",
"rapids_related_examples",
"rapids_grid_toctree",
"rapids_version_templating",
"rapids_admonitions",
]
myst_enable_extensions = ["colon_fence", "dollarmath"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
copybutton_prompt_text = r">>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: "
copybutton_prompt_is_regexp = True
suppress_warnings = ["myst.header", "myst.nested_header"]
# -- Options for notebooks -------------------------------------------------
nb_execution_mode = "off"
rapids_deployment_notebooks_base_url = (
"https://github.com/rapidsai/deployment/blob/main/source/"
)
# -- Options for HTML output -------------------------------------------------
html_theme_options = {
"header_links_before_dropdown": 7,
# https://github.com/pydata/pydata-sphinx-theme/issues/1220
"icon_links": [],
"logo": {
"link": "https://docs.rapids.ai/",
},
"github_url": "https://github.com/rapidsai/",
"show_toc_level": 1,
"navbar_align": "right",
"secondary_sidebar_items": [
"page-toc",
"notebooks-extra-files-nav",
"notebooks-tags",
],
}
html_sidebars = {
"**": ["sidebar-nav-bs", "sidebar-ethical-ads"],
"index": [],
"examples/index": ["notebooks-tag-filter", "sidebar-ethical-ads"],
}
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "pydata_sphinx_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_logo = "_static/RAPIDS-logo-purple.png"
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"dask": ("https://docs.dask.org/en/latest/", None),
"distributed": ("https://distributed.dask.org/en/latest/", None),
"dask_kubernetes": ("https://kubernetes.dask.org/en/latest/", None),
"dask_cuda": ("https://docs.rapids.ai/api/dask-cuda/stable/", None),
}
def setup(app):
app.add_css_file("https://docs.rapids.ai/assets/css/custom.css")
app.add_css_file("css/custom.css")
app.add_js_file(
"https://docs.rapids.ai/assets/js/custom.js", loading_method="defer"
)
app.add_js_file("js/nav.js", loading_method="defer")
app.add_js_file("js/notebook-gallery.js", loading_method="defer")
| 0 |
rapidsai_public_repos/deployment/source/_static
|
rapidsai_public_repos/deployment/source/_static/css/custom.css
|
nav.bd-links fieldset legend {
color: var(--pst-color-text-base);
font-weight: var(--pst-sidebar-header-font-weight);
font-size: 1em;
}
nav.bd-links fieldset input {
margin-left: 1em;
margin-right: 0.25em;
}
.bd-links__title small {
float: right;
padding-right: 2em;
}
nav.related-files {
font-family: var(--pst-font-family-monospace);
}
nav.bd-links fieldset .sd-badge {
font-size: 0.9em;
}
/* Admonitions */
.docref {
border-color: var(--sd-color-primary) !important;
}
.docref .admonition-title {
color: var(--sd-color-primary-text);
background: var(--sd-color-primary);
}
.docref > .admonition-title::after,
div.docref > .admonition-title::after {
color: var(--sd-color-primary-text);
content: "\f02d";
}
.docref .visit-link {
width: 100%;
text-align: right;
padding-right: 2em;
margin-top: -1.15rem;
}
.tagwrapper {
margin-top: 0.25em;
}
/* Tag colours */
.sd-badge {
/* Defaults */
color: var(--sd-color-primary-text);
background-color: var(--sd-color-primary);
border-left: 0.5em var(--sd-color-primary) solid;
padding-left: 0.25em !important;
}
.tag-dask,
.tag-dask-kubernetes,
.tag-dask-operator,
.tag-dask-yarn,
.tag-dask-gateway,
.tag-dask-jobqueue,
.tag-dask-helm-chart,
.tag-dask-cloudprovider,
.tag-dask-ml {
color: #262326;
background-color: #ffc11e !important;
border-left: 0.5em #ffc11e solid;
}
.tag-kubernetes,
.tag-kubeflow {
background-color: #3069de;
border-left: 0.5em #3069de solid;
}
.tag-aws {
color: #222e3c;
background-color: #f79700;
border-left: 0.5em #f79700 solid;
}
.tag-gcp {
background-color: #0f9d58;
border-left: 0.5em #0f9d58 solid;
}
.tag-optuna {
background-color: #045895;
border-left: 0.5em #045895 solid;
}
.tag-numpy {
background-color: #4ba6c9;
border-left: 0.5em #4670c8 solid;
}
.tag-scikit-learn {
color: #030200;
background-color: #f09436;
border-left: 0.5em #3194c7 solid;
}
.tag-data-format {
background-color: #cc539d;
border-left: 0.5em #cc539d solid;
}
.tag-data-storage {
background-color: #53a8cc;
border-left: 0.5em #53a8cc solid;
}
.tag-workflow {
background-color: #348653;
border-left: 0.5em #348653 solid;
}
| 0 |
rapidsai_public_repos/deployment/source/_static
|
rapidsai_public_repos/deployment/source/_static/js/notebook-gallery.js
|
document.addEventListener("DOMContentLoaded", function () {
var setURLFilters = function (filters) {
var newAdditionalURL = "";
var tempArray = window.location.href.split("?");
var baseURL = tempArray[0];
var additionalURL = tempArray[1];
var temp = "";
if (additionalURL) {
tempArray = additionalURL.split("&");
for (var i = 0; i < tempArray.length; i++) {
if (tempArray[i].split("=")[0] != "filters") {
newAdditionalURL += temp + tempArray[i];
temp = "&";
}
}
}
if (filters.length) {
newAdditionalURL += temp + "filters=" + filters.join(",");
}
if (newAdditionalURL) {
window.history.replaceState("", "", baseURL + "?" + newAdditionalURL);
} else {
window.history.replaceState("", "", baseURL);
}
};
var getUrlFilters = function () {
let search = new URLSearchParams(window.location.search);
let filters = search.get("filters");
if (filters) {
return filters.split(",");
}
};
var tagFilterListener = function () {
// Get filter checkbox status
    var filterTagRoots = []; // Which sections are we filtering on
    var filterTags = []; // Which tags are being selected
Array.from(document.getElementsByClassName("tag-filter")).forEach(
(checkbox) => {
if (checkbox.checked) {
let tag = checkbox.getAttribute("id");
filterTags.push(checkbox.getAttribute("id"));
let root = tag.split("/")[0];
if (!filterTagRoots.includes(root)) {
filterTagRoots.push(root);
}
}
}
);
setURLFilters(filterTags);
// Iterate notebook cards
Array.from(document.getElementsByClassName("sd-col")).forEach(
(notebook) => {
let isFiltered = false;
// Get tags from the card
let tags = [];
Array.from(notebook.getElementsByClassName("sd-badge")).forEach(
(tag) => {
tags.push(tag.getAttribute("aria-label"));
}
);
// Iterate each of the sections we are filtering on
filterTagRoots.forEach((rootTag) => {
// If a notebook has no tags with the current root tag then it is definitely filtered
if (
!tags.some((tag) => {
return tag.startsWith(rootTag);
})
) {
isFiltered = true;
} else {
// Get filter tags with the current root we are testing
let tagsWithRoot = [];
filterTags.forEach((filteredTag) => {
if (filteredTag.startsWith(rootTag)) {
tagsWithRoot.push(filteredTag);
}
});
// If the notebook tags and filter tags don't intersect it is filtered
if (!tags.some((item) => tagsWithRoot.includes(item))) {
isFiltered = true;
}
}
});
// Show/hide the card
if (isFiltered) {
notebook.setAttribute("style", "display:none !important");
} else {
notebook.setAttribute("style", "display:flex");
}
}
);
};
// Add listener for resetting the filters
let resetButton = document.getElementById("resetfilters");
if (resetButton != undefined) {
resetButton.addEventListener(
"click",
function () {
Array.from(document.getElementsByClassName("tag-filter")).forEach(
(checkbox) => {
checkbox.checked = false;
}
);
tagFilterListener();
},
false
);
}
// Add listeners to all checkboxes for triggering filtering
Array.from(document.getElementsByClassName("tag-filter")).forEach(
(checkbox) => {
checkbox.addEventListener("change", tagFilterListener, false);
}
);
// Simplify tags and add class for styling
// It's not possible to control these attributes in Sphinx otherwise we would
Array.from(document.getElementsByClassName("sd-badge")).forEach((tag) => {
tag.setAttribute("aria-label", tag.innerHTML);
try {
tag
.getAttribute("aria-label")
.split("/")
.forEach((subtag) => tag.classList.add(`tag-${subtag}`));
} catch (err) {}
if (tag.innerHTML.includes("/")) {
tag.innerHTML = tag.innerHTML.split("/").slice(1).join("/");
}
});
// Set checkboxes initial state
var initFilters = getUrlFilters();
if (initFilters) {
Array.from(document.getElementsByClassName("tag-filter")).forEach(
(checkbox) => {
if (initFilters.includes(checkbox.id)) {
checkbox.checked = true;
}
}
);
tagFilterListener();
}
});
| 0 |
rapidsai_public_repos/deployment/source/_static
|
rapidsai_public_repos/deployment/source/_static/js/nav.js
|
document.addEventListener("DOMContentLoaded", function () {
let sidebar = document.getElementsByClassName("bd-sidebar-primary")[0];
sidebar.innerHTML =
`
<div id="rapids-pydata-container">
<div class="rapids-home-container">
<a class="rapids-home-container__home-btn" href="https://docs.rapids.ai/">Docs Home</a>
</div>
<div class="rapids-home-container">
<a class="rapids-home-container__home-btn" href="https://docs.rapids.ai/deployment/stable/">Deployment Home</a>
</div>
<div id="rapids-selector__container-version" class="rapids-selector__container rapids-selector--hidden">
<div class="rapids-selector__selected"></div>
<div class="rapids-selector__menu" style="height: 65px;">
<a class="rapids-selector__menu-item" href="https://docs.rapids.ai/deployment/nightly">nightly</a>
<a class="rapids-selector__menu-item" href="https://docs.rapids.ai/deployment/stable">stable</a>
</div>
</div>
</div>
` + sidebar.innerHTML;
let versionSection = document.getElementById(
"rapids-selector__container-version"
);
let selectorSelected = versionSection.getElementsByClassName(
"rapids-selector__selected"
)[0];
if (window.location.href.includes("/deployment/stable")) {
selectorSelected.innerHTML = "stable";
    // getElementsByClassName returns an HTMLCollection, which has no forEach,
    // so convert it to an array first.
    Array.from(
      versionSection.getElementsByClassName("rapids-selector__menu-item")
    ).forEach((element) => {
      if (element.innerHTML.includes("stable")) {
        element.classList.add("rapids-selector__menu-item--selected");
      }
    });
} else if (window.location.href.includes("/deployment/nightly")) {
selectorSelected.innerHTML = "nightly";
    Array.from(
      versionSection.getElementsByClassName("rapids-selector__menu-item")
    ).forEach((element) => {
      if (element.innerHTML.includes("nightly")) {
        element.classList.add("rapids-selector__menu-item--selected");
      }
    });
} else {
selectorSelected.innerHTML = "dev";
let menu = versionSection.getElementsByClassName(
"rapids-selector__menu"
)[0];
menu.innerHTML =
menu.innerHTML +
'<a class="rapids-selector__menu-item rapids-selector__menu-item--selected" href="/">dev</a>';
menu.style["height"] = "97px";
}
});
| 0 |
rapidsai_public_repos/deployment/source
|
rapidsai_public_repos/deployment/source/guides/mig.md
|
# Multi-Instance GPU (MIG)
[Multi-Instance GPU](https://www.nvidia.com/en-us/technologies/multi-instance-gpu/) is a technology that allows partitioning a single GPU into multiple instances, making each one appear to be a completely independent GPU. Each instance then receives a certain slice of the GPU's computational resources and a pre-defined block of memory that is isolated from the other instances by on-chip protections.
Due to the protection layer that makes MIG secure, certain limitations exist. One such limitation that is generally important for HPC applications is the lack of support for [CUDA Inter-Process Communication (IPC)](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#interprocess-communication), which enables transfers over NVLink and NVSwitch to greatly speed up communication between physical GPUs. When using MIG, [NVLink and NVSwitch](https://www.nvidia.com/en-us/data-center/nvlink/) are thus completely unavailable, forcing the application to take a more expensive communication channel via the system (CPU) memory.
Given these limitations in communication capability, we advise users to first understand the tradeoffs that have to be made when attempting to set up a cluster of MIG instances. While the partitioning could be beneficial to certain applications that need only a certain amount of compute capability, communication bottlenecks may be a problem and thus need to be considered carefully.
## Dask Cluster
Dask clusters of MIG instances are supported via Dask-CUDA as long as all MIG instances are identical with respect to memory. Much like in a cluster of physical GPUs, mixing instances with different memory sizes is generally not a good idea, as Dask may not be able to balance work correctly, which could eventually lead to more frequent out-of-memory errors.
For example, partitioning two GPUs into 7 x 10GB instances each and setting up a cluster with all 14 instances should be fine. However, partitioning one of the GPUs into 7 x 10GB instances and the other into 3 x 20GB instances should be avoided.
Unlike for a system composed of unpartitioned GPUs, Dask-CUDA cannot automatically infer the GPUs to be utilized for the cluster. In a MIG setup, the user is therefore required to specify the GPU instances to be used by the cluster. This is achieved by setting the `CUDA_VISIBLE_DEVICES` environment variable (for either {class}`dask_cuda.LocalCUDACluster` or `dask-cuda-worker`), or the argument of the same name for {class}`dask_cuda.LocalCUDACluster`.
Physical GPUs can be addressed by their indices `[0..N)` (where `N` is the total number of GPUs installed) or by their names, composed of the `GPU-` prefix followed by the UUID. MIG instances have no indices and can only be addressed by their names, composed of the `MIG-` prefix followed by the UUID. The name of a MIG instance will then look similar to `MIG-41b3359c-e721-56e5-8009-12e5797ed514`.
### Determine MIG Names
The simplest way to determine the names of MIG instances is to run `nvidia-smi -L` on the command line.
```bash
$ nvidia-smi -L
GPU 0: NVIDIA A100-PCIE-40GB (UUID: GPU-84fd49f2-48ad-50e8-9f2e-3bf0dfd47ccb)
MIG 2g.10gb Device 0: (UUID: MIG-41b3359c-e721-56e5-8009-12e5797ed514)
MIG 2g.10gb Device 1: (UUID: MIG-65b79fff-6d3c-5490-a288-b31ec705f310)
MIG 2g.10gb Device 2: (UUID: MIG-c6e2bae8-46d4-5a7e-9a68-c6cf1f680ba0)
```
In the example case above, the system has one NVIDIA A100 with 3 x 10GB MIG instances. In the next sections we will see how to use the instance names to start up a Dask cluster composed of MIG GPUs. Please note that once a GPU is partitioned, the physical GPU (named `GPU-84fd49f2-48ad-50e8-9f2e-3bf0dfd47ccb` above) is inaccessible for CUDA compute and cannot be used as part of a Dask cluster.
Alternatively, MIG instance names can be obtained programmatically using [NVML](https://developer.nvidia.com/nvidia-management-library-nvml) or [PyNVML](https://github.com/gpuopenanalytics/pynvml). Please refer to the [NVML API](https://docs.nvidia.com/deploy/nvml-api/) to write appropriate utilities for that purpose.
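For instance, a minimal sketch along the following lines (assuming a `pynvml` release that exposes the NVML MIG bindings) prints the name of every MIG instance on the local system:
```python
# Minimal sketch, assuming a pynvml release that exposes the NVML MIG APIs.
import pynvml

pynvml.nvmlInit()
try:
    for i in range(pynvml.nvmlDeviceGetCount()):
        gpu = pynvml.nvmlDeviceGetHandleByIndex(i)
        for j in range(pynvml.nvmlDeviceGetMaxMigDeviceCount(gpu)):
            try:
                mig = pynvml.nvmlDeviceGetMigDeviceHandleByIndex(gpu, j)
            except pynvml.NVMLError:
                continue  # MIG slot not populated
            uuid = pynvml.nvmlDeviceGetUUID(mig)
            if isinstance(uuid, bytes):
                uuid = uuid.decode()
            print(uuid)  # e.g. MIG-41b3359c-e721-56e5-8009-12e5797ed514
finally:
    pynvml.nvmlShutdown()
```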
### LocalCUDACluster
Suppose you have 3 MIG instances on the local system:
- `MIG-41b3359c-e721-56e5-8009-12e5797ed514`
- `MIG-65b79fff-6d3c-5490-a288-b31ec705f310`
- `MIG-c6e2bae8-46d4-5a7e-9a68-c6cf1f680ba0`
To start a {class}`dask_cuda.LocalCUDACluster`, the user would run the following:
```python
from dask_cuda import LocalCUDACluster
cluster = LocalCUDACluster(
CUDA_VISIBLE_DEVICES=[
"MIG-41b3359c-e721-56e5-8009-12e5797ed514",
"MIG-65b79fff-6d3c-5490-a288-b31ec705f310",
"MIG-c6e2bae8-46d4-5a7e-9a68-c6cf1f680ba0",
],
# Other `LocalCUDACluster` arguments
)
```
### dask-cuda-worker
Suppose you have 3 MIG instances on the local system:
- `MIG-41b3359c-e721-56e5-8009-12e5797ed514`
- `MIG-65b79fff-6d3c-5490-a288-b31ec705f310`
- `MIG-c6e2bae8-46d4-5a7e-9a68-c6cf1f680ba0`
To start `dask-cuda-worker` processes that connect to a scheduler whose address is stored in the `scheduler.json` file, the user would run the following:
```bash
CUDA_VISIBLE_DEVICES="MIG-41b3359c-e721-56e5-8009-12e5797ed514,MIG-65b79fff-6d3c-5490-a288-b31ec705f310,MIG-c6e2bae8-46d4-5a7e-9a68-c6cf1f680ba0" dask-cuda-worker scheduler.json # --other-arguments
```
Please note that in the example above we created 3 Dask-CUDA workers on one node. For a multi-node cluster, the correct MIG names need to be specified for each host, and they will always differ from host to host.
## XGBoost with Dask Cluster
Currently [XGBoost](https://www.nvidia.com/en-us/glossary/data-science/xgboost/) only exposes support for GPU communication via NCCL, which does not support MIG. For this reason, a Dask cluster that utilizes XGBoost would have to use TCP for all communication instead, which will likely cause considerable performance degradation. Therefore, using XGBoost with MIG is not recommended.
```{relatedexamples}
```
| 0 |
rapidsai_public_repos/deployment/source
|
rapidsai_public_repos/deployment/source/guides/index.md
|
---
html_theme.sidebar_secondary.remove: true
---
# Guides
`````{gridtoctree} 1 2 2 3
:gutter: 2 2 2 2
````{grid-item-card}
:link: mig
:link-type: doc
Multi-Instance GPUs
^^^
Use RAPIDS with Multi-Instance GPUs
{bdg}`Dask Cluster`
{bdg}`XGBoost with Dask Cluster`
````
````{grid-item-card}
:link: azure/infiniband
:link-type: doc
Infiniband on Azure
^^^
How to setup InfiniBand on Azure.
{bdg}`Microsoft Azure`
````
````{grid-item-card}
:link: scheduler-gpu-requirements
:link-type: doc
Does the Dask scheduler need a GPU?
^^^
Guidance on Dask scheduler software and hardware requirements.
{bdg-primary}`Dask`
````
````{grid-item-card}
:link: l4-gcp
:link-type: doc
L4 on Google Cloud Platform
^^^
How to setup a VM instance on GCP with an L4 GPU.
{bdg-primary}`Google Cloud Platform`
````
`````
| 0 |
rapidsai_public_repos/deployment/source
|
rapidsai_public_repos/deployment/source/guides/l4-gcp.md
|
# L4 GPUs on a Google Cloud Platform (GCP)
[L4 GPUs](https://www.nvidia.com/en-us/data-center/l4/) are a more energy- and computationally-efficient option than T4 GPUs. L4 GPUs are [generally available on GCP](https://cloud.google.com/blog/products/compute/introducing-g2-vms-with-nvidia-l4-gpus) to run your workflows with RAPIDS.
## Compute Engine Instance
### Create the Virtual Machine
To create a VM instance with an L4 GPU to run RAPIDS:
1. Open [**Compute Engine**](https://console.cloud.google.com/compute/instances).
1. Select **Create Instance**.
1. Under the **Machine configuration** section, select **GPUs** and then select `NVIDIA L4` in the **GPU type** dropdown.
1. Under the **Boot Disk** section, click **CHANGE** and select `Deep Learning on Linux` in the **Operating System** dropdown.
1. It is also recommended to increase the default boot disk size to something like `100GB`.
1. Once you have customized other attributes of the instance, click **CREATE**.
### Allow network access
To access Jupyter and Dask we will need to set up some firewall rules to open up some ports.
#### Create the firewall rule
1. Open [**VPC Network**](https://console.cloud.google.com/networking/networks/list).
2. Select **Firewall** and **Create firewall rule**
3. Give the rule a name like `rapids` and ensure the network matches the one you selected for the VM.
4. Add a tag like `rapids` which we will use to assign the rule to our VM.
5. Set your source IP range. We recommend you restrict this to your own IP address or your corporate network rather than `0.0.0.0/0` which will allow anyone to access your VM.
6. Under **Protocols and ports** allow TCP connections on ports `22,8786,8787,8888`.
#### Assign it to the VM
1. Open [**Compute Engine**](https://console.cloud.google.com/compute/instances).
2. Select your VM and press **Edit**.
3. Scroll down to **Networking** and add the `rapids` network tag you gave your firewall rule.
4. Select **Save**.
### Connect to the VM
Next we need to connect to the VM.
1. Open [**Compute Engine**](https://console.cloud.google.com/compute/instances).
2. Locate your VM and press the **SSH** button which will open a new browser tab with a terminal.
### Install CUDA and NVIDIA Container Toolkit
Since [GCP recommends CUDA 12](https://cloud.google.com/compute/docs/gpus/install-drivers-gpu#no-secure-boot) on L4 VMs, we will upgrade CUDA.
1. [Install CUDA Toolkit 12](https://developer.nvidia.com/cuda-downloads) in your VM and accept the default prompts with the following commands.
```bash
$ wget https://developer.download.nvidia.com/compute/cuda/12.1.1/local_installers/cuda_12.1.1_530.30.02_linux.run
$ sudo sh cuda_12.1.1_530.30.02_linux.run
```
1. [Install NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#setting-up-nvidia-container-toolkit) with the following commands.
```bash
$ sudo apt-get update
$ sudo apt-get install -y nvidia-container-toolkit
$ sudo nvidia-ctk runtime configure --runtime=docker
$ sudo systemctl restart docker
```
### Install RAPIDS
```{include} ../_includes/install-rapids-with-docker.md
```
### Test RAPIDS
```{include} ../_includes/test-rapids-docker-vm.md
```
### Clean up
Once you are finished head back to the [Deployments](https://console.cloud.google.com/compute/instances) page and delete the instance you created.
```{relatedexamples}
```
| 0 |
rapidsai_public_repos/deployment/source
|
rapidsai_public_repos/deployment/source/guides/scheduler-gpu-requirements.md
|
# Does the Dask scheduler need a GPU?
A common question from users deploying Dask clusters is whether the scheduler has different minimum requirements from the workers. This question is compounded when using RAPIDS and GPUs.
```{warning}
This guide outlines our current advice on scheduler hardware requirements, but this may be subject to change.
```
**TLDR; It is strongly suggested that your Dask scheduler has matching hardware/software capabilities to the other components in your cluster.**
Therefore, if your workers have GPUs and the RAPIDS libraries installed, we recommend that your scheduler does too. However, the GPU attached to your scheduler doesn't need to be as powerful as the GPUs on your workers, as long as it has the same capabilities and driver/CUDA versions.
## What does the scheduler use a GPU for?
The Dask client generates a task graph of operations that it wants to be performed and serializes any data that needs to be sent to the workers. The scheduler handles allocating those tasks to the various Dask workers and passes serialized data back and forth. The workers deserialize the data, perform calculations, serialize the result and pass it back.
This can lead users to ask, quite logically, whether the scheduler needs the same capabilities as the workers and client. It doesn't handle the actual data or do any of the user's calculations; it just decides where work should go.
Taking this further, you could even ask "Does the Dask scheduler need to be written in Python at all?". Some folks [experimented with a Rust implementation of the scheduler](https://github.com/It4innovations/rsds) a couple of years ago.
There are two primary reasons why we recommend that the scheduler has the same capabilities:
- There are edge cases where the scheduler does deserialize data.
- Some scheduler optimizations require high-level graphs to be pickled on the client and unpickled on the scheduler.
If your workload doesn't trigger any edge cases and you're not using the high-level graph optimizations, then you could likely get away without a GPU on the scheduler. But you will likely run into problems eventually, and the failure modes can be hard to debug.
### Known edge cases
When calling [`client.submit`](https://docs.dask.org/en/latest/futures.html#distributed.Client.submit) and passing data directly to a function, the whole graph is serialized and sent to the scheduler. In order for the scheduler to figure out what to do with it, the graph is deserialized. If the data uses GPUs, this can cause the scheduler to import RAPIDS libraries, attempt to instantiate a CUDA context and populate the data into GPU memory. If those libraries are missing and/or there are no GPUs, this will cause the scheduler to fail.
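As a minimal sketch of this edge case (assuming a running cluster described by `scheduler.json` and cuDF installed on the client and workers), passing GPU-backed data directly to `submit` embeds it in the graph that the scheduler must deserialize:

```python
import cudf
from distributed import Client

client = Client(scheduler_file="scheduler.json")

gdf = cudf.DataFrame({"a": [1, 2, 3]})
# The cuDF DataFrame is serialized into the task graph and travels via the
# scheduler, which therefore needs cuDF and a GPU to deserialize it.
future = client.submit(lambda df: int(df["a"].sum()), gdf)
print(future.result())
```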
Many Dask collections also have a meta object which represents the overall collection without containing any data. For example, a Dask DataFrame has a meta pandas DataFrame with the same properties, which is used during scheduling. If the underlying data is instead a cuDF DataFrame, then the meta object will be too, and it is deserialized on the scheduler.
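You can see this meta behaviour directly; a small sketch, assuming `dask_cudf` is installed:

```python
import cudf
import dask_cudf

gdf = cudf.DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
ddf = dask_cudf.from_cudf(gdf, npartitions=2)

# The meta object is an empty cuDF DataFrame, so deserializing it on the
# scheduler requires cuDF (and a GPU) to be available there.
print(type(ddf._meta))
```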
### Example failure modes
When using the default TCP communication protocol, the scheduler generally does _not_ inspect data communicated between clients and workers, so many workflows will not provoke failure. For example, suppose we set up a Dask cluster and do not provide the scheduler with a GPU. The following simple computation with [CuPy](https://cupy.dev)-backed Dask arrays completes successfully
```python
import cupy
from distributed import Client, wait
import dask.array as da
client = Client(scheduler_file="scheduler.json")
x = cupy.arange(10)
y = da.arange(1000, like=x)
z = (y * 2).persist()
wait(z)
# Now let's look at some results
print(z[:10].compute())
```
We can run this code, giving the scheduler no access to a GPU:
```sh
$ CUDA_VISIBLE_DEVICES="" dask scheduler --protocol tcp --scheduler-file scheduler.json &
$ dask cuda worker --protocol tcp --scheduler-file scheduler.json &
$ python test.py
...
[ 0 2 4 6 8 10 12 14 16 18]
...
```
In contrast, if you provision an [Infiniband-enabled system](/guides/azure/infiniband.md) and wish to take advantage of the high-performance network, you will want to use the [UCX](https://openucx.org/) protocol, rather than TCP. Using such a setup without a GPU on the scheduler will not succeed. When the client or workers communicate with the scheduler, any GPU-allocated buffers will be sent directly between GPUs (avoiding a roundtrip to host memory). This is more efficient, but will not succeed if the scheduler does not _have_ a GPU. Running the same example from above, but this time using UCX we obtain an error:
```sh
$ CUDA_VISIBLE_DEVICES="" dask scheduler --protocol ucx --scheduler-file scheduler.json &
$ dask cuda worker --protocol ucx --scheduler-file scheduler.json &
$ python test.py
...
2023-01-27 11:01:28,263 - distributed.core - ERROR - CUDA error at: .../rmm/include/rmm/cuda_device.hpp:56: cudaErrorNoDevice no CUDA-capable device is detected
Traceback (most recent call last):
File ".../distributed/distributed/utils.py", line 741, in wrapper
return await func(*args, **kwargs)
File ".../distributed/distributed/comm/ucx.py", line 372, in read
frames = [
File ".../distributed/distributed/comm/ucx.py", line 373, in <listcomp>
device_array(each_size) if is_cuda else host_array(each_size)
File ".../distributed/distributed/comm/ucx.py", line 171, in device_array
return rmm.DeviceBuffer(size=n)
File "device_buffer.pyx", line 85, in rmm._lib.device_buffer.DeviceBuffer.__cinit__
RuntimeError: CUDA error at: .../rmm/include/rmm/cuda_device.hpp:56: cudaErrorNoDevice no CUDA-capable device is detected
2023-01-27 11:01:28,263 - distributed.core - ERROR - Exception while handling op gather
Traceback (most recent call last):
File ".../distributed/distributed/core.py", line 820, in _handle_comm
result = await result
File ".../distributed/distributed/scheduler.py", line 5687, in gather
data, missing_keys, missing_workers = await gather_from_workers(
File ".../distributed/distributed/utils_comm.py", line 80, in gather_from_workers
r = await c
File ".../distributed/distributed/worker.py", line 2872, in get_data_from_worker
return await retry_operation(_get_data, operation="get_data_from_worker")
File ".../distributed/distributed/utils_comm.py", line 419, in retry_operation
return await retry(
File ".../distributed/distributed/utils_comm.py", line 404, in retry
return await coro()
File ".../distributed/distributed/worker.py", line 2852, in _get_data
response = await send_recv(
File ".../distributed/distributed/core.py", line 986, in send_recv
response = await comm.read(deserializers=deserializers)
File ".../distributed/distributed/utils.py", line 741, in wrapper
return await func(*args, **kwargs)
File ".../distributed/distributed/comm/ucx.py", line 372, in read
frames = [
File ".../distributed/distributed/comm/ucx.py", line 373, in <listcomp>
device_array(each_size) if is_cuda else host_array(each_size)
File ".../distributed/distributed/comm/ucx.py", line 171, in device_array
return rmm.DeviceBuffer(size=n)
File "device_buffer.pyx", line 85, in rmm._lib.device_buffer.DeviceBuffer.__cinit__
RuntimeError: CUDA error at: .../rmm/include/rmm/cuda_device.hpp:56: cudaErrorNoDevice no CUDA-capable device is detected
Traceback (most recent call last):
File "test.py", line 15, in <module>
print(z[:10].compute())
File ".../dask/dask/base.py", line 314, in compute
(result,) = compute(self, traverse=False, **kwargs)
File ".../dask/dask/base.py", line 599, in compute
results = schedule(dsk, keys, **kwargs)
File ".../distributed/distributed/client.py", line 3144, in get
results = self.gather(packed, asynchronous=asynchronous, direct=direct)
File ".../distributed/distributed/client.py", line 2313, in gather
return self.sync(
File ".../distributed/distributed/utils.py", line 338, in sync
return sync(
File ".../distributed/distributed/utils.py", line 405, in sync
raise exc.with_traceback(tb)
File ".../distributed/distributed/utils.py", line 378, in f
result = yield future
File ".../tornado/gen.py", line 769, in run
value = future.result()
File ".../distributed/distributed/client.py", line 2205, in _gather
response = await future
File ".../distributed/distributed/client.py", line 2256, in _gather_remote
response = await retry_operation(self.scheduler.gather, keys=keys)
File ".../distributed/distributed/utils_comm.py", line 419, in retry_operation
return await retry(
File ".../distributed/distributed/utils_comm.py", line 404, in retry
return await coro()
File ".../distributed/distributed/core.py", line 1221, in send_recv_from_rpc
return await send_recv(comm=comm, op=key, **kwargs)
File ".../distributed/distributed/core.py", line 1011, in send_recv
raise exc.with_traceback(tb)
File ".../distributed/distributed/core.py", line 820, in _handle_comm
result = await result
File ".../distributed/distributed/scheduler.py", line 5687, in gather
data, missing_keys, missing_workers = await gather_from_workers(
File ".../distributed/distributed/utils_comm.py", line 80, in gather_from_workers
r = await c
File ".../distributed/distributed/worker.py", line 2872, in get_data_from_worker
return await retry_operation(_get_data, operation="get_data_from_worker")
File ".../distributed/distributed/utils_comm.py", line 419, in retry_operation
return await retry(
File ".../distributed/distributed/utils_comm.py", line 404, in retry
return await coro()
File ".../distributed/distributed/worker.py", line 2852, in _get_data
response = await send_recv(
File ".../distributed/distributed/core.py", line 986, in send_recv
response = await comm.read(deserializers=deserializers)
File ".../distributed/distributed/utils.py", line 741, in wrapper
return await func(*args, **kwargs)
File ".../distributed/distributed/comm/ucx.py", line 372, in read
frames = [
File ".../distributed/distributed/comm/ucx.py", line 373, in <listcomp>
device_array(each_size) if is_cuda else host_array(each_size)
File ".../distributed/distributed/comm/ucx.py", line 171, in device_array
return rmm.DeviceBuffer(size=n)
File "device_buffer.pyx", line 85, in rmm._lib.device_buffer.DeviceBuffer.__cinit__
RuntimeError: CUDA error at: .../rmm/include/rmm/cuda_device.hpp:56: cudaErrorNoDevice no CUDA-capable device is detected
...
```
The critical error comes from [RMM](https://docs.rapids.ai/api/rmm/stable/): we're attempting to allocate a [`DeviceBuffer`](https://docs.rapids.ai/api/rmm/stable/basics.html#devicebuffers) on the scheduler, but there is no GPU available to do so:
```pytb
File ".../distributed/distributed/comm/ucx.py", line 171, in device_array
return rmm.DeviceBuffer(size=n)
File "device_buffer.pyx", line 85, in rmm._lib.device_buffer.DeviceBuffer.__cinit__
RuntimeError: CUDA error at: .../rmm/include/rmm/cuda_device.hpp:56: cudaErrorNoDevice no CUDA-capable device is detected
```
### Scheduler optimizations and High-Level graphs
The Dask community is actively working on implementing high-level graphs which will both speed up client -> scheduler communication and allow the scheduler to make advanced optimizations such as predicate pushdown.
Much effort has been put into using existing serialization strategies to communicate the HLG but this has proven prohibitively difficult to implement. The current plan is to simplify HighLevelGraph/Layer so that the entire HLG can be pickled on the client, sent to the scheduler as a single binary blob, and then unpickled/materialized (HLG->dict) on the scheduler. The problem with this new plan is that the pickle/un-pickle convention will require the scheduler to have the same environment as the client. If any Layer logic also requires a device allocation, then this approach also requires the scheduler to have access to a GPU.
## So what are the minimum requirements of the scheduler?
From a software perspective we recommend that the Python environment on the client, scheduler and workers all match. Given that the user is expected to ensure the worker has the same environment as the client it is not much of a burden to ensure the scheduler also has the same environment.
From a hardware perspective we recommend that the scheduler has the same capabilities, but not necessarily the same quantity of resources. Therefore, if the workers have one or more GPUs we recommend that the scheduler has access to one GPU with matching NVIDIA driver and CUDA versions. In a large multi-node cluster deployment on a cloud platform this may mean the workers are launched on VMs with 8 GPUs and the scheduler is launched on a smaller VM with one GPU. You could also select a less powerful GPU for your scheduler, such as an inference-oriented T4, provided it has the same CUDA capabilities, NVIDIA driver version and CUDA/CUDA Toolkit version.
This balance means we can guarantee things function as intended, but reduces cost because placing the scheduler on an 8 GPU node would be a waste of resources.
| 0 |
rapidsai_public_repos/deployment/source/guides
|
rapidsai_public_repos/deployment/source/guides/azure/infiniband.md
|
# How to Setup InfiniBand on Azure
[Azure GPU optimized virtual machines](https://learn.microsoft.com/en-us/azure/virtual-machines/sizes-gpu) provide a low-latency, high-bandwidth InfiniBand network. This guide walks through the steps to enable InfiniBand to optimize network performance.
## Build a Virtual Machine
Start by creating a GPU optimized VM from the Azure portal. Below is an example that we will use
for demonstration.
- Create new VM instance.
- Select `East US` region.
- Change `Availability options` to `Availability set` and create a set.
- If building multiple instances put additional instances in the same set.
- Use the 2nd Gen Ubuntu 20.04 image.
- Search all images for `Ubuntu Server 20.04` and choose the second one down on the list.
- Change size to `ND40rs_v2`.
- Set password login with credentials.
- User `someuser`
- Password `somepassword`
- Leave all other options as default.
Then connect to the VM using your preferred method.
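For example, connecting over SSH with the example credentials above (substitute your VM's public IP address for the placeholder):

```shell
ssh someuser@<vm-public-ip>
```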
## Install Software
Before installing the drivers ensure the system is up to date.
```shell
sudo apt-get update
sudo apt-get upgrade -y
```
### NVIDIA Drivers
The commands below should work for Ubuntu. See the [CUDA Toolkit documentation](https://docs.nvidia.com/cuda/index.html#installation-guides) for details on installing on other operating systems.
```shell
sudo apt-get install -y linux-headers-$(uname -r)
distribution=$(. /etc/os-release;echo $ID$VERSION_ID | sed -e 's/\.//g')
wget https://developer.download.nvidia.com/compute/cuda/repos/$distribution/x86_64/cuda-keyring_1.0-1_all.deb
sudo dpkg -i cuda-keyring_1.0-1_all.deb
sudo apt-get update
sudo apt-get -y install cuda-drivers
```
Restart VM instance
```shell
sudo reboot
```
Once the VM boots, reconnect and run `nvidia-smi` to verify driver installation.
```shell
nvidia-smi
```
```shell
Mon Nov 14 20:32:39 2022
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 520.61.05 Driver Version: 520.61.05 CUDA Version: 11.8 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 Tesla V100-SXM2... On | 00000001:00:00.0 Off | 0 |
| N/A 34C P0 41W / 300W | 445MiB / 32768MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
| 1 Tesla V100-SXM2... On | 00000002:00:00.0 Off | 0 |
| N/A 37C P0 43W / 300W | 4MiB / 32768MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
| 2 Tesla V100-SXM2... On | 00000003:00:00.0 Off | 0 |
| N/A 34C P0 42W / 300W | 4MiB / 32768MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
| 3 Tesla V100-SXM2... On | 00000004:00:00.0 Off | 0 |
| N/A 35C P0 44W / 300W | 4MiB / 32768MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
| 4 Tesla V100-SXM2... On | 00000005:00:00.0 Off | 0 |
| N/A 35C P0 41W / 300W | 4MiB / 32768MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
| 5 Tesla V100-SXM2... On | 00000006:00:00.0 Off | 0 |
| N/A 36C P0 43W / 300W | 4MiB / 32768MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
| 6 Tesla V100-SXM2... On | 00000007:00:00.0 Off | 0 |
| N/A 37C P0 44W / 300W | 4MiB / 32768MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
| 7 Tesla V100-SXM2... On | 00000008:00:00.0 Off | 0 |
| N/A 38C P0 44W / 300W | 4MiB / 32768MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
| 0 N/A N/A 1396 G /usr/lib/xorg/Xorg 427MiB |
| 0 N/A N/A 1762 G /usr/bin/gnome-shell 16MiB |
| 1 N/A N/A 1396 G /usr/lib/xorg/Xorg 4MiB |
| 2 N/A N/A 1396 G /usr/lib/xorg/Xorg 4MiB |
| 3 N/A N/A 1396 G /usr/lib/xorg/Xorg 4MiB |
| 4 N/A N/A 1396 G /usr/lib/xorg/Xorg 4MiB |
| 5 N/A N/A 1396 G /usr/lib/xorg/Xorg 4MiB |
| 6 N/A N/A 1396 G /usr/lib/xorg/Xorg 4MiB |
| 7 N/A N/A 1396 G /usr/lib/xorg/Xorg 4MiB |
+-----------------------------------------------------------------------------+
```
### InfiniBand Driver
On Ubuntu 20.04
```shell
sudo apt-get install -y automake dh-make git libcap2 libnuma-dev libtool make pkg-config udev curl librdmacm-dev rdma-core \
libgfortran5 bison chrpath flex graphviz gfortran tk dpatch quilt swig tcl ibverbs-utils
```
Check install
```shell
ibv_devinfo
```
```shell
hca_id: mlx5_0
transport: InfiniBand (0)
fw_ver: 16.28.4000
node_guid: 0015:5dff:fe33:ff2c
sys_image_guid: 0c42:a103:00b3:2f68
vendor_id: 0x02c9
vendor_part_id: 4120
hw_ver: 0x0
board_id: MT_0000000010
phys_port_cnt: 1
port: 1
state: PORT_ACTIVE (4)
max_mtu: 4096 (5)
active_mtu: 4096 (5)
sm_lid: 7
port_lid: 115
port_lmc: 0x00
link_layer: InfiniBand
hca_id: rdmaP36305p0s2
transport: InfiniBand (0)
fw_ver: 2.43.7008
node_guid: 6045:bdff:feed:8445
sys_image_guid: 043f:7203:0003:d583
vendor_id: 0x02c9
vendor_part_id: 4100
hw_ver: 0x0
board_id: MT_1090111019
phys_port_cnt: 1
port: 1
state: PORT_ACTIVE (4)
max_mtu: 4096 (5)
active_mtu: 1024 (3)
sm_lid: 0
port_lid: 0
port_lmc: 0x00
link_layer: Ethernet
```
#### Enable IPoIB
```shell
sudo sed -i -e 's/# OS.EnableRDMA=y/OS.EnableRDMA=y/g' /etc/waagent.conf
```
Reboot and reconnect.
```shell
sudo reboot
```
#### Check IB
```shell
ip addr show
```
```shell
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 60:45:bd:a7:42:cc brd ff:ff:ff:ff:ff:ff
inet 10.6.0.5/24 brd 10.6.0.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::6245:bdff:fea7:42cc/64 scope link
valid_lft forever preferred_lft forever
3: eth1: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether 00:15:5d:33:ff:16 brd ff:ff:ff:ff:ff:ff
4: enP44906s1: <BROADCAST,MULTICAST,SLAVE,UP,LOWER_UP> mtu 1500 qdisc mq master eth0 state UP group default qlen 1000
link/ether 60:45:bd:a7:42:cc brd ff:ff:ff:ff:ff:ff
altname enP44906p0s2
5: ibP59423s2: <BROADCAST,MULTICAST> mtu 4092 qdisc noop state DOWN group default qlen 256
link/infiniband 00:00:09:27:fe:80:00:00:00:00:00:00:00:15:5d:ff:fd:33:ff:16 brd 00:ff:ff:ff:ff:12:40:1b:80:1d:00:00:00:00:00:00:ff:ff:ff:ff
altname ibP59423p0s2
```
```shell
nvidia-smi topo -m
```
```shell
GPU0 GPU1 GPU2 GPU3 GPU4 GPU5 GPU6 GPU7 mlx5_0 CPU Affinity NUMA Affinity
GPU0 X NV2 NV1 NV2 NODE NODE NV1 NODE NODE 0-19 0
GPU1 NV2 X NV2 NV1 NODE NODE NODE NV1 NODE 0-19 0
GPU2 NV1 NV2 X NV1 NV2 NODE NODE NODE NODE 0-19 0
GPU3 NV2 NV1 NV1 X NODE NV2 NODE NODE NODE 0-19 0
GPU4 NODE NODE NV2 NODE X NV1 NV1 NV2 NODE 0-19 0
GPU5 NODE NODE NODE NV2 NV1 X NV2 NV1 NODE 0-19 0
GPU6 NV1 NODE NODE NODE NV1 NV2 X NV2 NODE 0-19 0
GPU7 NODE NV1 NODE NODE NV2 NV1 NV2 X NODE 0-19 0
mlx5_0 NODE NODE NODE NODE NODE NODE NODE NODE X
Legend:
X = Self
SYS = Connection traversing PCIe as well as the SMP interconnect between NUMA nodes (e.g., QPI/UPI)
NODE = Connection traversing PCIe as well as the interconnect between PCIe Host Bridges within a NUMA node
PHB = Connection traversing PCIe as well as a PCIe Host Bridge (typically the CPU)
PXB = Connection traversing multiple PCIe bridges (without traversing the PCIe Host Bridge)
PIX = Connection traversing at most a single PCIe bridge
NV# = Connection traversing a bonded set of # NVLinks
```
### Install UCX-Py and tools
```shell
wget https://github.com/conda-forge/miniforge/releases/latest/download/Mambaforge-Linux-x86_64.sh
bash Mambaforge-Linux-x86_64.sh
```
Accept the default and allow conda init to run. Then start a new shell.
Create a conda environment (see [UCX-Py](https://ucx-py.readthedocs.io/en/latest/install.html) docs)
```shell
mamba create -n ucxpy {{ rapids_conda_channels }} {{ rapids_conda_packages }} ipython ucx-proc=*=gpu ucx ucx-py dask distributed numpy cupy pytest pynvml -y
mamba activate ucxpy
```
Clone UCX-Py repo locally
```shell
git clone https://github.com/rapidsai/ucx-py.git
cd ucx-py
```
### Run Tests
Start by running the UCX-Py test suite, from within the `ucx-py` repo:
```shell
pytest -vs tests/
pytest -vs ucp/_libs/tests/
```
Now check that InfiniBand works. For that, you can run some of the benchmarks that we include in UCX-Py, for example:
```shell
# cd out of the ucx-py directory
cd ..
# Let UCX pick the best transport (expecting NVLink when available,
# otherwise InfiniBand, or TCP in worst case) on devices 0 and 1
python -m ucp.benchmarks.send_recv --server-dev 0 --client-dev 1 -o rmm --reuse-alloc -n 128MiB
# Force TCP-only on devices 0 and 1
UCX_TLS=tcp,cuda_copy python -m ucp.benchmarks.send_recv --server-dev 0 --client-dev 1 -o rmm --reuse-alloc -n 128MiB
```
We expect the first case above to have much higher bandwidth than the second. If you happen to have both NVLink and InfiniBand connectivity, then you may limit UCX to a specific transport by specifying `UCX_TLS`, e.g.:
```shell
# NVLink (if available) or TCP
UCX_TLS=tcp,cuda_copy,cuda_ipc
# InfiniBand (if available) or TCP
UCX_TLS=tcp,cuda_copy,rc
```
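For example, to force InfiniBand (with TCP as a fallback) for the same send/recv benchmark as above, prefix the command with the transport list. This is a sketch; which transports are actually usable depends on your VM's connectivity.

```shell
UCX_TLS=tcp,cuda_copy,rc python -m ucp.benchmarks.send_recv --server-dev 0 --client-dev 1 -o rmm --reuse-alloc -n 128MiB
```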
## Run Benchmarks
Finally, let's run the [merge benchmark](https://github.com/rapidsai/dask-cuda/blob/HEAD/dask_cuda/benchmarks/local_cudf_merge.py) from `dask-cuda`.
This benchmark uses Dask to perform a merge of two dataframes that are distributed across all the available GPUs on your
VM. Merges are a challenging benchmark in a distributed setting since they require communication-intensive shuffle
operations of the participating dataframes
(see the [Dask documentation](https://docs.dask.org/en/stable/dataframe-best-practices.html#avoid-full-data-shuffling)
for more on this type of operation). To perform the merge, each dataframe is shuffled such that rows with the same join
key appear on the same GPU. This results in an [all-to-all](<https://en.wikipedia.org/wiki/All-to-all_(parallel_pattern)>)
communication pattern which requires a lot of communication between the GPUs. As a result, network
performance will be very important for the throughput of the benchmark.
Below we are running on devices 0 through 7 (inclusive); you will want to adjust that for the number of devices available on your VM (the default is to run on GPU 0 only). Additionally, `--chunk-size 100_000_000` is a safe value for 32GB GPUs; you may adjust it in proportion to the size of the GPUs you have (it scales linearly, so `50_000_000` should be good for 16GB and `150_000_000` for 48GB).
```shell
# Default Dask TCP communication protocol
python -m dask_cuda.benchmarks.local_cudf_merge --devs 0,1,2,3,4,5,6,7 --chunk-size 100_000_000 --no-show-p2p-bandwidth
```
```shell
Merge benchmark
--------------------------------------------------------------------------------
Backend | dask
Merge type | gpu
Rows-per-chunk | 100000000
Base-chunks | 8
Other-chunks | 8
Broadcast | default
Protocol | tcp
Device(s) | 0,1,2,3,4,5,6,7
RMM Pool | True
Frac-match | 0.3
Worker thread(s) | 1
Data processed | 23.84 GiB
Number of workers | 8
================================================================================
Wall clock | Throughput
--------------------------------------------------------------------------------
48.51 s | 503.25 MiB/s
47.85 s | 510.23 MiB/s
41.20 s | 592.57 MiB/s
================================================================================
Throughput | 532.43 MiB/s +/- 22.13 MiB/s
Bandwidth | 44.76 MiB/s +/- 0.93 MiB/s
Wall clock | 45.85 s +/- 3.30 s
```
```shell
# UCX protocol
python -m dask_cuda.benchmarks.local_cudf_merge --devs 0,1,2,3,4,5,6,7 --chunk-size 100_000_000 --protocol ucx --no-show-p2p-bandwidth
```
```shell
Merge benchmark
--------------------------------------------------------------------------------
Backend | dask
Merge type | gpu
Rows-per-chunk | 100000000
Base-chunks | 8
Other-chunks | 8
Broadcast | default
Protocol | ucx
Device(s) | 0,1,2,3,4,5,6,7
RMM Pool | True
Frac-match | 0.3
TCP | None
InfiniBand | None
NVLink | None
Worker thread(s) | 1
Data processed | 23.84 GiB
Number of workers | 8
================================================================================
Wall clock | Throughput
--------------------------------------------------------------------------------
9.57 s | 2.49 GiB/s
6.01 s | 3.96 GiB/s
9.80 s | 2.43 GiB/s
================================================================================
Throughput | 2.82 GiB/s +/- 341.13 MiB/s
Bandwidth | 159.89 MiB/s +/- 8.96 MiB/s
Wall clock | 8.46 s +/- 1.73 s
```
```{relatedexamples}
```
| 0 |
rapidsai_public_repos/deployment/source
|
rapidsai_public_repos/deployment/source/tools/dask-cuda.md
|
# dask-cuda
[Dask-CUDA](https://docs.rapids.ai/api/dask-cuda/stable/) is a library extending `LocalCluster` from `dask.distributed` to enable multi-GPU workloads.
## LocalCUDACluster
You can use `LocalCUDACluster` to create a cluster of one or more GPUs on your local machine. You can launch a Dask scheduler on LocalCUDACluster to parallelize and distribute your RAPIDS workflows across multiple GPUs on a single node.
In addition to enabling multi-GPU computation, `LocalCUDACluster` also provides a simple interface for managing the cluster, such as starting and stopping the cluster, querying the status of the nodes, and monitoring the workload distribution.
## Pre-requisites
Before running these instructions, ensure you have installed the [`dask`](https://docs.dask.org/en/stable/install.html) and [`dask-cuda`](https://docs.rapids.ai/api/dask-cuda/nightly/install.html) packages in your local environment.
## Cluster setup
### Instantiate a LocalCUDACluster object
The `LocalCUDACluster` class autodetects the GPUs in your system, so if you create it on a machine with two GPUs it will create a cluster with two workers, each of which is responsible for executing tasks on a separate GPU.
```python
from dask_cuda import LocalCUDACluster

cluster = LocalCUDACluster()
```
You can also restrict your cluster to use specific GPUs by setting the `CUDA_VISIBLE_DEVICES` environment variable, or as a keyword argument.
```python
cluster = LocalCUDACluster(CUDA_VISIBLE_DEVICES="0,1")  # Creates one worker each for GPUs 0 and 1
```
### Connecting a Dask client
The Dask scheduler coordinates the execution of tasks, whereas the Dask client is the user-facing interface that submits tasks to the scheduler and monitors their progress.
```python
from dask.distributed import Client

client = Client(cluster)
```
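When you are finished, shut the client and cluster down explicitly, or use them as context managers so cleanup happens automatically; a minimal sketch:

```python
from dask_cuda import LocalCUDACluster
from dask.distributed import Client

with LocalCUDACluster() as cluster, Client(cluster) as client:
    ...  # submit work here
# Both the client and the cluster are closed automatically on exit
```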
## Test RAPIDS
To test RAPIDS, create a `distributed` client for the cluster and query for the GPU model.
```python
from dask_cuda import LocalCUDACluster
from dask.distributed import Client
def get_gpu_model():
import pynvml
pynvml.nvmlInit()
return pynvml.nvmlDeviceGetName(pynvml.nvmlDeviceGetHandleByIndex(0))
def main():
cluster = LocalCUDACluster()
client = Client(cluster)
result = client.submit(get_gpu_model).result()
print(f"{result=}")
if __name__ == "__main__":
main()
```
| 0 |
rapidsai_public_repos/deployment/source
|
rapidsai_public_repos/deployment/source/tools/index.md
|
# Tools
## Packages
`````{gridtoctree} 1 2 2 3
:gutter: 2 2 2 2
````{grid-item-card}
:link: rapids-docker
:link-type: doc
Container Images
^^^
Container images containing the RAPIDS software environment.
````
````{grid-item-card}
:link: dask-cuda
:link-type: doc
Dask CUDA
^^^
Dask-CUDA is a library extending Dask.distributed’s single-machine LocalCluster and Worker for use in distributed GPU workloads.
````
`````
## Kubernetes
`````{gridtoctree} 1 2 2 3
:gutter: 2 2 2 2
````{grid-item-card}
:link: kubernetes/dask-operator
:link-type: doc
Dask Kubernetes Operator
^^^
Launch RAPIDS containers and clusters as native Kubernetes resources with the Dask Operator.
````
````{grid-item-card}
:link: kubernetes/dask-kubernetes
:link-type: doc
Dask Kubernetes (Classic)
^^^
Spawn RAPIDS Pods on Kubernetes with `dask-kubernetes` (deprecated).
````
````{grid-item-card}
:link: kubernetes/dask-helm-chart
:link-type: doc
Dask Helm Chart
^^^
Install a single user notebook and cluster on Kubernetes with the Dask Helm Chart.
````
`````
| 0 |
rapidsai_public_repos/deployment/source
|
rapidsai_public_repos/deployment/source/tools/rapids-docker.md
|
# Container Images
Installation instructions for Docker are hosted at the [RAPIDS Container Installation Docs Page](https://docs.rapids.ai/install#docker).
```{relatedexamples}
```
| 0 |