repo_id
stringlengths 21
96
| file_path
stringlengths 31
155
| content
stringlengths 1
92.9M
| __index_level_0__
int64 0
0
|
---|---|---|---|
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/cuda/CMakeLists.txt
|
#=============================================================================
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
cmake_minimum_required(VERSION 3.24.1 FATAL_ERROR)

# Emit compile_commands.json so clangd and other tooling see the exact flags.
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

# Clear any inherited library output directory (both the normal variable and
# any cached value) so the built `.node` addon lands in the default
# per-config build location that the install rules below expect.
unset(CMAKE_LIBRARY_OUTPUT_DIRECTORY)
unset(CMAKE_LIBRARY_OUTPUT_DIRECTORY CACHE)

option(NODE_RAPIDS_USE_SCCACHE "Enable caching compilation results with sccache" ON)

###################################################################################################
# - cmake modules ---------------------------------------------------------------------------------

# Ask node for the CMake module directory shipped with the `@rapidsai/core` package.
execute_process(COMMAND node -p
                        "require('@rapidsai/core').cmake_modules_path"
                WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
                OUTPUT_VARIABLE NODE_RAPIDS_CMAKE_MODULES_PATH
                OUTPUT_STRIP_TRAILING_WHITESPACE)

include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/cmake_policies.cmake")

# `npm_package_version` is set by npm/yarn when configuring via a package.json
# script; it is read at configure time only.
project(rapidsai_cuda VERSION $ENV{npm_package_version} LANGUAGES C CXX)

# Directory of the installed `@rapidsai/core` module (provides rapidsai_core.node).
execute_process(COMMAND node -p
                        "require('path').dirname(require.resolve('@rapidsai/core'))"
                WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
                OUTPUT_VARIABLE NODE_RAPIDS_CORE_MODULE_PATH
                OUTPUT_STRIP_TRAILING_WHITESPACE)

include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureCXX.cmake")
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureCUDA.cmake")
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureNapi.cmake")
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/install_utils.cmake")

###################################################################################################
# - rapidsai_cuda target --------------------------------------------------------------------------

# CONFIGURE_DEPENDS re-checks the glob at build time so newly added sources
# are picked up without a manual re-configure (avoids stale incremental builds).
file(GLOB_RECURSE NODE_CUDA_SRC_FILES CONFIGURE_DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp")

add_library(${PROJECT_NAME} SHARED ${NODE_CUDA_SRC_FILES} ${CMAKE_JS_SRC})

# Name the output `rapidsai_cuda.node` (no `lib` prefix) and make the dynamic
# loader search next to the addon itself ($ORIGIN) for its dependencies.
set_target_properties(${PROJECT_NAME}
    PROPERTIES PREFIX ""
               SUFFIX ".node"
               BUILD_RPATH "\$ORIGIN"
               INSTALL_RPATH "\$ORIGIN"
               CXX_STANDARD 17
               CXX_STANDARD_REQUIRED ON
               CUDA_STANDARD 17
               CUDA_STANDARD_REQUIRED ON
               POSITION_INDEPENDENT_CODE ON
               INTERFACE_POSITION_INDEPENDENT_CODE ON
)

# Per-language flags come from the Configure*.cmake modules included above;
# BUILD_INTERFACE keeps them out of any exported/install interface.
target_compile_options(${PROJECT_NAME}
    PRIVATE "$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:C>:${NODE_RAPIDS_CMAKE_C_FLAGS}>>"
            "$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:CXX>:${NODE_RAPIDS_CMAKE_CXX_FLAGS}>>"
            "$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:CUDA>:${NODE_RAPIDS_CMAKE_CUDA_FLAGS}>>"
)

target_include_directories(${PROJECT_NAME}
    PUBLIC "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src>"
           "$<BUILD_INTERFACE:${RAPIDS_CORE_INCLUDE_DIR}>"
           "$<BUILD_INTERFACE:${NAPI_INCLUDE_DIRS}>"
)

# Use per-thread default streams in both host C++ and CUDA translation units.
target_compile_definitions(${PROJECT_NAME}
    PUBLIC "$<$<COMPILE_LANGUAGE:CXX>:CUDA_API_PER_THREAD_DEFAULT_STREAM>"
           "$<$<COMPILE_LANGUAGE:CUDA>:CUDA_API_PER_THREAD_DEFAULT_STREAM>"
)

# NOTE(review): the sibling addon is linked by an explicit path into its build
# tree keyed on CMAKE_BUILD_TYPE — this assumes both modules are built with
# the same single-config generator and build type; confirm before changing.
target_link_libraries(${PROJECT_NAME}
    PUBLIC ${CMAKE_JS_LIB}
           CUDA::nvrtc_static
           CUDA::cudart_static
           CUDA::nppig_static
           CUDA::nppicc_static
           "${NODE_RAPIDS_CORE_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_core.node")

include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/cuda_arch_helpers.cmake")

generate_arch_specific_custom_targets(
    NAME ${PROJECT_NAME}
)

generate_install_rules(
    NAME ${PROJECT_NAME}
    CUDA_ARCHITECTURES ${CMAKE_CUDA_ARCHITECTURES})

# Create a symlink to compile_commands.json for the llvm-vs-code-extensions.vscode-clangd plugin.
# Paths are quoted so checkouts with spaces in the path still work.
execute_process(COMMAND
    ${CMAKE_COMMAND} -E create_symlink
        "${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json"
        "${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json")
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/cuda/README.md
|
### node-cuda (`npm install @rapidsai/cuda`)
A node native addon that provides bindings to the CUDA driver and runtime APIs.
These bindings allow calling the CUDA device management, memory, stream, event, ipc, and
DX/GL interop APIs from JS. These APIs allow node (v8 or chakracore) applications to read,
write, and share memory via zero-copy CUDA IPC with external processes that also use the
CUDA, OpenGL, and RAPIDS libraries.
#### Device management:
cudaChooseDevice, cudaGetDeviceCount, cuDeviceGet, cudaDeviceGetPCIBusId, cudaDeviceGetByPCIBusId, cudaGetDevice, cudaGetDeviceFlags, cudaGetDeviceProperties, cudaSetDevice, cudaSetDeviceFlags, cudaDeviceReset, cudaDeviceSynchronize, cudaDeviceCanAccessPeer, cudaDeviceEnablePeerAccess, cudaDeviceDisablePeerAccess
#### Memory:
- `CUDADevice`: A class to wrap and manage a CUDA device.
- `CUDABuffer`: A class to wrap and manage device memory allocations (similar to ArrayBuffer).
- `CUDAArray`: A class to wrap operations like read/write/share on a `CUDABuffer` (similar to TypedArray).
- cuPointerGetAttribute, cudaMalloc, cudaFree, cudaMallocHost, cudaFreeHost, cudaHostRegister, cudaHostUnregister, cudaMemcpy, cudaMemset, cudaMemcpyAsync, cudaMemsetAsync, cudaMemGetInfo
#### IPC:
cudaIpcGetMemHandle, cudaIpcOpenMemHandle, cudaIpcCloseMemHandle
#### Stream:
cudaStreamCreate, cudaStreamDestroy, cudaStreamSynchronize
#### OpenGL:
cuGraphicsGLRegisterBuffer, cuGraphicsGLRegisterImage, cuGraphicsUnregisterResource, cuGraphicsMapResources, cuGraphicsUnmapResources, cuGraphicsResourceGetMappedPointer
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/cuda/tsconfig.json
|
{
"include": ["src"],
"exclude": ["node_modules"],
"compilerOptions": {
"baseUrl": "./",
"paths": {
"@rapidsai/cuda": ["src/index"],
"@rapidsai/cuda/*": ["src/*"]
},
"target": "ESNEXT",
"module": "commonjs",
"outDir": "./build/js",
/* Decorators */
"experimentalDecorators": false,
/* Basic stuff */
"moduleResolution": "node",
"skipLibCheck": true,
"skipDefaultLibCheck": true,
"lib": ["dom", "esnext", "esnext.asynciterable"],
/* Control what is emitted */
"declaration": true,
"declarationMap": true,
"noEmitOnError": true,
"removeComments": false,
"downlevelIteration": true,
/* Create inline sourcemaps with sources */
"sourceMap": false,
"inlineSources": true,
"inlineSourceMap": true,
/* The most restrictive settings possible */
"strict": true,
"importHelpers": true,
"noEmitHelpers": true,
"noImplicitAny": true,
"noUnusedLocals": true,
"noImplicitReturns": true,
"allowUnusedLabels": false,
"noUnusedParameters": true,
"allowUnreachableCode": false,
"noFallthroughCasesInSwitch": true,
"forceConsistentCasingInFileNames": true
}
}
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/cuda/LICENSE
|
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------
src/visit_struct/visit_struct.hpp (modified): BSL 1.0
Boost Software License - Version 1.0 - August 17th, 2003
Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:
The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/cuda/typedoc.js
|
// TypeDoc configuration for generating the `@rapidsai/cuda` API reference.
module.exports = {
  entryPoints: ['src/index.ts'],  // document the public entry module only
  out: 'doc',                     // emit the generated site into ./doc
  name: '@rapidsai/cuda',
  tsconfig: 'tsconfig.json',
  // Keep the reference limited to the public surface area.
  excludePrivate: true,
  excludeProtected: true,
  excludeExternals: true,
};
| 0 |
rapidsai_public_repos/node/modules/cuda
|
rapidsai_public_repos/node/modules/cuda/.vscode/launch.json
|
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"compounds": [
{
"name": "Debug Tests (TS and C++)",
"configurations": [
"Debug Tests (launch gdb)",
// "Debug Tests (launch lldb)",
"Debug Tests (attach node)",
]
}
],
"configurations": [
{
"name": "Debug Tests (TS only)",
"type": "node",
"request": "launch",
"cwd": "${workspaceFolder}",
"console": "integratedTerminal",
"internalConsoleOptions": "neverOpen",
"program": "${workspaceFolder}/node_modules/.bin/jest",
"skipFiles": [
"<node_internals>/**",
"${workspaceFolder}/node_modules/**"
],
"env": {
"NODE_NO_WARNINGS": "1",
"NODE_ENV": "production",
"READABLE_STREAM": "disable",
},
"args": [
"--verbose",
"--runInBand",
"-c", "jest.config.js",
"${input:TEST_FILE}"
]
},
// {
// "name": "Debug Tests (launch lldb)",
// // hide the individual configurations from the debug dropdown list
// "presentation": { "hidden": true },
// "type": "lldb",
// "request": "launch",
// "stdio": null,
// "cwd": "${workspaceFolder}",
// "preLaunchTask": "cpp:ensure:debug:build",
// "env": {
// "NODE_DEBUG": "1",
// "NODE_NO_WARNINGS": "1",
// "NODE_ENV": "production",
// "READABLE_STREAM": "disable",
// },
// "stopOnEntry": false,
// "terminal": "console",
// "program": "${input:NODE_BINARY}",
// "initCommands": [
// "settings set target.disable-aslr false",
// ],
// "sourceLanguages": ["cpp", "cuda", "javascript"],
// "args": [
// "--inspect=9229",
// "--expose-internals",
// "${workspaceFolder}/node_modules/.bin/jest",
// "--verbose",
// "--runInBand",
// "-c",
// "jest.config.js",
// "${input:TEST_FILE}"
// ],
// },
{
"name": "Debug Tests (launch gdb)",
// hide the individual configurations from the debug dropdown list
"presentation": { "hidden": true },
"type": "cppdbg",
"request": "launch",
"stopAtEntry": false,
"externalConsole": false,
"cwd": "${workspaceFolder}",
"envFile": "${workspaceFolder}/.env",
"MIMode": "gdb",
"miDebuggerPath": "/usr/bin/gdb",
"setupCommands": [
{
"description": "Enable pretty-printing for gdb",
"text": "-enable-pretty-printing",
"ignoreFailures": true
}
],
"program": "${input:NODE_BINARY}",
"environment": [
{ "name": "NODE_DEBUG", "value": "1" },
{ "name": "NODE_NO_WARNINGS", "value": "1" },
{ "name": "NODE_ENV", "value": "production" },
{ "name": "READABLE_STREAM", "value": "disable" },
],
"args": [
"--inspect=9229",
"--expose-internals",
"${workspaceFolder}/node_modules/.bin/jest",
"--verbose",
"--runInBand",
"-c",
"jest.config.js",
"${input:TEST_FILE}"
],
},
{
"name": "Debug Tests (attach node)",
"type": "node",
"request": "attach",
// hide the individual configurations from the debug dropdown list
"presentation": { "hidden": true },
"port": 9229,
"timeout": 60000,
"cwd": "${workspaceFolder}",
"skipFiles": [
"<node_internals>/**",
"${workspaceFolder}/node_modules/**"
],
},
],
"inputs": [
{
"type": "command",
"id": "NODE_BINARY",
"command": "shellCommand.execute",
"args": {
"description": "path to node",
"command": "which node",
"useFirstResult": true,
}
},
{
"type": "command",
"id": "TEST_FILE",
"command": "shellCommand.execute",
"args": {
"cwd": "${workspaceFolder}/modules/cuda",
"description": "Select a file to debug",
"command": "./node_modules/.bin/jest --listTests | sed -r \"s@$PWD/test/@@g\"",
}
},
],
}
| 0 |
rapidsai_public_repos/node/modules/cuda
|
rapidsai_public_repos/node/modules/cuda/.vscode/tasks.json
|
{
"version": "2.0.0",
"tasks": [
{
"type": "shell",
"label": "Rebuild node_cuda TS and C++ (slow)",
"group": { "kind": "build", "isDefault": true, },
"command": "if [[ \"${input:CMAKE_BUILD_TYPE}\" == \"Release\" ]]; then yarn rebuild; else yarn rebuild:debug; fi",
"problemMatcher": [
"$tsc",
{
"owner": "cuda",
"fileLocation": ["relative", "${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}"],
"pattern": {
"file": 1,
"line": 2,
"severity": 3,
"message": 4,
"regexp": "^(.*)\\((\\d+)\\):\\s+(error|warning|note|info):\\s+(.*)$"
}
},
{
"owner": "cpp",
"fileLocation": ["relative", "${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}"],
"pattern": {
"file": 1,
"line": 2,
"severity": 4,
"message": 5,
"regexp": "^(.*):(\\d+):(\\d+):\\s+(error|warning|note|info):\\s+(.*)$"
}
},
],
},
{
"type": "npm",
"group": "build",
"label": "Recompile node_cuda TS (fast)",
"script": "tsc:build",
"detail": "yarn tsc:build",
"problemMatcher": ["$tsc"],
},
{
"type": "shell",
"group": "build",
"label": "Recompile node_cuda C++ (fast)",
"command": "ninja -C ${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}",
"problemMatcher": [
{
"owner": "cuda",
"fileLocation": ["relative", "${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}"],
"pattern": {
"file": 1,
"line": 2,
"severity": 3,
"message": 4,
"regexp": "^(.*)\\((\\d+)\\):\\s+(error|warning|note|info):\\s+(.*)$"
}
},
{
"owner": "cpp",
"fileLocation": ["relative", "${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}"],
"pattern": {
"file": 1,
"line": 2,
"severity": 4,
"message": 5,
"regexp": "^(.*):(\\d+):(\\d+):\\s+(error|warning|note|info):\\s+(.*)$"
}
},
],
},
],
"inputs": [
{
"type": "pickString",
"default": "Release",
"id": "CMAKE_BUILD_TYPE",
"options": ["Release", "Debug"],
"description": "C++ Build Type",
}
]
}
| 0 |
rapidsai_public_repos/node/modules/cuda
|
rapidsai_public_repos/node/modules/cuda/src/stream.cpp
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "node_cuda/utilities/cpp_to_napi.hpp"
#include "node_cuda/utilities/napi_to_cpp.hpp"
#include <cuda_runtime_api.h>
#include <nv_node/macros.hpp>
#include <nv_node/utilities/args.hpp>
namespace nv {

// cudaError_t cudaStreamCreate(cudaStream_t *pStream);
// Creates a new CUDA stream and returns it to JS, converted by CPPToNapi.
// NODE_CUDA_TRY raises a JS exception via `env` if the CUDA call fails.
Napi::Value cudaStreamCreate(CallbackArgs const& info) {
  auto env = info.Env();
  cudaStream_t stream;
  NODE_CUDA_TRY(CUDARTAPI::cudaStreamCreate(&stream), env);
  return CPPToNapi(info)(stream);
}

// cudaError_t cudaStreamDestroy(cudaStream_t stream);
// Destroys the stream passed as the first JS argument (converted by
// CallbackArgs' implicit conversion operator).
void cudaStreamDestroy(CallbackArgs const& info) {
  auto env = info.Env();
  cudaStream_t stream = info[0];
  NODE_CUDA_TRY(CUDARTAPI::cudaStreamDestroy(stream), env);
}

// cudaError_t cudaStreamSynchronize(cudaStream_t stream);
// Blocks the calling thread until all work queued on the stream completes.
void cudaStreamSynchronize(CallbackArgs const& info) {
  auto env = info.Env();
  cudaStream_t stream = info[0];
  NODE_CUDA_TRY(CUDARTAPI::cudaStreamSynchronize(stream), env);
}

namespace stream {

// Registers the stream bindings as `create`/`destroy`/`synchronize` on the
// `runtime` exports object. `driver` is accepted but unused here —
// presumably kept so every initModule shares the same signature; confirm
// against the other src/*.cpp modules.
Napi::Object initModule(Napi::Env const& env,
                        Napi::Object exports,
                        Napi::Object driver,
                        Napi::Object runtime) {
  EXPORT_FUNC(env, runtime, "create", nv::cudaStreamCreate);
  EXPORT_FUNC(env, runtime, "destroy", nv::cudaStreamDestroy);
  EXPORT_FUNC(env, runtime, "synchronize", nv::cudaStreamSynchronize);
  return exports;
}

}  // namespace stream
}  // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cuda
|
rapidsai_public_repos/node/modules/cuda/src/index.ts
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Re-export the raw native addon namespace under two aliases.
export * as addon from './addon';
export * as CUDA from './addon';
// Low-level CUDA driver (`cu*`) and runtime (`cuda*`) binding namespaces.
export {driver, runtime} from './addon';
// Higher-level TypeScript wrappers built on top of the addon.
export * from './util';
export * from './buffer';
export * from './device';
export * from './memory';
export * from './interfaces';
| 0 |
rapidsai_public_repos/node/modules/cuda
|
rapidsai_public_repos/node/modules/cuda/src/node_cuda.ts
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* eslint-disable @typescript-eslint/no-namespace */
import {MemoryData} from './interfaces';
/** @ignore */
export declare const _cpp_exports: any;
export declare const VERSION: number;
export declare const IPC_HANDLE_SIZE: number;
export declare function getDriverVersion(): number;
export declare function rgbaMirror(
width: number, height: number, axis: number, source: any, target?: any): void;
export declare function bgraToYCrCb420(
target: any, source: any, width: number, height: number): void;
/**
 * Ambient declarations for math functions exported by the native addon.
 * Each mirrors the corresponding JS `Math` method but is generic over
 * `number | bigint` — presumably implemented natively in C++/CUDA; confirm
 * against the addon sources.
 */
export declare namespace Math {
  export function abs<T extends number|bigint>(x: T): T;
  export function acos<T extends number|bigint>(x: T): T;
  export function asin<T extends number|bigint>(x: T): T;
  export function atan<T extends number|bigint>(x: T): T;
  export function atan2<T extends number|bigint>(y: T, x: T): T;
  export function ceil<T extends number|bigint>(x: T): T;
  export function cos<T extends number|bigint>(x: T): T;
  export function exp<T extends number|bigint>(x: T): T;
  export function floor<T extends number|bigint>(x: T): T;
  export function log<T extends number|bigint>(x: T): T;
  export function max<T extends number|bigint>(...values: T[]): T;
  export function min<T extends number|bigint>(...values: T[]): T;
  export function pow<T extends number|bigint>(x: T, y: T): T;
  export function round<T extends number|bigint>(x: T): T;
  export function sin<T extends number|bigint>(x: T): T;
  export function sqrt<T extends number|bigint>(x: T): T;
  export function tan<T extends number|bigint>(x: T): T;
}
/**
 * Ambient declarations for CUDA *driver* API bindings (`cu*` functions)
 * implemented by the native addon.
 */
export declare namespace driver {
  /**
   * Attribute selectors accepted by {@link cuPointerGetAttribute}.
   * Declared without initializers — the numeric values come from the native
   * addon at runtime.
   * @ignore
   */
  export enum PointerAttributes {
    CONTEXT,
    MEMORY_TYPE,
    DEVICE_POINTER,
    HOST_POINTER,
    // P2P_TOKENS,
    SYNC_MEMOPS,
    BUFFER_ID,
    IS_MANAGED,
    DEVICE_ORDINAL,
  }
  // Queries a pointer attribute for the given device/host memory; the return
  // type depends on the attribute requested, hence `any`.
  export function cuPointerGetAttribute(mem: ArrayBuffer|ArrayBufferView|MemoryData,
                                        attr: PointerAttributes): any;
}
/**
 * Ambient declarations for CUDA *runtime* API bindings (`cuda*` functions),
 * including the CUDA/OpenGL interop entry points.
 */
export declare namespace runtime {
  /**
   * Flags to register a graphics resource
   * @ignore
   */
  export enum GraphicsRegisterFlags {
    NONE,
    READ_ONLY,
    WRITE_DISCARD,
  }
  // Reports free and total device memory (presumably in bytes — confirm in C++).
  export function cudaMemGetInfo(): {free: number, total: number};
  // `stream` is optional; when omitted the native side presumably uses the
  // default stream — confirm against the addon implementation.
  export function cudaMemset(
    target: MemoryData, value: number, count: number, stream?: number): void;
  export function cudaMemcpy(
    target: MemoryData, source: MemoryData, count: number, stream?: number): void;
  // `list` selects which GL-associated devices to enumerate (0|1|2 —
  // presumably mirrors the cudaGLDeviceList enum; confirm).
  export function cudaGLGetDevices(list: 0|1|2): number[];
  // OpenGL interop: register/unregister GL objects and map/unmap them for
  // CUDA access. Resources are identified by opaque numeric handles.
  export function cudaGraphicsGLRegisterBuffer(glBuffer: number, flags: number): number;
  export function cudaGraphicsGLRegisterImage(
    glImage: number, target: number, flags: number): number;
  export function cudaGraphicsUnregisterResource(resource: number): void;
  export function cudaGraphicsMapResources(resources: number[]): void;
  export function cudaGraphicsUnmapResources(resources: number[]): void;
  export function cudaGraphicsResourceGetMappedArray(resource: number): CUDAArray;
  export function cudaGraphicsResourceGetMappedPointer(resource: number): MappedGLMemory;
}
/**
* CUDAArray channel format kind
* @ignore
*/
export declare enum ChannelFormatKind {
/** Signed channel format */
SIGNED,
/** Unsigned channel format */
UNSIGNED,
/** Float channel format */
FLOAT,
/** No channel format */
NONE,
}
/**
 * Ambient declaration of the native `CUDAArray` wrapper: a CUDA array handle
 * together with its extent and channel-format description. Instances are only
 * constructed from C++ (the constructor is private).
 * @ignore
 */
export declare class CUDAArray {
  private constructor(ptr: number,
                      extent: {width: number, height: number, depth: number},
                      channelFormatDesc: {
                        x: number,
                        y: number,
                        z: number,
                        w: number,
                        f: ChannelFormatKind,
                      },
                      // `type` is 0|1|2 — presumably selects 1D/2D/3D or
                      // array vs. mipmapped; confirm in the C++ sources.
                      flags: number,
                      type: 0|1|2)
  // Opaque numeric handle to the underlying native array.
  readonly ary: number;
  readonly byteLength: number;
  readonly bytesPerElement: number;
  // Extent of the array in elements.
  readonly width: number;
  readonly height: number;
  readonly depth: number;
  // Bits per channel component (x/y/z/w) and the channel format kind.
  readonly channelFormatX: number;
  readonly channelFormatY: number;
  readonly channelFormatZ: number;
  readonly channelFormatW: number;
  readonly channelFormatKind: ChannelFormatKind;
}
/**
* @summary The flags for the {@link Device}'s primary context.
*
* @description The three LSBs of the `flags` parameter can be used to control
* how the OS thread, which owns the CUDA context at the time of an API call,
* interacts with the OS scheduler when waiting for results from the GPU.
* Only one of the scheduling flags can be set when creating a context.
*/
export declare enum DeviceFlags {
  // NOTE(review): these members appear to mirror the CUDA runtime's
  // `cudaDeviceScheduleAuto/Spin/Yield/BlockingSync`, `cudaDeviceMapHost`, and
  // `cudaDeviceLmemResizeToMax` flags; the numeric values are assigned by the
  // native addon — confirm there.
  /**
   * Uses a heuristic based on the number of active CUDA contexts in the
   * process `C` and the number of logical processors in the system `P`.
   * If `C` > `P`, then CUDA will yield to other OS threads when waiting
   * for the GPU (`DeviceFlag.scheduleYield`), otherwise CUDA will not
   * yield while waiting for results and actively spin on the processor
   * (`DeviceFlag.scheduleSpin`).
   * <br/>
   * Additionally, on Tegra devices, `DeviceFlag.scheduleAuto` uses a
   * heuristic based on the power profile of the platform and may choose
   * `DeviceFlag.scheduleBlockingSync` for low-powered devices.
   */
  scheduleAuto,
  /**
   * Instruct CUDA to actively spin when waiting for results from the GPU.
   * This can decrease latency when waiting for the GPU, but may lower the
   * performance of CPU threads if they are performing work in parallel
   * with the CUDA thread.
   */
  scheduleSpin,
  /**
   * Instruct CUDA to yield its thread when waiting for results from the
   * GPU. This can increase latency when waiting for the GPU, but can
   * increase the performance of CPU threads performing work in parallel
   * with the GPU.
   */
  scheduleYield,
  /**
   * Instruct CUDA to block the CPU thread on a synchronization primitive
   * when waiting for the GPU to finish work.
   */
  scheduleBlockingSync,
  /**
   * @ignore
   */
  mapHost,
  /**
   * Instruct CUDA to not reduce local memory after resizing local memory
   * for a kernel. This can prevent thrashing by local memory allocations
   * when launching many kernels with high local memory usage at the cost
   * of potentially increased memory usage.
   */
  lmemResizeToMax,
}
export declare class Device {
  /**
   * The number of compute-capable CUDA devices.
   */
  static readonly numDevices: number;
  /**
   * The id of this thread's active CUDA device.
   */
  static readonly activeDeviceId: number;
  /**
   * @param deviceId The CUDA device ordinal to wrap (presumably defaults to
   *   the active device — TODO confirm addon default).
   * @param flags Optional {@link DeviceFlags} for the device's primary context.
   */
  constructor(deviceId?: number, flags?: DeviceFlags);
  /**
   * The CUDA device identifier
   */
  readonly id: number;
  /**
   * The CUDA device PCI bus string id
   */
  readonly pciBusName: string;
  /**
   * @summary Destroy all allocations and reset all state on the current
   * device in the current process.
   *
   * @description
   * Explicitly destroys and cleans up all resources associated with the
   * current device in the current process. Any subsequent API call to
   * this device will reinitialize the device.
   * <br/><br/>
   * Note that this function will reset the device immediately. It is the
   * caller's responsibility to ensure that the device is not being accessed
   * by any other host threads from the process when this function is called.
   */
  reset(): this;
  /**
   * @summary Set this device to be used for GPU executions.
   *
   * @description
   * Sets this device as the current device for the calling host thread.
   * <br/><br/>
   * Any device memory subsequently allocated from this host thread
   * will be physically resident on this device. Any host memory allocated
   * from this host thread will have its lifetime associated with this
   * device. Any streams or events created from this host thread will
   * be associated with this device. Any kernels launched from this host
   * thread will be executed on this device.
   * <br/><br/>
   * This call may be made from any host thread, to any device, and at
   * any time. This function will do no synchronization with the previous
   * or new device, and should be considered a very low overhead call.
   */
  activate(): this;
  /**
   * @summary Get the {@link DeviceFlags device flags} used to initialize this device.
   */
  getFlags(): DeviceFlags;
  /**
   * @summary Set the {@link DeviceFlags device flags} for the device's primary context.
   *
   * @param {DeviceFlags} newFlags The new flags for the device's primary context.
   */
  setFlags(newFlags: DeviceFlags): void;
  /**
   * @summary An object with information about the device.
   */
  getProperties(): DeviceProperties;
  /**
   * @summary Wait for this compute device to finish.
   *
   * @description
   * Blocks execution of further device calls until the device has completed
   * all preceding requested tasks.
   *
   * @throws an error if one of the preceding tasks has failed. If the
   * `cudaDeviceScheduleBlockingSync` flag was set for this device, the
   * host thread will block until the device has finished its work.
   */
  synchronize(): this;
  /**
   * @summary Ensures this device is active, then executes the supplied `work` function.
   * <br/><br/>
   * If the current device was not already the active device, restores the active device after the
   * `work` function has completed.
   * @param work A function to execute
   */
  callInContext(work: () => any): this;
  /**
   * @summary Queries if a device may directly access a peer device's memory.
   * <br/><br/>
   * If direct access of `peerDevice` from this device is possible, then
   * access may be enabled on two specific devices by calling
   * {@link enablePeerAccess}.
   *
   * @returns `true` if this Device's contexts are capable of directly
   * accessing memory from contexts on `peerDevice`, otherwise `false`.
   */
  canAccessPeerDevice(peerDevice: Device): boolean;
  /**
   * @summary Enables direct access to memory allocations in a peer device.
   */
  enablePeerAccess(peerDevice: Device): this;
  /**
   * @summary Disables direct access to memory allocations in a peer device and unregisters any
   * registered allocations.
   */
  disablePeerAccess(peerDevice: Device): this;
}
export declare interface DeviceProperties {
  /** ASCII string identifying device */
  name: string;
  /** 16-byte unique identifier */
  uuid: ArrayBuffer;
  /** Global memory available on device in bytes */
  totalGlobalMem: number;
  /** Shared memory available per block in bytes */
  sharedMemPerBlock: number;
  /** 32-bit registers available per block */
  regsPerBlock: number;
  /** Warp size in threads */
  warpSize: number;
  /** Maximum pitch in bytes allowed by memory copies */
  memPitch: number;
  /** Maximum number of threads per block */
  maxThreadsPerBlock: number;
  /** Maximum size of each dimension of a block */
  maxThreadsDim: ReadonlyArray<number>;
  /** Maximum size of each dimension of a grid */
  maxGridSize: ReadonlyArray<number>;
  /** Clock frequency in kilohertz */
  clockRate: number;
  /** Constant memory available on device in bytes */
  totalConstMem: number;
  /** Major compute capability */
  major: number;
  /** Minor compute capability */
  minor: number;
  /** Alignment requirement for textures */
  textureAlignment: number;
  /** Pitch alignment requirement for texture references bound to pitched memory */
  texturePitchAlignment: number;
  /**
   * Device can concurrently copy memory and execute a kernel. Deprecated; use
   * asyncEngineCount instead.
   */
  deviceOverlap: number;
  /** Number of multiprocessors on device */
  multiProcessorCount: number;
  /** Specifies whether there is a runtime limit on kernels */
  kernelExecTimeoutEnabled: number;
  /** Device is integrated as opposed to discrete */
  integrated: number;
  /** Device can map host memory with cudaHostAlloc/cudaHostGetDevicePointer */
  canMapHostMemory: number;
  /** Compute mode (See ::cudaComputeMode) */
  computeMode: number;
  /** Maximum 1D texture size */
  maxTexture1D: number;
  /** Maximum 1D mipmapped texture size */
  maxTexture1DMipmap: number;
  /** Maximum size for 1D textures bound to linear memory */
  maxTexture1DLinear: number;
  /** Maximum 2D texture dimensions */
  maxTexture2D: ReadonlyArray<number>;
  /** Maximum 2D mipmapped texture dimensions */
  maxTexture2DMipmap: ReadonlyArray<number>;
  /** Maximum dimensions (width, height, pitch) for 2D textures bound to pitched memory */
  maxTexture2DLinear: ReadonlyArray<number>;
  /** Maximum 2D texture dimensions if texture gather operations have to be performed */
  maxTexture2DGather: ReadonlyArray<number>;
  /** Maximum 3D texture dimensions */
  maxTexture3D: ReadonlyArray<number>;
  /** Maximum alternate 3D texture dimensions */
  maxTexture3DAlt: ReadonlyArray<number>;
  /** Maximum Cubemap texture dimensions */
  maxTextureCubemap: number;
  /** Maximum 1D layered texture dimensions */
  maxTexture1DLayered: ReadonlyArray<number>;
  /** Maximum 2D layered texture dimensions */
  maxTexture2DLayered: ReadonlyArray<number>;
  /** Maximum Cubemap layered texture dimensions */
  maxTextureCubemapLayered: ReadonlyArray<number>;
  /** Maximum 1D surface size */
  maxSurface1D: number;
  /** Maximum 2D surface dimensions */
  maxSurface2D: ReadonlyArray<number>;
  /** Maximum 3D surface dimensions */
  maxSurface3D: ReadonlyArray<number>;
  /** Maximum 1D layered surface dimensions */
  maxSurface1DLayered: ReadonlyArray<number>;
  /** Maximum 2D layered surface dimensions */
  maxSurface2DLayered: ReadonlyArray<number>;
  /** Maximum Cubemap surface dimensions */
  maxSurfaceCubemap: number;
  /** Maximum Cubemap layered surface dimensions */
  maxSurfaceCubemapLayered: ReadonlyArray<number>;
  /** Alignment requirements for surfaces */
  surfaceAlignment: number;
  /** Device can possibly execute multiple kernels concurrently */
  concurrentKernels: number;
  /** Device has ECC support enabled */
  ECCEnabled: number;
  /** PCI bus ID of the device */
  pciBusID: number;
  /** PCI device ID of the device */
  pciDeviceID: number;
  /** PCI domain ID of the device */
  pciDomainID: number;
  /** 1 if device is a Tesla device using TCC driver, 0 otherwise */
  tccDriver: number;
  /** Number of asynchronous engines */
  asyncEngineCount: number;
  /** Device shares a unified address space with the host */
  unifiedAddressing: number;
  /** Peak memory clock frequency in kilohertz */
  memoryClockRate: number;
  /** Global memory bus width in bits */
  memoryBusWidth: number;
  /** Size of L2 cache in bytes */
  l2CacheSize: number;
  /** Maximum resident threads per multiprocessor */
  maxThreadsPerMultiProcessor: number;
  /** Device supports stream priorities */
  streamPrioritiesSupported: number;
  /** Device supports caching globals in L1 */
  globalL1CacheSupported: number;
  /** Device supports caching locals in L1 */
  localL1CacheSupported: number;
  /** Shared memory available per multiprocessor in bytes */
  sharedMemPerMultiprocessor: number;
  /** 32-bit registers available per multiprocessor */
  regsPerMultiprocessor: number;
  /** Device supports allocating managed memory on this system */
  managedMemory: number;
  /** Device is on a multi-GPU board */
  isMultiGpuBoard: number;
  /** Unique identifier for a group of devices on the same multi-GPU board */
  multiGpuBoardGroupID: number;
  /** Link between the device and the host supports native atomic operations */
  hostNativeAtomicSupported: number;
  /**
   * Ratio of single precision performance (in floating-point operations per second) to double
   * precision performance
   */
  singleToDoublePrecisionPerfRatio: number;
  /** Device supports coherently accessing pageable memory without calling cudaHostRegister on it */
  pageableMemoryAccess: number;
  /** Device can coherently access managed memory concurrently with the CPU */
  concurrentManagedAccess: number;
  /** Device supports Compute Preemption */
  computePreemptionSupported: number;
  /** Device can access host registered memory at the same virtual address as the CPU */
  canUseHostPointerForRegisteredMem: number;
  /** Device supports launching cooperative kernels via ::cudaLaunchCooperativeKernel */
  cooperativeLaunch: number;
  /**
   * Device can participate in cooperative kernels launched via
   * ::cudaLaunchCooperativeKernelMultiDevice
   */
  cooperativeMultiDeviceLaunch: number;
  /** Per device maximum shared memory per block usable by special opt in */
  sharedMemPerBlockOptin: number;
  /** Device accesses pageable memory via the host's page tables */
  pageableMemoryAccessUsesHostPageTables: number;
  /** Host can directly access managed memory on the device without migration. */
  directManagedMemAccessFromHost: number;
}
/**
 * Abstract base for the CUDA memory wrappers below (device, pinned, managed,
 * IPC, and mapped-GL memory).
 * @ignore
 */
export declare class Memory extends ArrayBuffer {
  /** @ignore The address of the underlying native allocation (as a JS number). */
  readonly ptr: number;
  /**
   * @summary The ordinal of the {@link Device device} this Memory instance is associated with.
   */
  readonly device: number;
  /**
   * @summary Copies and returns a region of Memory.
   */
  slice(start?: number, end?: number): Memory;
}
/**
* @summary An owning wrapper around a device memory allocation.
*/
export declare class DeviceMemory extends Memory {
  /** @param byteLength Number of bytes to allocate (presumably 0 when omitted — confirm in addon). */
  constructor(byteLength?: number);
  /** @ignore */
  readonly[Symbol.toStringTag]: 'DeviceMemory';
  /**
   * @summary Copies and returns a region of DeviceMemory.
   */
  slice(start?: number, end?: number): DeviceMemory;
}
/**
 * @summary An owning wrapper around a pinned (page-locked) host memory allocation.
 */
export declare class PinnedMemory extends Memory {
  /** @param byteLength Number of bytes to allocate (presumably 0 when omitted — confirm in addon). */
  constructor(byteLength?: number);
  /** @ignore */
  readonly[Symbol.toStringTag]: 'PinnedMemory';
  /**
   * @summary Copies and returns a region of PinnedMemory.
   */
  slice(start?: number, end?: number): PinnedMemory;
}
/**
 * @summary An owning wrapper around a CUDA-managed, unified memory allocation.
 */
export declare class ManagedMemory extends Memory {
  /** @param byteLength Number of bytes to allocate (presumably 0 when omitted — confirm in addon). */
  constructor(byteLength?: number);
  /** @ignore */
  readonly[Symbol.toStringTag]: 'ManagedMemory';
  /**
   * @summary Copies and returns a region of ManagedMemory.
   */
  slice(start?: number, end?: number): ManagedMemory;
}
/**
* @summary An owning wrapper around a CUDA device memory allocation shared by another process.
*/
export declare class IpcMemory extends Memory {
  /** @param ipcHandle The CUDA IPC handle bytes exported by another process (see {@link IpcHandle}). */
  constructor(ipcHandle: Uint8Array);
  /** @ignore */
  readonly[Symbol.toStringTag]: 'IpcMemory';
  /**
   * @summary Copies a region of IpcMemory and returns as DeviceMemory.
   */
  slice(start?: number, end?: number): DeviceMemory;
  /**
   * @summary Close the underlying IPC memory handle, allowing the exporting process to free the
   * exported {@link DeviceMemory}.
   */
  close(): void;
}
/**
* @summary A container for managing the lifetime of a {@link DeviceMemory} allocation exported for
* reading and/or writing by other processes with access to the allocation's associated {@link
* Device}.
*/
export declare class IpcHandle {
  /** @param deviceMemory The {@link DeviceMemory} allocation to export for IPC. */
  constructor(deviceMemory: DeviceMemory);
  /** @ignore */
  readonly[Symbol.toStringTag]: 'IpcHandle';
  /**
   * @summary The exported {@link DeviceMemory}
   */
  readonly buffer: DeviceMemory;
  /**
   * @summary The device ordinal associated with the exported {@link DeviceMemory}
   */
  readonly device: number;
  /**
   * @summary The CUDA IPC handle to be used to access the exported {@link DeviceMemory} from
   * another process (the raw handle bytes; suitable to pass to the {@link IpcMemory} constructor).
   */
  readonly handle: Uint8Array;
}
/**
* @summary A class representing a region of memory originally created and owned by an OpenGL
* context, but has been mapped into the CUDA address space for reading and/or writing.
*/
export declare class MappedGLMemory extends Memory {
  /** @param resource A registered/mapped CUDA graphics resource handle (see `cudaGraphicsGLRegisterBuffer`). */
  constructor(resource: number);
  /** @ignore */
  readonly[Symbol.toStringTag]: 'MappedGLMemory';
  /**
   * @summary Copies a region of MappedGLMemory and returns a DeviceMemory.
   */
  slice(start?: number, end?: number): DeviceMemory;
}
| 0 |
rapidsai_public_repos/node/modules/cuda
|
rapidsai_public_repos/node/modules/cuda/src/util.ts
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {Memory} from './addon';
/** @ignore */
export function isNumber(x: any): x is number { return typeof x === 'number'; }
/** @ignore */
export function isBigInt(x: any): x is bigint { return typeof x === 'bigint'; }
/** @ignore */
export function isBoolean(x: any): x is boolean { return typeof x === 'boolean'; }
/** @ignore */
// eslint-disable-next-line @typescript-eslint/ban-types
export function isFunction(x: any): x is Function { return typeof x === 'function'; }
/** @ignore */
// eslint-disable-next-line @typescript-eslint/ban-types
export function isObject(x: any): x is Object { return x != null && Object(x) === x; }
/** @ignore A thenable: any object with a callable `then`. */
export function isPromise<T = any>(x: any): x is PromiseLike<T> {
  return isObject(x) && isFunction(x.then);
}
/** @ignore */
export function isIterable<T = any>(x: any): x is Iterable<T> {
  return isObject(x) && isFunction(x[Symbol.iterator]);
}
/** @ignore */
export function isAsyncIterable<T = any>(x: any): x is AsyncIterable<T> {
  return isObject(x) && isFunction(x[Symbol.asyncIterator]);
}
/** @ignore */
export function isArrayLike<T = any>(x: any): x is ArrayLike<T> {
  return isObject(x) && isNumber(x.length);
}
/** @ignore Structural check: any object exposing `ptr` and `byteLength` numbers. */
export function isMemoryLike(x: any): x is Memory {
  return isObject(x) && isNumber(x.ptr) && isNumber(x.byteLength);
}
/** @ignore Matches by constructor name so cross-realm buffers are recognized. */
export function isArrayBufferLike(x: any): x is ArrayBufferLike {
  const ctor = x && x.constructor && x.constructor.name;
  return ctor === 'ArrayBuffer' || ctor === 'SharedArrayBuffer';
}
/** @ignore */
export const isArrayBufferView = (x: any): x is ArrayBufferView => ArrayBuffer.isView(x);
/** @ignore */
export function isIteratorResult<T = any>(x: any): x is IteratorResult<T> {
  return isObject(x) && ('done' in x) && ('value' in x);
}
/**
* @summary Clamp begin and end ranges similar to `Array.prototype.slice`.
* @description Normalizes begin/end to between 0 and length, and wrap around on negative indices.
* @example
* ```typescript
* import {clampRange} from '@rapidsai/cuda';
*
* clampRange(5) // [0, 5]
* clampRange(5, 0, -1) // [0, 4]
* clampRange(5, -1) // [4, 5]
* clampRange(5, -1, 0) // [4, 4]
*
* const ary = Array.from({length: 5}, (_, i) => i);
*
* assert(ary.slice() == ary.slice(...clampRange(ary.length)))
* // > [0, 1, 2, 3, 4]
* assert(ary.slice(0, -1) == ary.slice(...clampRange(ary.length, 0, -1)))
* // > [0, 1, 2, 3]
* assert(ary.slice(-1) == ary.slice(...clampRange(ary.length, -1)))
* // > [4]
* assert(ary.slice(-1, 0) == ary.slice(...clampRange(ary.length, -1, 0)))
* // > []
* ```
*
* @param len The total number of elements.
* @param lhs The beginning of the range to clamp.
* @param rhs The end of the range to clamp (<b>Default:</b> `len`).
* @returns An Array of the normalized begin and end positions.
*/
export function clampRange(len: number, lhs = 0, rhs = len): [begin: number, end: number] {
  // Negative positions wrap around from the end, as in Array.prototype.slice.
  const wrap  = (pos: number) => ((pos % len) + len) % len;
  const begin = lhs < 0 ? wrap(lhs) : lhs;
  const end   = rhs < 0 ? wrap(rhs) : rhs;
  // An inverted range collapses to an empty range at `begin`; each bound is
  // capped at `len` exactly as the original ternary form did.
  if (end < begin) {
    const b = begin > len ? len : begin;
    return [b, b];
  }
  return [begin, end > len ? len : end];
}
| 0 |
rapidsai_public_repos/node/modules/cuda
|
rapidsai_public_repos/node/modules/cuda/src/kernel.cpp
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "node_cuda/utilities/napi_to_cpp.hpp"
#include <cuda_runtime_api.h>
#include <nv_node/macros.hpp>
#include <nv_node/utilities/args.hpp>
namespace nv {
// CUresult cuLaunchKernel(CUfunction f,
// unsigned int gridDimX, unsigned int gridDimY,
// unsigned int gridDimZ, unsigned int blockDimX,
// unsigned int blockDimY, unsigned int blockDimZ,
// unsigned int sharedMemBytes, CUstream hStream,
// void **kernelParams, void ** extra);
// Binding over the CUDA driver API `cuLaunchKernel`.
//   info[0]: CUfunction handle
//   info[1]: grid dimensions [x, y, z]
//   info[2]: block dimensions [x, y, z]
//   info[3]: dynamic shared-memory size in bytes
//   info[4]: CUstream to launch on
//   info[5]: kernel parameters
// Throws a JS error (via NODE_CU_TRY) if the driver call fails.
void cuLaunchKernel(CallbackArgs const& info) {
  auto env                       = info.Env();
  CUfunction func                = info[0];
  std::vector<uint32_t> grid     = info[1];
  std::vector<uint32_t> block    = info[2];
  uint32_t sharedMem             = info[3];
  CUstream stream                = info[4];
  // NOTE(review): `kernelParams` must be an array of pointers to the actual
  // argument values; this passes the `napi_value` handles through directly and
  // relies on the CallbackArgs conversion producing such pointers — confirm.
  std::vector<napi_value> params = info[5];
  NODE_CU_TRY(CUDAAPI::cuLaunchKernel(func,
                                      grid[0],
                                      grid[1],
                                      grid[2],
                                      block[0],
                                      block[1],
                                      block[2],
                                      sharedMem,
                                      stream,
                                      (void**)params.data(),
                                      nullptr),
              env);
}
namespace kernel {
// Registers the kernel-launch binding on the `driver` exports object.
// (`runtime` is unused here; the signature matches the other initModule hooks.)
Napi::Object initModule(Napi::Env const& env,
                        Napi::Object exports,
                        Napi::Object driver,
                        Napi::Object runtime) {
  EXPORT_FUNC(env, driver, "launchKernel", nv::cuLaunchKernel);
  return exports;
}
} // namespace kernel
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cuda
|
rapidsai_public_repos/node/modules/cuda/src/gl.cpp
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "node_cuda/array.hpp"
#include "node_cuda/device.hpp"
#include "node_cuda/memory.hpp"
#include "node_cuda/utilities/cpp_to_napi.hpp"
#include "node_cuda/utilities/napi_to_cpp.hpp"
#include <GL/gl.h>
#include <cuda.h>
#include <cudaGL.h>
#include <cuda_gl_interop.h>
#include <nv_node/macros.hpp>
#include <nv_node/utilities/args.hpp>
namespace nv {
// cudaError_t CUDARTAPI cudaGLGetDevices(unsigned int *pCudaDeviceCount, int *pCudaDevices,
// unsigned int cudaDeviceCount, enum cudaGLDeviceList deviceList)
// Binding over `cudaGLGetDevices`: returns the CUDA device ordinals that
// correspond to the current OpenGL context.
//   info[0]: cudaGLDeviceList selector (all / frame / next-frame rendering).
// Returns a JS array of device ordinals.
Napi::Value cudaGLGetDevices(CallbackArgs const& info) {
  auto env                   = info.Env();
  uint32_t cu_GL_device_list = info[0];
  uint32_t device_count{};
  // BUG FIX: the output vector must be *sized* (not merely reserved) before
  // the call — `cudaGLGetDevices` is given `devices.size()` as the buffer
  // capacity, and with `reserve` the size stayed 0, so the runtime was told
  // there was no room and wrote no device ordinals.
  std::vector<int> devices(Device::get_num_devices());
  NODE_CUDA_TRY(CUDARTAPI::cudaGLGetDevices(&device_count,
                                            devices.data(),
                                            devices.size(),
                                            static_cast<cudaGLDeviceList>(cu_GL_device_list)),
                env);
  // Trim to the number of devices actually reported.
  devices.resize(device_count);
  devices.shrink_to_fit();
  return CPPToNapi(info)(devices);
}
// cudaError_t CUDARTAPI cudaGraphicsGLRegisterBuffer(cudaGraphicsResource_t *resource, GLuint
// buffer, unsigned int flags)
// Registers an OpenGL buffer object for access by CUDA.
//   info[0]: GL buffer id, info[1]: graphics-register flags bitmask.
// Returns the opaque `cudaGraphicsResource_t` handle converted to a JS value.
Napi::Value cudaGraphicsGLRegisterBuffer(CallbackArgs const& info) {
  auto env       = info.Env();
  GLuint buffer  = info[0];
  uint32_t flags = info[1];
  cudaGraphicsResource_t resource;
  NODE_CUDA_TRY(CUDARTAPI::cudaGraphicsGLRegisterBuffer(&resource, buffer, flags), env);
  return CPPToNapi(info)(resource);
}
// cudaError_t CUDARTAPI cudaGraphicsGLRegisterImage(cudaGraphicsResource_t *resource, GLuint image,
// GLenum target, unsigned int flags)
// Registers an OpenGL texture or renderbuffer image for access by CUDA.
//   info[0]: GL image id, info[1]: GL target enum, info[2]: flags bitmask.
// Returns the opaque `cudaGraphicsResource_t` handle converted to a JS value.
Napi::Value cudaGraphicsGLRegisterImage(CallbackArgs const& info) {
  auto env       = info.Env();
  GLuint image   = info[0];
  GLenum target  = info[1];
  uint32_t flags = info[2];
  cudaGraphicsResource_t resource;
  NODE_CUDA_TRY(CUDARTAPI::cudaGraphicsGLRegisterImage(&resource, image, target, flags), env);
  return CPPToNapi(info)(resource);
}
// cudaError_t CUDARTAPI cudaGraphicsUnregisterResource(cudaGraphicsResource_t resource)
// Unregisters a graphics resource previously registered with one of the
// `cudaGraphicsGLRegister*` bindings above. info[0]: resource handle.
void cudaGraphicsUnregisterResource(CallbackArgs const& info) {
  auto env                        = info.Env();
  cudaGraphicsResource_t resource = info[0];
  NODE_CUDA_TRY(CUDARTAPI::cudaGraphicsUnregisterResource(resource), env);
}
// cudaError_t CUDARTAPI cudaGraphicsMapResources(int count, cudaGraphicsResource_t *resources,
// cudaStream_t stream = 0)
// Maps registered graphics resources for access by CUDA.
//   info[0]: array of resource handles, info[1]: stream (optional on the JS side).
// NOTE(review): `resources.size()` (size_t) narrows to the API's `int` count
// parameter — harmless for realistic resource counts.
void cudaGraphicsMapResources(CallbackArgs const& info) {
  auto env                                      = info.Env();
  std::vector<cudaGraphicsResource_t> resources = info[0];
  cudaStream_t stream                           = info[1];
  NODE_CUDA_TRY(CUDARTAPI::cudaGraphicsMapResources(resources.size(), resources.data(), stream),
                env);
}
// cudaError_t CUDARTAPI cudaGraphicsUnmapResources(int count, cudaGraphicsResource_t *resources,
// cudaStream_t stream = 0)
// Unmaps graphics resources previously mapped by `cudaGraphicsMapResources`.
//   info[0]: array of resource handles, info[1]: stream (optional on the JS side).
void cudaGraphicsUnmapResources(CallbackArgs const& info) {
  auto env                                      = info.Env();
  std::vector<cudaGraphicsResource_t> resources = info[0];
  cudaStream_t stream                           = info[1];
  NODE_CUDA_TRY(CUDARTAPI::cudaGraphicsUnmapResources(resources.size(), resources.data(), stream),
                env);
}
// cudaError_t CUDARTAPI cudaGraphicsResourceGetMappedPointer(void **devPtr, size_t *size,
// cudaGraphicsResource_t resource)
// Wraps a mapped graphics resource (info[0]) in a MappedGLMemory JS object;
// the MappedGLMemory constructor performs the underlying
// `cudaGraphicsResourceGetMappedPointer` call — confirm in memory.cpp.
Napi::Value cudaGraphicsResourceGetMappedPointer(CallbackArgs const& info) {
  return MappedGLMemory::New(info.Env(), {info[0]});
}
// cudaError_t CUDARTAPI cudaGraphicsSubResourceGetMappedArray(cudaArray_t *array,
// cudaGraphicsResource_t resource, unsigned int arrayIndex, unsigned int mipLevel)
// Retrieves the CUDA array backing a mapped graphics resource, then queries
// its extent/format so a CUDAArray wrapper can be constructed.
//   info[0]: resource handle, info[1]: arrayIndex, info[2]: mipLevel.
Napi::Value cudaGraphicsSubResourceGetMappedArray(CallbackArgs const& info) {
  auto env                        = info.Env();
  cudaGraphicsResource_t resource = info[0];
  uint32_t arrayIndex             = info[1];
  uint32_t mipLevel               = info[2];
  cudaArray_t array;
  NODE_CUDA_TRY(
    CUDARTAPI::cudaGraphicsSubResourceGetMappedArray(&array, resource, arrayIndex, mipLevel), env);
  // Query the array's extent, channel format, and flags for the JS wrapper.
  uint32_t flags{};
  cudaExtent extent{};
  cudaChannelFormatDesc desc{};
  NODE_CUDA_TRY(CUDARTAPI::cudaArrayGetInfo(&desc, &extent, &flags, array), env);
  return CUDAArray::New(info.Env(), array, extent, desc, flags, array_type::GL);
}
namespace gl {
// Registers the CUDA/OpenGL interop bindings and the GraphicsRegisterFlags
// enum on the `runtime` exports object. (`driver` is unused here.)
Napi::Object initModule(Napi::Env const& env,
                        Napi::Object exports,
                        Napi::Object driver,
                        Napi::Object runtime) {
  EXPORT_FUNC(env, runtime, "cudaGLGetDevices", nv::cudaGLGetDevices);
  EXPORT_FUNC(env, runtime, "cudaGraphicsGLRegisterBuffer", nv::cudaGraphicsGLRegisterBuffer);
  EXPORT_FUNC(env, runtime, "cudaGraphicsGLRegisterImage", nv::cudaGraphicsGLRegisterImage);
  EXPORT_FUNC(env, runtime, "cudaGraphicsUnregisterResource", nv::cudaGraphicsUnregisterResource);
  EXPORT_FUNC(env, runtime, "cudaGraphicsMapResources", nv::cudaGraphicsMapResources);
  EXPORT_FUNC(env, runtime, "cudaGraphicsUnmapResources", nv::cudaGraphicsUnmapResources);
  EXPORT_FUNC(
    env, runtime, "cudaGraphicsResourceGetMappedPointer", nv::cudaGraphicsResourceGetMappedPointer);
  // Note: the JS-facing name omits "SubResource" — it maps the sub-resource
  // variant under the shorter "cudaGraphicsResourceGetMappedArray" name.
  EXPORT_FUNC(
    env, runtime, "cudaGraphicsResourceGetMappedArray", nv::cudaGraphicsSubResourceGetMappedArray);
  auto GraphicsRegisterFlags = Napi::Object::New(env);
  EXPORT_ENUM(env, GraphicsRegisterFlags, "NONE", CU_GRAPHICS_REGISTER_FLAGS_NONE);
  EXPORT_ENUM(env, GraphicsRegisterFlags, "READ_ONLY", CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY);
  EXPORT_ENUM(
    env, GraphicsRegisterFlags, "WRITE_DISCARD", CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD);
  EXPORT_PROP(runtime, "GraphicsRegisterFlags", GraphicsRegisterFlags);
  return exports;
}
} // namespace gl
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cuda
|
rapidsai_public_repos/node/modules/cuda/src/math.cpp
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "node_cuda/math.hpp"
#include "node_cuda/utilities/napi_to_cpp.hpp"
#include <nv_node/macros.hpp>
#include <nv_node/utilities/args.hpp>
namespace nv {
// Thin bindings that forward each JS `Math.*` call to nv::math::dispatch with
// the matching calculator functor; dispatch presumably selects the numeric
// type from the JS arguments — confirm in math.hpp.
Napi::Value math_abs(CallbackArgs const& info) {
  return nv::math::dispatch(nv::math::calc_abs{}, info);
}
Napi::Value math_acos(CallbackArgs const& info) {
  return nv::math::dispatch(nv::math::calc_acos{}, info);
}
Napi::Value math_asin(CallbackArgs const& info) {
  return nv::math::dispatch(nv::math::calc_asin{}, info);
}
Napi::Value math_atan(CallbackArgs const& info) {
  return nv::math::dispatch(nv::math::calc_atan{}, info);
}
Napi::Value math_atan2(CallbackArgs const& info) {
  return nv::math::dispatch(nv::math::calc_atan2{}, info);
}
Napi::Value math_ceil(CallbackArgs const& info) {
  return nv::math::dispatch(nv::math::calc_ceil{}, info);
}
Napi::Value math_cos(CallbackArgs const& info) {
  return nv::math::dispatch(nv::math::calc_cos{}, info);
}
Napi::Value math_exp(CallbackArgs const& info) {
  return nv::math::dispatch(nv::math::calc_exp{}, info);
}
Napi::Value math_floor(CallbackArgs const& info) {
  return nv::math::dispatch(nv::math::calc_floor{}, info);
}
Napi::Value math_log(CallbackArgs const& info) {
  return nv::math::dispatch(nv::math::calc_log{}, info);
}
Napi::Value math_max(CallbackArgs const& info) {
  return nv::math::dispatch(nv::math::calc_max{}, info);
}
Napi::Value math_min(CallbackArgs const& info) {
  return nv::math::dispatch(nv::math::calc_min{}, info);
}
Napi::Value math_pow(CallbackArgs const& info) {
  return nv::math::dispatch(nv::math::calc_pow{}, info);
}
Napi::Value math_round(CallbackArgs const& info) {
  return nv::math::dispatch(nv::math::calc_round{}, info);
}
Napi::Value math_sin(CallbackArgs const& info) {
  return nv::math::dispatch(nv::math::calc_sin{}, info);
}
Napi::Value math_sqrt(CallbackArgs const& info) {
  return nv::math::dispatch(nv::math::calc_sqrt{}, info);
}
Napi::Value math_tan(CallbackArgs const& info) {
  return nv::math::dispatch(nv::math::calc_tan{}, info);
}
namespace math {
// Builds a `Math` namespace object mirroring the JS Math API and attaches it
// to the module exports. (`driver`/`runtime` are unused here.)
Napi::Object initModule(Napi::Env const& env,
                        Napi::Object exports,
                        Napi::Object driver,
                        Napi::Object runtime) {
  auto Math = Napi::Object::New(env);
  EXPORT_FUNC(env, Math, "abs", math_abs);
  EXPORT_FUNC(env, Math, "acos", math_acos);
  EXPORT_FUNC(env, Math, "asin", math_asin);
  EXPORT_FUNC(env, Math, "atan", math_atan);
  EXPORT_FUNC(env, Math, "atan2", math_atan2);
  EXPORT_FUNC(env, Math, "ceil", math_ceil);
  EXPORT_FUNC(env, Math, "cos", math_cos);
  EXPORT_FUNC(env, Math, "exp", math_exp);
  EXPORT_FUNC(env, Math, "floor", math_floor);
  EXPORT_FUNC(env, Math, "log", math_log);
  EXPORT_FUNC(env, Math, "max", math_max);
  EXPORT_FUNC(env, Math, "min", math_min);
  EXPORT_FUNC(env, Math, "pow", math_pow);
  EXPORT_FUNC(env, Math, "round", math_round);
  EXPORT_FUNC(env, Math, "sin", math_sin);
  EXPORT_FUNC(env, Math, "sqrt", math_sqrt);
  EXPORT_FUNC(env, Math, "tan", math_tan);
  exports.Set("Math", Math);
  return exports;
}
} // namespace math
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cuda
|
rapidsai_public_repos/node/modules/cuda/src/program.cpp
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "node_cuda/utilities/cpp_to_napi.hpp"
#include "node_cuda/utilities/napi_to_cpp.hpp"
#include <cuda_runtime_api.h>
#include <nv_node/macros.hpp>
#include <nv_node/utilities/args.hpp>
namespace nv {
// nvrtcCreateProgram(nvrtcProgram *prog,
// const char *src,
// const char *name,
// int numHeaders,
// const char * const *headers,
// const char * const *includeNames)
// Binding over `nvrtcCreateProgram`.
//   info[0]: CUDA C++ source, info[1]: program name,
//   info[2]: header sources, info[3]: header include-names.
// Returns the opaque `nvrtcProgram` handle converted to a JS value.
Napi::Value createProgram(CallbackArgs const& info) {
  auto env                          = info.Env();
  std::string src                   = info[0];
  std::string name                  = info[1];
  std::vector<std::string> headers  = info[2];
  std::vector<std::string> includes = info[3];
  // Build parallel arrays of C-string views over the owning std::strings.
  std::vector<const char*> cHeaders(headers.size());
  std::vector<const char*> cIncludes(includes.size());
  auto get_cstr = [](const std::string& str) { return str.c_str(); };
  std::transform(headers.begin(), headers.end(), cHeaders.begin(), get_cstr);
  std::transform(includes.begin(), includes.end(), cIncludes.begin(), get_cstr);
  nvrtcProgram prog;
  NODE_NVRTC_TRY(
    nvrtcCreateProgram(
      &prog, src.c_str(), name.c_str(), headers.size(), cHeaders.data(), cIncludes.data()),
    env);
  // BUG FIX: the previous code `delete`d each `const char*` in cHeaders and
  // cIncludes. Those are interior pointers returned by std::string::c_str(),
  // owned (and freed) by the `headers`/`includes` vectors — deleting them is
  // undefined behavior (a double-free). No manual cleanup is needed; the
  // vectors release everything when they go out of scope.
  return CPPToNapi(info)(reinterpret_cast<void*>(prog));
}
namespace program {
// Registers the NVRTC program bindings on `exports`.
// `driver` and `runtime` are accepted for signature parity with the other
// initModule functions in this addon but are not used here.
Napi::Object initModule(Napi::Env const& env,
                        Napi::Object exports,
                        Napi::Object driver,
                        Napi::Object runtime) {
  // Exposed to JS as `create(src, name, headers, includeNames)`.
  EXPORT_FUNC(env, exports, "create", nv::createProgram);
  return exports;
}
}  // namespace program
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cuda
|
rapidsai_public_repos/node/modules/cuda/src/interfaces.ts
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/** @ignore Union of the floating-point JS TypedArray types. */
export type FloatArray = Float32Array|Float64Array;
/** @ignore Union of the signed-integer JS TypedArray types. */
export type IntArray = Int8Array|Int16Array|Int32Array;
/** @ignore Union of the unsigned-integer JS TypedArray types. */
export type UintArray = Uint8Array|Uint16Array|Uint32Array|Uint8ClampedArray;
/** @ignore Union of the 64-bit BigInt JS TypedArray types. */
export type BigIntArray = BigInt64Array|BigUint64Array;
/** @ignore All non-BigInt TypedArray types (BigInt arrays are kept separate). */
export type TypedArray = FloatArray|IntArray|UintArray;
/**
 * @ignore
 * Structural type describing a TypedArray constructor (e.g. `Float32Array`):
 * its element size, the three standard constructor overloads, and `from`.
 */
export type TypedArrayConstructor<T extends TypedArray|BigIntArray> = {
  readonly BYTES_PER_ELEMENT: number; new (length?: number): T; new (values: Iterable<T[0]>): T;
  new (buffer: ArrayBufferLike, byteOffset?: number, length?: number): T;
  from(arrayLike: Iterable<T[0]>|ArrayLike<T[0]>): T;
  from(
    arrayLike: Iterable<T[0]>|ArrayLike<T[0]>, mapfn: (v: T[0], k: number) => T[0], thisArg?: any):
    T;
};
/**
 * @ignore
 * Anything that can act as a source/target of CUDA memory operations: host
 * typed arrays and ArrayBuffers, plus the addon's device memory wrappers.
 */
export type MemoryData = TypedArray|BigIntArray|ArrayBufferView|ArrayBufferLike  //
  |(import('./addon').DeviceMemory)                                              //
  |(import('./addon').PinnedMemory)                                              //
  |(import('./addon').ManagedMemory)                                             //
  |(import('./addon').IpcMemory)                                                 //
  |(import('./addon').MappedGLMemory);
| 0 |
rapidsai_public_repos/node/modules/cuda
|
rapidsai_public_repos/node/modules/cuda/src/addon.cpp
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "node_cuda/addon.hpp"
#include "node_cuda/array.hpp"
#include "node_cuda/device.hpp"
#include "node_cuda/memory.hpp"
#include "node_cuda/utilities/cpp_to_napi.hpp"
#include "node_cuda/utilities/napi_to_cpp.hpp"
#include <nv_node/addon.hpp>
#include <nv_node/macros.hpp>
#include <nv_node/utilities/args.hpp>
#include <nppi.h>
// Addon entry point: wires up every node-cuda submodule and exposes the
// driver/runtime namespaces plus a handful of NPP image helpers.
struct rapidsai_cuda : public nv::EnvLocalAddon, public Napi::Addon<rapidsai_cuda> {
  rapidsai_cuda(Napi::Env const& env, Napi::Object exports) : EnvLocalAddon(env, exports) {
    _driver  = Napi::Persistent(Napi::Object::New(env));
    _runtime = Napi::Persistent(Napi::Object::New(env));
    // Deferred initialization callback: initializes the CUDA driver, then
    // activates and synchronizes the current device if one is available.
    // NOTE(review): presumably invoked by EnvLocalAddon::InitAddon (exported
    // as "init" below) — confirm in the EnvLocalAddon base class.
    _after_init = Napi::Persistent(Napi::Function::New(env, [](Napi::CallbackInfo const& info) {
      auto env = info.Env();
      NODE_CU_TRY(cuInit(0), env);
      auto device = std::max(nv::Device::active_device_id(), 0);
      if (device < nv::Device::get_num_devices()) {
        NODE_CUDA_TRY(cudaSetDevice(device), env);
        NODE_CUDA_TRY(cudaDeviceSynchronize(), env);
      }
    }));
    // Each submodule registers its exports; driver/runtime collect the raw
    // CUDA driver/runtime function bindings.
    nv::gl::initModule(env, exports, _driver.Value(), _runtime.Value());
    nv::kernel::initModule(env, exports, _driver.Value(), _runtime.Value());
    nv::math::initModule(env, exports, _driver.Value(), _runtime.Value());
    nv::program::initModule(env, exports, _driver.Value(), _runtime.Value());
    nv::stream::initModule(env, exports, _driver.Value(), _runtime.Value());
    // nv::texture::initModule(env, exports, _driver.Value(), _runtime.Value());
    nv::memory::initModule(env, exports, _driver.Value(), _runtime.Value());
    DefineAddon(exports,
                {
                  InstanceMethod("init", &rapidsai_cuda::InitAddon),
                  InstanceValue("_cpp_exports", _cpp_exports.Value()),
                  InstanceValue("driver", _driver.Value()),
                  InstanceValue("runtime", _runtime.Value()),
                  InstanceValue("VERSION", Napi::Number::New(env, CUDA_VERSION)),
                  InstanceValue("IPC_HANDLE_SIZE", Napi::Number::New(env, CU_IPC_HANDLE_SIZE)),
                  InstanceMethod<&rapidsai_cuda::get_driver_version>("getDriverVersion"),
                  InstanceMethod<&rapidsai_cuda::rgba_mirror>("rgbaMirror"),
                  InstanceMethod<&rapidsai_cuda::bgra_to_ycrcb420>("bgraToYCrCb420"),
                  InstanceValue("Device", InitClass<nv::Device>(env, exports)),
                  InstanceValue("PinnedMemory", InitClass<nv::PinnedMemory>(env, exports)),
                  InstanceValue("DeviceMemory", InitClass<nv::DeviceMemory>(env, exports)),
                  InstanceValue("ManagedMemory", InitClass<nv::ManagedMemory>(env, exports)),
                  InstanceValue("IpcMemory", InitClass<nv::IpcMemory>(env, exports)),
                  InstanceValue("IpcHandle", InitClass<nv::IpcHandle>(env, exports)),
                  InstanceValue("MappedGLMemory", InitClass<nv::MappedGLMemory>(env, exports)),
                  InstanceValue("CUDAArray", InitClass<nv::CUDAArray>(env, exports)),
                });
  }

 private:
  Napi::ObjectReference _driver;
  Napi::ObjectReference _runtime;

  // Returns the CUDA driver version reported by cuDriverGetVersion.
  Napi::Value get_driver_version(Napi::CallbackInfo const& info) {
    int driverVersion;
    auto env = info.Env();
    NODE_CU_TRY(cuDriverGetVersion(&driverVersion), env);
    return Napi::Number::New(env, driverVersion);
  }

  // Mirrors a 4-channel 8-bit image across the given NppiAxis.
  // With 4 arguments the flip is done in place on `src`; with 5 the result is
  // written to `dst`. The row step is width * 4 bytes, i.e. rows are assumed
  // tightly packed — NOTE(review): confirm callers never pass padded images.
  // NOTE(review): the NppStatus return values are ignored, so NPP failures go
  // unreported to JS.
  Napi::Value rgba_mirror(Napi::CallbackInfo const& info) {
    nv::CallbackArgs args{info};
    int32_t width        = args[0];
    int32_t height       = args[1];
    NppiAxis flip        = static_cast<NppiAxis>(args[2].operator uint32_t());
    nv::Span<uint8_t> src = args[3];
    NppiSize roi         = {width, height};
    if (info.Length() == 4) {
      nppiMirror_8u_C4IR(src.data(), width * 4, roi, flip);
    } else if (info.Length() == 5) {
      nv::Span<uint8_t> dst = args[4];
      nppiMirror_8u_C4R(src.data(), width * 4, dst.data(), width * 4, roi, flip);
    }
    return info.Env().Undefined();
  }

  // Converts a packed BGRA image to planar YCrCb 4:2:0.
  // Plane layout in `dst`: Y (width*height bytes), Cr at offset width*height,
  // Cb at offset width*height*5/4; line steps are {width, width/2, width/2}.
  // Assumes `dst` holds at least width*height*3/2 bytes — TODO confirm at the
  // call sites. NOTE(review): the NppStatus return value is ignored.
  Napi::Value bgra_to_ycrcb420(Napi::CallbackInfo const& info) {
    nv::CallbackArgs args{info};
    nv::Span<uint8_t> dst = args[0];
    nv::Span<uint8_t> src = args[1];
    int32_t width         = args[2];
    int32_t height        = args[3];
    NppiSize roi          = {width, height};
    Npp8u* dstBuff[3]     = {
      dst.data(), dst.data() + width * height, dst.data() + width * height * 5 / 4};
    int dstSteps[3] = {width, width / 2, width / 2};
    nppiBGRToYCrCb420_8u_AC4P3R(src.data(), width * 4, dstBuff, dstSteps, roi);
    return info.Env().Undefined();
  }
};

NODE_API_ADDON(rapidsai_cuda);
| 0 |
rapidsai_public_repos/node/modules/cuda
|
rapidsai_public_repos/node/modules/cuda/src/device.cpp
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "node_cuda/device.hpp"
#include "node_cuda/utilities/cpp_to_napi.hpp"
#include "node_cuda/utilities/napi_to_cpp.hpp"
#include <nv_node/macros.hpp>
#include <nv_node/utilities/args.hpp>
namespace nv {

// JS static accessor `Device.numDevices`: total number of CUDA devices.
Napi::Value Device::get_num_devices(Napi::CallbackInfo const& info) {
  return CPPToNapi(info)(Device::get_num_devices());
}

// JS static accessor `Device.activeDeviceId`: this thread's active device id.
Napi::Value Device::active_device_id(Napi::CallbackInfo const& info) {
  return CPPToNapi(info)(Device::active_device_id());
}

// Registers the `DeviceFlags` enum and the `Device` class on `exports`.
Napi::Function Device::Init(Napi::Env const& env, Napi::Object exports) {
  auto DeviceFlags = Napi::Object::New(env);
  EXPORT_ENUM(env, DeviceFlags, "scheduleAuto", cudaDeviceScheduleAuto);
  EXPORT_ENUM(env, DeviceFlags, "scheduleSpin", cudaDeviceScheduleSpin);
  EXPORT_ENUM(env, DeviceFlags, "scheduleYield", cudaDeviceScheduleYield);
  EXPORT_ENUM(env, DeviceFlags, "scheduleBlockingSync", cudaDeviceScheduleBlockingSync);
  EXPORT_ENUM(env, DeviceFlags, "mapHost", cudaDeviceMapHost);
  EXPORT_ENUM(env, DeviceFlags, "lmemResizeToMax", cudaDeviceLmemResizeToMax);
  exports.Set("DeviceFlags", DeviceFlags);
  return DefineClass(
    env,
    "Device",
    {
      StaticAccessor("numDevices", &Device::get_num_devices, nullptr, napi_enumerable),
      StaticAccessor("activeDeviceId", &Device::active_device_id, nullptr, napi_enumerable),
      InstanceAccessor("id", &Device::id, nullptr, napi_enumerable),
      InstanceAccessor("pciBusName", &Device::pci_bus_name, nullptr, napi_enumerable),
      InstanceMethod("reset", &Device::reset),
      InstanceMethod("activate", &Device::activate),
      InstanceMethod("getFlags", &Device::get_flags),
      InstanceMethod("setFlags", &Device::set_flags),
      InstanceMethod("getProperties", &Device::get_properties),
      InstanceMethod("synchronize", &Device::synchronize),
      InstanceMethod("canAccessPeerDevice", &Device::can_access_peer_device),
      InstanceMethod("enablePeerAccess", &Device::enable_peer_access),
      InstanceMethod("disablePeerAccess", &Device::disable_peer_access),
      InstanceMethod("callInContext", &Device::call_in_device_context),
    });
}

// JS constructor: `new Device()`, `new Device(id)`, or `new Device(id, flags)`.
// Rejects non-`new` calls and non-numeric arguments.
Device::Device(CallbackArgs const& args) : EnvLocalObjectWrap<Device>(args) {
  NODE_CUDA_EXPECT(args.IsConstructCall(), "Device constructor requires 'new'", args.Env());
  switch (args.Length()) {
    case 0: Initialize(args.Env()); break;
    case 1:
      NODE_CUDA_EXPECT(
        args[0].IsNumber(), "Device constructor requires a numeric deviceId argument", args.Env());
      Initialize(args.Env(), args[0]);
      break;
    case 2:
      NODE_CUDA_EXPECT(
        args[0].IsNumber(), "Device constructor requires a numeric deviceId argument", args.Env());
      NODE_CUDA_EXPECT(args[1].IsNumber(),
                       "Device constructor requires a numeric CUDADeviceFlags argument",
                       args.Env());
      Initialize(args.Env(), args[0], args[1]);
      break;
    default:
      NODE_CUDA_EXPECT(false,
                       "Device constructor requires a numeric deviceId argument, and an optional "
                       "numeric CUDADeviceFlags argument",
                       args.Env());
      break;
  }
}

// C++-side factory for a wrapped Device instance.
Device::wrapper_t Device::New(Napi::Env const& env, int32_t id, uint32_t flags) {
  return EnvLocalObjectWrap<Device>::New(env, id, flags);
}

// Caches the device id, properties, and PCI bus name, then applies `flags`.
void Device::Initialize(Napi::Env const& env, int32_t id, uint32_t flags) {
  id_ = id;
  char bus_id[256];
  NODE_CUDA_TRY(cudaGetDeviceProperties(&props_, id_), env);
  NODE_CUDA_TRY(cudaDeviceGetPCIBusId(bus_id, 256, id_), env);
  pci_bus_name_ = std::string{bus_id};
  this->set_flags(env, flags);
}

// JS accessor `device.id`.
Napi::Value Device::id(Napi::CallbackInfo const& info) { return CPPToNapi(info)(id()); }

// JS accessor `device.pciBusName`.
Napi::Value Device::pci_bus_name(Napi::CallbackInfo const& info) {
  return CPPToNapi(info)(pci_bus_name());
}

// Resets this device (cudaDeviceReset) with it temporarily made active.
Device& Device::reset() {
  call_in_context([&]() { NODE_CUDA_TRY(cudaDeviceReset(), Env()); });
  return *this;
}

// JS method `device.reset()`; returns `this` for chaining.
Napi::Value Device::reset(Napi::CallbackInfo const& info) {
  reset();
  return info.This();
}

// Reads this device's flags. The cudaDeviceMapHost bit is masked out of the
// reported value — NOTE(review): presumably so results stay comparable with
// values passed to setFlags; confirm the intent.
uint32_t Device::get_flags() {
  uint32_t flags;
  call_in_context([&]() { NODE_CUDA_TRY(cudaGetDeviceFlags(&flags), Env()); });
  return flags & ~cudaDeviceMapHost;
}

// JS method `device.getFlags()`.
Napi::Value Device::get_flags(Napi::CallbackInfo const& info) {
  return CPPToNapi(info)(get_flags());
}

// Sets this device's flags, skipping the CUDA call when already equal
// (comparison uses the masked value from get_flags above).
void Device::set_flags(Napi::Env const& env, uint32_t new_flags) {
  call_in_context(env, id(), [&]() {
    if (get_flags() != new_flags) { NODE_CUDA_TRY(cudaSetDeviceFlags(new_flags), Env()); }
  });
}

// JS method `device.setFlags(flags)`; returns `this` for chaining.
Napi::Value Device::set_flags(Napi::CallbackInfo const& info) {
  uint32_t flags = CallbackArgs{info}[0];
  set_flags(info.Env(), flags);
  return info.This();
}

// JS method `device.getProperties()`: converts the cached cudaDeviceProp.
Napi::Value Device::get_properties(Napi::CallbackInfo const& info) {
  return CPPToNapi(info)(props_);
}

// Makes this device the calling thread's active device (no-op if already).
Device& Device::activate() {
  if (active_device_id() != id()) { NODE_CUDA_TRY(cudaSetDevice(id()), Env()); }
  return *this;
}

// JS method `device.activate()`; returns `this` for chaining.
Napi::Value Device::activate(Napi::CallbackInfo const& info) {
  activate();
  return info.This();
}

// Blocks until all work on this device has completed.
Device& Device::synchronize(Napi::Env const& env) {
  call_in_context(env, id(), [&]() { NODE_CUDA_TRY(cudaDeviceSynchronize(), Env()); });
  return *this;
}

// JS method `device.synchronize()`; returns `this` for chaining.
Napi::Value Device::synchronize(Napi::CallbackInfo const& info) {
  synchronize(info.Env());
  return info.This();
}

// True when this device can directly access `peer`'s memory.
// NOTE(review): the `env` parameter is unused; errors are reported against
// this->Env() instead — confirm whether that is intentional.
bool Device::can_access_peer_device(Napi::Env const& env, Device const& peer) const {
  int32_t can_access_peer{0};
  NODE_CUDA_TRY(cudaDeviceCanAccessPeer(&can_access_peer, id(), peer.id()), Env());
  return can_access_peer != 0;
}

// JS method `device.canAccessPeerDevice(peer)`.
Napi::Value Device::can_access_peer_device(Napi::CallbackInfo const& info) {
  Device const& peer = CallbackArgs{info}[0];
  return CPPToNapi(info)(can_access_peer_device(info.Env(), peer));
}

// Enables direct access from this device to `peer`'s memory.
Device& Device::enable_peer_access(Napi::Env const& env, Device const& peer) {
  call_in_context(
    env, id(), [&]() { NODE_CUDA_TRY(cudaDeviceEnablePeerAccess(peer.id(), 0), Env()); });
  return *this;
}

// JS method `device.enablePeerAccess(peer)`; returns `this` for chaining.
Napi::Value Device::enable_peer_access(Napi::CallbackInfo const& info) {
  Device const& peer = CallbackArgs{info}[0];
  enable_peer_access(info.Env(), peer);
  return info.This();
}

// Disables direct access from this device to `peer`'s memory.
Device& Device::disable_peer_access(Napi::Env const& env, Device const& peer) {
  call_in_context(
    env, id(), [&]() { NODE_CUDA_TRY(cudaDeviceDisablePeerAccess(peer.id()), Env()); });
  return *this;
}

// JS method `device.disablePeerAccess(peer)`; returns `this` for chaining.
Napi::Value Device::disable_peer_access(Napi::CallbackInfo const& info) {
  Device const& peer = CallbackArgs{info}[0];
  disable_peer_access(info.Env(), peer);
  return info.This();
}

// JS method `device.callInContext(fn)`: invokes `fn` with this device active.
// Silently does nothing unless exactly one function argument is supplied.
Napi::Value Device::call_in_device_context(Napi::CallbackInfo const& info) {
  if (info.Length() == 1 and info[0].IsFunction()) {
    auto callback = info[0].As<Napi::Function>();
    call_in_context(info.Env(), id(), [&] { callback({}); });
  }
  return info.This();
}

}  // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cuda
|
rapidsai_public_repos/node/modules/cuda/src/array.cpp
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <node_cuda/array.hpp>
#include <node_cuda/utilities/cpp_to_napi.hpp>
#include <node_cuda/utilities/napi_to_cpp.hpp>
#include <nv_node/macros.hpp>
#include <nv_node/utilities/args.hpp>
#include <cuda_runtime_api.h>
namespace nv {
// Registers the `ChannelFormatKind` enum and the `CUDAArray` class on
// `exports`, and returns the class constructor.
Napi::Function CUDAArray::Init(Napi::Env const& env, Napi::Object exports) {
  auto ChannelFormatKind = Napi::Object::New(env);
  EXPORT_ENUM(env, ChannelFormatKind, "SIGNED", cudaChannelFormatKindSigned);
  EXPORT_ENUM(env, ChannelFormatKind, "UNSIGNED", cudaChannelFormatKindUnsigned);
  EXPORT_ENUM(env, ChannelFormatKind, "FLOAT", cudaChannelFormatKindFloat);
  EXPORT_ENUM(env, ChannelFormatKind, "NONE", cudaChannelFormatKindNone);
  // NOTE(fix): removed the "lmemResizeToMax" entry that exported
  // cudaDeviceLmemResizeToMax here. That constant is a cudaDeviceFlags value
  // (see DeviceFlags in device.cpp), not a cudaChannelFormatKind — it was a
  // copy-paste mistake and did not belong in this enum.
  exports.Set("ChannelFormatKind", ChannelFormatKind);
  return DefineClass(env,
                     "CUDAArray",
                     {
                       InstanceAccessor<&CUDAArray::GetChannelFormatX>("channelFormatX"),
                       InstanceAccessor<&CUDAArray::GetChannelFormatY>("channelFormatY"),
                       InstanceAccessor<&CUDAArray::GetChannelFormatZ>("channelFormatZ"),
                       InstanceAccessor<&CUDAArray::GetChannelFormatW>("channelFormatW"),
                       InstanceAccessor<&CUDAArray::GetChannelFormatKind>("channelFormatKind"),
                       InstanceAccessor<&CUDAArray::GetWidth>("width"),
                       InstanceAccessor<&CUDAArray::GetHeight>("height"),
                       InstanceAccessor<&CUDAArray::GetDepth>("depth"),
                       InstanceAccessor<&CUDAArray::GetBytesPerElement>("bytesPerElement"),
                       InstanceAccessor<&CUDAArray::GetByteLength>("byteLength"),
                       InstanceAccessor<&CUDAArray::GetPointer>("ary"),
                     });
}
// JS constructor: expects [array handle, extent, channelFormatDesc, flags,
// type], as assembled by CUDAArray::New below.
CUDAArray::CUDAArray(CallbackArgs const& args) : EnvLocalObjectWrap<CUDAArray>(args) {
  array_             = args[0];
  extent_            = args[1];
  channelFormatDesc_ = args[2];
  flags_             = args[3];
  type_              = args[4];
}

// C++-side factory: converts the raw CUDA structs into the JS argument shapes
// the constructor above expects (plain objects for extent/channel desc).
CUDAArray::wrapper_t CUDAArray::New(Napi::Env const& env,
                                    cudaArray_t const& array,
                                    cudaExtent const& extent,
                                    cudaChannelFormatDesc const& channelFormatDesc,
                                    uint32_t flags,
                                    array_type type) {
  return EnvLocalObjectWrap<CUDAArray>::New(
    env,
    {
      // Opaque array handle passed as an External (not converted to JS).
      Napi::External<cudaArray_t>::New(env, const_cast<cudaArray_t*>(&array)),
      [&]() {
        auto obj = Napi::Object::New(env);
        obj.Set("width", extent.width);
        obj.Set("height", extent.height);
        obj.Set("depth", extent.depth);
        return obj;
      }(),
      [&]() {
        auto obj = Napi::Object::New(env);
        obj.Set("x", channelFormatDesc.x);
        obj.Set("y", channelFormatDesc.y);
        obj.Set("z", channelFormatDesc.z);
        obj.Set("w", channelFormatDesc.w);
        obj.Set("f", static_cast<uint8_t>(channelFormatDesc.f));
        return obj;
      }(),
      Napi::Number::New(env, flags),
      Napi::Number::New(env, static_cast<uint8_t>(type)),
    });
}

// JS accessor `bytesPerElement`.
Napi::Value CUDAArray::GetBytesPerElement(Napi::CallbackInfo const& info) {
  return Napi::Number::New(info.Env(), BytesPerElement());
}

// JS accessor `byteLength`: element size times the full 3D extent.
Napi::Value CUDAArray::GetByteLength(Napi::CallbackInfo const& info) {
  return Napi::Number::New(info.Env(), BytesPerElement() * Width() * Height() * Depth());
}

// JS accessor `ary`: the raw cudaArray_t handle exposed as a number.
// NOTE(review): the pointer is round-tripped through an int64 JS number;
// values above 2^53 would lose precision — confirm acceptable.
Napi::Value CUDAArray::GetPointer(Napi::CallbackInfo const& info) {
  return Napi::Number::New(info.Env(), reinterpret_cast<int64_t>(Array()));
}

// JS accessors for the channel format descriptor's x/y/z/w bit widths and
// format kind.
Napi::Value CUDAArray::GetChannelFormatX(Napi::CallbackInfo const& info) {
  return Napi::Number::New(info.Env(), ChannelFormatDesc().x);
}
Napi::Value CUDAArray::GetChannelFormatY(Napi::CallbackInfo const& info) {
  return Napi::Number::New(info.Env(), ChannelFormatDesc().y);
}
Napi::Value CUDAArray::GetChannelFormatZ(Napi::CallbackInfo const& info) {
  return Napi::Number::New(info.Env(), ChannelFormatDesc().z);
}
Napi::Value CUDAArray::GetChannelFormatW(Napi::CallbackInfo const& info) {
  return Napi::Number::New(info.Env(), ChannelFormatDesc().w);
}
Napi::Value CUDAArray::GetChannelFormatKind(Napi::CallbackInfo const& info) {
  return Napi::Number::New(info.Env(), ChannelFormatDesc().f);
}

// JS accessors for the array's 3D extent.
Napi::Value CUDAArray::GetWidth(Napi::CallbackInfo const& info) {
  return Napi::Number::New(info.Env(), Extent().width);
}
Napi::Value CUDAArray::GetHeight(Napi::CallbackInfo const& info) {
  return Napi::Number::New(info.Env(), Extent().height);
}
Napi::Value CUDAArray::GetDepth(Napi::CallbackInfo const& info) {
  return Napi::Number::New(info.Env(), Extent().depth);
}
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cuda
|
rapidsai_public_repos/node/modules/cuda/src/device.ts
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {Device as CUDADevice, DeviceFlags, DeviceProperties} from './addon';
export {DeviceFlags};
/**
 * @summary JS-side wrapper over the native CUDA Device binding, adding
 * property caching and friendlier accessors.
 */
export class Device extends CUDADevice {
  /**
   * The number of compute-capable CUDA devices.
   */
  public static get numDevices() { return CUDADevice.numDevices; }
  /**
   * The id of this thread's active CUDA device.
   */
  public static get activeDeviceId() { return CUDADevice.activeDeviceId; }
  /**
   * The human-readable name of this CUDA Device
   */
  public get name(): string { return this.getProperties().name; }
  /**
   * The PCI Bus identifier of this CUDA Device
   */
  public get pciBusId(): number { return this.getProperties().pciBusID; }
  // Cache for the native getProperties() result (populated lazily below).
  // @ts-ignore
  protected _properties: DeviceProperties;
  /**
   * @summary An object with information about the device.
   * The native call is made at most once; subsequent calls return the cache.
   */
  public getProperties() { return this._properties || (this._properties = super.getProperties()); }
  /** @ignore */
  public get[Symbol.toStringTag]() { return 'CUDADevice'; }
  /** @ignore */
  public[Symbol.for('nodejs.util.inspect.custom')]() { return this.toString(); }
  /** @ignore Renders as `CUDADevice {"id":...,"name":...,"compute_capability":[maj,min]}`. */
  public toString() {
    const {name, major, minor} = this.getProperties();
    return `${this[Symbol.toStringTag]} ${
      JSON.stringify({'id': this.id, 'name': name, 'compute_capability': [major, minor]})}`;
  }
}
// Array-like, iterable collection of Devices (backing type of the `devices`
// proxy exported below).
interface DeviceList extends Iterable<Device> {
  length: number;
  [key: number]: Device;
}
/**
* @summary A lazily-evaluated list of available CUDA devices.
* <br/><br/>
* This list has a `length` property, and each available active Device can be accessed by device
* ordinal (via Array-style subscript-accesses).
* <br/><br/>
* This list implements the Iterable<Device> protocol, meaning it can be enumerated in a `for..of`
* loop, or with the `[...]` iterable expansion syntax.
*
* @note While this list may seem like an Array, it is a JavaScript Proxy that only creates and
* returns a Device instance for a given device ordinal the first time it's accessed.
* @note Enumerating the `devices` list (i.e. `[...devices]`) will create and cache Device instances
* for all CUDA devices available to the current process.
*
* @example
* ```typescript
* import {Device, devices} from '@rapidsai/cuda';
*
* console.log(`Number of devices: ${devices.length}`);
*
* // CUDA Device 0 is automatically activated by default
* console.log(`Active device id: ${Device.activeDeviceId}`); // 0
*
* // Access (and create) Devices 0,1
* const [device0, device1] = devices;
*
* console.log(device0);
* // > CUDADevice {"id":0,"name":"Quadro RTX 8000","compute_capability":[7,5]}
*
* console.log(device0.pciBusName);
* // > '0000:15:00.0'
*
* console.log(device0.canAccessPeerDevice(device1));
* // > true
*
* console.log(device0.getProperties());
* // > {
* // > name: 'Quadro RTX 8000',
* // > totalGlobalMem: 50944540672,
* // > sharedMemPerBlock: 49152,
* // > regsPerBlock: 65536,
* // > warpSize: 32,
* // > memPitch: 2147483647,
* // > maxThreadsPerBlock: 1024,
* // > ...
* // > }
*
* // Device 0 remains the active device until `device1` is made active
* console.log(`Active device id: ${Device.activeDeviceId}`);
* // > 0
*
* device1.activate();
* console.log(`Active device id: ${Device.activeDeviceId}`);
* // > 1
*
* // Set Device 0 to the active device again
* device0.activate();
* console.log(`Active device id: ${Device.activeDeviceId}`);
* // > 0
* ```
*/
export const devices = new Proxy<DeviceList>(
  {
    length: Device.numDevices,
    // Yields each device by indexing into `this`; the `get` trap below
    // lazily creates (and caches) the Device for each ordinal.
    * [Symbol.iterator]() {
      for (let i = -1, n = this.length; ++i < n;) { yield this[i]; }
    }
  },
  {
    // The list is read-only: entries cannot be added, replaced, or removed.
    isExtensible() { return false;},
    set() { throw new Error('Invalid operation');},
    defineProperty() { throw new Error('Invalid operation');},
    deleteProperty() { throw new Error('Invalid operation');},
    has(target, key) {  //
      // Numeric keys are "in" the list iff they are valid device ordinals;
      // non-numeric keys (idx is NaN, so idx !== idx) defer to the target.
      const idx = typeof key !== 'symbol' ? +(key as any) : NaN;
      return (idx !== idx) ? key in target : idx > -1 && idx < Device.numDevices;
    },
    get(target, key) {
      const idx = typeof key !== 'symbol' ? +(key as any) : NaN;
      // Valid ordinal: return the cached Device or construct it on first use.
      if (idx == idx && idx > -1 && idx < Device.numDevices) {
        return target[idx] ? target[idx] : (target[idx] = new Device(idx));
      }
      return target[key as any];
    },
  });
| 0 |
rapidsai_public_repos/node/modules/cuda
|
rapidsai_public_repos/node/modules/cuda/src/memory.ts
|
// Copyright (c) 2020-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {
DeviceMemory,
IpcHandle as CUDAIpcHandle,
IpcMemory,
ManagedMemory,
MappedGLMemory,
Memory,
PinnedMemory,
} from './addon';
export {DeviceMemory, IpcMemory, ManagedMemory, MappedGLMemory, Memory, PinnedMemory};
/**
* @summary A container for managing the lifetime of a {@link DeviceMemory} allocation exported for
* reading and/or writing by other processes with access to the allocation's associated {@link
* Device}.
*/
/**
 * @summary A container for managing the lifetime of a {@link DeviceMemory} allocation exported for
 * reading and/or writing by other processes with access to the allocation's associated {@link
 * Device}.
 */
export class IpcHandle extends CUDAIpcHandle {
  /**
   * @summary The byte offset (if applicable) into the exported {@link DeviceMemory}
   */
  public readonly byteOffset: number;

  constructor(deviceMemory: DeviceMemory, byteOffset = 0) {
    super(deviceMemory);
    this.byteOffset = byteOffset;
  }

  /** @ignore */
  public[Symbol.for('nodejs.util.inspect.custom')]() {
    return `${this[Symbol.toStringTag]} ${this.toString()}`;
  }

  /**
   * @summary JSON-stringified details describing the exported {@link DeviceMemory} and CUDA IPC
   * handle.
   * @returns The result of calling `JSON.stringify(this.toJSON())`
   */
  public toString() { return JSON.stringify(this.toJSON()); }

  /**
   * @summary An object describing the exported {@link DeviceMemory} and CUDA IPC handle.
   * @returns An object with the device ordinal, the 64-bit IPC handle (as a JavaScript Array of
   *   octets), byte offset (if applicable) into the exported {@link DeviceMemory}, and byte
   *   length of the IPC segment.
   */
  public toJSON() {
    const {device, handle, byteOffset} = this;
    return {
      device,
      handle: [...handle],
      byteOffset,
      byteLength: this.buffer.byteLength - byteOffset,
    };
  }
}
| 0 |
rapidsai_public_repos/node/modules/cuda
|
rapidsai_public_repos/node/modules/cuda/src/buffer.ts
|
// Copyright (c) 2020-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {Math, runtime} from './addon';
import {BigIntArray, MemoryData, TypedArray, TypedArrayConstructor} from './interfaces';
import {DeviceMemory, IpcHandle, ManagedMemory, Memory, PinnedMemory} from './memory';
import {
clampRange,
isArrayBufferLike,
isArrayBufferView,
isArrayLike,
isIterable,
isMemoryLike,
isNumber,
isObject
} from './util';
// NOTE: `Math` and `runtime` here are the addon bindings imported above from
// './addon', not the global JS Math object.
const {min, max} = Math;
const {cudaMemcpy, cudaMemset} = runtime;
/** @ignore Maps a JS TypedArray type to its corresponding CUDA MemoryView buffer type. */
// clang-format off
type MemoryViewOf<T extends TypedArray|BigIntArray> =
    T extends Int8Array         ? Int8Buffer
  : T extends Int16Array        ? Int16Buffer
  : T extends Int32Array        ? Int32Buffer
  : T extends BigInt64Array     ? Int64Buffer
  : T extends Uint8Array        ? Uint8Buffer
  : T extends Uint8ClampedArray ? Uint8ClampedBuffer
  : T extends Uint16Array       ? Uint16Buffer
  : T extends Uint32Array       ? Uint32Buffer
  : T extends BigUint64Array    ? Uint64Buffer
  : T extends Float32Array      ? Float32Buffer
  : T extends Float64Array      ? Float64Buffer
  : never;
// clang-format on
// Union of every concrete CUDA MemoryView buffer type.
export type CUDAMemoryView = Int8Buffer|Int16Buffer|Int32Buffer|Int64Buffer|Uint8Buffer|
  Uint8ClampedBuffer|Uint16Buffer|Uint32Buffer|Uint64Buffer|Float32Buffer|Float64Buffer;
/** @ignore Structural type of a MemoryView subclass constructor (mirrors TypedArrayConstructor). */
export type MemoryViewConstructor<T extends TypedArray|BigIntArray> = {
  readonly BYTES_PER_ELEMENT: number,
  readonly TypedArray: TypedArrayConstructor<T>,
  new (length?: number): MemoryViewOf<T>,
  new (values: Iterable<T[0]>): MemoryViewOf<T>,
  new (buffer: ArrayLike<T[0]>|MemoryData, byteOffset?: number, length?: number): MemoryViewOf<T>,
};
// Default allocator: device-only memory (GPU-accessible; reading from the
// host requires an explicit copy).
const allocateDeviceMemory = (byteLength: number): Memory => new DeviceMemory(byteLength);
// The currently-active allocator; replaced via `setDefaultAllocator` below.
let allocateMemory = allocateDeviceMemory;
/**
* @summary A function to override the default device memory allocation behavior.
* The supplied function will be called to create the underlying {@link Memory `Memory`} instances
* when constructing one of the {@link CUDAMemoryView `CUDAMemoryView`} in JavaScript.
*
* @example
* ```typescript
* import {
* DeviceMemory,
* ManagedMemory,
* Float32Buffer,
* setDefaultAllocator
* } from '@rapidsai/cuda';
*
* // The default allocator creates `DeviceMemory` instances,
* // which can only be accessed directly from the GPU device.
* // An expensive copy from GPU to CPU memory must be performed
* // in order to read the data in JavaScript.
* const dbuf = new Float32Buffer([1.0, 2.0, 3.0]);
* assert(dbuf.buffer instanceof DeviceMemory);
*
* // Override allocate function to create `ManagedMemory` instances.
* setDefaultAllocator((byteLength) => new ManagedMemory(byteLength));
*
* // Now the allocator uses the supplied function to create
* // `ManagedMemory` instances. This kind of memory can be accessed
* // by both the CPU and GPU, because the CUDA driver automatically
* // migrates the data from the CPU <-> GPU as required.
* const mbuf = new Float32Buffer([1.0, 2.0, 3.0]);
* assert(mbuf.buffer instanceof ManagedMemory);
* ```
*
* @param allocate Function to use for device {@link Memory `Memory`} allocations.
*/
// Installs (or resets) the allocator used to back new CUDAMemoryView
// instances. Passing null/undefined restores the DeviceMemory default; a
// supplied function is probed once with an 8-byte request to verify it
// returns a Memory-like object of the requested size.
export function setDefaultAllocator(allocate?: null|((byteLength: number) => Memory)) {
  if (allocate === undefined || allocate === null) {
    // Reset to the built-in DeviceMemory allocator.
    allocateMemory = allocateDeviceMemory;
    return;
  }
  if (typeof allocate !== 'function') {
    throw new TypeError('setDefaultAllocator requires an `allocate` function');
  }
  // Probe the user-provided function once to validate its return value.
  const probe = allocate(8);
  if (!isMemoryLike(probe) || (probe.byteLength !== 8)) {
    throw new TypeError(
      'setDefaultAllocator requires the `allocate` function to return Memory instances');
  }
  allocateMemory = allocate;
}
/**
* @summary A base class for typed arrays of values in owned or managed by CUDA.
*/
// Declaration-merged with the abstract `MemoryView` class below: this
// interface contributes the readonly members the class implementation
// assigns dynamically.
export interface MemoryView<T extends TypedArray|BigIntArray = any> extends ArrayBufferView {
  /**
   * @summary The size in bytes of each element in the MemoryView.
   */
  readonly BYTES_PER_ELEMENT: number;
  /**
   * @summary The constructor of the MemoryView's corresponding JS TypedArray.
   */
  readonly TypedArray: TypedArrayConstructor<T>;
  /**
   * @ignore
   * @summary The constructor function for the MemoryView type.
   */
  readonly[Symbol.species]: MemoryViewConstructor<T>;
}
/**
* @summary A base class for typed arrays of values in owned or managed by CUDA.
*/
export abstract class MemoryView<T extends TypedArray|BigIntArray = any> implements
ArrayBufferView {
public static readonly BYTES_PER_ELEMENT: number;
/**
* @summary The {@link Memory `Memory`} instance referenced by the MemoryView.
*/
public readonly buffer!: Memory;
/**
* @summary The offset in bytes of the MemoryView.
*/
public readonly byteOffset!: number;
/**
* @summary The length in bytes of the MemoryView.
*/
public readonly byteLength!: number;
/**
* @summary The length of the MemoryView.
*/
public readonly length!: number;
[index: number]: T[0];
  constructor(length?: number);
  constructor(arrayOrArrayBuffer: Iterable<T[0]>|ArrayLike<T[0]>|MemoryData);
  constructor(buffer: ArrayLike<T[0]>|MemoryData, byteOffset: number, length?: number);
  constructor() {
    // eslint-disable-next-line prefer-const, prefer-rest-params
    let [buffer, byteOffset, length] = arguments;
    // toMemory (defined elsewhere in this file) normalizes the first argument
    // into the buffer/byteOffset/byteLength/length fields assigned here; the
    // switch below then overrides offset/length when explicitly provided.
    Object.assign(this, toMemory(buffer, this.TypedArray));
    switch (arguments.length) {
      // @ts-ignore
      case 3:
        // Explicit element count: clamp to >= 0 and recompute the byte length.
        this.length     = length = max(+length, 0) || 0;
        this.byteLength = length * this.BYTES_PER_ELEMENT;
      // Intentional fallthrough: a 3-argument call also applies byteOffset.
      // @ts-ignore
      // eslint-disable-next-line no-fallthrough
      case 2: this.byteOffset = max(+byteOffset, 0) || 0; break;
    }
  }
/**
* Copies data from a region of a source {@link MemoryView}, {@link TypedArray}, or Array to a
* region in this {@link MemoryView}, even if the source region overlaps with this {@link
* MemoryView}.
* @param source The {@link MemoryView}, {@link TypedArray}, or Array to copy
* from.
* @param sourceStart The offset in `source` at which to begin copying. <b>Default:</b> `0`.
* @param targetStart The offset in `this` from which to begin writing. <b>Default:</b> `0`.
* @param targetEnd The offset in `this` at which to stop writing (not inclusive).
* <b>Default:</b> `this.length - targetStart`.
* @returns `this`
*/
public copyFrom(source: MemoryData|Iterable<number|bigint>|ArrayLike<number|bigint>,
sourceStart = 0,
targetStart = 0,
targetEnd = this.length) {
this.subarray(targetStart, targetEnd)
.set(toHDView(source, this.TypedArray).subarray(sourceStart));
return this;
}
/**
* Copies data from a region of this {@link MemoryView} to a region in a target {@link
* MemoryView}, {@link TypedArray}, or Array, even if the target region overlaps with this {@link
* MemoryView}.
* @param target The {@link MemoryView}, {@link TypedArray}, or Array to copy
* into.
* @param targetStart The offset in `target` at which to begin writing. <b>Default:</b> `0`.
* @param sourceStart The offset in `this` from which to begin copying. <b>Default:</b> `0`.
* @param sourceEnd The offset in `this` at which to stop copying (not inclusive). <b>Default:</b>
* <b>Default:</b> `this.length - sourceStart`.
* @returns `this`
*/
public copyInto(target: MemoryData|Array<any>,
targetStart = 0,
sourceStart = 0,
sourceEnd = this.length) {
if (!target) {
throw new TypeError(
`${this[Symbol.toStringTag]}.copyInto argument "target" cannot be null or undefined`);
}
const source = this.subarray(...clampRange(this.length, sourceStart, sourceEnd));
if (target instanceof MemoryView || isMemoryLike(target)) {
toMemoryView(target, this.TypedArray).set(source, targetStart);
} else if (isArrayBufferLike(target) || isArrayBufferView(target)) {
// If target is a ArrayBuffer or ArrayBufferView, copy from device to host via cudaMemcpy
const destination = toHDView(target, this.TypedArray).subarray(targetStart);
cudaMemcpy(destination, source, min(destination.byteLength, source.byteLength));
} else if (Array.isArray(target)) {
// If target is an Array, copy the data from device to host and splice the values into place
target.splice(targetStart, 0, ...source.toArray());
} else {
throw new TypeError(`${this[Symbol.toStringTag]}.copyInto argument "target" invalid type`);
}
return this;
}
/**
* Copies the underlying CUDA memory into a JavaScript typed array.
* @returns A JavaScript typed array copy of the underlying CUDA memory.
*/
public toArray(): T {
const target = new this.TypedArray(this.length);
this.copyInto(target);
return target;
}
/**
* Sets a value or an array of values.
* @param array A typed or untyped array of values to set.
* @param start The index in the current array at which the values are to be written.
*/
public set(array: MemoryData|ArrayLike<number>|ArrayLike<bigint>, start?: number) {
const [begin, end] = clampRange(this.length, start);
const source = toHDView(array, this.TypedArray);
const length = min((end - begin) * this.BYTES_PER_ELEMENT, source.byteLength);
// const length = min(end * this.BYTES_PER_ELEMENT, source.byteLength);
cudaMemcpy(this.subarray(begin), source, length);
}
/**
* Returns the this object after filling the section identified by start and end with value.
* @param value value to fill array section with.
* @param start index to start filling the array at. If start is negative, it is treated as
* length+start where length is the length of the array.
* @param end index to stop filling the array at. If end is negative, it is treated as
* length+end.
*/
public fill(value: T[0], start?: number, end?: number) {
[start, end] = clampRange(this.length, start, end);
this.set((new this.TypedArray(end - start)).fill(<never>value), start);
return this;
}
/**
* Returns a section of an array.
* @param start The beginning of the specified portion of the array.
* @param end The end of the specified portion of the array. This is exclusive of the element at
* the index 'end'.
*/
public slice(start?: number, end?: number) {
[start, end] = clampRange(this.length, start, end);
return new this[Symbol.species](
this.buffer.slice(this.byteOffset + (start * this.BYTES_PER_ELEMENT),
this.byteOffset + (end * this.BYTES_PER_ELEMENT)));
}
/**
* Creates a new MemoryView view over the underlying Memory of this array,
* referencing the elements at begin, inclusive, up to end, exclusive.
* @param begin The index of the beginning of the array.
* @param end The index of the end of the array.
*/
public subarray(begin?: number, end?: number) {
[begin, end] = clampRange(this.length, begin, end);
return new this[Symbol.species](
this.buffer, this.byteOffset + (begin * this.BYTES_PER_ELEMENT), end - begin);
}
/** @ignore */
public get[Symbol.toStringTag]() { return this.constructor.name; }
/** @ignore */
public[Symbol.for('nodejs.util.inspect.custom')]() { return this.toString(); }
/** @ignore */
public toString() {
return `${this[Symbol.toStringTag]} ${JSON.stringify({
'length': this.length,
'byteOffset': this.byteOffset,
'byteLength': this.byteLength,
'device': this.buffer.device,
'type': this.buffer[Symbol.toStringTag],
})}`;
}
/**
* @summary Create an IpcHandle for the underlying CUDA device memory.
*/
public getIpcHandle() {
if (this.buffer instanceof PinnedMemory) {
throw new Error(
`${this[Symbol.toStringTag]}'s buffer must not be an instance of PinnedMemory`);
}
if (this.buffer instanceof ManagedMemory) {
throw new Error(
`${this[Symbol.toStringTag]}'s buffer must not be an instance of ManagedMemory`);
}
return new IpcHandle(<any>this.buffer, this.byteOffset);
}
}
// Install a Proxy behind MemoryView.prototype so that numeric indexing (view[i])
// transparently copies single elements between device and host. `E` is a small host
// scratch TypedArray shared per subclass (see the wiring below); the trick is to
// temporarily shift the view's byteOffset to element `i`, cudaMemcpy one element
// through `E`, then restore the original byteOffset.
Object.setPrototypeOf(MemoryView.prototype, new Proxy({}, {
  // Read trap: view[i] -> copy one element device->host and return it.
  get(target: any, p: any, receiver: any) {
    let i: number = p;
    switch (typeof p) {
      // @ts-ignore
      case 'string':
        // Non-numeric string keys fall through to normal property lookup.
        if (isNaN(i = +p)) { break; }
      // eslint-disable-next-line no-fallthrough
      case 'number':
        if (i > -1 && i < receiver.length) {
          const {byteOffset, BYTES_PER_ELEMENT, E} = receiver;
          // eslint-disable-next-line @typescript-eslint/restrict-plus-operands
          receiver.byteOffset = byteOffset + i * BYTES_PER_ELEMENT;
          cudaMemcpy(E, receiver, BYTES_PER_ELEMENT);
          receiver.byteOffset = byteOffset;
          return E[0];
        }
        // Out-of-range numeric index mimics TypedArray semantics.
        return undefined;
    }
    return Reflect.get(target, p, receiver);
  },
  // Write trap: view[i] = v -> copy one element host->device.
  set(target: any, p: any, value: any, receiver: any) {
    let i: number = p;
    switch (typeof p) {
      // @ts-ignore
      case 'string':
        // Non-numeric string keys fall through to normal property assignment.
        if (isNaN(i = +p)) { break; }
      // eslint-disable-next-line no-fallthrough
      case 'number':
        if (i > -1 && i < receiver.length) {
          const {byteOffset, BYTES_PER_ELEMENT, E} = receiver;
          // eslint-disable-next-line @typescript-eslint/restrict-plus-operands
          receiver.byteOffset = byteOffset + i * BYTES_PER_ELEMENT;
          E[0] = value;
          cudaMemcpy(receiver, E, BYTES_PER_ELEMENT);
          receiver.byteOffset = byteOffset;
          return true;
        }
    }
    return Reflect.set(target, p, value, receiver);
  }
}));
// Placeholder prototype state shared by all MemoryViews; each concrete subclass
// overrides TypedArray / BYTES_PER_ELEMENT / [Symbol.species] / E in the wiring
// loop below. `E` is the 8-byte host scratch element used by the indexing Proxy.
/** @ignore */ (<any>MemoryView.prototype).buffer = new DeviceMemory(0);
/** @ignore */ (<any>MemoryView.prototype).length = 0;
/** @ignore */ (<any>MemoryView.prototype).byteOffset = 0;
/** @ignore */ (<any>MemoryView.prototype).byteLength = 0;
/** @ignore */ (<any>MemoryView.prototype)[Symbol.species] = MemoryView;
/** @ignore */ (<any>MemoryView.prototype).TypedArray = Uint8ClampedArray;
/** @ignore */ (<any>MemoryView.prototype).E = new Uint8ClampedArray(8);
/** @ignore */ (<any>MemoryView.prototype).BYTES_PER_ELEMENT = Uint8ClampedArray.BYTES_PER_ELEMENT;
/** @summary A typed array of twos-complement 8-bit signed integers in CUDA memory. */
export class Int8Buffer extends MemoryView<Int8Array> {
  // Host-side counterpart used for copies and single-element access (1 byte/element).
  public static readonly TypedArray = Int8Array;
  public static readonly BYTES_PER_ELEMENT = Int8Array.BYTES_PER_ELEMENT;
}
/** @summary A typed array of twos-complement 16-bit signed integers in CUDA memory. */
export class Int16Buffer extends MemoryView<Int16Array> {
  // Host-side counterpart used for copies and single-element access (2 bytes/element).
  public static readonly TypedArray = Int16Array;
  public static readonly BYTES_PER_ELEMENT = Int16Array.BYTES_PER_ELEMENT;
}
/** @summary A typed array of twos-complement 32-bit signed integers in CUDA memory. */
export class Int32Buffer extends MemoryView<Int32Array> {
  // Host-side counterpart used for copies and single-element access (4 bytes/element).
  public static readonly TypedArray = Int32Array;
  public static readonly BYTES_PER_ELEMENT = Int32Array.BYTES_PER_ELEMENT;
}
/** @summary A typed array of 8-bit unsigned integers in CUDA memory. */
export class Uint8Buffer extends MemoryView<Uint8Array> {
  // Host-side counterpart used for copies and single-element access (1 byte/element).
  public static readonly TypedArray = Uint8Array;
  public static readonly BYTES_PER_ELEMENT = Uint8Array.BYTES_PER_ELEMENT;
}
/** @summary A typed array of 8-bit unsigned integers clamped to 0-255 in CUDA memory. */
export class Uint8ClampedBuffer extends MemoryView<Uint8ClampedArray> {
  // Host-side counterpart used for copies and single-element access (1 byte/element).
  public static readonly TypedArray = Uint8ClampedArray;
  public static readonly BYTES_PER_ELEMENT = Uint8ClampedArray.BYTES_PER_ELEMENT;
}
/** @summary A typed array of 16-bit unsigned integers in CUDA memory. */
export class Uint16Buffer extends MemoryView<Uint16Array> {
  // Host-side counterpart used for copies and single-element access (2 bytes/element).
  public static readonly TypedArray = Uint16Array;
  public static readonly BYTES_PER_ELEMENT = Uint16Array.BYTES_PER_ELEMENT;
}
/** @summary A typed array of 32-bit unsigned integers in CUDA memory. */
export class Uint32Buffer extends MemoryView<Uint32Array> {
  // Host-side counterpart used for copies and single-element access (4 bytes/element).
  public static readonly TypedArray = Uint32Array;
  public static readonly BYTES_PER_ELEMENT = Uint32Array.BYTES_PER_ELEMENT;
}
/** @summary A typed array of 32-bit floating point numbers in CUDA memory. */
export class Float32Buffer extends MemoryView<Float32Array> {
  // Host-side counterpart used for copies and single-element access (4 bytes/element).
  public static readonly TypedArray = Float32Array;
  public static readonly BYTES_PER_ELEMENT = Float32Array.BYTES_PER_ELEMENT;
}
/** @summary A typed array of 64-bit floating point numbers in CUDA memory. */
export class Float64Buffer extends MemoryView<Float64Array> {
  // Host-side counterpart used for copies and single-element access (8 bytes/element).
  public static readonly TypedArray = Float64Array;
  public static readonly BYTES_PER_ELEMENT = Float64Array.BYTES_PER_ELEMENT;
}
/** @summary A typed array of 64-bit signed integers in CUDA memory. */
export class Int64Buffer extends MemoryView<BigInt64Array> {
  // Host-side counterpart (BigInt-valued) used for copies and element access (8 bytes/element).
  public static readonly TypedArray = BigInt64Array;
  public static readonly BYTES_PER_ELEMENT = BigInt64Array.BYTES_PER_ELEMENT;
}
/** @summary A typed array of 64-bit unsigned integers in CUDA memory. */
export class Uint64Buffer extends MemoryView<BigUint64Array> {
  // Host-side counterpart (BigInt-valued) used for copies and element access (8 bytes/element).
  public static readonly TypedArray = BigUint64Array;
  public static readonly BYTES_PER_ELEMENT = BigUint64Array.BYTES_PER_ELEMENT;
}
// Wire each concrete MemoryView subclass to its host TypedArray counterpart:
// species constructor, element size, and a typed view `E` over the shared
// 8-byte scratch buffer used by the indexing Proxy.
const pairings: [any, any][] = [
  [Int8Buffer, Int8Array],
  [Int16Buffer, Int16Array],
  [Int32Buffer, Int32Array],
  [Uint8Buffer, Uint8Array],
  [Uint8ClampedBuffer, Uint8ClampedArray],
  [Uint16Buffer, Uint16Array],
  [Uint32Buffer, Uint32Array],
  [Float32Buffer, Float32Array],
  [Float64Buffer, Float64Array],
  [Int64Buffer, BigInt64Array],
  [Uint64Buffer, BigUint64Array],
];
pairings.forEach(([MemoryViewCtor, TypedArrayCtor]) => {
  MemoryViewCtor.prototype.TypedArray        = TypedArrayCtor;
  MemoryViewCtor.prototype[Symbol.species]   = MemoryViewCtor;
  MemoryViewCtor.BYTES_PER_ELEMENT           = TypedArrayCtor.BYTES_PER_ELEMENT;
  MemoryViewCtor.prototype.BYTES_PER_ELEMENT = TypedArrayCtor.BYTES_PER_ELEMENT;
  MemoryViewCtor.prototype.E                 = new TypedArrayCtor((<any>MemoryView.prototype).E.buffer);
});
/**
 * @internal
 *
 * @summary Allocate or adopt a device `Memory` buffer for the given source and
 * compute the `{buffer, byteOffset, byteLength, length}` fields used to
 * initialize a {@link MemoryView}.
 *
 * @param source An element count, host Iterable/Array, ArrayBuffer[View],
 * MemoryView, MemoryLike, or an object with a `buffer` member.
 * @param TypedArray The host TypedArray constructor that determines element size.
 * @returns The fields to assign onto the constructed MemoryView.
 */
function toMemory<T extends TypedArray|BigIntArray>(
  source: number|Iterable<T[0]>|ArrayLike<T[0]>|MemoryData, TypedArray: TypedArrayConstructor<T>) {
  let byteOffset = 0;
  let byteLength = 0;
  let buffer: Memory;
  if (isNumber(source)) {
    // A number is an element count: allocate that many elements and zero-fill.
    byteLength = source * TypedArray.BYTES_PER_ELEMENT;
    buffer = allocateMemory(byteLength);
    // initialize the newly allocated memory with zeroes
    cudaMemset(buffer, 0, byteLength);
  } else if (isMemoryLike(source)) {
    // If source is a device Memory instance, adopt it without copying
    buffer = source;
    byteLength = source.byteLength;
  } else if ((source instanceof MemoryView) //
             || isArrayBufferLike(source)   //
             || isArrayBufferView(source)) {
    // If source is a host ArrayBuffer[View] or MemoryView, make a device copy
    byteLength = source.byteLength;
    buffer = allocateMemory(byteLength);
    cudaMemcpy(buffer, source, byteLength);
  } else if (isIterable(source) || isArrayLike(source)) {
    // If source is an Iterable or JavaScript Array, construct a TypedArray from the values
    // (BigInt-valued TypedArrays require BigInt conversion of each element).
    const array = TypedArray.from(source, TypedArray.name.includes('Big') ? BigInt : Number);
    byteLength = array.byteLength;
    buffer = allocateMemory(byteLength);
    cudaMemcpy(buffer, array.buffer, byteLength);
  } else if (isObject(source) && ('buffer' in source)) {
    // Objects carrying a `buffer` member delegate to toMemoryView for unwrapping.
    ({buffer, byteOffset, byteLength} = toMemoryView(source, TypedArray));
  } else {
    // Unknown source: produce an empty zero-length allocation.
    byteOffset = 0;
    byteLength = 0;
    buffer = allocateMemory(0);
  }
  return {buffer, byteLength, byteOffset, length: byteLength / TypedArray.BYTES_PER_ELEMENT};
}
/**
 * @internal
 *
 * @summary Construct and return a MemoryView corresponding to the given TypedArray.
 * If necessary, copy data from the source host CPU arrays or buffers to device Memory.
 *
 * @note If the source is already a Memory or MemoryView, this function will create a
 * new MemoryView of the requested type without copying the underlying device Memory.
 *
 * @param source The source data from which to construct a GPU MemoryView.
 * @param TypedArray The TypedArray corresponding to the requested MemoryView type.
 * @returns A MemoryView corresponding to the given TypedArray type.
 */
function toMemoryView<T extends TypedArray|BigIntArray>(
  source: Iterable<T[0]>|ArrayLike<T[0]>|MemoryData, TypedArray: TypedArrayConstructor<T>) {
  if (source instanceof MemoryView && source.TypedArray === TypedArray) {
    // If source is already the requested type, return it
    return source as MemoryViewOf<T>;
  }
  let buffer = source as MemoryData;
  let byteOffset = 0, byteLength: number|undefined;
  // Remember the outermost view's window so it can be re-applied to the new view.
  if ('byteOffset' in source) { ({byteOffset} = source); }
  if ('byteLength' in source) { ({byteLength} = source); }
  // Unwrap nested `.buffer` references down to the innermost Memory/ArrayBuffer so
  // the new view shares (rather than copies) the underlying allocation.
  while (('buffer' in buffer) && (buffer['buffer'] !== buffer)) {  //
    buffer = buffer['buffer'];
  }
  // Wrap the *unwrapped* buffer (not the outer source) -- wrapping the outer source
  // would re-trigger the host-copy path for MemoryViews of other types and then
  // double-apply byteOffset below, violating the no-copy contract in the @note.
  buffer = ((source: MemoryData) => {
    switch (TypedArray.name) {
      case 'Int8Array': return new Int8Buffer(source);
      case 'Int16Array': return new Int16Buffer(source);
      case 'Int32Array': return new Int32Buffer(source);
      case 'Uint8Array': return new Uint8Buffer(source);
      case 'Uint8ClampedArray': return new Uint8ClampedBuffer(source);
      case 'Uint16Array': return new Uint16Buffer(source);
      case 'Uint32Array': return new Uint32Buffer(source);
      case 'Float32Array': return new Float32Buffer(source);
      case 'Float64Array': return new Float64Buffer(source);
      case 'BigInt64Array': return new Int64Buffer(source);
      case 'BigUint64Array': return new Uint64Buffer(source);
    }
    throw new Error('Unknown dtype');
  })(buffer as MemoryData);
  if (byteLength !== undefined) {
    // Restore the original view's window onto the shared allocation.
    (<any>buffer).byteOffset = byteOffset;
    (<any>buffer).byteLength = byteLength;
    (<any>buffer).length = byteLength / TypedArray.BYTES_PER_ELEMENT;
  }
  return buffer as MemoryViewOf<T>;
}
/**
 * @internal
 *
 * @summary Construct a host TypedArray or device `MemoryView` based on the location of the input
 * `source` data.
 *
 * * If the source data is already a `Memory` or `MemoryView`, construct and return a `MemoryView`
 *   corresponding to the desired TypedArray.
 * * If the source data is already an `ArrayBuffer` or `ArrayBufferView`, construct and return a
 *   TypedArray of the desired type.
 * * If the source data is a JavaScript Iterable or Array, construct and return a TypedArray of the
 *   desired type by enumerating the source values.
 * * If the source data is a JavaScript Object with a "buffer" member, construct either a host
 *   TypedArray or device `MemoryView` depending on the location of the underlying buffer.
 *
 * @param source The source data from which to construct a CPU TypedArray or GPU `MemoryView`.
 * @param TypedArray The TypedArray to return (if source is on the host) or its corresponding
 * `MemoryView` (if source is on the device).
 * @returns A TypedArray or `MemoryView` corresponding to the desired TypedArray type.
 * @throws TypeError if `source` is not one of the supported shapes.
 */
function toHDView<T extends TypedArray|BigIntArray>(
  source: Iterable<number|bigint>|ArrayLike<number|bigint>|MemoryData,
  TypedArray: TypedArrayConstructor<T>): T|MemoryViewOf<T> {
  if (source instanceof MemoryView) {
    return (source.TypedArray === TypedArray)
      // If source is already the desired type, return it
      ? source as MemoryViewOf<T>
      // If source is another type of MemoryView, wrap in the desired type
      : toMemoryView(source, TypedArray);
  } else if (isMemoryLike(source)) {
    // If source is MemoryLike, wrap it in a MemoryView of the desired type
    return toMemoryView(source, TypedArray);
  } else if (isArrayBufferLike(source)) {
    // If source is an ArrayBuffer or SharedArrayBuffer, wrap in a TypedArray of the desired type
    return new TypedArray(source);
  } else if (isArrayBufferView(source)) {
    // If source is already an ArrayBufferView of the desired type, return it
    if (source.constructor === TypedArray) { return source as T; }
    // If source is an ArrayBufferView of another kind, return a TypedArray of the desired type
    return new TypedArray(source.buffer,
                          source.byteOffset,  //
                          source.byteLength / TypedArray.BYTES_PER_ELEMENT);
  } else if (isIterable(source) || isArrayLike(source)) {
    // If source is an Iterable or Array, construct a TypedArray of the desired type
    return TypedArray.from(source, TypedArray.name.includes('Big') ? BigInt : Number);
  }
  if (isObject(source) && ('buffer' in source)) {
    // If source is a JS object with a 'buffer' key, recurse down to wrap either as a
    // MemoryView or TypedArray based on whether buffer is a Memory or ArrayBuffer instance.
    let buffer = source as MemoryData;
    let byteOffset = 0, byteLength: number|undefined;
    if ('byteOffset' in source) { ({byteOffset} = source); }
    if ('byteLength' in source) { ({byteLength} = source); }
    while (('buffer' in buffer) && (buffer['buffer'] !== buffer)) {  //
      buffer = buffer['buffer'];
    }
    buffer = toHDView(buffer, TypedArray);
    if (byteLength !== undefined) {
      // Re-apply the original view's window onto the wrapped result.
      (<any>buffer).byteOffset = byteOffset;
      (<any>buffer).byteLength = byteLength;
      (<any>buffer).length = byteLength / TypedArray.BYTES_PER_ELEMENT;
    }
    return buffer as MemoryViewOf<T>| T;
  }
  // Fixed: the message previously named a nonexistent "asMemoryData()" function.
  throw new TypeError(
    'toHDView() received invalid "source". Expected a MemoryData, Iterable, Array, or Object with a {"buffer"} `source`.');
}
| 0 |
rapidsai_public_repos/node/modules/cuda
|
rapidsai_public_repos/node/modules/cuda/src/addon.ts
|
// Copyright (c) 2020-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* eslint-disable @typescript-eslint/no-redeclare */
import {addon as CORE} from '@rapidsai/core';
// Load the native addon (rapidsai_cuda.node), initialize it with the core module's
// C++ exports, and re-export its public API surface.
export const {
  VERSION,
  IPC_HANDLE_SIZE,
  getDriverVersion,
  rgbaMirror,
  bgraToYCrCb420,
  Math,
  driver,
  runtime,
  CUDAArray,
  ChannelFormatKind,
  Device,
  DeviceFlags,
  DeviceMemory,
  PinnedMemory,
  ManagedMemory,
  IpcMemory,
  IpcHandle,
  MappedGLMemory,
  _cpp_exports,
} = require('bindings')('rapidsai_cuda.node').init(CORE) as typeof import('./node_cuda');
// Type-only re-exports mirroring the runtime values above, so consumers can use
// these names in type positions without importing './node_cuda' directly.
export type getDriverVersion = typeof import('./node_cuda').getDriverVersion;
export type rgbaMirror = typeof import('./node_cuda').rgbaMirror;
export type bgraToYCrCb420 = typeof import('./node_cuda').bgraToYCrCb420;
export type Math = typeof import('./node_cuda').Math;
export type driver = typeof import('./node_cuda').driver;
export type runtime = typeof import('./node_cuda').runtime;
export type Memory = import('./node_cuda').Memory;
export type DeviceMemory = import('./node_cuda').DeviceMemory;
export type PinnedMemory = import('./node_cuda').PinnedMemory;
export type ManagedMemory = import('./node_cuda').ManagedMemory;
export type IpcMemory = import('./node_cuda').IpcMemory;
export type IpcHandle = import('./node_cuda').IpcHandle;
export type MappedGLMemory = import('./node_cuda').MappedGLMemory;
export type CUDAArray = import('./node_cuda').CUDAArray;
export type ChannelFormatKind = import('./node_cuda').ChannelFormatKind;
export type Device = import('./node_cuda').Device;
export type DeviceFlags = import('./node_cuda').DeviceFlags;
export type DeviceProperties = import('./node_cuda').DeviceProperties;
| 0 |
rapidsai_public_repos/node/modules/cuda/src
|
rapidsai_public_repos/node/modules/cuda/src/visit_struct/visit_struct.hpp
|
// (C) Copyright 2015 - 2018 Christopher Beck
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef VISIT_STRUCT_HPP_INCLUDED
#define VISIT_STRUCT_HPP_INCLUDED
/***
 * Provides a facility to declare a structure as "visitable" and apply a visitor
 * to it. The list of members is a compile-time data structure, and there is no
 * run-time overhead.
 */
#include <type_traits>
#include <utility>
// Library version
#define VISIT_STRUCT_VERSION_MAJOR 1
#define VISIT_STRUCT_VERSION_MINOR 0
#define VISIT_STRUCT_VERSION_PATCH 0
// Standard two-step stringization so macro arguments are expanded before quoting.
#define VISIT_STRUCT_STRING_HELPER(X) #X
#define VISIT_STRUCT_STRING(X) VISIT_STRUCT_STRING_HELPER(X)
// Full "major.minor.patch" version string, assembled at preprocessing time.
#define VISIT_STRUCT_VERSION_STRING                                                   \
  VISIT_STRUCT_STRING(VISIT_STRUCT_VERSION_MAJOR)                                     \
  "." VISIT_STRUCT_STRING(VISIT_STRUCT_VERSION_MINOR) "." VISIT_STRUCT_STRING(        \
    VISIT_STRUCT_VERSION_PATCH)
// For MSVC 2013 support, we put constexpr behind a define.
#ifndef VISIT_STRUCT_CONSTEXPR
#if (defined _MSC_VER) && (_MSC_VER <= 1800)
#define VISIT_STRUCT_CONSTEXPR
#else
#define VISIT_STRUCT_CONSTEXPR constexpr
#endif
#endif
// After C++14 the apply_visitor function can be constexpr.
// We target C++11, but such functions are tagged VISIT_STRUCT_CXX14_CONSTEXPR.
#ifndef VISIT_STRUCT_CXX14_CONSTEXPR
#if ((defined _MSC_VER) && (_MSC_VER <= 1900)) || (!defined __cplusplus) || (__cplusplus == 201103L)
#define VISIT_STRUCT_CXX14_CONSTEXPR
#else
#define VISIT_STRUCT_CXX14_CONSTEXPR constexpr
#endif
#endif
namespace visit_struct {
namespace traits {
// Primary template which is specialized (via the registration macros) to make a
// type visitable; left undefined for unregistered types.
template <typename T, typename ENABLE = void>
struct visitable;
// Helper template which checks if a type is registered
template <typename T, typename ENABLE = void>
struct is_visitable : std::false_type {};
// SFINAE: resolves only when visitable<T>::value exists, i.e. T was registered.
template <typename T>
struct is_visitable<T, typename std::enable_if<traits::visitable<T>::value>::type>
  : std::true_type {};
// Helper template which removes cv and reference from a type (saves some typing)
template <typename T>
struct clean {
  typedef typename std::remove_cv<typename std::remove_reference<T>::type>::type type;
};
template <typename T>
using clean_t = typename clean<T>::type;
// Mini-version of std::common_type (we only require C++11): the type of a ternary
// expression over T and U is their common type.
template <typename T, typename U>
struct common_type {
  typedef decltype(true ? std::declval<T>() : std::declval<U>()) type;
};
}  // end namespace traits
// Tag for tag dispatch: carries a type as a value with zero runtime state,
// used by visit_types to pass member types to a visitor.
template <typename T>
struct type_c {
  using type = T;
};
// Accessor type: function object encapsulating a pointer-to-member as a
// non-type template parameter; calling it on an instance yields the member,
// with value category forwarded from the instance.
template <typename MemPtr, MemPtr ptr>
struct accessor {
  template <typename T>
  VISIT_STRUCT_CONSTEXPR auto operator()(T&& t) const -> decltype(std::forward<T>(t).*ptr) {
    return std::forward<T>(t).*ptr;
  }
};
//
// User-interface
//
// Return number of fields in a visitable struct (compile-time constant).
template <typename S>
VISIT_STRUCT_CONSTEXPR std::size_t field_count() {
  return traits::visitable<traits::clean_t<S>>::field_count;
}
// Convenience overload deducing S from an instance (the instance is unused).
template <typename S>
VISIT_STRUCT_CONSTEXPR std::size_t field_count(S&&) {
  return field_count<S>();
}
// apply_visitor (one struct instance): calls v(name, member) for each registered
// member of s. SFINAE-constrained to registered (visitable) types.
template <typename S, typename V>
VISIT_STRUCT_CXX14_CONSTEXPR auto apply_visitor(V&& v, S&& s) ->
  typename std::enable_if<traits::is_visitable<traits::clean_t<S>>::value>::type {
  traits::visitable<traits::clean_t<S>>::apply(std::forward<V>(v), std::forward<S>(s));
}
// apply_visitor (two struct instances): visits corresponding members of s1 and s2
// in lockstep; requires their common type to be visitable.
template <typename S1, typename S2, typename V>
VISIT_STRUCT_CXX14_CONSTEXPR auto apply_visitor(V&& v, S1&& s1, S2&& s2) -> typename std::enable_if<
  traits::is_visitable<traits::clean_t<typename traits::common_type<S1, S2>::type>>::value>::type {
  using common_S = typename traits::common_type<S1, S2>::type;
  traits::visitable<traits::clean_t<common_S>>::apply(
    std::forward<V>(v), std::forward<S1>(s1), std::forward<S2>(s2));
}
// for_each (Alternate syntax for apply_visitor, reverses order of arguments:
// struct(s) first, visitor last -- reads better with lambdas)
template <typename V, typename S>
VISIT_STRUCT_CXX14_CONSTEXPR auto for_each(S&& s, V&& v) ->
  typename std::enable_if<traits::is_visitable<traits::clean_t<S>>::value>::type {
  traits::visitable<traits::clean_t<S>>::apply(std::forward<V>(v), std::forward<S>(s));
}
// for_each with two structure instances (lockstep member visitation)
template <typename S1, typename S2, typename V>
VISIT_STRUCT_CXX14_CONSTEXPR auto for_each(S1&& s1, S2&& s2, V&& v) -> typename std::enable_if<
  traits::is_visitable<traits::clean_t<typename traits::common_type<S1, S2>::type>>::value>::type {
  using common_S = typename traits::common_type<S1, S2>::type;
  traits::visitable<traits::clean_t<common_S>>::apply(
    std::forward<V>(v), std::forward<S1>(s1), std::forward<S2>(s2));
}
// Visit the types (visit_struct::type_c<...>) of the registered members -- no
// struct instance is needed; the visitor receives only type tags.
template <typename S, typename V>
VISIT_STRUCT_CXX14_CONSTEXPR auto visit_types(V&& v) ->
  typename std::enable_if<traits::is_visitable<traits::clean_t<S>>::value>::type {
  traits::visitable<traits::clean_t<S>>::visit_types(std::forward<V>(v));
}
// Visit the member pointers (&S::a) of the registered members
template <typename S, typename V>
VISIT_STRUCT_CXX14_CONSTEXPR auto visit_pointers(V&& v) ->
  typename std::enable_if<traits::is_visitable<traits::clean_t<S>>::value>::type {
  traits::visitable<traits::clean_t<S>>::visit_pointers(std::forward<V>(v));
}
// Visit the accessors (function objects) of the registered members
template <typename S, typename V>
VISIT_STRUCT_CXX14_CONSTEXPR auto visit_accessors(V&& v) ->
  typename std::enable_if<traits::is_visitable<traits::clean_t<S>>::value>::type {
  traits::visitable<traits::clean_t<S>>::visit_accessors(std::forward<V>(v));
}
// Apply visitor (with no instances)
// This calls visit_pointers, for backwards compat reasons
template <typename S, typename V>
VISIT_STRUCT_CXX14_CONSTEXPR auto apply_visitor(V&& v) ->
  typename std::enable_if<traits::is_visitable<traits::clean_t<S>>::value>::type {
  visit_struct::visit_pointers<S>(std::forward<V>(v));
}
// Get value by index (like std::get for tuples); index is a compile-time int
// dispatched through an integral_constant tag.
template <int idx, typename S>
VISIT_STRUCT_CONSTEXPR auto get(S&& s) ->
  typename std::enable_if<traits::is_visitable<traits::clean_t<S>>::value,
                          decltype(traits::visitable<traits::clean_t<S>>::get_value(
                            std::integral_constant<int, idx>{}, std::forward<S>(s)))>::type {
  return traits::visitable<traits::clean_t<S>>::get_value(std::integral_constant<int, idx>{},
                                                          std::forward<S>(s));
}
// Get name of field, by index (returned as a string literal registered by the macro)
template <int idx, typename S>
VISIT_STRUCT_CONSTEXPR auto get_name() ->
  typename std::enable_if<traits::is_visitable<traits::clean_t<S>>::value,
                          decltype(traits::visitable<traits::clean_t<S>>::get_name(
                            std::integral_constant<int, idx>{}))>::type {
  return traits::visitable<traits::clean_t<S>>::get_name(std::integral_constant<int, idx>{});
}
// Convenience overload deducing S from an instance (the instance is unused).
template <int idx, typename S>
VISIT_STRUCT_CONSTEXPR auto get_name(S&&) -> decltype(get_name<idx, S>()) {
  return get_name<idx, S>();
}
// Get member pointer (&S::member), by index
template <int idx, typename S>
VISIT_STRUCT_CONSTEXPR auto get_pointer() ->
  typename std::enable_if<traits::is_visitable<traits::clean_t<S>>::value,
                          decltype(traits::visitable<traits::clean_t<S>>::get_pointer(
                            std::integral_constant<int, idx>{}))>::type {
  return traits::visitable<traits::clean_t<S>>::get_pointer(std::integral_constant<int, idx>{});
}
// Convenience overload deducing S from an instance (the instance is unused).
template <int idx, typename S>
VISIT_STRUCT_CONSTEXPR auto get_pointer(S&&) -> decltype(get_pointer<idx, S>()) {
  return get_pointer<idx, S>();
}
// Get member accessor (an `accessor` function object), by index
template <int idx, typename S>
VISIT_STRUCT_CONSTEXPR auto get_accessor() ->
  typename std::enable_if<traits::is_visitable<traits::clean_t<S>>::value,
                          decltype(traits::visitable<traits::clean_t<S>>::get_accessor(
                            std::integral_constant<int, idx>{}))>::type {
  return traits::visitable<traits::clean_t<S>>::get_accessor(std::integral_constant<int, idx>{});
}
// Convenience overload deducing S from an instance (the instance is unused).
template <int idx, typename S>
VISIT_STRUCT_CONSTEXPR auto get_accessor(S&&) -> decltype(get_accessor<idx, S>()) {
  return get_accessor<idx, S>();
}
// Get type, by index: unwraps the type_c tag returned by the registration's type_at.
template <int idx, typename S>
struct type_at_s {
  using type_c =
    decltype(traits::visitable<traits::clean_t<S>>::type_at(std::integral_constant<int, idx>{}));
  using type = typename type_c::type;
};
// Alias for the idx-th registered member's declared type.
template <int idx, typename S>
using type_at = typename type_at_s<idx, S>::type;
// Get name of structure (the registered struct's own name string)
template <typename S>
VISIT_STRUCT_CONSTEXPR auto get_name() ->
  typename std::enable_if<traits::is_visitable<traits::clean_t<S>>::value,
                          decltype(traits::visitable<traits::clean_t<S>>::get_name())>::type {
  return traits::visitable<traits::clean_t<S>>::get_name();
}
// Convenience overload deducing S from an instance (the instance is unused).
template <typename S>
VISIT_STRUCT_CONSTEXPR auto get_name(S&&) -> decltype(get_name<S>()) {
  return get_name<S>();
}
/***
* To implement the VISITABLE_STRUCT macro, we need a map-macro, which can take
* the name of a macro and some other arguments, and apply that macro to each other argument.
*
* There are some techniques you can use within C preprocessor to accomplish this succinctly,
* by settng up "recursive" macros.
*
* But this can also cause it to give worse error messages when something goes wrong.
*
* We are now doing it in a more "dumb", bulletproof way which has the advantage that it is
* more portable and gives better error messages.
* For discussion see IMPLEMENTATION_NOTES.md
*
* The code below is based on a patch from Jarod42, and is now generated by a python script.
* The purpose of the generated code is to define VISIT_STRUCT_PP_MAP as described.
*/
/*** Generated code ***/
// Upper bound on members the generated PP_MAP machinery below supports.
static VISIT_STRUCT_CONSTEXPR const int max_visitable_members = 75;
// Extra expansion pass needed for MSVC's non-conforming __VA_ARGS__ handling.
#define VISIT_STRUCT_EXPAND(x) x
#define VISIT_STRUCT_PP_ARG_N(_1, \
_2, \
_3, \
_4, \
_5, \
_6, \
_7, \
_8, \
_9, \
_10, \
_11, \
_12, \
_13, \
_14, \
_15, \
_16, \
_17, \
_18, \
_19, \
_20, \
_21, \
_22, \
_23, \
_24, \
_25, \
_26, \
_27, \
_28, \
_29, \
_30, \
_31, \
_32, \
_33, \
_34, \
_35, \
_36, \
_37, \
_38, \
_39, \
_40, \
_41, \
_42, \
_43, \
_44, \
_45, \
_46, \
_47, \
_48, \
_49, \
_50, \
_51, \
_52, \
_53, \
_54, \
_55, \
_56, \
_57, \
_58, \
_59, \
_60, \
_61, \
_62, \
_63, \
_64, \
_65, \
_66, \
_67, \
_68, \
_69, \
_70, \
_71, \
_72, \
_73, \
_74, \
_75, \
N, \
...) \
N
#define VISIT_STRUCT_PP_NARG(...) \
VISIT_STRUCT_EXPAND(VISIT_STRUCT_PP_ARG_N(__VA_ARGS__, \
75, \
74, \
73, \
72, \
71, \
70, \
69, \
68, \
67, \
66, \
65, \
64, \
63, \
62, \
61, \
60, \
59, \
58, \
57, \
56, \
55, \
54, \
53, \
52, \
51, \
50, \
49, \
48, \
47, \
46, \
45, \
44, \
43, \
42, \
41, \
40, \
39, \
38, \
37, \
36, \
35, \
34, \
33, \
32, \
31, \
30, \
29, \
28, \
27, \
26, \
25, \
24, \
23, \
22, \
21, \
20, \
19, \
18, \
17, \
16, \
15, \
14, \
13, \
12, \
11, \
10, \
9, \
8, \
7, \
6, \
5, \
4, \
3, \
2, \
1, \
0))
/* need extra level to force extra eval */
#define VISIT_STRUCT_CONCAT_(a, b) a##b
#define VISIT_STRUCT_CONCAT(a, b) VISIT_STRUCT_CONCAT_(a, b)
/* VISIT_STRUCT_APPLYFn(f, _1, ..., _n) expands to f(_1) f(_2) ... f(_n) —
 * i.e. it applies the macro f to each of its n arguments, in order, with no
 * separators. One mechanically generated macro per arity; APPLYF0 expands to
 * nothing. These are the workers behind the variadic dispatch (selected via
 * VISIT_STRUCT_PP_NARG + VISIT_STRUCT_CONCAT). */
#define VISIT_STRUCT_APPLYF0(f)
#define VISIT_STRUCT_APPLYF1(f, _1) f(_1)
#define VISIT_STRUCT_APPLYF2(f, _1, _2) f(_1) f(_2)
#define VISIT_STRUCT_APPLYF3(f, _1, _2, _3) f(_1) f(_2) f(_3)
#define VISIT_STRUCT_APPLYF4(f, _1, _2, _3, _4) f(_1) f(_2) f(_3) f(_4)
#define VISIT_STRUCT_APPLYF5(f, _1, _2, _3, _4, _5) f(_1) f(_2) f(_3) f(_4) f(_5)
#define VISIT_STRUCT_APPLYF6(f, _1, _2, _3, _4, _5, _6) f(_1) f(_2) f(_3) f(_4) f(_5) f(_6)
#define VISIT_STRUCT_APPLYF7(f, _1, _2, _3, _4, _5, _6, _7) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7)
#define VISIT_STRUCT_APPLYF8(f, _1, _2, _3, _4, _5, _6, _7, _8) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8)
#define VISIT_STRUCT_APPLYF9(f, _1, _2, _3, _4, _5, _6, _7, _8, _9) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9)
#define VISIT_STRUCT_APPLYF10(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10)
#define VISIT_STRUCT_APPLYF11(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11)
#define VISIT_STRUCT_APPLYF12(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12)
#define VISIT_STRUCT_APPLYF13(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13)
#define VISIT_STRUCT_APPLYF14(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14)
#define VISIT_STRUCT_APPLYF15(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15)
#define VISIT_STRUCT_APPLYF16( \
  f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) \
    f(_16)
#define VISIT_STRUCT_APPLYF17( \
  f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) \
    f(_16) f(_17)
#define VISIT_STRUCT_APPLYF18( \
  f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) \
    f(_16) f(_17) f(_18)
#define VISIT_STRUCT_APPLYF19( \
  f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) \
    f(_16) f(_17) f(_18) f(_19)
#define VISIT_STRUCT_APPLYF20( \
  f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) \
    f(_16) f(_17) f(_18) f(_19) f(_20)
/* VISIT_STRUCT_APPLYF21 .. VISIT_STRUCT_APPLYF69: same family as above —
 * VISIT_STRUCT_APPLYFn(f, _1, ..., _n) applies the macro f to each of its n
 * arguments in order, with no separators. Mechanically generated, one macro
 * per arity; reformatted to a dense two-line form per macro (token-identical
 * to the expanded one-argument-per-line layout). */
#define VISIT_STRUCT_APPLYF21(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21)
#define VISIT_STRUCT_APPLYF22(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22)
#define VISIT_STRUCT_APPLYF23(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23)
#define VISIT_STRUCT_APPLYF24(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24)
#define VISIT_STRUCT_APPLYF25(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25)
#define VISIT_STRUCT_APPLYF26(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26)
#define VISIT_STRUCT_APPLYF27(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27)
#define VISIT_STRUCT_APPLYF28(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28)
#define VISIT_STRUCT_APPLYF29(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29)
#define VISIT_STRUCT_APPLYF30(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30)
#define VISIT_STRUCT_APPLYF31(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31)
#define VISIT_STRUCT_APPLYF32(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32)
#define VISIT_STRUCT_APPLYF33(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33)
#define VISIT_STRUCT_APPLYF34(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34)
#define VISIT_STRUCT_APPLYF35(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35)
#define VISIT_STRUCT_APPLYF36(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36)
#define VISIT_STRUCT_APPLYF37(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37)
#define VISIT_STRUCT_APPLYF38(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38)
#define VISIT_STRUCT_APPLYF39(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39)
#define VISIT_STRUCT_APPLYF40(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40)
#define VISIT_STRUCT_APPLYF41(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41)
#define VISIT_STRUCT_APPLYF42(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42)
#define VISIT_STRUCT_APPLYF43(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43)
#define VISIT_STRUCT_APPLYF44(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44)
#define VISIT_STRUCT_APPLYF45(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45)
#define VISIT_STRUCT_APPLYF46(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46)
#define VISIT_STRUCT_APPLYF47(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46) f(_47)
#define VISIT_STRUCT_APPLYF48(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48)
#define VISIT_STRUCT_APPLYF49(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49)
#define VISIT_STRUCT_APPLYF50(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50)
#define VISIT_STRUCT_APPLYF51(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51)
#define VISIT_STRUCT_APPLYF52(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52)
#define VISIT_STRUCT_APPLYF53(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52) f(_53)
#define VISIT_STRUCT_APPLYF54(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52) f(_53) f(_54)
#define VISIT_STRUCT_APPLYF55(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52) f(_53) f(_54) f(_55)
#define VISIT_STRUCT_APPLYF56(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52) f(_53) f(_54) f(_55) f(_56)
#define VISIT_STRUCT_APPLYF57(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52) f(_53) f(_54) f(_55) f(_56) f(_57)
#define VISIT_STRUCT_APPLYF58(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52) f(_53) f(_54) f(_55) f(_56) f(_57) f(_58)
#define VISIT_STRUCT_APPLYF59(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52) f(_53) f(_54) f(_55) f(_56) f(_57) f(_58) f(_59)
#define VISIT_STRUCT_APPLYF60(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52) f(_53) f(_54) f(_55) f(_56) f(_57) f(_58) f(_59) f(_60)
#define VISIT_STRUCT_APPLYF61(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52) f(_53) f(_54) f(_55) f(_56) f(_57) f(_58) f(_59) f(_60) f(_61)
#define VISIT_STRUCT_APPLYF62(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52) f(_53) f(_54) f(_55) f(_56) f(_57) f(_58) f(_59) f(_60) f(_61) f(_62)
#define VISIT_STRUCT_APPLYF63(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52) f(_53) f(_54) f(_55) f(_56) f(_57) f(_58) f(_59) f(_60) f(_61) f(_62) f(_63)
#define VISIT_STRUCT_APPLYF64(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52) f(_53) f(_54) f(_55) f(_56) f(_57) f(_58) f(_59) f(_60) f(_61) f(_62) f(_63) f(_64)
#define VISIT_STRUCT_APPLYF65(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, _65) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52) f(_53) f(_54) f(_55) f(_56) f(_57) f(_58) f(_59) f(_60) f(_61) f(_62) f(_63) f(_64) f(_65)
#define VISIT_STRUCT_APPLYF66(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, _65, _66) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52) f(_53) f(_54) f(_55) f(_56) f(_57) f(_58) f(_59) f(_60) f(_61) f(_62) f(_63) f(_64) f(_65) f(_66)
#define VISIT_STRUCT_APPLYF67(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, _65, _66, _67) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52) f(_53) f(_54) f(_55) f(_56) f(_57) f(_58) f(_59) f(_60) f(_61) f(_62) f(_63) f(_64) f(_65) f(_66) f(_67)
#define VISIT_STRUCT_APPLYF68(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, _65, _66, _67, _68) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52) f(_53) f(_54) f(_55) f(_56) f(_57) f(_58) f(_59) f(_60) f(_61) f(_62) f(_63) f(_64) f(_65) f(_66) f(_67) f(_68)
#define VISIT_STRUCT_APPLYF69(f, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, _65, _66, _67, _68, _69) \
  f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52) f(_53) f(_54) f(_55) f(_56) f(_57) f(_58) f(_59) f(_60) f(_61) f(_62) f(_63) f(_64) f(_65) f(_66) f(_67) f(_68) f(_69)
#define VISIT_STRUCT_APPLYF70(f, \
_1, \
_2, \
_3, \
_4, \
_5, \
_6, \
_7, \
_8, \
_9, \
_10, \
_11, \
_12, \
_13, \
_14, \
_15, \
_16, \
_17, \
_18, \
_19, \
_20, \
_21, \
_22, \
_23, \
_24, \
_25, \
_26, \
_27, \
_28, \
_29, \
_30, \
_31, \
_32, \
_33, \
_34, \
_35, \
_36, \
_37, \
_38, \
_39, \
_40, \
_41, \
_42, \
_43, \
_44, \
_45, \
_46, \
_47, \
_48, \
_49, \
_50, \
_51, \
_52, \
_53, \
_54, \
_55, \
_56, \
_57, \
_58, \
_59, \
_60, \
_61, \
_62, \
_63, \
_64, \
_65, \
_66, \
_67, \
_68, \
_69, \
_70) \
f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) \
f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) \
f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) \
f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52) f(_53) f(_54) \
f(_55) f(_56) f(_57) f(_58) f(_59) f(_60) f(_61) f(_62) f(_63) f(_64) f(_65) f(_66) \
f(_67) f(_68) f(_69) f(_70)
#define VISIT_STRUCT_APPLYF71(f, \
_1, \
_2, \
_3, \
_4, \
_5, \
_6, \
_7, \
_8, \
_9, \
_10, \
_11, \
_12, \
_13, \
_14, \
_15, \
_16, \
_17, \
_18, \
_19, \
_20, \
_21, \
_22, \
_23, \
_24, \
_25, \
_26, \
_27, \
_28, \
_29, \
_30, \
_31, \
_32, \
_33, \
_34, \
_35, \
_36, \
_37, \
_38, \
_39, \
_40, \
_41, \
_42, \
_43, \
_44, \
_45, \
_46, \
_47, \
_48, \
_49, \
_50, \
_51, \
_52, \
_53, \
_54, \
_55, \
_56, \
_57, \
_58, \
_59, \
_60, \
_61, \
_62, \
_63, \
_64, \
_65, \
_66, \
_67, \
_68, \
_69, \
_70, \
_71) \
f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) \
f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) \
f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) \
f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52) f(_53) f(_54) \
f(_55) f(_56) f(_57) f(_58) f(_59) f(_60) f(_61) f(_62) f(_63) f(_64) f(_65) f(_66) \
f(_67) f(_68) f(_69) f(_70) f(_71)
#define VISIT_STRUCT_APPLYF72(f, \
_1, \
_2, \
_3, \
_4, \
_5, \
_6, \
_7, \
_8, \
_9, \
_10, \
_11, \
_12, \
_13, \
_14, \
_15, \
_16, \
_17, \
_18, \
_19, \
_20, \
_21, \
_22, \
_23, \
_24, \
_25, \
_26, \
_27, \
_28, \
_29, \
_30, \
_31, \
_32, \
_33, \
_34, \
_35, \
_36, \
_37, \
_38, \
_39, \
_40, \
_41, \
_42, \
_43, \
_44, \
_45, \
_46, \
_47, \
_48, \
_49, \
_50, \
_51, \
_52, \
_53, \
_54, \
_55, \
_56, \
_57, \
_58, \
_59, \
_60, \
_61, \
_62, \
_63, \
_64, \
_65, \
_66, \
_67, \
_68, \
_69, \
_70, \
_71, \
_72) \
f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) \
f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) \
f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) \
f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52) f(_53) f(_54) \
f(_55) f(_56) f(_57) f(_58) f(_59) f(_60) f(_61) f(_62) f(_63) f(_64) f(_65) f(_66) \
f(_67) f(_68) f(_69) f(_70) f(_71) f(_72)
#define VISIT_STRUCT_APPLYF73(f, \
_1, \
_2, \
_3, \
_4, \
_5, \
_6, \
_7, \
_8, \
_9, \
_10, \
_11, \
_12, \
_13, \
_14, \
_15, \
_16, \
_17, \
_18, \
_19, \
_20, \
_21, \
_22, \
_23, \
_24, \
_25, \
_26, \
_27, \
_28, \
_29, \
_30, \
_31, \
_32, \
_33, \
_34, \
_35, \
_36, \
_37, \
_38, \
_39, \
_40, \
_41, \
_42, \
_43, \
_44, \
_45, \
_46, \
_47, \
_48, \
_49, \
_50, \
_51, \
_52, \
_53, \
_54, \
_55, \
_56, \
_57, \
_58, \
_59, \
_60, \
_61, \
_62, \
_63, \
_64, \
_65, \
_66, \
_67, \
_68, \
_69, \
_70, \
_71, \
_72, \
_73) \
f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) \
f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) \
f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) \
f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52) f(_53) f(_54) \
f(_55) f(_56) f(_57) f(_58) f(_59) f(_60) f(_61) f(_62) f(_63) f(_64) f(_65) f(_66) \
f(_67) f(_68) f(_69) f(_70) f(_71) f(_72) f(_73)
#define VISIT_STRUCT_APPLYF74(f, \
_1, \
_2, \
_3, \
_4, \
_5, \
_6, \
_7, \
_8, \
_9, \
_10, \
_11, \
_12, \
_13, \
_14, \
_15, \
_16, \
_17, \
_18, \
_19, \
_20, \
_21, \
_22, \
_23, \
_24, \
_25, \
_26, \
_27, \
_28, \
_29, \
_30, \
_31, \
_32, \
_33, \
_34, \
_35, \
_36, \
_37, \
_38, \
_39, \
_40, \
_41, \
_42, \
_43, \
_44, \
_45, \
_46, \
_47, \
_48, \
_49, \
_50, \
_51, \
_52, \
_53, \
_54, \
_55, \
_56, \
_57, \
_58, \
_59, \
_60, \
_61, \
_62, \
_63, \
_64, \
_65, \
_66, \
_67, \
_68, \
_69, \
_70, \
_71, \
_72, \
_73, \
_74) \
f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) \
f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) \
f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) \
f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52) f(_53) f(_54) \
f(_55) f(_56) f(_57) f(_58) f(_59) f(_60) f(_61) f(_62) f(_63) f(_64) f(_65) f(_66) \
f(_67) f(_68) f(_69) f(_70) f(_71) f(_72) f(_73) f(_74)
#define VISIT_STRUCT_APPLYF75(f, \
_1, \
_2, \
_3, \
_4, \
_5, \
_6, \
_7, \
_8, \
_9, \
_10, \
_11, \
_12, \
_13, \
_14, \
_15, \
_16, \
_17, \
_18, \
_19, \
_20, \
_21, \
_22, \
_23, \
_24, \
_25, \
_26, \
_27, \
_28, \
_29, \
_30, \
_31, \
_32, \
_33, \
_34, \
_35, \
_36, \
_37, \
_38, \
_39, \
_40, \
_41, \
_42, \
_43, \
_44, \
_45, \
_46, \
_47, \
_48, \
_49, \
_50, \
_51, \
_52, \
_53, \
_54, \
_55, \
_56, \
_57, \
_58, \
_59, \
_60, \
_61, \
_62, \
_63, \
_64, \
_65, \
_66, \
_67, \
_68, \
_69, \
_70, \
_71, \
_72, \
_73, \
_74, \
_75) \
f(_1) f(_2) f(_3) f(_4) f(_5) f(_6) f(_7) f(_8) f(_9) f(_10) f(_11) f(_12) f(_13) f(_14) f(_15) \
f(_16) f(_17) f(_18) f(_19) f(_20) f(_21) f(_22) f(_23) f(_24) f(_25) f(_26) f(_27) f(_28) \
f(_29) f(_30) f(_31) f(_32) f(_33) f(_34) f(_35) f(_36) f(_37) f(_38) f(_39) f(_40) f(_41) \
f(_42) f(_43) f(_44) f(_45) f(_46) f(_47) f(_48) f(_49) f(_50) f(_51) f(_52) f(_53) f(_54) \
f(_55) f(_56) f(_57) f(_58) f(_59) f(_60) f(_61) f(_62) f(_63) f(_64) f(_65) f(_66) \
f(_67) f(_68) f(_69) f(_70) f(_71) f(_72) f(_73) f(_74) f(_75)
// Apply macro M to __VA_ARGS__; the extra VISIT_STRUCT_EXPAND pass works
// around MSVC's traditional preprocessor forwarding __VA_ARGS__ as one token.
#define VISIT_STRUCT_APPLY_F_(M, ...) VISIT_STRUCT_EXPAND(M(__VA_ARGS__))
// Map `f` over every variadic argument: counts the arguments with
// VISIT_STRUCT_PP_NARG, pastes the count onto VISIT_STRUCT_APPLYF to pick the
// matching fixed-arity applier, and invokes it.
#define VISIT_STRUCT_PP_MAP(f, ...) \
  VISIT_STRUCT_EXPAND(VISIT_STRUCT_APPLY_F_( \
    VISIT_STRUCT_CONCAT(VISIT_STRUCT_APPLYF, VISIT_STRUCT_PP_NARG(__VA_ARGS__)), f, __VA_ARGS__))
/*** End generated code ***/
/***
* These macros are used with VISIT_STRUCT_PP_MAP
*/
// Expands to "+1" per member; summed as `0 +1 +1 ...` to compute field_count.
#define VISIT_STRUCT_FIELD_COUNT(MEMBER_NAME) +1
// Visit one member of a struct instance: visitor(name, instance.member).
#define VISIT_STRUCT_MEMBER_HELPER(MEMBER_NAME) \
  std::forward<V>(visitor)(#MEMBER_NAME, std::forward<S>(struct_instance).MEMBER_NAME);
// Visit one member as a pointer-to-member: visitor(name, &T::member).
#define VISIT_STRUCT_MEMBER_HELPER_PTR(MEMBER_NAME) \
  std::forward<V>(visitor)(#MEMBER_NAME, &this_type::MEMBER_NAME);
// Visit one member's declared type via a type_c tag (no instance needed).
#define VISIT_STRUCT_MEMBER_HELPER_TYPE(MEMBER_NAME) \
  std::forward<V>(visitor)(#MEMBER_NAME, visit_struct::type_c<decltype(this_type::MEMBER_NAME)>{});
// Visit one member via a stateless accessor functor wrapping the member pointer.
#define VISIT_STRUCT_MEMBER_HELPER_ACC(MEMBER_NAME) \
  std::forward<V>(visitor)( \
    #MEMBER_NAME, \
    visit_struct::accessor<decltype(&this_type::MEMBER_NAME), &this_type::MEMBER_NAME>{});
// Visit the same member of two instances at once: visitor(name, s1.m, s2.m).
#define VISIT_STRUCT_MEMBER_HELPER_PAIR(MEMBER_NAME) \
  std::forward<V>(visitor)( \
    #MEMBER_NAME, std::forward<S1>(s1).MEMBER_NAME, std::forward<S2>(s2).MEMBER_NAME);
// For one member, generate the indexed accessors (get_value / get_name /
// get_pointer / get_accessor / type_at). Each overload is selected by tag
// dispatch on std::integral_constant<int, fields_enum::MEMBER_NAME>, which is
// how the member's index (from the fields_enum declared in VISITABLE_STRUCT)
// is mapped back to its value, name, pointer, accessor, and type.
#define VISIT_STRUCT_MAKE_GETTERS(MEMBER_NAME) \
  template <typename S> \
  static VISIT_STRUCT_CONSTEXPR auto get_value( \
    std::integral_constant<int, fields_enum::MEMBER_NAME>, S&& s) \
    -> decltype((std::forward<S>(s).MEMBER_NAME)) { \
    return std::forward<S>(s).MEMBER_NAME; \
  } \
  \
  static VISIT_STRUCT_CONSTEXPR auto get_name( \
    std::integral_constant<int, fields_enum::MEMBER_NAME>) -> decltype(#MEMBER_NAME) { \
    return #MEMBER_NAME; \
  } \
  \
  static VISIT_STRUCT_CONSTEXPR auto get_pointer( \
    std::integral_constant<int, fields_enum::MEMBER_NAME>) -> decltype(&this_type::MEMBER_NAME) { \
    return &this_type::MEMBER_NAME; \
  } \
  \
  static VISIT_STRUCT_CONSTEXPR auto get_accessor( \
    std::integral_constant<int, fields_enum::MEMBER_NAME>) \
    -> visit_struct::accessor<decltype(&this_type::MEMBER_NAME), &this_type::MEMBER_NAME> { \
    return {}; \
  } \
  \
  static auto type_at(std::integral_constant<int, fields_enum::MEMBER_NAME>) \
    -> visit_struct::type_c<decltype(this_type::MEMBER_NAME)>;
// This macro specializes the trait, provides "apply" method which does the work.
// Below, template parameter S should always be the same as STRUCT_NAME modulo const and reference.
// The interface defined above ensures that STRUCT_NAME is clean_t<S> basically.
//
// Note: The code to make the indexed getters work is more convoluted than I'd like.
// PP_MAP doesn't give you the index of each member. And rather than hack it so that it will
// do that, what we do instead is:
// 1: Declare an enum `field_enum` in the scope of visitable, which maps names to indices.
// This gives an easy way for the macro to get the index from the name token.
// 2: Intuitively we'd like to use template partial specialization to make indices map to
// values, and have a new specialization for each member. But, specializations can only
// be made at namespace scope. So to keep things tidy and contained within this trait,
// we use tag dispatch with std::integral_constant<int> instead.
// Usage: VISITABLE_STRUCT(my_ns::MyStruct, member_a, member_b, ...);
// Must be invoked at global scope (it opens namespace visit_struct::traits to
// specialize `visitable` for STRUCT_NAME). The trailing static_assert forces a
// semicolon at the invocation site.
#define VISITABLE_STRUCT(STRUCT_NAME, ...) \
  namespace visit_struct { \
  namespace traits { \
  \
  template <> \
  struct visitable<STRUCT_NAME, void> { \
    using this_type = STRUCT_NAME; \
  \
    static VISIT_STRUCT_CONSTEXPR auto get_name() -> decltype(#STRUCT_NAME) { \
      return #STRUCT_NAME; \
    } \
  \
    static VISIT_STRUCT_CONSTEXPR const std::size_t field_count = \
      0 VISIT_STRUCT_PP_MAP(VISIT_STRUCT_FIELD_COUNT, __VA_ARGS__); \
  \
    template <typename V, typename S> \
    VISIT_STRUCT_CXX14_CONSTEXPR static void apply(V&& visitor, S&& struct_instance) { \
      VISIT_STRUCT_PP_MAP(VISIT_STRUCT_MEMBER_HELPER, __VA_ARGS__) \
    } \
  \
    template <typename V, typename S1, typename S2> \
    VISIT_STRUCT_CXX14_CONSTEXPR static void apply(V&& visitor, S1&& s1, S2&& s2) { \
      VISIT_STRUCT_PP_MAP(VISIT_STRUCT_MEMBER_HELPER_PAIR, __VA_ARGS__) \
    } \
  \
    template <typename V> \
    VISIT_STRUCT_CXX14_CONSTEXPR static void visit_pointers(V&& visitor) { \
      VISIT_STRUCT_PP_MAP(VISIT_STRUCT_MEMBER_HELPER_PTR, __VA_ARGS__) \
    } \
  \
    template <typename V> \
    VISIT_STRUCT_CXX14_CONSTEXPR static void visit_types(V&& visitor) { \
      VISIT_STRUCT_PP_MAP(VISIT_STRUCT_MEMBER_HELPER_TYPE, __VA_ARGS__) \
    } \
  \
    template <typename V> \
    VISIT_STRUCT_CXX14_CONSTEXPR static void visit_accessors(V&& visitor) { \
      VISIT_STRUCT_PP_MAP(VISIT_STRUCT_MEMBER_HELPER_ACC, __VA_ARGS__) \
    } \
  \
    struct fields_enum { \
      enum index { __VA_ARGS__ }; \
    }; \
  \
    VISIT_STRUCT_PP_MAP(VISIT_STRUCT_MAKE_GETTERS, __VA_ARGS__) \
  \
    static VISIT_STRUCT_CONSTEXPR const bool value = true; \
  }; \
  } \
  } \
  static_assert(true, "")
} // end namespace visit_struct
#endif // VISIT_STRUCT_HPP_INCLUDED
| 0 |
rapidsai_public_repos/node/modules/cuda/src
|
rapidsai_public_repos/node/modules/cuda/src/memory/gl.cpp
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "node_cuda/memory.hpp"
#include "node_cuda/utilities/napi_to_cpp.hpp"
namespace nv {
// Define the JS `MappedGLMemory` class: CUDA-mapped OpenGL buffer memory with
// byteLength/device/ptr accessors and a slice() method.
Napi::Function MappedGLMemory::Init(Napi::Env const& env, Napi::Object exports) {
  return DefineClass(
    env,
    "MappedGLMemory",
    {
      InstanceValue(Napi::Symbol::WellKnown(env, "toStringTag"),
                    Napi::String::New(env, "MappedGLMemory"),
                    napi_enumerable),
      InstanceAccessor("byteLength", &MappedGLMemory::size, nullptr, napi_enumerable),
      InstanceAccessor("device", &MappedGLMemory::device, nullptr, napi_enumerable),
      InstanceAccessor("ptr", &MappedGLMemory::ptr, nullptr, napi_enumerable),
      InstanceMethod("slice", &MappedGLMemory::slice),
    });
}
// Construct a MappedGLMemory. The single numeric argument is a
// cudaGraphicsResource_t handle (passed as a pointer-sized integer by
// MappedGLMemory::New), NOT a byteLength like the other Memory subclasses --
// the previous error message was a copy/paste error from those classes.
MappedGLMemory::MappedGLMemory(CallbackArgs const& args)
  : EnvLocalObjectWrap<MappedGLMemory>(args), Memory(args) {
  NODE_CUDA_EXPECT(args.IsConstructCall(), "MappedGLMemory constructor requires 'new'", args.Env());
  NODE_CUDA_EXPECT(args.Length() == 0 || (args.Length() == 1 && args[0].IsNumber()),
                   "MappedGLMemory constructor requires a numeric cudaGraphicsResource_t handle",
                   args.Env());
  if (args.Length() == 1 && args[0].IsNumber()) {
    cudaGraphicsResource_t resource = args[0];
    // Query the device pointer and byte extent of the mapped GL resource, then
    // report the mapped bytes to V8's external-memory accounting.
    NODE_CUDA_TRY(cudaGraphicsResourceGetMappedPointer(&data_, &size_, resource), Env());
    Napi::MemoryManagement::AdjustExternalMemory(Env(), size_);
  }
}
// Wrap a cudaGraphicsResource_t in a new MappedGLMemory instance. The handle
// is smuggled through JS as a pointer-sized integer and reinterpreted by the
// constructor.
MappedGLMemory::wrapper_t MappedGLMemory::New(Napi::Env const& env,
                                              cudaGraphicsResource_t resource) {
  return EnvLocalObjectWrap<MappedGLMemory>::New(env, reinterpret_cast<std::ptrdiff_t>(resource));
}
// GC finalizer. The device pointer belongs to the mapped GL resource and is
// unmapped elsewhere, so nothing is freed here. However, the constructor
// reported `size_` bytes via AdjustExternalMemory, so balance the accounting
// on finalize -- the sibling Memory subclasses (Pinned/Managed/Device) all do
// the same; previously this class never decremented, leaking external-memory
// pressure in V8.
void MappedGLMemory::Finalize(Napi::Env env) {
  if (data_ != nullptr && size_ > 0) {
    Napi::MemoryManagement::AdjustExternalMemory(env, -size_);
  }
  data_ = nullptr;
  size_ = 0;
}
// Copy the [begin, end) byte range of the mapped GL memory into a freshly
// allocated DeviceMemory and return it. Bounds default to [0, byteLength) and
// are clamped by clamp_slice_args.
Napi::Value MappedGLMemory::slice(Napi::CallbackInfo const& info) {
  CallbackArgs args{info};
  int64_t begin = args.Length() > 0 ? args[0] : 0;
  int64_t end   = args.Length() > 1 ? args[1] : size_;
  std::tie(begin, end) = clamp_slice_args(size_, begin, end);
  auto const length = end - begin;
  auto copy         = DeviceMemory::New(info.Env(), length);
  if (length > 0) {
    NODE_CUDA_TRY(cudaMemcpy(copy->base(), base() + begin, length, cudaMemcpyDefault));
  }
  return copy;
}
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cuda/src
|
rapidsai_public_repos/node/modules/cuda/src/memory/pinned.cpp
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "node_cuda/memory.hpp"
#include "node_cuda/utilities/napi_to_cpp.hpp"
namespace nv {
// Define the JS `PinnedMemory` class: page-locked host memory with
// byteLength/device/ptr accessors and a slice() method.
Napi::Function PinnedMemory::Init(Napi::Env const& env, Napi::Object exports) {
  return DefineClass(
    env,
    "PinnedMemory",
    {
      InstanceValue(Napi::Symbol::WellKnown(env, "toStringTag"),
                    Napi::String::New(env, "PinnedMemory"),
                    napi_enumerable),
      InstanceAccessor("byteLength", &PinnedMemory::size, nullptr, napi_enumerable),
      InstanceAccessor("device", &PinnedMemory::device, nullptr, napi_enumerable),
      InstanceAccessor("ptr", &PinnedMemory::ptr, nullptr, napi_enumerable),
      InstanceMethod("slice", &PinnedMemory::slice),
    });
}
// Construct a PinnedMemory from JS with an optional numeric byteLength.
// Allocates page-locked host memory via cudaMallocHost and reports the
// allocation to V8's external-memory accounting.
PinnedMemory::PinnedMemory(CallbackArgs const& args)
  : EnvLocalObjectWrap<PinnedMemory>(args), Memory(args) {
  NODE_CUDA_EXPECT(args.IsConstructCall(), "PinnedMemory constructor requires 'new'", args.Env());
  NODE_CUDA_EXPECT(args.Length() == 0 || (args.Length() == 1 && args[0].IsNumber()),
                   "PinnedMemory constructor requires a numeric byteLength argument",
                   args.Env());
  // NOTE(review): args[0] is read even in the zero-argument case; presumably
  // CallbackArgs converts a missing argument to 0 -- confirm.
  size_ = args[0];
  if (size_ > 0) {
    NODE_CUDA_TRY(cudaMallocHost(&data_, size_));
    Napi::MemoryManagement::AdjustExternalMemory(Env(), size_);
  }
}
// Allocate a new PinnedMemory of `size` bytes from C++.
PinnedMemory::wrapper_t PinnedMemory::New(Napi::Env const& env, size_t size) {
  return EnvLocalObjectWrap<PinnedMemory>::New(env, size);
}
// GC finalizer: free the pinned host allocation (if any) and, on success,
// remove its bytes from V8's external-memory accounting.
void PinnedMemory::Finalize(Napi::Env env) {
  bool const owns_allocation = (data_ != nullptr) && (size_ > 0);
  if (owns_allocation && cudaFreeHost(data_) == cudaSuccess) {
    Napi::MemoryManagement::AdjustExternalMemory(env, -size_);
  }
  data_ = nullptr;
  size_ = 0;
}
// Copy the [begin, end) byte range into a new PinnedMemory and return it.
// Bounds default to [0, byteLength) and are clamped by clamp_slice_args.
Napi::Value PinnedMemory::slice(Napi::CallbackInfo const& info) {
  CallbackArgs args{info};
  int64_t begin = args.Length() > 0 ? args[0] : 0;
  int64_t end   = args.Length() > 1 ? args[1] : size_;
  std::tie(begin, end) = clamp_slice_args(size_, begin, end);
  auto const length = end - begin;
  auto copy         = PinnedMemory::New(info.Env(), length);
  if (length > 0) {
    NODE_CUDA_TRY(cudaMemcpy(copy->base(), base() + begin, length, cudaMemcpyDefault));
  }
  return copy;
}
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cuda/src
|
rapidsai_public_repos/node/modules/cuda/src/memory/managed.cpp
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "node_cuda/memory.hpp"
#include "node_cuda/utilities/napi_to_cpp.hpp"
namespace nv {
// Define the JS `ManagedMemory` class: CUDA unified (managed) memory with
// byteLength/device/ptr accessors and a slice() method.
Napi::Function ManagedMemory::Init(Napi::Env const& env, Napi::Object exports) {
  return DefineClass(
    env,
    "ManagedMemory",
    {
      InstanceValue(Napi::Symbol::WellKnown(env, "toStringTag"),
                    Napi::String::New(env, "ManagedMemory"),
                    napi_enumerable),
      InstanceAccessor("byteLength", &ManagedMemory::size, nullptr, napi_enumerable),
      InstanceAccessor("device", &ManagedMemory::device, nullptr, napi_enumerable),
      InstanceAccessor("ptr", &ManagedMemory::ptr, nullptr, napi_enumerable),
      InstanceMethod("slice", &ManagedMemory::slice),
    });
}
// Construct a ManagedMemory from JS with an optional numeric byteLength.
// Allocates unified memory via cudaMallocManaged and reports the allocation
// to V8's external-memory accounting.
ManagedMemory::ManagedMemory(CallbackArgs const& args)
  : EnvLocalObjectWrap<ManagedMemory>(args), Memory(args) {
  NODE_CUDA_EXPECT(args.IsConstructCall(), "ManagedMemory constructor requires 'new'", args.Env());
  NODE_CUDA_EXPECT(args.Length() == 0 || (args.Length() == 1 && args[0].IsNumber()),
                   "ManagedMemory constructor requires a numeric byteLength argument",
                   args.Env());
  // NOTE(review): args[0] is read even in the zero-argument case; presumably
  // CallbackArgs converts a missing argument to 0 -- confirm.
  size_ = args[0];
  if (size_ > 0) {
    NODE_CUDA_TRY(cudaMallocManaged(&data_, size_));
    Napi::MemoryManagement::AdjustExternalMemory(Env(), size_);
  }
}
// Allocate a new ManagedMemory of `size` bytes from C++.
ManagedMemory::wrapper_t ManagedMemory::New(Napi::Env const& env, size_t size) {
  return EnvLocalObjectWrap<ManagedMemory>::New(env, size);
}
// GC finalizer: free the managed allocation (if any) and, on success, remove
// its bytes from V8's external-memory accounting.
void ManagedMemory::Finalize(Napi::Env env) {
  bool const owns_allocation = (data_ != nullptr) && (size_ > 0);
  if (owns_allocation && cudaFree(data_) == cudaSuccess) {
    Napi::MemoryManagement::AdjustExternalMemory(env, -size_);
  }
  data_ = nullptr;
  size_ = 0;
}
// Copy the [begin, end) byte range into a new ManagedMemory and return it.
// Bounds default to [0, byteLength) and are clamped by clamp_slice_args.
Napi::Value ManagedMemory::slice(Napi::CallbackInfo const& info) {
  CallbackArgs args{info};
  int64_t begin = args.Length() > 0 ? args[0] : 0;
  int64_t end   = args.Length() > 1 ? args[1] : size_;
  std::tie(begin, end) = clamp_slice_args(size_, begin, end);
  auto const length = end - begin;
  auto copy         = ManagedMemory::New(info.Env(), length);
  if (length > 0) {
    NODE_CUDA_TRY(cudaMemcpy(copy->base(), base() + begin, length, cudaMemcpyDefault));
  }
  return copy;
}
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cuda/src
|
rapidsai_public_repos/node/modules/cuda/src/memory/ipc.cpp
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "node_cuda/memory.hpp"
#include "node_cuda/utilities/napi_to_cpp.hpp"
namespace nv {
// Define the JS `IPCMemory` class: device memory opened from another process's
// CUDA IPC handle, with byteLength/device/ptr accessors plus slice() and an
// explicit close() method.
Napi::Function IpcMemory::Init(Napi::Env const& env, Napi::Object exports) {
  return DefineClass(env,
                     "IPCMemory",
                     {
                       InstanceValue(Napi::Symbol::WellKnown(env, "toStringTag"),
                                     Napi::String::New(env, "IPCMemory"),
                                     napi_enumerable),
                       InstanceAccessor("byteLength", &IpcMemory::size, nullptr, napi_enumerable),
                       InstanceAccessor("device", &IpcMemory::device, nullptr, napi_enumerable),
                       InstanceAccessor("ptr", &IpcMemory::ptr, nullptr, napi_enumerable),
                       InstanceMethod("slice", &IpcMemory::slice),
                       InstanceMethod("close", &IpcMemory::close),
                     });
}
// Construct an IpcMemory by opening an exported CUDA IPC handle. With one
// argument (a cudaIpcMemHandle_t), the handle is opened with lazy peer access
// and the mapping's byte extent is queried from the driver, then reported to
// V8's external-memory accounting.
// NOTE(review): unlike the sibling Memory classes this constructor performs no
// IsConstructCall()/argument-type validation -- presumably because it is only
// reached via IpcMemory::New with an External argument; confirm.
IpcMemory::IpcMemory(CallbackArgs const& args) : EnvLocalObjectWrap<IpcMemory>(args), Memory(args) {
  if (args.Length() == 1) {
    cudaIpcMemHandle_t const handle = args[0];
    NODE_CUDA_TRY(cudaIpcOpenMemHandle(&data_, handle, cudaIpcMemLazyEnablePeerAccess), Env());
    NODE_CU_TRY(cuMemGetAddressRange(nullptr, &size_, ptr()), Env());
    Napi::MemoryManagement::AdjustExternalMemory(Env(), size_);
  }
}
// Open an IpcMemory from C++ for the given IPC handle; the handle is passed to
// the constructor as a Napi::External pointer.
IpcMemory::wrapper_t IpcMemory::New(Napi::Env const& env, cudaIpcMemHandle_t const& handle) {
  return EnvLocalObjectWrap<IpcMemory>::New(
    env, {Napi::External<cudaIpcMemHandle_t>::New(env, const_cast<cudaIpcMemHandle_t*>(&handle))});
}
// GC finalizer: ensure the IPC mapping is closed.
void IpcMemory::Finalize(Napi::Env env) { close(env); }

// Close using this wrapper's own environment.
void IpcMemory::close() { close(Env()); }

// Close the IPC mapping (if still open) and, on success, remove its bytes
// from V8's external-memory accounting. Safe to call more than once.
void IpcMemory::close(Napi::Env const& env) {
  bool const is_open = (data_ != nullptr) && (size_ > 0);
  if (is_open && cudaIpcCloseMemHandle(data_) == cudaSuccess) {
    Napi::MemoryManagement::AdjustExternalMemory(env, -size_);
  }
  data_ = nullptr;
  size_ = 0;
}

// JS-callable overload: `ipcMemory.close()`.
void IpcMemory::close(Napi::CallbackInfo const& info) { close(info.Env()); }
// Copy the [begin, end) byte range of the IPC-mapped memory into a new
// DeviceMemory and return it. Bounds default to [0, byteLength) and are
// clamped by clamp_slice_args.
Napi::Value IpcMemory::slice(Napi::CallbackInfo const& info) {
  CallbackArgs args{info};
  int64_t begin = args.Length() > 0 ? args[0] : 0;
  int64_t end   = args.Length() > 1 ? args[1] : size_;
  std::tie(begin, end) = clamp_slice_args(size_, begin, end);
  auto const length = end - begin;
  auto copy         = DeviceMemory::New(info.Env(), length);
  if (length > 0) {
    NODE_CUDA_TRY(cudaMemcpy(copy->base(), base() + begin, length, cudaMemcpyDefault));
  }
  return copy;
}
// Define the JS `IpcHandle` class: an exported CUDA IPC handle together with
// the DeviceMemory it was exported from (buffer/device/handle accessors).
// Fix: removed the stray ';' that followed the function body (an empty
// declaration at namespace scope, inconsistent with the other Init functions
// and flagged by -Wextra-semi / -pedantic).
Napi::Function IpcHandle::Init(Napi::Env const& env, Napi::Object exports) {
  return DefineClass(env,
                     "IpcHandle",
                     {
                       InstanceValue(Napi::Symbol::WellKnown(env, "toStringTag"),
                                     Napi::String::New(env, "IpcHandle"),
                                     napi_enumerable),
                       InstanceAccessor("buffer", &IpcHandle::buffer, nullptr, napi_enumerable),
                       InstanceAccessor("device", &IpcHandle::device, nullptr, napi_enumerable),
                       InstanceAccessor("handle", &IpcHandle::handle, nullptr, napi_enumerable),
                     });
}
// Construct an IpcHandle around a DeviceMemory (args[0]): retains the
// DeviceMemory and its exported IPC handle as persistent references so both
// outlive this call's handle scope.
IpcHandle::IpcHandle(CallbackArgs const& args) : EnvLocalObjectWrap<IpcHandle>(args) {
  DeviceMemory::wrapper_t dmem = args[0].ToObject();
  dmem_   = Napi::Persistent(dmem);
  handle_ = Napi::Persistent(dmem->getIpcMemHandle());
}
// Create an IpcHandle from C++ for an existing DeviceMemory wrapper.
IpcHandle::wrapper_t IpcHandle::New(Napi::Env const& env, DeviceMemory const& dmem) {
  return EnvLocalObjectWrap<IpcHandle>::New(env, dmem.Value());
}
// Accessor: the DeviceMemory this handle was exported from.
Napi::Value IpcHandle::buffer(Napi::CallbackInfo const& info) { return dmem_.Value(); }
// Accessor: the device ordinal (converted via the C++ device() overload).
Napi::Value IpcHandle::device(Napi::CallbackInfo const& info) { return CPPToNapi(info)(device()); }
// Accessor: the raw IPC handle bytes (Uint8Array of CUDA_IPC_HANDLE_SIZE).
Napi::Value IpcHandle::handle(Napi::CallbackInfo const& info) { return handle_.Value(); }
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cuda/src
|
rapidsai_public_repos/node/modules/cuda/src/memory/device.cpp
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "node_cuda/memory.hpp"
#include "node_cuda/utilities/napi_to_cpp.hpp"
namespace nv {
// Define the JS `DeviceMemory` class: plain device (cudaMalloc) memory with
// byteLength/device/ptr accessors and a slice() method.
Napi::Function DeviceMemory::Init(Napi::Env const& env, Napi::Object exports) {
  return DefineClass(
    env,
    "DeviceMemory",
    {
      InstanceValue(Napi::Symbol::WellKnown(env, "toStringTag"),
                    Napi::String::New(env, "DeviceMemory"),
                    napi_enumerable),
      InstanceAccessor("byteLength", &DeviceMemory::size, nullptr, napi_enumerable),
      InstanceAccessor("device", &DeviceMemory::device, nullptr, napi_enumerable),
      InstanceAccessor("ptr", &DeviceMemory::ptr, nullptr, napi_enumerable),
      InstanceMethod("slice", &DeviceMemory::slice),
    });
}
// Construct a DeviceMemory from JS with an optional numeric byteLength.
// Allocates device memory via cudaMalloc and reports the allocation to V8's
// external-memory accounting.
DeviceMemory::DeviceMemory(CallbackArgs const& args)
  : EnvLocalObjectWrap<DeviceMemory>(args), Memory(args) {
  NODE_CUDA_EXPECT(args.IsConstructCall(), "DeviceMemory constructor requires 'new'", args.Env());
  NODE_CUDA_EXPECT(args.Length() == 0 || (args.Length() == 1 && args[0].IsNumber()),
                   "DeviceMemory constructor requires a numeric byteLength argument",
                   args.Env());
  // NOTE(review): args[0] is read even in the zero-argument case; presumably
  // CallbackArgs converts a missing argument to 0 -- confirm.
  size_ = args[0];
  if (size_ > 0) {
    NODE_CUDA_TRY(cudaMalloc(&data_, size_));
    Napi::MemoryManagement::AdjustExternalMemory(Env(), size_);
  }
}
// Allocate a new DeviceMemory of `size` bytes from C++.
DeviceMemory::wrapper_t DeviceMemory::New(Napi::Env const& env, std::size_t size) {
  return EnvLocalObjectWrap<DeviceMemory>::New(env, size);
}
// GC finalizer: free the device allocation (if any), update V8's
// external-memory accounting on success, and drop the cached IPC handle.
void DeviceMemory::Finalize(Napi::Env env) {
  bool const owns_allocation = (data_ != nullptr) && (size_ > 0);
  if (owns_allocation && cudaFree(data_) == cudaSuccess) {
    Napi::MemoryManagement::AdjustExternalMemory(env, -size_);
  }
  ipcMemHandle_.Reset();
  data_ = nullptr;
  size_ = 0;
}
// Lazily export a CUDA IPC handle for this allocation as a Uint8Array of
// CUDA_IPC_HANDLE_SIZE bytes. The array is cached in a persistent reference,
// so repeated calls return the same value.
Napi::Uint8Array DeviceMemory::getIpcMemHandle() {
  Napi::Env env = Env();
  if (data_ != nullptr && ipcMemHandle_.IsEmpty()) {
    auto ary = Napi::Uint8Array::New(env, CUDA_IPC_HANDLE_SIZE);
    // The driver writes the opaque handle bytes directly into the JS array.
    auto ptr = reinterpret_cast<cudaIpcMemHandle_t*>(ary.Data());
    NODE_CUDA_TRY(CUDAAPI::cudaIpcGetMemHandle(ptr, data_), env);
    ipcMemHandle_ = Napi::Persistent(ary);
  }
  return ipcMemHandle_.Value();
}
// Copy the [begin, end) byte range into a new DeviceMemory and return it.
// Bounds default to [0, byteLength) and are clamped by clamp_slice_args.
Napi::Value DeviceMemory::slice(Napi::CallbackInfo const& info) {
  CallbackArgs args{info};
  int64_t begin = args.Length() > 0 ? args[0] : 0;
  int64_t end   = args.Length() > 1 ? args[1] : size_;
  std::tie(begin, end) = clamp_slice_args(size_, begin, end);
  auto const length = end - begin;
  auto copy         = DeviceMemory::New(info.Env(), length);
  if (length > 0) {
    NODE_CUDA_TRY(cudaMemcpy(copy->base(), base() + begin, length, cudaMemcpyDefault));
  }
  return copy;
}
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cuda/src
|
rapidsai_public_repos/node/modules/cuda/src/memory/memory.cpp
|
// Copyright (c) 2020-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "node_cuda/memory.hpp"
#include "node_cuda/utilities/napi_to_cpp.hpp"
#include <nv_node/macros.hpp>
namespace nv {
/**
* @brief Check whether an Napi value is an instance of `Memory`.
*
* @param val The Napi::Value to test
* @return true if the value is a `Memory`
* @return false if the value is not a `Memory`
*/
bool Memory::IsInstance(Napi::Value const& value) {
  return IpcMemory::IsInstance(value) || DeviceMemory::IsInstance(value) ||
         ManagedMemory::IsInstance(value) || PinnedMemory::IsInstance(value) ||
         MappedGLMemory::IsInstance(value);
}
namespace {
// JS binding for cudaMemset / cudaMemsetAsync.
// args: [target (device span), value (int32), count (bytes), stream?].
// Fix: pass args.Env() to NODE_CUDA_TRY so a CUDA failure throws into the
// caller's JS environment -- consistent with cudaMemcpyNapi below, which
// already does this.
void cudaMemsetNapi(CallbackArgs const& args) {
  Span<char> target = args[0];
  int32_t value     = args[1];
  size_t count      = args[2];
  if (args.Length() == 3) {
    NODE_CUDA_TRY(cudaMemset(target.data(), value, count), args.Env());
  } else {
    cudaStream_t stream = args[3];
    NODE_CUDA_TRY(cudaMemsetAsync(target.data(), value, count, stream), args.Env());
  }
}
// JS binding for cudaMemcpy / cudaMemcpyAsync.
// args: [target (span), source (span), count (bytes), stream?].
void cudaMemcpyNapi(CallbackArgs const& args) {
  Span<char> dst = args[0];
  Span<char> src = args[1];
  size_t nbytes  = args[2];
  bool const synchronous = (args.Length() == 3);
  if (synchronous) {
    NODE_CUDA_TRY(cudaMemcpy(dst.data(), src.data(), nbytes, cudaMemcpyDefault), args.Env());
  } else {
    cudaStream_t stream = args[3];
    NODE_CUDA_TRY(cudaMemcpyAsync(dst.data(), src.data(), nbytes, cudaMemcpyDefault, stream),
                  args.Env());
  }
}
// CUresult cudaMemGetInfo(size_t * free, size_t * total);
// JS binding for cudaMemGetInfo: returns an object with `free` and `total`
// device memory in bytes.
Napi::Value cudaMemGetInfoNapi(CallbackArgs const& args) {
  size_t free, total;
  NODE_CUDA_TRY(CUDARTAPI::cudaMemGetInfo(&free, &total), args.Env());
  return CPPToNapi(args)(std::vector<size_t>{free, total},
                         std::vector<std::string>{"free", "total"});
}
// JS binding for cudaPointerGetAttributes: returns {type, device, hptr, dptr}
// for the pointer in args[0].
Napi::Value cudaPointerGetAttributesNapi(CallbackArgs const& args) {
  auto env   = args.Env();
  void* dptr = args[0];
  CUDARTAPI::cudaPointerAttributes attrs{};
  NODE_CUDA_TRY(CUDARTAPI::cudaPointerGetAttributes(&attrs, dptr), env);
  auto obj = Napi::Object::New(env);
  obj.Set("type", attrs.type);
  obj.Set("device", attrs.device);
  // Pointers are surfaced to JS as unsigned 64-bit integers.
  obj.Set("hptr", reinterpret_cast<uint64_t>(attrs.hostPointer));
  obj.Set("dptr", reinterpret_cast<uint64_t>(attrs.devicePointer));
  return obj;
}
// CUresult cuPointerGetAttribute(void *data, CUpointer_attribute attribute,
// CUdeviceptr ptr);
// JS binding for cuPointerGetAttribute: dispatches on the attribute enum and
// converts the driver's result to an appropriate JS value.
// Fix: SYNC_MEMOPS / IS_MANAGED previously read into a C++ `bool` (1 byte);
// per the CUDA driver API these attributes are written as a 32-bit value, so
// the driver could overwrite adjacent stack. Read into `unsigned int` and
// convert to bool afterwards.
Napi::Value cuPointerGetAttributeNapi(CallbackArgs const& args) {
  auto env                      = args.Env();
  CUdeviceptr dptr              = args[0];
  CUpointer_attribute attribute = args[1];
  switch (attribute) {
    case CU_POINTER_ATTRIBUTE_SYNC_MEMOPS:
    case CU_POINTER_ATTRIBUTE_IS_MANAGED: {
      unsigned int data{};
      NODE_CU_TRY(cuPointerGetAttribute(&data, attribute, dptr), env);
      return CPPToNapi(args)(static_cast<bool>(data));
    }
    case CU_POINTER_ATTRIBUTE_CONTEXT: {
      CUcontext data;
      NODE_CU_TRY(cuPointerGetAttribute(&data, attribute, dptr), env);
      return CPPToNapi(args)(data);
    }
    case CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL:
    case CU_POINTER_ATTRIBUTE_MEMORY_TYPE: {
      uint32_t data;
      NODE_CU_TRY(cuPointerGetAttribute(&data, attribute, dptr), env);
      return CPPToNapi(args)(data);
    }
    case CU_POINTER_ATTRIBUTE_BUFFER_ID: {
      uint64_t data;
      NODE_CU_TRY(cuPointerGetAttribute(&data, attribute, dptr), env);
      return CPPToNapi(args)(data);
    }
    case CU_POINTER_ATTRIBUTE_HOST_POINTER: {
      // Return the host-mapped view of the allocation, trimmed so the span
      // starts at `dptr`'s offset within its owning base allocation.
      size_t size;
      CUdeviceptr base;
      char* data{nullptr};
      NODE_CU_TRY(cuPointerGetAttribute(&data, attribute, dptr), env);
      NODE_CU_TRY(cuMemGetAddressRange(&base, &size, dptr), env);
      return CPPToNapi(args)({data, size - (dptr - base)});
    }
    // todo?
    case CU_POINTER_ATTRIBUTE_P2P_TOKENS: break;
    default: NODE_CUDA_THROW(cudaErrorNotSupported, env);
  }
  return env.Undefined();
}
} // namespace
namespace memory {
// Registers the memory-related bindings: runtime (cudart) entry points on the
// `runtime` object, driver (cu*) entry points on the `driver` object, and the
// `PointerAttributes` enum map consumed by `cuPointerGetAttribute` callers.
Napi::Object initModule(Napi::Env const& env,
                        Napi::Object exports,
                        Napi::Object driver,
                        Napi::Object runtime) {
  // nv::PinnedMemory::Init(env, exports);
  // nv::DeviceMemory::Init(env, exports);
  // nv::ManagedMemory::Init(env, exports);
  // nv::IpcMemory::Init(env, exports);
  // nv::IpcHandle::Init(env, exports);
  // nv::MappedGLMemory::Init(env, exports);
  EXPORT_FUNC(env, runtime, "cudaMemset", cudaMemsetNapi);
  EXPORT_FUNC(env, runtime, "cudaMemcpy", cudaMemcpyNapi);
  EXPORT_FUNC(env, runtime, "cudaMemGetInfo", cudaMemGetInfoNapi);
  EXPORT_FUNC(env, runtime, "cudaPointerGetAttributes", cudaPointerGetAttributesNapi);
  EXPORT_FUNC(env, driver, "cuPointerGetAttribute", cuPointerGetAttributeNapi);
  // Mirror of the CUpointer_attribute enum, exposed as driver.PointerAttributes.
  auto PointerAttributes = Napi::Object::New(env);
  EXPORT_ENUM(env, PointerAttributes, "CONTEXT", CU_POINTER_ATTRIBUTE_CONTEXT);
  EXPORT_ENUM(env, PointerAttributes, "MEMORY_TYPE", CU_POINTER_ATTRIBUTE_MEMORY_TYPE);
  EXPORT_ENUM(env, PointerAttributes, "DEVICE_POINTER", CU_POINTER_ATTRIBUTE_DEVICE_POINTER);
  EXPORT_ENUM(env, PointerAttributes, "HOST_POINTER", CU_POINTER_ATTRIBUTE_HOST_POINTER);
  // EXPORT_ENUM(env, PointerAttributes, "P2P_TOKENS", CU_POINTER_ATTRIBUTE_P2P_TOKENS);
  EXPORT_ENUM(env, PointerAttributes, "SYNC_MEMOPS", CU_POINTER_ATTRIBUTE_SYNC_MEMOPS);
  EXPORT_ENUM(env, PointerAttributes, "BUFFER_ID", CU_POINTER_ATTRIBUTE_BUFFER_ID);
  EXPORT_ENUM(env, PointerAttributes, "IS_MANAGED", CU_POINTER_ATTRIBUTE_IS_MANAGED);
  EXPORT_ENUM(env, PointerAttributes, "DEVICE_ORDINAL", CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL);
  EXPORT_PROP(driver, "PointerAttributes", PointerAttributes);
  return exports;
}
} // namespace memory
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cuda/src
|
rapidsai_public_repos/node/modules/cuda/src/node_cuda/memory.hpp
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "node_cuda/utilities/cpp_to_napi.hpp"
#include <nv_node/objectwrap.hpp>
#include <nv_node/utilities/args.hpp>
#include <cuda_runtime_api.h>
#include <napi.h>
#include <cstdint>
#include <tuple>
namespace nv {
/**
* @brief Base class for an owning wrapper around a memory allocation.
*
*/
struct Memory {
  /**
   * @brief Construct a new Memory instance from JavaScript.
   *
   * @param args The JavaScript arguments list wrapped in a conversion helper.
   */
  Memory(Napi::CallbackInfo const& args) {}

  // Raw pointer to the allocation (nullptr until a subclass allocates).
  inline void* data() const { return data_; }
  // Size of the allocation in bytes.
  inline size_t size() const { return size_; }
  // Ordinal of the CUDA device associated with this allocation.
  inline int32_t device() const { return device_id_; }
  // Allocation start as a byte pointer, convenient for pointer arithmetic.
  inline uint8_t* base() const { return reinterpret_cast<uint8_t*>(data_); }
  // Allocation start as an integer address.
  inline uintptr_t ptr() const { return reinterpret_cast<uintptr_t>(data_); }

  /**
   * @brief Check whether an Napi value is an instance of `Memory`.
   *
   * @param val The Napi::Value to test
   * @return true if the value is a `Memory`
   * @return false if the value is not a `Memory`
   */
  static bool IsInstance(Napi::Value const& value);

 protected:
  Napi::Value device(Napi::CallbackInfo const& info) { return CPPToNapi(info)(device()); }
  Napi::Value ptr(Napi::CallbackInfo const& info) { return CPPToNapi(info)(ptr()); }
  Napi::Value size(Napi::CallbackInfo const& info) { return CPPToNapi(info)(size_); }

  /**
   * @brief Normalize `slice(begin, end)` arguments the way
   * `Array.prototype.slice` does.
   *
   * Negative indices wrap around the end of the range, results are clamped
   * to `[0, len]`, and the pair is ordered so `first <= second`.
   *
   * @param len Length of the sliceable range.
   * @param lhs Requested begin index (may be negative).
   * @param rhs Requested end index (may be negative).
   * @return std::pair<int64_t, int64_t> The normalized (begin, end) pair.
   */
  inline std::pair<int64_t, int64_t> clamp_slice_args(int64_t len, int64_t lhs, int64_t rhs) {
    // Guard the modulo below: `lhs % len` with len == 0 is undefined
    // behavior (division by zero). An empty range can only yield (0, 0).
    if (len <= 0) { return std::make_pair(int64_t{0}, int64_t{0}); }
    // wrap around on negative start/end positions
    if (lhs < 0) { lhs = ((lhs % len) + len) % len; }
    if (rhs < 0) { rhs = ((rhs % len) + len) % len; }
    // enforce lhs <= rhs and rhs <= count
    return rhs < lhs ? std::make_pair(rhs, lhs) : std::make_pair(lhs, rhs > len ? len : rhs);
  }

  void* data_{nullptr};   ///< Pointer to memory allocation
  size_t size_{0};        ///< Requested size of the memory allocation
  int32_t device_id_{0};  ///< Ordinal of the device the allocation belongs to
};
/**
* @brief An owning wrapper around a pinned host memory allocation.
*
*/
struct PinnedMemory : public EnvLocalObjectWrap<PinnedMemory>, public Memory {
  // Disambiguate: use the wrapper's IsInstance, not Memory::IsInstance.
  using EnvLocalObjectWrap<PinnedMemory>::IsInstance;
  /**
   * @brief Initialize and export the PinnedMemory JavaScript constructor and prototype.
   *
   * @param env The active JavaScript environment.
   * @param exports The exports object to decorate.
   * @return Napi::Function The PinnedMemory constructor function.
   */
  static Napi::Function Init(Napi::Env const& env, Napi::Object exports);
  /**
   * @brief Construct a new PinnedMemory instance from C++.
   *
   * @param size Size in bytes to allocate in pinned host memory.
   */
  static wrapper_t New(Napi::Env const& env, size_t size);
  /**
   * @brief Constructs a new PinnedMemory instance.
   *
   * @param args The JavaScript arguments list wrapped in a conversion helper.
   */
  PinnedMemory(CallbackArgs const& args);
  /**
   * @brief Destructor called when the JavaScript VM garbage collects this PinnedMemory instance.
   *
   * @param env The active JavaScript environment.
   */
  void Finalize(Napi::Env env) override;

 private:
  // JS `slice(begin, end)` — exposed on the prototype by Init.
  Napi::Value slice(Napi::CallbackInfo const& info);
};
/**
* @brief An owning wrapper around a device memory allocation.
*
*/
struct DeviceMemory : public EnvLocalObjectWrap<DeviceMemory>, public Memory {
  // Disambiguate: use the wrapper's IsInstance, not Memory::IsInstance.
  using EnvLocalObjectWrap<DeviceMemory>::IsInstance;
  /**
   * @brief Initialize and export the DeviceMemory JavaScript constructor and prototype.
   *
   * @param env The active JavaScript environment.
   * @param exports The exports object to decorate.
   * @return Napi::Function The DeviceMemory constructor function.
   */
  static Napi::Function Init(Napi::Env const& env, Napi::Object exports);
  /**
   * @brief Construct a new DeviceMemory instance from C++.
   *
   * @param size Size in bytes to allocate in device memory.
   */
  static wrapper_t New(Napi::Env const& env, std::size_t size);
  /**
   * @brief Constructs a new DeviceMemory instance.
   *
   * @param args The JavaScript arguments list wrapped in a conversion helper.
   */
  DeviceMemory(CallbackArgs const& args);
  // Copy constructor: shallow-copies the allocation pointer, size, device id,
  // and a new persistent reference to the IPC handle buffer.
  // NOTE(review): both instances end up holding the same data_ pointer —
  // presumably Finalize guards against double-free; verify before relying on
  // copies outliving the original.
  DeviceMemory(const nv::DeviceMemory& other)
    : EnvLocalObjectWrap<DeviceMemory>({other.Env(), {}}), Memory({other.Env(), {}}) {
    data_         = other.data_;
    size_         = other.size_;
    device_id_    = other.device_id_;
    ipcMemHandle_ = Napi::Persistent(other.ipcMemHandle_.Value());
  }
  /**
   * @brief Destructor called when the JavaScript VM garbage collects this DeviceMemory instance.
   *
   * @param env The active JavaScript environment.
   */
  void Finalize(Napi::Env env) override;
  // Returns the CUDA IPC memory handle for this allocation as a Uint8Array.
  Napi::Uint8Array getIpcMemHandle();

 private:
  // JS `slice(begin, end)` — exposed on the prototype by Init.
  Napi::Value slice(Napi::CallbackInfo const& info);
  // Persistent reference keeping the IPC handle bytes alive across GC.
  Napi::Reference<Napi::Uint8Array> ipcMemHandle_;
};
/**
* @brief An owning wrapper around a CUDA managed memory allocation.
*
*/
struct ManagedMemory : public EnvLocalObjectWrap<ManagedMemory>, public Memory {
  // Disambiguate: use the wrapper's IsInstance, not Memory::IsInstance.
  using EnvLocalObjectWrap<ManagedMemory>::IsInstance;
  /**
   * @brief Initialize and export the ManagedMemory JavaScript constructor and prototype.
   *
   * @param env The active JavaScript environment.
   * @param exports The exports object to decorate.
   * @return Napi::Function The ManagedMemory constructor function.
   */
  static Napi::Function Init(Napi::Env const& env, Napi::Object exports);
  /**
   * @brief Construct a new ManagedMemory instance from C++.
   *
   * @param size Size in bytes to allocate in CUDA managed memory.
   */
  static wrapper_t New(Napi::Env const& env, size_t size);
  /**
   * @brief Constructs a new ManagedMemory instance.
   *
   * @param args The JavaScript arguments list wrapped in a conversion helper.
   */
  ManagedMemory(CallbackArgs const& args);
  /**
   * @brief Destructor called when the JavaScript VM garbage collects this ManagedMemory instance.
   *
   * @param env The active JavaScript environment.
   */
  void Finalize(Napi::Env env) override;

 private:
  // JS `slice(begin, end)` — exposed on the prototype by Init.
  Napi::Value slice(Napi::CallbackInfo const& info);
};
/**
* @brief An owning wrapper around a CUDA device memory allocation shared by another process.
*
*/
struct IpcMemory : public EnvLocalObjectWrap<IpcMemory>, public Memory {
  // Disambiguate: use the wrapper's IsInstance, not Memory::IsInstance.
  using EnvLocalObjectWrap<IpcMemory>::IsInstance;
  /**
   * @brief Initialize and export the IPCMemory JavaScript constructor and prototype.
   *
   * @param env The active JavaScript environment.
   * @param exports The exports object to decorate.
   * @return Napi::Function The IpcMemory constructor function.
   */
  static Napi::Function Init(Napi::Env const& env, Napi::Object exports);
  /**
   * @brief Construct a new IPCMemory instance from C++.
   *
   * @param handle Handle to the device memory shared by another process.
   */
  static wrapper_t New(Napi::Env const& env, cudaIpcMemHandle_t const& handle);
  /**
   * @brief Constructs a new IPCMemory instance.
   *
   * @param args The JavaScript arguments list wrapped in a conversion helper.
   */
  IpcMemory(CallbackArgs const& args);
  /**
   * @brief Destructor called when the JavaScript VM garbage collects this IPCMemory instance.
   *
   * @param env The active JavaScript environment.
   */
  void Finalize(Napi::Env env) override;
  /**
   * @brief Close the underlying IPC memory handle, allowing the exporting process to free the
   * underlying device memory.
   *
   */
  void close();
  // Overload taking the environment explicitly (for error reporting).
  void close(Napi::Env const& env);

 private:
  // JS `close()` — exposed on the prototype by Init.
  void close(Napi::CallbackInfo const& info);
  // JS `slice(begin, end)` — exposed on the prototype by Init.
  Napi::Value slice(Napi::CallbackInfo const& info);
};
// Wrapper around a cudaIpcMemHandle_t exported from a DeviceMemory allocation,
// suitable for passing to another process.
struct IpcHandle : public EnvLocalObjectWrap<IpcHandle> {
  using EnvLocalObjectWrap<IpcHandle>::IsInstance;
  /**
   * @brief Initialize and export the IpcHandle JavaScript constructor and prototype.
   *
   * @param env The active JavaScript environment.
   * @param exports The exports object to decorate.
   * @return Napi::Function The IpcHandle constructor function.
   */
  static Napi::Function Init(Napi::Env const& env, Napi::Object exports);
  /**
   * @brief Construct a new IpcHandle instance from C++.
   *
   * @param dmem Device memory for which to create an IPC memory handle.
   */
  static wrapper_t New(Napi::Env const& env, DeviceMemory const& dmem);
  /**
   * @brief Constructs a new IpcHandle instance.
   *
   * @param args The JavaScript arguments list wrapped in a conversion helper.
   */
  IpcHandle(CallbackArgs const& args);
  // Device ordinal of the referenced DeviceMemory, or -1 if the reference
  // is empty.
  inline int32_t device() const {
    if (!dmem_.IsEmpty()) {  //
      return dmem_.Value()->device();
    }
    return -1;
  }
  // Reinterprets the referenced Uint8Array's bytes as a cudaIpcMemHandle_t.
  inline cudaIpcMemHandle_t* handle() const {
    return reinterpret_cast<cudaIpcMemHandle_t*>(handle_.Value().Data());
  }

 private:
  // Persistent reference to the DeviceMemory the handle was exported from.
  Napi::Reference<Wrapper<DeviceMemory>> dmem_;
  // Persistent reference to the Uint8Array holding the raw handle bytes.
  Napi::Reference<Napi::Uint8Array> handle_;
  Napi::Value buffer(Napi::CallbackInfo const& info);
  Napi::Value device(Napi::CallbackInfo const& info);
  Napi::Value handle(Napi::CallbackInfo const& info);
};
/**
* @brief An owning wrapper around a CUDA managed memory allocation.
*
*/
struct MappedGLMemory : public EnvLocalObjectWrap<MappedGLMemory>, public Memory {
  // Disambiguate: use the wrapper's IsInstance, not Memory::IsInstance.
  using EnvLocalObjectWrap<MappedGLMemory>::IsInstance;
  /**
   * @brief Initialize and export the MappedGLMemory JavaScript constructor and prototype.
   *
   * @param env The active JavaScript environment.
   * @param exports The exports object to decorate.
   * @return Napi::Function The MappedGLMemory constructor function.
   */
  static Napi::Function Init(Napi::Env const& env, Napi::Object exports);
  /**
   * @brief Construct a new MappedGLMemory instance from C++.
   *
   * @param resource The registered CUDA Graphics Resource for an OpenGL buffer.
   */
  static wrapper_t New(Napi::Env const& env, cudaGraphicsResource_t resource);
  /**
   * @brief Construct a new MappedGLMemory instance from JavaScript.
   *
   * @param args The JavaScript arguments list wrapped in a conversion helper.
   */
  MappedGLMemory(CallbackArgs const& args);
  /**
   * @brief Destructor called when the JavaScript VM garbage collects this MappedGLMemory instance.
   *
   * @param env The active JavaScript environment.
   */
  void Finalize(Napi::Env env) override;

 private:
  // JS `slice(begin, end)` — exposed on the prototype by Init.
  Napi::Value slice(Napi::CallbackInfo const& info);
};
} // namespace nv
namespace Napi {

// Napi::Value::From specializations so the nv memory wrappers can be passed
// anywhere node-addon-api expects a convertible value. Each one forwards to
// the wrapper_t conversion operator provided by EnvLocalObjectWrap.
template <>
inline Value Value::From(napi_env env, nv::DeviceMemory const& mem) {
  return mem.operator nv::DeviceMemory::wrapper_t();
}
template <>
inline Value Value::From(napi_env env, nv::ManagedMemory const& mem) {
  return mem.operator nv::ManagedMemory::wrapper_t();
}
template <>
inline Value Value::From(napi_env env, nv::PinnedMemory const& mem) {
  return mem.operator nv::PinnedMemory::wrapper_t();
}
template <>
inline Value Value::From(napi_env env, nv::IpcMemory const& mem) {
  return mem.operator nv::IpcMemory::wrapper_t();
}
template <>
inline Value Value::From(napi_env env, nv::IpcHandle const& mem) {
  return mem.operator nv::IpcHandle::wrapper_t();
}
template <>
inline Value Value::From(napi_env env, nv::MappedGLMemory const& mem) {
  return mem.operator nv::MappedGLMemory::wrapper_t();
}
}  // namespace Napi
| 0 |
rapidsai_public_repos/node/modules/cuda/src
|
rapidsai_public_repos/node/modules/cuda/src/node_cuda/device.hpp
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "node_cuda/utilities/error.hpp"
#include <nv_node/objectwrap.hpp>
#include <nv_node/utilities/args.hpp>
#include <cuda_runtime_api.h>
#include <napi.h>
#include <cstddef>
#include <cstdint>
namespace nv {
struct Device : public EnvLocalObjectWrap<Device> {
  /**
   * @brief Initialize the Device JavaScript constructor and prototype.
   *
   * @param env The active JavaScript environment.
   * @param exports The exports object to decorate.
   * @return Napi::Function The Device constructor function.
   */
  static Napi::Function Init(Napi::Env const& env, Napi::Object exports);
  /**
   * @brief Construct a new Device instance from C++.
   *
   * @param id The zero-based CUDA device ordinal.
   * @param flags Flags for the device's primary context.
   */
  static wrapper_t New(Napi::Env const& env,
                       int32_t id     = active_device_id(),
                       uint32_t flags = cudaDeviceScheduleAuto);
  /**
   * @brief Retrieve the id of the current CUDA device for this thread.
   *
   * @return int32_t The CUDA device id.
   */
  static int32_t active_device_id() {
    int32_t device{};
    NODE_CUDA_TRY(cudaGetDevice(&device));
    return device;
  }
  /**
   * @brief Retrieve the number of compute-capable CUDA devices.
   *
   * @return int32_t The number of compute-capable CUDA devices.
   */
  static int32_t get_num_devices() {
    int32_t count{};
    NODE_CUDA_TRY(cudaGetDeviceCount(&count));
    return count;
  }
  /**
   * @brief Run `do_work` with `new_device_id` as the active CUDA device,
   * restoring the previously-active device afterwards.
   *
   * The previous device is restored whether `do_work` returns normally or
   * throws; any exception is re-thrown to the caller.
   *
   * @param env The active JavaScript environment (for error reporting).
   * @param new_device_id Device ordinal to make active while working.
   * @param do_work Callable invoked with the device switched.
   */
  template <typename Function>
  static inline void call_in_context(Napi::Env const& env,
                                     int32_t new_device_id,
                                     Function do_work) {
    auto cur_device_id = active_device_id();
    auto change_device = [&](int32_t cur_id, int32_t new_id) {
      if (cur_id != new_id) {  //
        NODE_CUDA_TRY(cudaSetDevice(new_id), env);
      }
    };
    change_device(cur_device_id, new_device_id);
    try {
      do_work();
    } catch (...) {
      // Restore the previous device for *any* thrown type, not only
      // std::exception — otherwise a non-std exception would leave the
      // thread on the wrong device.
      change_device(new_device_id, cur_device_id);
      throw;
    }
    change_device(new_device_id, cur_device_id);
  }
  /**
   * @brief Construct a new Device instance from JavaScript.
   *
   * @param args The JavaScript arguments list wrapped in a conversion helper.
   */
  Device(CallbackArgs const& args);
  /**
   * @brief Initialize the Device instance created by either C++ or JavaScript.
   *
   * @param id The zero-based CUDA device id.
   * @param flags Flags for the device's primary context.
   */
  void Initialize(Napi::Env const& env,
                  int32_t id     = active_device_id(),
                  uint32_t flags = cudaDeviceScheduleAuto);
  /**
   * @brief Destroy all allocations and reset all state on the current
   * device in the current process. Resets the device with the specified
   * device flags.
   *
   * Explicitly destroys and cleans up all resources associated with the
   * current device in the current process. Any subsequent API call to
   * this device will reinitialize the device.
   *
   * Note that this function will reset the device immediately. It is the
   * caller's responsibility to ensure that the device is not being accessed
   * by any other host threads from the process when this function is called.
   *
   * @return Device const&
   */
  Device& reset();
  /**
   * @brief Set this device to be used for GPU executions.
   *
   * Sets this device as the current device for the calling host thread.
   *
   * Any device memory subsequently allocated from this host thread
   * will be physically resident on this device. Any host memory allocated
   * from this host thread will have its lifetime associated with this
   * device. Any streams or events created from this host thread will
   * be associated with this device. Any kernels launched from this host
   * thread will be executed on this device.
   *
   * This call may be made from any host thread, to any device, and at
   * any time. This function will do no synchronization with the previous
   * or new device, and should be considered a very low overhead call.
   *
   * @return Device const&
   */
  Device& activate();
  /**
   * @brief Wait for this compute device to finish.
   *
   * Blocks execution of further device calls until the device has completed
   * all preceding requested tasks.
   *
   * @throw an error if one of the preceding tasks has failed. If the
   * `cudaDeviceScheduleBlockingSync` flag was set for this device, the
   * host thread will block until the device has finished its work.
   *
   * @return Device const&
   */
  Device& synchronize(Napi::Env const& env);
  /**
   * @brief Get the flags for the device's primary context.
   *
   * @return uint32_t Flags for the device's primary context.
   */
  uint32_t get_flags();
  /**
   * @brief Set the flags for the device's primary context.
   *
   * @param new_flags New flags for the device's primary context.
   */
  void set_flags(Napi::Env const& env, uint32_t new_flags);
  /**
   * @brief Queries if a device may directly access a peer device's memory.
   *
   * If direct access of `peer` from this device is possible, then
   * access may be enabled on two specific contexts by calling
   * `enable_peer_access`.
   *
   * @param peer
   * @return bool
   */
  bool can_access_peer_device(Napi::Env const& env, Device const& peer) const;
  /**
   * @brief Enables direct access to memory allocations in a peer device.
   *
   * @param peer
   * @return Device const&
   */
  Device& enable_peer_access(Napi::Env const& env, Device const& peer);
  /**
   * @brief Disables direct access to memory allocations in a peer device and unregisters any
   * registered allocations.
   *
   * @param peer
   * @return Device const&
   */
  Device& disable_peer_access(Napi::Env const& env, Device const& peer);

  // Accessors for the cached device identity/properties.
  int32_t id() const { return id_; }
  cudaDeviceProp const& props() const { return props_; }
  std::string const& pci_bus_name() const { return pci_bus_name_; }

 private:
  int32_t id_{};              ///< The CUDA device identifer
  cudaDeviceProp props_;      ///< The CUDA device properties
  std::string pci_bus_name_;  ///< The CUDA device PCI bus id string

  // Convenience overload: run `do_work` on this instance's device.
  template <typename Function>
  inline void call_in_context(Function do_work) {
    Device::call_in_context(Env(), id(), do_work);
  }

  // JS-facing bindings registered on the constructor/prototype by Init.
  static Napi::Value get_num_devices(Napi::CallbackInfo const& info);
  static Napi::Value active_device_id(Napi::CallbackInfo const& info);
  Napi::Value reset(Napi::CallbackInfo const& info);
  Napi::Value activate(Napi::CallbackInfo const& info);
  Napi::Value get_flags(Napi::CallbackInfo const& info);
  Napi::Value set_flags(Napi::CallbackInfo const& info);
  Napi::Value synchronize(Napi::CallbackInfo const& info);
  Napi::Value get_properties(Napi::CallbackInfo const& info);
  Napi::Value can_access_peer_device(Napi::CallbackInfo const& info);
  Napi::Value enable_peer_access(Napi::CallbackInfo const& info);
  Napi::Value disable_peer_access(Napi::CallbackInfo const& info);
  Napi::Value call_in_device_context(Napi::CallbackInfo const& info);
  Napi::Value id(Napi::CallbackInfo const& info);
  Napi::Value pci_bus_name(Napi::CallbackInfo const& info);
};
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cuda/src
|
rapidsai_public_repos/node/modules/cuda/src/node_cuda/array.hpp
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <nv_node/objectwrap.hpp>
#include <nv_node/utilities/args.hpp>
#include <cuda_runtime_api.h>
#include <napi.h>
namespace nv {
enum class array_type : uint8_t { CUDA = 0, IPC = 1, GL = 2 };
struct CUDAArray : public EnvLocalObjectWrap<CUDAArray> {
  /**
   * @brief Initialize and export the CUDAArray JavaScript constructor and prototype.
   *
   * @param env The active JavaScript environment.
   * @param exports The exports object to decorate.
   * @return Napi::Function The CUDAArray constructor function.
   */
  static Napi::Function Init(Napi::Env const& env, Napi::Object exports);
  /**
   * @brief Construct a new CUDAArray instance from C++.
   */
  static wrapper_t New(Napi::Env const& env,
                       cudaArray_t const& array,
                       cudaExtent const& extent,
                       cudaChannelFormatDesc const& channelFormatDesc,
                       uint32_t flags  = 0,
                       array_type type = array_type::CUDA);
  CUDAArray(CallbackArgs const& args);

  // Underlying CUDA array handle.
  cudaArray_t Array() { return array_; }
  // Dimensions of the array (width/height/depth, in elements).
  cudaExtent& Extent() { return extent_; }
  // Per-channel bit widths and format kind of each element.
  cudaChannelFormatDesc& ChannelFormatDesc() { return channelFormatDesc_; }
  // Flags the array was created with.
  uint32_t Flags() { return flags_; }
  // Extent dimensions of 0 are reported as 1 (note: narrows size_t to
  // uint32_t).
  uint32_t Width() { return std::max(Extent().width, size_t{1}); }
  uint32_t Height() { return std::max(Extent().height, size_t{1}); }
  uint32_t Depth() { return std::max(Extent().depth, size_t{1}); }
  // Element size in bytes: sum of the per-channel bit widths divided by 8.
  uint8_t BytesPerElement() {
    auto x = ChannelFormatDesc().x;
    auto y = ChannelFormatDesc().y;
    auto z = ChannelFormatDesc().z;
    auto w = ChannelFormatDesc().w;
    return (x + y + z + w) >> 3;
  }

 private:
  // JS-facing accessors registered on the prototype by Init.
  Napi::Value GetPointer(Napi::CallbackInfo const& info);
  Napi::Value GetByteLength(Napi::CallbackInfo const& info);
  Napi::Value GetBytesPerElement(Napi::CallbackInfo const& info);
  // Napi::Value CopySlice(Napi::CallbackInfo const& info);
  Napi::Value GetWidth(Napi::CallbackInfo const& info);
  Napi::Value GetHeight(Napi::CallbackInfo const& info);
  Napi::Value GetDepth(Napi::CallbackInfo const& info);
  Napi::Value GetChannelFormatX(Napi::CallbackInfo const& info);
  Napi::Value GetChannelFormatY(Napi::CallbackInfo const& info);
  Napi::Value GetChannelFormatZ(Napi::CallbackInfo const& info);
  Napi::Value GetChannelFormatW(Napi::CallbackInfo const& info);
  Napi::Value GetChannelFormatKind(Napi::CallbackInfo const& info);

  cudaArray_t array_;      // CUDA array handle
  uint32_t flags_;         // creation flags
  array_type type_;        // provenance: plain CUDA, IPC-imported, or GL-mapped
  cudaExtent extent_;      // array dimensions
  cudaChannelFormatDesc channelFormatDesc_;  // element channel layout
};
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cuda/src
|
rapidsai_public_repos/node/modules/cuda/src/node_cuda/types.hpp
|
// Copyright (c) 2020, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cuda_runtime_api.h>
#include <cstdlib>
#include "visit_struct/visit_struct.hpp"
// Make cudaPointerAttributes visitable so the CPPToNapi/NapiToCPP helpers can
// convert it field-by-field into a JS object.
VISITABLE_STRUCT(CUDARTAPI::cudaPointerAttributes, type, device, devicePointer, hostPointer);
static_assert(visit_struct::traits::is_visitable<CUDARTAPI::cudaPointerAttributes>::value, "");

// Make cudaDeviceProp visitable for the same reason. The `uuid` field is
// intentionally omitted (commented out below).
VISITABLE_STRUCT(CUDARTAPI::cudaDeviceProp,
                 name,
                 // uuid,
                 totalGlobalMem,
                 sharedMemPerBlock,
                 regsPerBlock,
                 warpSize,
                 memPitch,
                 maxThreadsPerBlock,
                 maxThreadsDim,
                 maxGridSize,
                 clockRate,
                 totalConstMem,
                 major,
                 minor,
                 textureAlignment,
                 texturePitchAlignment,
                 deviceOverlap,
                 multiProcessorCount,
                 kernelExecTimeoutEnabled,
                 integrated,
                 canMapHostMemory,
                 computeMode,
                 maxTexture1D,
                 maxTexture1DMipmap,
                 maxTexture1DLinear,
                 maxTexture2D,
                 maxTexture2DMipmap,
                 maxTexture2DLinear,
                 maxTexture2DGather,
                 maxTexture3D,
                 maxTexture3DAlt,
                 maxTextureCubemap,
                 maxTexture1DLayered,
                 maxTexture2DLayered,
                 maxTextureCubemapLayered,
                 maxSurface1D,
                 maxSurface2D,
                 maxSurface3D,
                 maxSurface1DLayered,
                 maxSurface2DLayered,
                 maxSurfaceCubemap,
                 maxSurfaceCubemapLayered,
                 surfaceAlignment,
                 concurrentKernels,
                 ECCEnabled,
                 pciBusID,
                 pciDeviceID,
                 pciDomainID,
                 tccDriver,
                 asyncEngineCount,
                 unifiedAddressing,
                 memoryClockRate,
                 memoryBusWidth,
                 l2CacheSize,
                 maxThreadsPerMultiProcessor,
                 streamPrioritiesSupported,
                 globalL1CacheSupported,
                 localL1CacheSupported,
                 sharedMemPerMultiprocessor,
                 regsPerMultiprocessor,
                 managedMemory,
                 isMultiGpuBoard,
                 multiGpuBoardGroupID,
                 hostNativeAtomicSupported,
                 singleToDoublePrecisionPerfRatio,
                 pageableMemoryAccess,
                 concurrentManagedAccess,
                 computePreemptionSupported,
                 canUseHostPointerForRegisteredMem,
                 cooperativeLaunch,
                 cooperativeMultiDeviceLaunch,
                 sharedMemPerBlockOptin,
                 pageableMemoryAccessUsesHostPageTables,
                 directManagedMemAccessFromHost);
static_assert(visit_struct::traits::is_visitable<CUDARTAPI::cudaDeviceProp>::value, "");
| 0 |
rapidsai_public_repos/node/modules/cuda/src
|
rapidsai_public_repos/node/modules/cuda/src/node_cuda/addon.hpp
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <napi.h>
namespace nv {
namespace gl {
Napi::Object initModule(Napi::Env const& env,
Napi::Object exports,
Napi::Object driver,
Napi::Object runtime);
} // namespace gl
namespace kernel {
Napi::Object initModule(Napi::Env const& env,
Napi::Object exports,
Napi::Object driver,
Napi::Object runtime);
} // namespace kernel
namespace math {
Napi::Object initModule(Napi::Env const& env,
Napi::Object exports,
Napi::Object driver,
Napi::Object runtime);
} // namespace math
namespace memory {
Napi::Object initModule(Napi::Env const& env,
Napi::Object exports,
Napi::Object driver,
Napi::Object runtime);
} // namespace memory
namespace program {
Napi::Object initModule(Napi::Env const& env,
Napi::Object exports,
Napi::Object driver,
Napi::Object runtime);
} // namespace program
namespace stream {
Napi::Object initModule(Napi::Env const& env,
Napi::Object exports,
Napi::Object driver,
Napi::Object runtime);
} // namespace stream
namespace texture {
Napi::Object initModule(Napi::Env const& env,
Napi::Object exports,
Napi::Object driver,
Napi::Object runtime);
} // namespace texture
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cuda/src
|
rapidsai_public_repos/node/modules/cuda/src/node_cuda/math.hpp
|
// Copyright (c) 2020, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <napi.h>
#include <cmath>
#include <cstdint>
#include <random>
#include <nv_node/utilities/args.hpp>
#include "node_cuda/utilities/cpp_to_napi.hpp"
#include "node_cuda/utilities/napi_to_cpp.hpp"
namespace nv {
namespace math {
// Invokes functor `f` with int64_t when the first JS argument is a BigInt,
// otherwise with double, and converts the result back to a JS value.
template <typename F>
inline Napi::Value dispatch(F f, CallbackArgs const& info) {
  if (info[0].val.IsBigInt()) {
    return CPPToNapi(info)(f.template operator()<int64_t>(info));
  }
  return CPPToNapi(info)(f.template operator()<double>(info));
}
// Functor objects wrapping <cmath>/<algorithm> operations. Each reads its
// operand(s) from the JS argument list via the CallbackArgs conversion
// operators; the template parameter T (int64_t or double) is selected by
// `dispatch` based on whether the first JS argument is a BigInt.
struct calc_abs {
  template <typename T>
  inline auto operator()(CallbackArgs const& info) {
    return std::abs(info[0].operator T());
  }
};
struct calc_acos {
  template <typename T>
  inline auto operator()(CallbackArgs const& info) {
    return std::acos(info[0].operator T());
  }
};
struct calc_asin {
  template <typename T>
  inline auto operator()(CallbackArgs const& info) {
    return std::asin(info[0].operator T());
  }
};
struct calc_atan {
  template <typename T>
  inline auto operator()(CallbackArgs const& info) {
    return std::atan(info[0].operator T());
  }
};
// Two-argument: atan2(y, x).
struct calc_atan2 {
  template <typename T>
  inline auto operator()(CallbackArgs const& info) {
    return std::atan2(info[0].operator T(), info[1].operator T());
  }
};
struct calc_ceil {
  template <typename T>
  inline auto operator()(CallbackArgs const& info) {
    return std::ceil(info[0].operator T());
  }
};
struct calc_cos {
  template <typename T>
  inline auto operator()(CallbackArgs const& info) {
    return std::cos(info[0].operator T());
  }
};
struct calc_exp {
  template <typename T>
  inline auto operator()(CallbackArgs const& info) {
    return std::exp(info[0].operator T());
  }
};
struct calc_floor {
  template <typename T>
  inline auto operator()(CallbackArgs const& info) {
    return std::floor(info[0].operator T());
  }
};
struct calc_log {
  template <typename T>
  inline auto operator()(CallbackArgs const& info) {
    return std::log(info[0].operator T());
  }
};
// Two-argument: max(a, b).
struct calc_max {
  template <typename T>
  inline auto operator()(CallbackArgs const& info) {
    return std::max(info[0].operator T(), info[1].operator T());
  }
};
// Two-argument: min(a, b).
struct calc_min {
  template <typename T>
  inline auto operator()(CallbackArgs const& info) {
    return std::min(info[0].operator T(), info[1].operator T());
  }
};
// Two-argument: pow(base, exponent).
struct calc_pow {
  template <typename T>
  inline auto operator()(CallbackArgs const& info) {
    return std::pow(info[0].operator T(), info[1].operator T());
  }
};
struct calc_round {
  template <typename T>
  inline auto operator()(CallbackArgs const& info) {
    return std::round(info[0].operator T());
  }
};
struct calc_sin {
  template <typename T>
  inline auto operator()(CallbackArgs const& info) {
    return std::sin(info[0].operator T());
  }
};
struct calc_sqrt {
  template <typename T>
  inline auto operator()(CallbackArgs const& info) {
    return std::sqrt(info[0].operator T());
  }
};
struct calc_tan {
  template <typename T>
  inline auto operator()(CallbackArgs const& info) {
    return std::tan(info[0].operator T());
  }
};
} // namespace math
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cuda/src/node_cuda
|
rapidsai_public_repos/node/modules/cuda/src/node_cuda/utilities/napi_to_cpp.hpp
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "node_cuda/device.hpp"
#include "node_cuda/memory.hpp"
#include "node_cuda/types.hpp"
#include "visit_struct/visit_struct.hpp"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <napi.h>
#include <nv_node/utilities/napi_to_cpp.hpp>
#include <type_traits>
namespace nv {
//
// CUDA Driver type conversion helpers
//
// Converts a JS number to a CUDA driver CUresult status code.
template <>
inline NapiToCPP::operator CUresult() const {
  auto const code = this->operator int64_t();
  return static_cast<CUresult>(code);
}
// Reinterprets a JS-held pointer value as a CUfunction handle.
template <>
inline NapiToCPP::operator CUfunction() const {
  auto const ptr = this->operator char*();
  return reinterpret_cast<CUfunction>(ptr);
}
// Converts a JS number to a CUdevice_attribute enumerator.
template <>
inline NapiToCPP::operator CUdevice_attribute() const {
  auto const attr = this->operator int64_t();
  return static_cast<CUdevice_attribute>(attr);
}
// Converts a JS number to a CUpointer_attribute enumerator.
template <>
inline NapiToCPP::operator CUpointer_attribute() const {
  auto const attr = this->operator int64_t();
  return static_cast<CUpointer_attribute>(attr);
}
//
// CUDA Runtime type conversion helpers
//
// Reinterprets a JS-held pointer value as a cudaArray_t handle.
template <>
inline NapiToCPP::operator cudaArray_t() const {
  auto const ptr = this->operator char*();
  return reinterpret_cast<cudaArray_t>(ptr);
}
// Reinterprets a JS-held pointer value as a cudaGraphicsResource_t handle.
template <>
inline NapiToCPP::operator cudaGraphicsResource_t() const {
  auto const ptr = this->operator char*();
  return reinterpret_cast<cudaGraphicsResource_t>(ptr);
}
// Converts a JS value to a cudaIpcMemHandle_t. Accepts three forms:
//  * a JS Array of bytes — copied element-wise into a fresh 64-byte buffer
//    (CUDA IPC handles are 64 bytes); any bytes past ary.Length() stay zero;
//  * an IpcHandle wrapper instance — dereferences its wrapped handle;
//  * anything else convertible to char* — reinterpreted in place.
template <>
inline NapiToCPP::operator cudaIpcMemHandle_t() const {
  if (val.IsArray()) {
    auto ary = As<Napi::Array>();
    auto buf = Napi::Uint8Array::New(Env(), 64);
    for (size_t i = 0; i < ary.Length(); ++i) { buf.Set(i, ary.Get(i)); }
    return *reinterpret_cast<cudaIpcMemHandle_t*>(buf.ArrayBuffer().Data());
  }
  if (IpcHandle::IsInstance(val)) { return *(IpcHandle::Unwrap(ToObject())->handle()); }
  // NOTE(review): assumes the value converts to a pointer at least 64 bytes
  // readable — confirm against callers.
  return *reinterpret_cast<cudaIpcMemHandle_t*>(this->operator char*());
}
// Reinterprets a JS-held pointer value as a cudaStream_t handle.
template <>
inline NapiToCPP::operator cudaStream_t() const {
  auto const ptr = this->operator char*();
  return reinterpret_cast<cudaStream_t>(ptr);
}
// Reinterprets a JS-held pointer value as a cudaUUID_t pointer.
template <>
inline NapiToCPP::operator cudaUUID_t*() const {
  auto const ptr = this->operator char*();
  return reinterpret_cast<cudaUUID_t*>(ptr);
}
// Populates a zero-initialized cudaDeviceProp from a JS Object.
// visit_struct iterates every member of cudaDeviceProp; each member whose
// key is present (and not undefined) on the JS object is converted to the
// member's decayed type and written back. Missing keys keep their
// zero-initialized values. Non-object inputs yield an all-zero struct.
template <>
inline NapiToCPP::operator cudaDeviceProp() const {
  cudaDeviceProp props{};
  if (val.IsObject()) {
    auto obj = val.As<Napi::Object>();
    visit_struct::for_each(props, [&](char const* key, auto& val) {
      if (obj.Has(key) && !obj.Get(key).IsUndefined()) {
        using T = typename std::decay<decltype(val)>::type;
        // NOTE(review): writes through the member's address via
        // reinterpret_cast rather than plain assignment — presumably to
        // handle array members whose decayed type differs; confirm.
        *reinterpret_cast<T*>(&val) = NapiToCPP(obj.Get(key)).operator T();
      }
    });
  }
  return props;
}
// Converts a JS Object with numeric `width`, `height`, `depth` keys into a
// cudaExtent. Throws a JS error for non-object input.
template <>
inline NapiToCPP::operator cudaExtent() const {
  if (IsObject()) {
    auto const obj = ToObject();
    return {
      static_cast<size_t>(obj.Get("width").ToNumber().Int64Value()),
      static_cast<size_t>(obj.Get("height").ToNumber().Int64Value()),
      static_cast<size_t>(obj.Get("depth").ToNumber().Int64Value()),
    };
  }
  // Fixed: the message previously described cudaChannelFormatDesc's
  // x/y/z/w/f keys (copy-paste from the conversion below) instead of
  // cudaExtent's width/height/depth keys.
  NAPI_THROW(Napi::Error::New(
    Env(), "expected cudaExtent Object with numeric width, height, depth keys"));
}
// Converts a JS Object with numeric `x`, `y`, `z`, `w`, `f` keys into a
// cudaChannelFormatDesc. Throws a JS error for non-object input.
template <>
inline NapiToCPP::operator cudaChannelFormatDesc() const {
  if (IsObject()) {
    auto const obj = ToObject();
    auto const x   = obj.Get("x").ToNumber();
    auto const y   = obj.Get("y").ToNumber();
    auto const z   = obj.Get("z").ToNumber();
    auto const w   = obj.Get("w").ToNumber();
    auto const f   = static_cast<cudaChannelFormatKind>(obj.Get("f").ToNumber().Uint32Value());
    return {x, y, z, w, f};
  }
  NAPI_THROW(Napi::Error::New(
    Env(), "expected cudaChannelFormatDesc Object with numeric x, y, z, w, f keys"));
}
// Unwraps a JS `Device` instance; throws a JS error otherwise.
template <>
inline NapiToCPP::operator Device() const {
  if (Device::IsInstance(val)) { return *Device::Unwrap(val.ToObject()); }
  // Fixed: the message was passed as NAPI_THROW's second argument (the
  // no-exceptions return value) rather than to Napi::Error::New, so the
  // thrown error carried no message.
  NAPI_THROW(Napi::Error::New(val.Env(), "Expected value to be a Device instance"));
}
// Unwraps a JS `PinnedMemory` instance; throws a JS error otherwise.
template <>
inline NapiToCPP::operator PinnedMemory() const {
  if (PinnedMemory::IsInstance(val)) { return *PinnedMemory::Unwrap(val.ToObject()); }
  // Fixed: message moved inside Napi::Error::New (was NAPI_THROW's 2nd arg).
  NAPI_THROW(Napi::Error::New(val.Env(), "Expected value to be a PinnedMemory instance"));
}
// Unwraps a JS `DeviceMemory` instance; throws a JS error otherwise.
template <>
inline NapiToCPP::operator DeviceMemory() const {
  if (DeviceMemory::IsInstance(val)) { return *DeviceMemory::Unwrap(val.ToObject()); }
  // Fixed: message moved inside Napi::Error::New (was NAPI_THROW's 2nd arg).
  NAPI_THROW(Napi::Error::New(val.Env(), "Expected value to be a DeviceMemory instance"));
}
// Unwraps a JS `ManagedMemory` instance; throws a JS error otherwise.
template <>
inline NapiToCPP::operator ManagedMemory() const {
  if (ManagedMemory::IsInstance(val)) { return *ManagedMemory::Unwrap(val.ToObject()); }
  // Fixed: message moved inside Napi::Error::New (was NAPI_THROW's 2nd arg).
  NAPI_THROW(Napi::Error::New(val.Env(), "Expected value to be a ManagedMemory instance"));
}
// Unwraps a JS `IpcMemory` instance; throws a JS error otherwise.
template <>
inline NapiToCPP::operator IpcMemory() const {
  if (IpcMemory::IsInstance(val)) { return *IpcMemory::Unwrap(val.ToObject()); }
  // Fixed: message moved inside Napi::Error::New (was NAPI_THROW's 2nd arg).
  NAPI_THROW(Napi::Error::New(val.Env(), "Expected value to be a IpcMemory instance"));
}
// Moves out of a JS `IpcHandle` instance; throws a JS error otherwise.
template <>
inline NapiToCPP::operator IpcHandle() const {
  if (IpcHandle::IsInstance(val)) { return std::move(*IpcHandle::Unwrap(val.ToObject())); }
  // Fixed: message moved inside Napi::Error::New (was NAPI_THROW's 2nd arg).
  NAPI_THROW(Napi::Error::New(val.Env(), "Expected value to be a IpcHandle instance"));
}
// Moves out of a JS `MappedGLMemory` instance; throws a JS error otherwise.
template <>
inline NapiToCPP::operator MappedGLMemory() const {
  if (MappedGLMemory::IsInstance(val)) {
    return std::move(*MappedGLMemory::Unwrap(val.ToObject()));
  }
  // Fixed: message moved inside Napi::Error::New (was NAPI_THROW's 2nd arg).
  NAPI_THROW(Napi::Error::New(val.Env(), "Expected value to be a MappedGLMemory instance"));
}
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cuda/src/node_cuda
|
rapidsai_public_repos/node/modules/cuda/src/node_cuda/utilities/error.hpp
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <napi.h>
#include <nvrtc.h>
namespace nv {
// Builds a std::runtime_error describing a CUDA driver API failure,
// including the originating file and line.
// Fixed: per the CUDA driver API docs, cuGetErrorName/cuGetErrorString set
// their out-pointer to NULL when `code` is unrecognized; constructing
// std::string from a null char* is undefined behavior, so guard both.
inline std::runtime_error cuError(CUresult code, std::string const& file, uint32_t line) {
  const char* name{nullptr};
  const char* estr{nullptr};
  cuGetErrorName(code, &name);
  cuGetErrorString(code, &estr);
  auto msg = std::string{name != nullptr ? name : "CUDA_ERROR_UNKNOWN"} + " " +
             std::string{estr != nullptr ? estr : "unrecognized error code"} + "\n at " + file +
             ":" + std::to_string(line);
  return std::runtime_error(msg);
}
// Builds a std::runtime_error describing a CUDA runtime API failure,
// including the originating file and line.
inline std::runtime_error cudaError(cudaError_t code, std::string const& file, uint32_t line) {
  std::string msg{cudaGetErrorName(code)};
  msg.append(" ").append(cudaGetErrorString(code));
  msg.append("\n at ").append(file).append(":").append(std::to_string(line));
  return std::runtime_error(msg);
}
// Builds a std::runtime_error describing an NVRTC failure, including the
// originating file and line.
inline std::runtime_error nvrtcError(nvrtcResult code, std::string const& file, uint32_t line) {
  std::string msg{nvrtcGetErrorString(code)};
  msg.append("\n at ").append(file).append(":").append(std::to_string(line));
  return std::runtime_error(msg);
}
// Builds a std::runtime_error for a node_cuda-internal failure, including
// the originating file and line.
inline std::runtime_error node_cuda_error(std::string const& message,
                                          std::string const& file,
                                          uint32_t line) {
  std::string msg{"node_cuda failure:"};
  msg.append(message).append("\n at ").append(file).append(":").append(std::to_string(line));
  return std::runtime_error(msg);
}
// JS-error flavor of cuError: wraps the runtime_error's message in a
// Napi::Error bound to `env`.
inline Napi::Error cuError(CUresult code,
                           std::string const& file,
                           uint32_t line,
                           Napi::Env const& env) {
  auto const err = cuError(code, file, line);
  return Napi::Error::New(env, err.what());
}
// JS-error flavor of cudaError: wraps the runtime_error's message in a
// Napi::Error bound to `env`.
inline Napi::Error cudaError(cudaError_t code,
                             std::string const& file,
                             uint32_t line,
                             Napi::Env const& env) {
  auto const err = cudaError(code, file, line);
  return Napi::Error::New(env, err.what());
}
// JS-error flavor of nvrtcError: wraps the runtime_error's message in a
// Napi::Error bound to `env`.
inline Napi::Error nvrtcError(nvrtcResult code,
                              std::string const& file,
                              uint32_t line,
                              Napi::Env const& env) {
  auto const err = nvrtcError(code, file, line);
  return Napi::Error::New(env, err.what());
}
// JS-error flavor of node_cuda_error: wraps the runtime_error's message in
// a Napi::Error bound to `env`.
inline Napi::Error node_cuda_error(std::string const& message,
                                   std::string const& file,
                                   uint32_t line,
                                   Napi::Env const& env) {
  auto const err = node_cuda_error(message, file, line);
  return Napi::Error::New(env, err.what());
}
} // namespace nv
#ifndef NODE_CUDA_EXPECT
#define NODE_CUDA_EXPECT(expr, message, ...) \
do { \
if (!(expr)) NAPI_THROW(nv::node_cuda_error(message, __FILE__, __LINE__, ##__VA_ARGS__)); \
} while (0)
#endif
/**
 * @brief Throws a JS exception describing a CUDA driver API error code.
 * Extra arguments are forwarded to nv::cuError (e.g. a Napi::Env).
 **/
#ifndef NODE_CU_THROW
#define NODE_CU_THROW(code, ...) NAPI_THROW(nv::cuError(code, __FILE__, __LINE__, ##__VA_ARGS__))
#endif
/**
* @brief Error checking macro for CUDA driver API functions.
*
* Invokes a CUDA driver API function call, if the call does not return
* CUDA_SUCCESS, throws an exception detailing the CUDA error that occurred.
*
**/
#ifndef NODE_CU_TRY
#define NODE_CU_TRY(expr, ...) \
do { \
CUresult const status = (expr); \
if (status != CUDA_SUCCESS) { NODE_CU_THROW(status, ##__VA_ARGS__); } \
} while (0)
#endif
/**
 * @brief Throws a JS exception describing a CUDA runtime API error code.
 * Extra arguments are forwarded to nv::cudaError (e.g. a Napi::Env).
 **/
#ifndef NODE_CUDA_THROW
#define NODE_CUDA_THROW(code, ...) \
  NAPI_THROW(nv::cudaError(code, __FILE__, __LINE__, ##__VA_ARGS__))
#endif
/**
* @brief Error checking macro for CUDA runtime API functions.
*
* Invokes a CUDA runtime API function call, if the call does not return
* cudaSuccess, invokes cudaGetLastError() to clear the error and throws an
* exception detailing the CUDA error that occurred.
*
**/
#ifndef NODE_CUDA_TRY
#define NODE_CUDA_TRY(expr, ...) \
do { \
cudaError_t const status = (expr); \
if (status != cudaSuccess) { \
cudaGetLastError(); \
NODE_CUDA_THROW(status, ##__VA_ARGS__); \
} \
} while (0)
#endif
/**
 * @brief Throws a JS exception describing an NVRTC error code.
 * Extra arguments are forwarded to nv::nvrtcError (e.g. a Napi::Env).
 **/
#ifndef NODE_NVRTC_THROW
#define NODE_NVRTC_THROW(code, ...) \
  NAPI_THROW(nv::nvrtcError(code, __FILE__, __LINE__, ##__VA_ARGS__))
#endif
#ifndef NODE_NVRTC_TRY
#define NODE_NVRTC_TRY(expr, ...) \
do { \
nvrtcResult status = (expr); \
if (status != NVRTC_SUCCESS) { NODE_NVRTC_THROW(status, ##__VA_ARGS__); } \
} while (0)
#endif
| 0 |
rapidsai_public_repos/node/modules/cuda/src/node_cuda
|
rapidsai_public_repos/node/modules/cuda/src/node_cuda/utilities/cpp_to_napi.hpp
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "node_cuda/types.hpp"
#include "visit_struct/visit_struct.hpp"
#include <cuda_runtime_api.h>
#include <cstdint>
#include <nv_node/utilities/cpp_to_napi.hpp>
#include <string>
#include <type_traits>
namespace nv {
//
// CUDA Runtime type conversions
//
// Converts a cudaUUID_t to a JS value by forwarding its 16 raw bytes as a
// (pointer, size) pair to another operator() overload — presumably a
// byte-span conversion; confirm against CPPToNapi's overload set.
template <>
inline Napi::Value CPPToNapi::operator()(cudaUUID_t const& data) const {
  return this->operator()({data.bytes, sizeof(cudaUUID_t)});
}
// Serializes a cudaError_t status code as a plain JS number.
template <>
inline Napi::Value CPPToNapi::operator()(cudaError_t const& error) const {
  auto const env = Env();
  return Napi::Number::New(env, error);
}
// Serializes a cudaStream_t handle as a JS number holding its address.
// Fixed for consistency: every sibling conversion in this file uses
// uintptr_t for pointer-to-integer casts; size_t is not guaranteed to be
// wide enough to hold a pointer, uintptr_t is.
template <>
inline Napi::Value CPPToNapi::operator()(cudaStream_t const& stream) const {
  return Napi::Number::New(Env(), reinterpret_cast<uintptr_t>(stream));
}
// Serializes a cudaEvent_t handle as a JS number holding its address.
template <>
inline Napi::Value CPPToNapi::operator()(cudaEvent_t const& event) const {
  auto const address = reinterpret_cast<uintptr_t>(event);
  return Napi::Number::New(Env(), address);
}
// Serializes a cudaGraph_t handle as a JS number holding its address.
template <>
inline Napi::Value CPPToNapi::operator()(cudaGraph_t const& graph) const {
  auto const address = reinterpret_cast<uintptr_t>(graph);
  return Napi::Number::New(Env(), address);
}
// Serializes a cudaGraphNode_t handle as a JS number holding its address.
template <>
inline Napi::Value CPPToNapi::operator()(cudaGraphNode_t const& graphNode) const {
  auto const address = reinterpret_cast<uintptr_t>(graphNode);
  return Napi::Number::New(Env(), address);
}
// Serializes a cudaGraphExec_t handle as a JS number holding its address.
template <>
inline Napi::Value CPPToNapi::operator()(cudaGraphExec_t const& graphExec) const {
  auto const address = reinterpret_cast<uintptr_t>(graphExec);
  return Napi::Number::New(Env(), address);
}
// Serializes a cudaGraphicsResource_t handle as a JS number holding its address.
template <>
inline Napi::Value CPPToNapi::operator()(cudaGraphicsResource_t const& resource) const {
  auto const address = reinterpret_cast<uintptr_t>(resource);
  return Napi::Number::New(Env(), address);
}
// Copies the raw CUDA_IPC_HANDLE_SIZE (64) bytes of a CUDA IPC memory
// handle into a fresh JS Uint8Array. (Removed a stale commented-out
// alternative implementation.)
template <>
inline Napi::Value CPPToNapi::operator()(cudaIpcMemHandle_t const& data) const {
  auto buf = Napi::ArrayBuffer::New(Env(), CUDA_IPC_HANDLE_SIZE);
  std::memcpy(buf.Data(), &data, CUDA_IPC_HANDLE_SIZE);
  return buffer_to_typed_array<uint8_t>(buf);
}
// Serializes a cudaDeviceProp struct into a JS Object, one property per
// struct member, using visit_struct to enumerate the members.
template <>
inline Napi::Value CPPToNapi::operator()(cudaDeviceProp const& props) const {
  auto cast_t = *this;
  auto obj = Napi::Object::New(Env());
  visit_struct::for_each(props, [&](char const* name, auto const& val) { //
    using T = typename std::decay<decltype(val)>::type;
    if (std::is_pointer<T>()) {
      using P = typename std::remove_pointer<T>::type;
      if (std::is_same<P, char const>()) {
        // NOTE(review): takes the address of `val` and reads it as a C
        // string — presumably `val` is a char array member (e.g. the device
        // name) that decays to char const*; confirm against cudaDeviceProp.
        obj.Set(name, cast_t(std::string{reinterpret_cast<char const*>(&val)}));
      } else {
        // Non-char pointer members are emitted as (pointer, size) pairs.
        obj.Set(name, cast_t(std::make_tuple(reinterpret_cast<P const*>(val), sizeof(val))));
      }
    } else {
      // Scalar members convert directly.
      obj.Set(name, cast_t(val));
    }
  });
  return obj;
}
} // namespace nv
namespace Napi {
// Serializes cudaMemoryType enum values as plain JS numbers (uint8).
template <>
inline Value Value::From(napi_env env, CUDARTAPI::cudaMemoryType const& type) {
  return Value::From(env, static_cast<uint8_t>(type));
}
} // namespace Napi
| 0 |
rapidsai_public_repos/node/modules/cuda
|
rapidsai_public_repos/node/modules/cuda/test/buffer-tests.ts
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {
BigIntArray,
Float32Buffer,
Float64Buffer,
Int16Buffer,
Int32Buffer,
Int64Buffer,
Int8Buffer,
MemoryViewConstructor,
setDefaultAllocator,
TypedArray,
Uint16Buffer,
Uint32Buffer,
Uint64Buffer,
Uint8Buffer,
} from '@rapidsai/cuda';
describe.each(<[MemoryViewConstructor<TypedArray|BigIntArray>, typeof Number | typeof BigInt][]>[
[Int8Buffer, Number],
[Int16Buffer, Number],
[Int32Buffer, Number],
[Int64Buffer, BigInt],
[Uint8Buffer, Number],
[Uint16Buffer, Number],
[Uint32Buffer, Number],
[Uint64Buffer, BigInt],
[Float32Buffer, Number],
[Float64Buffer, Number],
])(`%s`,
<T extends TypedArray|BigIntArray, TValue extends typeof Number|typeof BigInt>(
BufferCtor: MemoryViewConstructor<T>, ValueCtor: TValue) => {
const values = Array.from({length: 1024}, (_, i) => ValueCtor(i));
const buffer = BufferCtor.TypedArray.from(values);
beforeEach(() => setDefaultAllocator(null));
test(`constructs ${BufferCtor.name} from a JavaScript Array via HtoD copy`, () => {
const dbuf = new BufferCtor(values);
expect(dbuf.toArray()).toEqual(buffer);
});
test(`constructs ${BufferCtor.name} from a JavaScript Iterable via HtoD copy`, () => {
const dbuf = new BufferCtor(function*() { yield* values; }());
expect(dbuf.toArray()).toEqual(buffer);
});
test(`constructs ${BufferCtor.name} from an ArrayBuffer via HtoD copy`, () => {
const dbuf = new BufferCtor(buffer.buffer);
expect(dbuf.toArray()).toEqual(buffer);
});
test(`constructs ${BufferCtor.name} from an ArrayBufferView via HtoD copy`, () => {
const dbuf = new BufferCtor(buffer);
expect(dbuf.toArray()).toEqual(buffer);
});
test(`constructs ${BufferCtor.name} from a device Memory instance zero-copy`, () => {
const mem = new BufferCtor(buffer).buffer;
const dbuf = new BufferCtor(mem);
expect(dbuf.toArray()).toEqual(buffer);
expect(dbuf.buffer === mem).toBe(true);
expect(dbuf.buffer.ptr).toEqual(mem.ptr);
});
test(`constructs ${BufferCtor.name} from a device MemoryView via DtoD copy`, () => {
const dbuf = new BufferCtor(new BufferCtor(buffer));
expect(dbuf.toArray()).toEqual(buffer);
});
test(`reads ${BufferCtor.name} values via subscript accessor`, () => {
const dbuf = new BufferCtor(buffer);
for (let i = -1; ++i < dbuf.length;) { expect(dbuf[i]).toEqual(buffer[i]); }
});
test(`writes ${BufferCtor.name} values via subscript accessor`, () => {
const dbuf = new BufferCtor(buffer);
const mult = <T[0]>buffer[buffer.length * .17 | 0];
(() => {
for (let i = -1, n = dbuf.length; ++i < n;) { //
dbuf[i] = (<T[0]>buffer[i]) * mult;
}
})();
const results = [...buffer].map((i: T[0]) => i * mult);
expect(dbuf.toArray()).toEqual(BufferCtor.TypedArray.from(results));
});
test(`slice copies the device memory`, () => {
const dbuf = new BufferCtor(buffer);
const copy = dbuf.slice();
expect(copy.toArray()).toEqual(buffer);
expect(dbuf.buffer.ptr).not.toEqual(copy.buffer.ptr);
});
test(`slice copies the device memory range`, () => {
const start = 300, end = 700;
const dbuf = new BufferCtor(buffer);
const copy = dbuf.slice(start, end);
expect(copy.toArray()).toEqual(buffer.slice(start, end));
expect(dbuf.buffer.ptr).not.toEqual(copy.buffer.ptr);
expect(copy.byteOffset).toEqual(buffer.slice(start, end).byteOffset);
expect(copy.byteLength).toEqual(buffer.slice(start, end).byteLength);
});
test(`subarray does not copy the device memory`, () => {
const dbuf = new BufferCtor(buffer);
const span = dbuf.subarray();
expect(span.toArray()).toEqual(buffer);
expect(dbuf.buffer.ptr).toEqual(span.buffer.ptr);
});
test(`subarray does not copy the device memory range`, () => {
const start = 300, end = 700;
const dbuf = new BufferCtor(buffer);
const span = dbuf.subarray(start, end);
expect(span.toArray()).toEqual(buffer.subarray(start, end));
expect(dbuf.buffer.ptr).toEqual(span.buffer.ptr);
expect(span.byteOffset).toEqual(buffer.subarray(start, end).byteOffset);
expect(span.byteLength).toEqual(buffer.subarray(start, end).byteLength);
});
test(`can copy from unregistered host memory`, () => {
const source = buffer.slice();
const target = new BufferCtor(source.length);
target.copyFrom(source);
expect(target.toArray()).toEqual(source);
});
test(`can copy into unregistered host memory`, () => {
const source = new BufferCtor(buffer);
const target = new BufferCtor.TypedArray(source.length);
source.copyInto(target);
expect(target).toEqual(buffer);
});
test(`can copy from device memory with offsets and lengths`, () => {
const source = new BufferCtor(buffer);
const target = new BufferCtor(buffer.length);
// swap the halves
target.copyFrom(source, 0, target.length / 2, target.length);
target.copyFrom(source, source.length / 2, 0, target.length / 2);
expect(target.toArray()).toEqual(new BufferCtor.TypedArray([
...buffer.subarray(buffer.length / 2),
...buffer.subarray(0, buffer.length / 2)
]));
});
test(`can copy into device memory with offsets and lengths`, () => {
const source = new BufferCtor(buffer);
const target = new BufferCtor(buffer.length);
// swap the halves
source.copyInto(target, 0, source.length / 2, source.length);
source.copyInto(target, target.length / 2, 0, source.length / 2);
expect(target.toArray()).toEqual(new BufferCtor.TypedArray([
...buffer.subarray(buffer.length / 2), //
...buffer.subarray(0, buffer.length / 2)
]));
});
test(`can copy from unregistered host memory with offsets and lengths`, () => {
const source = buffer.slice();
const target = new BufferCtor(buffer.length);
// swap the halves
target.copyFrom(source, 0, target.length / 2, target.length);
target.copyFrom(source, source.length / 2, 0, target.length / 2);
expect(target.toArray()).toEqual(new BufferCtor.TypedArray([
...buffer.subarray(buffer.length / 2),
...buffer.subarray(0, buffer.length / 2)
]));
});
test(`can copy into unregistered host memory with offsets and lengths`, () => {
const source = new BufferCtor(buffer);
const target = new BufferCtor.TypedArray(source.length);
// swap the halves
source.copyInto(target, 0, source.length / 2, source.length);
source.copyInto(target, buffer.length / 2, 0, source.length / 2);
expect(target).toEqual(new BufferCtor.TypedArray([
...buffer.subarray(buffer.length / 2), //
...buffer.subarray(0, buffer.length / 2)
]));
});
});
| 0 |
rapidsai_public_repos/node/modules/cuda
|
rapidsai_public_repos/node/modules/cuda/test/tsconfig.json
|
{
"extends": "../tsconfig.json",
"include": [
"../src/**/*.ts",
"../test/**/*.ts"
],
"compilerOptions": {
"target": "esnext",
"module": "commonjs",
"allowJs": true,
"importHelpers": false,
"noEmitHelpers": false,
"noEmitOnError": false,
"sourceMap": false,
"inlineSources": false,
"inlineSourceMap": false,
"downlevelIteration": false,
"baseUrl": "../",
"paths": {
"@rapidsai/cuda": ["src/index"],
"@rapidsai/cuda/*": ["src/*"]
}
}
}
| 0 |
rapidsai_public_repos/node/modules/cuda
|
rapidsai_public_repos/node/modules/cuda/test/setdefaultallocator-tests.ts
|
// Copyright (c) 2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {test} from '@jest/globals';
import {
DeviceMemory,
Float32Buffer,
ManagedMemory,
PinnedMemory,
setDefaultAllocator
} from '@rapidsai/cuda';
// Passing null to setDefaultAllocator should restore the built-in
// DeviceMemory allocator after a custom allocator was installed.
test('setDefaultAllocator with null resets to the default DeviceMemory allocator', () => {
  // Set a custom allocate fn
  setDefaultAllocator((n) => new ManagedMemory(n));
  const mbuf = new Float32Buffer(1024).fill(100);
  expect(mbuf.buffer).toBeInstanceOf(ManagedMemory);
  // Reset to the default
  setDefaultAllocator(null);
  const dbuf = new Float32Buffer(1024).fill(100);
  expect(dbuf.buffer).toBeInstanceOf(DeviceMemory);
});
// Each Memory constructor should be usable as the default allocator:
// buffers allocated afterwards wrap that memory type and still round-trip
// their contents correctly.
test.each([
  DeviceMemory,
  PinnedMemory,
  ManagedMemory,
])(`setDefaultAllocator works with %s`, (MemoryCtor) => {
  setDefaultAllocator((n) => new MemoryCtor(n));
  const buf = new Float32Buffer(1024).fill(100);
  expect(buf.buffer).toBeInstanceOf(MemoryCtor);
  expect(buf.toArray()).toEqual(new Float32Array(1024).fill(100));
  // Leave the default allocator in place for other test files.
  setDefaultAllocator(null);
});
| 0 |
rapidsai_public_repos/node/modules/cuda
|
rapidsai_public_repos/node/modules/cuda/test/ipc-tests.ts
|
// Copyright (c) 2020, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {ChildProcessByStdio, spawn} from 'child_process';
import {Readable, Writable} from 'stream';
// End-to-end CUDA IPC test: a source subprocess allocates device memory,
// fills it with a known pattern, and prints its IPC handle; a target
// subprocess opens that handle, copies the memory to host, and prints the
// bytes. The parent asserts the round-tripped bytes match the pattern.
test(`ipc works between subprocesses`, async () => {
  let src: ChildProcessByStdio<Writable, Readable, null>|undefined;
  let dst: ChildProcessByStdio<Writable, Readable, null>|undefined;
  try {
    src = spawnIPCSourceSubprocess(7, 8);
    const hndl = await readChildProcessOutput(src);
    if (hndl) {
      dst = spawnIPCTargetSubprocess(JSON.parse(hndl));
      const data = await readChildProcessOutput(dst);
      if (data) {
        expect(data).toStrictEqual('[7,7,7,7,8,8,8,8]');
      } else {
        throw new Error(`Invalid data from target child process: ${JSON.stringify(data)}`);
      }
    } else {
      throw new Error(`Invalid IPC handle from source child process: ${JSON.stringify(hndl)}`);
    }
  } finally {
    // Always reap both children (the source keeps itself alive with a timer).
    dst && !dst.killed && dst.kill();
    src && !src.killed && src.kill();
  }
});
// Resolves with the first truthy chunk emitted on a child process's stdout,
// stringified; resolves with '' if the stream ends without one.
async function readChildProcessOutput(proc: ChildProcessByStdio<Writable, Readable, null>) {
  for await (const chunk of proc.stdout) {
    if (chunk) {
      // eslint-disable-next-line @typescript-eslint/restrict-plus-operands
      return '' + chunk;
    }
  }
  return '';
}
// Spawns a node subprocess that allocates an 8-byte CUDA device buffer,
// fills bytes [0,4) with `first` and [4,8) with `second`, prints the
// buffer's IPC handle as JSON on stdout, and then idles (keeping the
// allocation alive) until killed. The handle is closed on process exit.
function spawnIPCSourceSubprocess(first: number, second: number) {
  return spawn('node',
               [
                 `-e`,
                 `
        const { Uint8Buffer } = require(".");
        const dmem = new Uint8Buffer(8);
        const hndl = dmem.getIpcHandle();
        dmem.fill(${first}, 0, 4).fill(${second}, 4, 8);
        process.stdout.write(JSON.stringify(hndl));
        process.on("exit", () => hndl.close());
        setInterval(() => { }, 60 * 1000);
        `
               ],
               {stdio: ['pipe', 'pipe', 'inherit']});
}
// Spawns a node subprocess that opens the given CUDA IPC handle bytes,
// copies the 8 bytes of shared device memory into host memory, closes the
// imported memory, and prints the bytes as a JSON array on stdout.
function spawnIPCTargetSubprocess({handle}: {handle: Array<number>}) {
  return spawn('node',
               [
                 '-e',
                 `
        const { Uint8Buffer, IpcMemory } = require(".");
        const hmem = new Uint8Array(8);
        const dmem = new IpcMemory([${handle.toString()}]);
        new Uint8Buffer(dmem).copyInto(hmem).buffer.close();
        process.stdout.write(JSON.stringify([...hmem]));
        `
               ],
               {stdio: ['pipe', 'pipe', 'inherit']});
}
| 0 |
rapidsai_public_repos/node/modules/cuda/test
|
rapidsai_public_repos/node/modules/cuda/test/device/create-with-flags-test.ts
|
// Copyright (c) 2020, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {Device, DeviceFlags, devices} from '@rapidsai/cuda';
// For every CUDA device on the machine, constructing a Device with each
// scheduling flag should succeed and report that flag back via getFlags().
test.each([
  DeviceFlags.scheduleAuto,
  DeviceFlags.scheduleSpin,
  DeviceFlags.scheduleYield,
  DeviceFlags.scheduleBlockingSync,
  DeviceFlags.lmemResizeToMax
])(`Creates each device with DeviceFlag %i`, (flags) => {
  for (const i of Array.from({length: devices.length}, (_, i) => i)) {
    const device = new Device(i, flags);
    try {
      expect(device.id).toBeDefined();
      expect(device.getFlags()).toBe(flags);
    // Restore the device to a clean state for subsequent tests.
    } finally { device.reset().synchronize(); }
  }
});
| 0 |
rapidsai_public_repos/node/modules/cuda/test
|
rapidsai_public_repos/node/modules/cuda/test/device/set-flags-test.ts
|
// Copyright (c) 2020, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {DeviceFlags, devices} from '@rapidsai/cuda';
// For every CUDA device, setFlags() after a reset should change the flags
// from the default (scheduleAuto) to the requested value.
test.each([
  DeviceFlags.scheduleAuto,
  DeviceFlags.scheduleSpin,
  DeviceFlags.scheduleYield,
  DeviceFlags.scheduleBlockingSync,
  DeviceFlags.lmemResizeToMax
])(`Sets device flags to DeviceFlag %i`, (flags) => {
  for (const device of devices) {
    try {
      expect(device.id).toBeDefined();
      expect(device.getFlags()).toBe(DeviceFlags.scheduleAuto);
      device.reset().setFlags(flags);
      expect(device.getFlags()).toBe(flags);
    // Restore the device to a clean state for subsequent tests.
    } finally { device.reset().synchronize(); }
  }
});
| 0 |
rapidsai_public_repos/node/modules/cuda/test
|
rapidsai_public_repos/node/modules/cuda/test/device/device-properties-test.ts
|
// Copyright (c) 2020, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {devices} from '@rapidsai/cuda';
// getProperties() should return a populated object whose `name` matches the
// Device instance's own `name` accessor, for every device on the machine.
test(`device.properties`, () => {
  for (const device of devices) {
    const props = device.getProperties();
    expect(props).toBeDefined();
    expect(props.name).toBeDefined();
    expect(props.name).toBe(device.name);
  }
});
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/cuspatial/package.json
|
{
"name": "@rapidsai/cuspatial",
"version": "22.12.2",
"description": "cuSpatial - NVIDIA RAPIDS GIS and Spatiotemporal Analytics Library",
"license": "Apache-2.0",
"main": "index.js",
"types": "build/js",
"author": "NVIDIA, Inc. (https://nvidia.com/)",
"maintainers": [
"Paul Taylor <[email protected]>"
],
"homepage": "https://github.com/rapidsai/node/tree/main/modules/cuspatial#readme",
"bugs": {
"url": "https://github.com/rapidsai/node/issues"
},
"repository": {
"type": "git",
"url": "git+https://github.com/rapidsai/node.git"
},
"scripts": {
"install": "npx rapidsai-install-native-module",
"clean": "rimraf build doc compile_commands.json",
"doc": "rimraf doc && typedoc --options typedoc.js",
"test": "node -r dotenv/config node_modules/.bin/jest -c jest.config.js",
"build": "yarn tsc:build && yarn cpp:build",
"build:debug": "yarn tsc:build && yarn cpp:build:debug",
"compile": "yarn tsc:build && yarn cpp:compile",
"compile:debug": "yarn tsc:build && yarn cpp:compile:debug",
"rebuild": "yarn tsc:build && yarn cpp:rebuild",
"rebuild:debug": "yarn tsc:build && yarn cpp:rebuild:debug",
"cpp:clean": "npx cmake-js clean -O build/Release",
"cpp:clean:debug": "npx cmake-js clean -O build/Debug",
"cpp:build": "npx cmake-js build -g -O build/Release",
"cpp:build:debug": "npx cmake-js build -g -D -O build/Debug",
"cpp:compile": "npx cmake-js compile -g -O build/Release",
"postcpp:compile": "npx rapidsai-merge-compile-commands",
"cpp:compile:debug": "npx cmake-js compile -g -D -O build/Debug",
"postcpp:compile:debug": "npx rapidsai-merge-compile-commands",
"cpp:configure": "npx cmake-js configure -g -O build/Release",
"postcpp:configure": "npx rapidsai-merge-compile-commands",
"cpp:configure:debug": "npx cmake-js configure -g -D -O build/Debug",
"postcpp:configure:debug": "npx rapidsai-merge-compile-commands",
"cpp:rebuild": "npx cmake-js rebuild -g -O build/Release",
"postcpp:rebuild": "npx rapidsai-merge-compile-commands",
"cpp:rebuild:debug": "npx cmake-js rebuild -g -D -O build/Debug",
"postcpp:rebuild:debug": "npx rapidsai-merge-compile-commands",
"cpp:reconfigure": "npx cmake-js reconfigure -g -O build/Release",
"postcpp:reconfigure": "npx rapidsai-merge-compile-commands",
"cpp:reconfigure:debug": "npx cmake-js reconfigure -g -D -O build/Debug",
"postcpp:reconfigure:debug": "npx rapidsai-merge-compile-commands",
"tsc:clean": "rimraf build/js",
"tsc:build": "yarn tsc:clean && tsc -p ./tsconfig.json",
"tsc:watch": "yarn tsc:clean && tsc -p ./tsconfig.json -w",
"dev:cpack:enabled": "echo $npm_package_name"
},
"dependencies": {
"@rapidsai/cudf": "~22.12.2"
},
"files": [
"LICENSE",
"README.md",
"index.js",
"package.json",
"CMakeLists.txt",
"src/node_cuspatial",
"build/js"
]
}
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/cuspatial/index.js
|
// Copyright (c) 2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Re-export the compiled TypeScript build output as this package's public API.
module.exports = require('./build/js/index');
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/cuspatial/jest.config.js
|
// Copyright (c) 2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Load environment variables from a local `.env` file when the optional
// `dotenv` package is present; silently continue when it is not installed.
try {
require('dotenv').config();
} catch (e) {}
// Jest configuration for the @rapidsai/cuspatial test suite (ts-jest preset).
module.exports = {
'verbose': true,
'testEnvironment': 'node',
// Run serially by default; PARALLEL_LEVEL (if set) raises the worker count.
'maxWorkers': process.env.PARALLEL_LEVEL || 1,
// Use the test-specific tsconfig and skip TS diagnostics for speed.
'globals': {'ts-jest': {'diagnostics': false, 'tsconfig': 'test/tsconfig.json'}},
'rootDir': './',
'roots': ['<rootDir>/test/'],
'moduleFileExtensions': ['js', 'ts', 'tsx'],
'coverageReporters': ['lcov'],
// Exclude the tests themselves and node_modules from coverage reports.
'coveragePathIgnorePatterns': ['test\\/.*\\.(ts|tsx|js)$', '/node_modules/'],
'transform': {'^.+\\.jsx?$': 'ts-jest', '^.+\\.tsx?$': 'ts-jest'},
'transformIgnorePatterns':
['/build/(js|Debug|Release)/*$', '/node_modules/(?!web-stream-tools).+\\.js$'],
'testRegex': '(.*(-|\\.)(test|spec)s?)\\.(ts|tsx|js)$',
'preset': 'ts-jest',
'testMatch': null,
// Map package-style imports to TS sources, and native-addon requires to the
// built .node binaries, so tests run against the working tree.
'moduleNameMapper': {
'^@rapidsai\/cuspatial(.*)': '<rootDir>/src/$1',
'^\.\.\/(Debug|Release)\/(rapidsai_cuspatial.node)$': '<rootDir>/build/$1/$2',
}
};
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/cuspatial/CMakeLists.txt
|
#=============================================================================
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
cmake_minimum_required(VERSION 3.24.1 FATAL_ERROR)
# Emit compile_commands.json for clangd/IDE tooling (symlinked below).
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
# Clear any inherited library output dir so cmake-js controls artifact placement.
unset(CMAKE_LIBRARY_OUTPUT_DIRECTORY)
unset(CMAKE_LIBRARY_OUTPUT_DIRECTORY CACHE)
option(NODE_RAPIDS_USE_SCCACHE "Enable caching compilation results with sccache" ON)
###################################################################################################
# - cmake modules ---------------------------------------------------------------------------------
# Resolve the shared CMake module directory shipped with @rapidsai/core by
# asking Node at configure time (this package is built via cmake-js, so a
# working `node` is guaranteed to be on PATH).
execute_process(COMMAND node -p
"require('@rapidsai/core').cmake_modules_path"
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
OUTPUT_VARIABLE NODE_RAPIDS_CMAKE_MODULES_PATH
OUTPUT_STRIP_TRAILING_WHITESPACE)
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/cmake_policies.cmake")
# npm_package_version is injected by npm/yarn when running lifecycle scripts;
# configuring outside of npm leaves the project version empty.
project(rapidsai_cuspatial VERSION $ENV{npm_package_version} LANGUAGES C CXX)
# Locate the installed sibling @rapidsai packages whose sources/binaries this
# addon compiles against and links to (core, cuda, rmm, cudf).
execute_process(COMMAND node -p
"require('path').dirname(require.resolve('@rapidsai/core'))"
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
OUTPUT_VARIABLE NODE_RAPIDS_CORE_MODULE_PATH
OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process(COMMAND node -p
"require('path').dirname(require.resolve('@rapidsai/cuda'))"
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
OUTPUT_VARIABLE NODE_RAPIDS_CUDA_MODULE_PATH
OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process(COMMAND node -p
"require('path').dirname(require.resolve('@rapidsai/rmm'))"
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
OUTPUT_VARIABLE NODE_RAPIDS_RMM_MODULE_PATH
OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process(COMMAND node -p
"require('path').dirname(require.resolve('@rapidsai/cudf'))"
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
OUTPUT_VARIABLE NODE_RAPIDS_CUDF_MODULE_PATH
OUTPUT_STRIP_TRAILING_WHITESPACE)
# Shared toolchain/dependency configuration from @rapidsai/core's module dir.
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureCXX.cmake")
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureCUDA.cmake")
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureNapi.cmake")
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureCUSPATIAL.cmake")
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/install_utils.cmake")
###################################################################################################
# - rapidsai_cuspatial target -------------------------------------------------------------------------
# NOTE(review): file(GLOB_RECURSE) without CONFIGURE_DEPENDS will not pick up
# newly added .cpp files until the next explicit re-configure — confirm this is
# the convention across the sibling modules before changing it.
file(GLOB_RECURSE NODE_CUSPATIAL_SRC_FILES "${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp")
add_library(${PROJECT_NAME} SHARED ${NODE_CUSPATIAL_SRC_FILES} ${CMAKE_JS_SRC})
# Name the output `rapidsai_cuspatial.node` (no `lib` prefix) as Node's addon
# loader expects, and make runtime library lookup relative to the addon itself.
set_target_properties(${PROJECT_NAME}
PROPERTIES PREFIX ""
SUFFIX ".node"
BUILD_RPATH "\$ORIGIN"
INSTALL_RPATH "\$ORIGIN"
CXX_STANDARD 17
CXX_STANDARD_REQUIRED ON
CUDA_STANDARD 17
CUDA_STANDARD_REQUIRED ON
NO_SYSTEM_FROM_IMPORTED ON
POSITION_INDEPENDENT_CODE ON
INTERFACE_POSITION_INDEPENDENT_CODE ON
)
# Per-language flags assembled by the Configure* modules above.
target_compile_options(${PROJECT_NAME}
PRIVATE "$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:C>:${NODE_RAPIDS_CMAKE_C_FLAGS}>>"
"$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:CXX>:${NODE_RAPIDS_CMAKE_CXX_FLAGS}>>"
"$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:CUDA>:${NODE_RAPIDS_CMAKE_CUDA_FLAGS}>>"
)
target_compile_definitions(${PROJECT_NAME}
PUBLIC "$<$<COMPILE_LANGUAGE:CXX>:CUDA_API_PER_THREAD_DEFAULT_STREAM>"
"$<$<COMPILE_LANGUAGE:CUDA>:CUDA_API_PER_THREAD_DEFAULT_STREAM>"
)
# Headers come from this module plus the sibling modules' source trees.
target_include_directories(${PROJECT_NAME}
PUBLIC "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src>"
"$<BUILD_INTERFACE:${NODE_RAPIDS_CUDF_MODULE_PATH}/src>"
"$<BUILD_INTERFACE:${NODE_RAPIDS_RMM_MODULE_PATH}/src>"
"$<BUILD_INTERFACE:${NODE_RAPIDS_CUDA_MODULE_PATH}/src>"
"$<BUILD_INTERFACE:${RAPIDS_CORE_INCLUDE_DIR}>"
"$<BUILD_INTERFACE:${NAPI_INCLUDE_DIRS}>"
)
# Link directly against the sibling modules' prebuilt .node binaries so shared
# symbols resolve to one copy at runtime.
target_link_libraries(${PROJECT_NAME}
PUBLIC ${CMAKE_JS_LIB}
cuspatial::cuspatial
"${NODE_RAPIDS_CUDF_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_cudf.node"
"${NODE_RAPIDS_RMM_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_rmm.node"
"${NODE_RAPIDS_CUDA_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_cuda.node"
"${NODE_RAPIDS_CORE_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_core.node")
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/cuda_arch_helpers.cmake")
generate_arch_specific_custom_targets(
NAME ${PROJECT_NAME}
DEPENDENCIES "cudf::cudf"
"cuspatial::cuspatial"
)
generate_install_rules(
NAME ${PROJECT_NAME}
CUDA_ARCHITECTURES ${CMAKE_CUDA_ARCHITECTURES})
# Create a symlink to compile_commands.json for the llvm-vs-code-extensions.vscode-clangd plugin
# NOTE(review): this writes into the source tree at configure time — accepted
# here as an IDE convenience, but it is the only source-tree side effect.
execute_process(COMMAND
${CMAKE_COMMAND} -E create_symlink
${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json)
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/cuspatial/README.md
|
# <div align="left"><img src="https://rapids.ai/assets/images/rapids_logo.png" width="90px"/> node-rapids cuSpatial - GPU-Accelerated Spatial and Trajectory Data Management and Analytics Library</div>
### Installation
`npm install @rapidsai/cuspatial`
### About
JS bindings for [cuSpatial](https://github.com/rapidsai/cuspatial).
For the detailed node-cuSpatial API reference, see our [API Documentation](https://rapidsai.github.io/node/modules/cuspatial_src.html).
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/cuspatial/tsconfig.json
|
{
"include": ["src"],
"exclude": ["node_modules"],
"compilerOptions": {
"baseUrl": "./",
"paths": {
"@rapidsai/cuspatial": ["src/index"],
"@rapidsai/cuspatial/*": ["src/*"]
},
"target": "ESNEXT",
"module": "commonjs",
"outDir": "./build/js",
/* Decorators */
"experimentalDecorators": false,
/* Basic stuff */
"moduleResolution": "node",
"skipLibCheck": true,
"skipDefaultLibCheck": true,
"lib": ["dom", "esnext", "esnext.asynciterable"],
/* Control what is emitted */
"declaration": true,
"declarationMap": true,
"noEmitOnError": true,
"removeComments": false,
"downlevelIteration": true,
/* Create inline sourcemaps with sources */
"sourceMap": false,
"inlineSources": true,
"inlineSourceMap": true,
/* The most restrictive settings possible */
"strict": true,
"importHelpers": true,
"noEmitHelpers": true,
"noImplicitAny": true,
"noUnusedLocals": true,
"noImplicitReturns": true,
"allowUnusedLabels": false,
"noUnusedParameters": true,
"allowUnreachableCode": false,
"noFallthroughCasesInSwitch": true,
"forceConsistentCasingInFileNames": true
}
}
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/cuspatial/LICENSE
|
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------
include/visit_struct/visit_struct.hpp (modified): BSL 1.0
Boost Software License - Version 1.0 - August 17th, 2003
Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:
The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/cuspatial/typedoc.js
|
// TypeDoc configuration: generate API docs from the TS sources into `doc/`,
// hiding private/protected members and external (node_modules) symbols.
module.exports = {
entryPoints: ['src/index.ts'],
out: 'doc',
name: '@rapidsai/cuspatial',
tsconfig: 'tsconfig.json',
excludePrivate: true,
excludeProtected: true,
excludeExternals: true,
};
| 0 |
rapidsai_public_repos/node/modules/cuspatial
|
rapidsai_public_repos/node/modules/cuspatial/.vscode/launch.json
|
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"compounds": [
{
"name": "Debug Tests (TS and C++)",
"configurations": [
"Debug Tests (launch gdb)",
// "Debug Tests (launch lldb)",
"Debug Tests (attach node)",
]
}
],
"configurations": [
{
"name": "Debug Tests (TS only)",
"type": "node",
"request": "launch",
"cwd": "${workspaceFolder}",
"console": "integratedTerminal",
"internalConsoleOptions": "neverOpen",
"program": "${workspaceFolder}/node_modules/.bin/jest",
"skipFiles": [
"<node_internals>/**",
"${workspaceFolder}/node_modules/**"
],
"env": {
"NODE_NO_WARNINGS": "1",
"NODE_ENV": "production",
"READABLE_STREAM": "disable",
},
"args": [
"--verbose",
"--runInBand",
"-c", "jest.config.js",
"${input:TEST_FILE}"
]
},
// {
// "name": "Debug Tests (launch lldb)",
// // hide the individual configurations from the debug dropdown list
// "presentation": { "hidden": true },
// "type": "lldb",
// "request": "launch",
// "stdio": null,
// "cwd": "${workspaceFolder}",
// "preLaunchTask": "cpp:ensure:debug:build",
// "env": {
// "NODE_DEBUG": "1",
// "NODE_NO_WARNINGS": "1",
// "NODE_ENV": "production",
// "READABLE_STREAM": "disable",
// },
// "stopOnEntry": false,
// "terminal": "console",
// "program": "${input:NODE_BINARY}",
// "initCommands": [
// "settings set target.disable-aslr false",
// ],
// "sourceLanguages": ["cpp", "cuda", "javascript"],
// "args": [
// "--inspect=9229",
// "--expose-internals",
// "${workspaceFolder}/node_modules/.bin/jest",
// "--verbose",
// "--runInBand",
// "-c",
// "jest.config.js",
// "${input:TEST_FILE}"
// ],
// },
{
"name": "Debug Tests (launch gdb)",
// hide the individual configurations from the debug dropdown list
"presentation": { "hidden": true },
"type": "cppdbg",
"request": "launch",
"stopAtEntry": false,
"externalConsole": false,
"cwd": "${workspaceFolder}",
"envFile": "${workspaceFolder}/.env",
"MIMode": "gdb",
"miDebuggerPath": "/usr/bin/gdb",
"setupCommands": [
{
"description": "Enable pretty-printing for gdb",
"text": "-enable-pretty-printing",
"ignoreFailures": true
}
],
"program": "${input:NODE_BINARY}",
"environment": [
{ "name": "NODE_DEBUG", "value": "1" },
{ "name": "NODE_NO_WARNINGS", "value": "1" },
{ "name": "NODE_ENV", "value": "production" },
{ "name": "READABLE_STREAM", "value": "disable" },
],
"args": [
"--inspect=9229",
"--expose-internals",
"${workspaceFolder}/node_modules/.bin/jest",
"--verbose",
"--runInBand",
"-c",
"jest.config.js",
"${input:TEST_FILE}"
],
},
{
"name": "Debug Tests (attach node)",
"type": "node",
"request": "attach",
// hide the individual configurations from the debug dropdown list
"presentation": { "hidden": true },
"port": 9229,
"timeout": 60000,
"cwd": "${workspaceFolder}",
"skipFiles": [
"<node_internals>/**",
"${workspaceFolder}/node_modules/**"
],
},
],
"inputs": [
{
"type": "command",
"id": "NODE_BINARY",
"command": "shellCommand.execute",
"args": {
"description": "path to node",
"command": "which node",
"useFirstResult": true,
}
},
{
"type": "command",
"id": "TEST_FILE",
"command": "shellCommand.execute",
"args": {
"cwd": "${workspaceFolder}/modules/cuspatial",
"description": "Select a file to debug",
"command": "./node_modules/.bin/jest --listTests | sed -r \"s@$PWD/test/@@g\"",
}
},
],
}
| 0 |
rapidsai_public_repos/node/modules/cuspatial
|
rapidsai_public_repos/node/modules/cuspatial/.vscode/tasks.json
|
{
"version": "2.0.0",
"tasks": [
{
"type": "shell",
"label": "Rebuild node_cuspatial TS and C++ (slow)",
"group": { "kind": "build", "isDefault": true, },
"command": "if [[ \"${input:CMAKE_BUILD_TYPE}\" == \"Release\" ]]; then yarn rebuild; else yarn rebuild:debug; fi",
"problemMatcher": [
"$tsc",
{
"owner": "cuda",
"fileLocation": ["relative", "${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}"],
"pattern": {
"file": 1,
"line": 2,
"severity": 3,
"message": 4,
"regexp": "^(.*)\\((\\d+)\\):\\s+(error|warning|note|info):\\s+(.*)$"
}
},
{
"owner": "cpp",
"fileLocation": ["relative", "${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}"],
"pattern": {
"file": 1,
"line": 2,
"severity": 4,
"message": 5,
"regexp": "^(.*):(\\d+):(\\d+):\\s+(error|warning|note|info):\\s+(.*)$"
}
},
],
},
{
"type": "npm",
"group": "build",
"label": "Recompile node_cuspatial TS (fast)",
"script": "tsc:build",
"detail": "yarn tsc:build",
"problemMatcher": ["$tsc"],
},
{
"type": "shell",
"group": "build",
"label": "Recompile node_cuspatial C++ (fast)",
"command": "ninja -C ${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}",
"problemMatcher": [
{
"owner": "cuda",
"fileLocation": ["relative", "${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}"],
"pattern": {
"file": 1,
"line": 2,
"severity": 3,
"message": 4,
"regexp": "^(.*)\\((\\d+)\\):\\s+(error|warning|note|info):\\s+(.*)$"
}
},
{
"owner": "cpp",
"fileLocation": ["relative", "${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}"],
"pattern": {
"file": 1,
"line": 2,
"severity": 4,
"message": 5,
"regexp": "^(.*):(\\d+):(\\d+):\\s+(error|warning|note|info):\\s+(.*)$"
}
},
],
},
],
"inputs": [
{
"type": "pickString",
"default": "Release",
"id": "CMAKE_BUILD_TYPE",
"options": ["Release", "Debug"],
"description": "C++ Build Type",
}
]
}
| 0 |
rapidsai_public_repos/node/modules/cuspatial
|
rapidsai_public_repos/node/modules/cuspatial/src/index.ts
|
// Copyright (c) 2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Public API barrel for @rapidsai/cuspatial: geometry constructors, quadtree
// spatial indexing, and coordinate-transform helpers.
export * from './geometry';
export * from './quadtree';
export * from './spatial';
| 0 |
rapidsai_public_repos/node/modules/cuspatial
|
rapidsai_public_repos/node/modules/cuspatial/src/spatial.ts
|
// Copyright (c) 2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {
Column,
FloatingPoint,
Series,
} from '@rapidsai/cudf';
import {makePoints} from '@rapidsai/cuspatial';
import {MemoryResource} from '@rapidsai/rmm';
import {
lonLatToCartesian,
} from './addon';
/**
 * Transforms longitude/latitude coordinate series into (x, y) cartesian
 * coordinates relative to the supplied center point, wrapping the result as a
 * points geometry via `makePoints`.
 *
 * @param centerX longitude of the cartesian origin
 * @param centerY latitude of the cartesian origin
 * @param lonPoints series of longitude values to transform
 * @param latPoints series of latitude values to transform
 * @param memoryResource optional memory resource used to allocate the result
 * @returns the transformed points as a cuSpatial points series
 */
export function convertLonLatToCartesian<T extends FloatingPoint>(
  centerX: number,
  centerY: number,
  lonPoints: Series<T>,
  latPoints: Series<T>,
  memoryResource?: MemoryResource) {
  // Delegate to the native binding, which returns the two coordinate columns.
  const result = lonLatToCartesian(centerX,
                                   centerY,
                                   lonPoints._col as Column<T>,
                                   latPoints._col as Column<T>,
                                   memoryResource);
  return makePoints(Series.new(result.x), Series.new(result.y));
}
| 0 |
rapidsai_public_repos/node/modules/cuspatial
|
rapidsai_public_repos/node/modules/cuspatial/src/node_cuspatial.ts
|
// Copyright (c) 2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {Column, FloatingPoint, Int32, Table, Uint32} from '@rapidsai/cudf';
import {MemoryResource} from '@rapidsai/rmm';
/** @ignore */
export declare const _cpp_exports: any;
/**
 * Builds a quadtree spatial index over a set of (x, y) points.
 * Returns the point-to-quadrant `keyMap` plus the quadtree `table` whose
 * columns are named 'key', 'level', 'is_quad', 'length', 'offset'.
 */
export declare function createQuadtree<T extends FloatingPoint>(xs: Column<T>,
ys: Column<T>,
xMin: number,
xMax: number,
yMin: number,
yMax: number,
scale: number,
maxDepth: number,
minSize: number,
memoryResource?: MemoryResource):
{keyMap: Column<Uint32>, table: Table, names: ['key', 'level', 'is_quad', 'length', 'offset']};
/**
 * Intersects a quadtree with a table of bounding boxes, yielding pairs of
 * box/quadrant offsets ('poly_offset', 'quad_offset').
 */
export declare function findQuadtreeAndBoundingBoxIntersections(
quadtree: Table,
boundingBoxes: Table,
xMin: number,
xMax: number,
yMin: number,
yMax: number,
scale: number,
maxDepth: number,
memoryResource?: MemoryResource): {table: Table, names: ['poly_offset', 'quad_offset']};
/**
 * Computes per-polygon minimum bounding boxes; result columns are
 * 'x_min', 'y_min', 'x_max', 'y_max'.
 */
export declare function computePolygonBoundingBoxes<T extends FloatingPoint>(
poly_offsets: Column<Int32>,
ring_offsets: Column<Int32>,
xs: Column<T>,
ys: Column<T>,
memoryResource?: MemoryResource): {table: Table, names: ['x_min', 'y_min', 'x_max', 'y_max']};
/**
 * Computes per-polyline bounding boxes, each grown by `expansionRadius`;
 * result columns are 'x_min', 'y_min', 'x_max', 'y_max'.
 */
export declare function computePolylineBoundingBoxes<T extends FloatingPoint>(
poly_offsets: Column<Int32>,
xs: Column<T>,
ys: Column<T>,
expansionRadius: number,
memoryResource?: MemoryResource): {table: Table, names: ['x_min', 'y_min', 'x_max', 'y_max']};
/**
 * Using quadtree/bounding-box `intersections`, pairs points with the polygons
 * that contain them ('polygon_index', 'point_index').
 */
export declare function findPointsInPolygons<T extends FloatingPoint>(
intersections: Table,
quadtree: Table,
keyMap: Column<Uint32>,
x: Column<T>,
y: Column<T>,
polygonOffsets: Column<Int32>,
ringOffsets: Column<Int32>,
polygonPointsX: Column<T>,
polygonPointsY: Column<T>,
memoryResource?: MemoryResource): {table: Table, names: ['polygon_index', 'point_index']};
/**
 * Using quadtree/bounding-box `intersections`, finds the nearest polyline to
 * each point and the distance to it
 * ('point_index', 'polyline_index', 'distance').
 */
export declare function findPolylineNearestToEachPoint<T extends FloatingPoint>(
intersections: Table,
quadtree: Table,
keyMap: Column<Uint32>,
x: Column<T>,
y: Column<T>,
polylineOffsets: Column<Int32>,
polylinePointsX: Column<T>,
polylinePointsY: Column<T>,
memoryResource
?: MemoryResource): {table: Table, names: ['point_index', 'polyline_index', 'distance']};
/**
 * Translates lon/lat coordinate columns into (x, y) columns on a cartesian
 * plane centered at (`origin_lon`, `origin_lat`).
 *
 * NOTE(review): the native binding reads its 3rd argument as the longitude
 * column and its 4th as the latitude column (see `lonlat_to_cartesian` in
 * `src/geometry.cpp`, which binds args[2] to `lons_column` and args[3] to
 * `lats_column`), and `convertLonLatToCartesian` in `src/spatial.ts` passes
 * `lonPoints` third and `latPoints` fourth. The previous parameter names
 * (`lats`, `lons`) were swapped relative to that behavior; they are renamed
 * here to match. Parameter names in an ambient declaration are
 * documentation-only, so no caller is affected.
 *
 * @param origin_lon longitude of the cartesian origin
 * @param origin_lat latitude of the cartesian origin
 * @param lons column of longitude values to transform
 * @param lats column of latitude values to transform
 * @param memoryResource optional memory resource used to allocate the output
 * @returns the transformed `x` and `y` coordinate columns
 */
export declare function lonLatToCartesian<T extends FloatingPoint>(
  origin_lon: number,
  origin_lat: number,
  lons: Column<T>,
  lats: Column<T>,
  memoryResource?: MemoryResource): {x: Column<T>, y: Column<T>};
| 0 |
rapidsai_public_repos/node/modules/cuspatial
|
rapidsai_public_repos/node/modules/cuspatial/src/geometry.cpp
|
// Copyright (c) 2021-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <node_cuspatial/geometry.hpp>
#include <node_cudf/column.hpp>
#include <node_cudf/table.hpp>
#include <node_rmm/utilities/napi_to_cpp.hpp>
#include <cuspatial/coordinate_transform.hpp>
#include <cuspatial/error.hpp>
#include <cuspatial/linestring_bounding_box.hpp>
#include <cuspatial/polygon_bounding_box.hpp>
#include <nv_node/utilities/args.hpp>
namespace nv {
// Computes the minimum bounding box (x_min, y_min, x_max, y_max) of each
// polygon described by the offset and vertex-coordinate columns.
// args: [0] polygon offsets, [1] ring offsets, [2] point x, [3] point y,
//       [4] device memory resource for the output allocations.
// Returns a JS object {names, table}; `names` labels the four output columns.
Napi::Value compute_polygon_bounding_boxes(CallbackArgs const& args) {
  Column::wrapper_t offsets_of_polys           = args[0];
  Column::wrapper_t offsets_of_rings           = args[1];
  Column::wrapper_t xs                         = args[2];
  Column::wrapper_t ys                         = args[3];
  rmm::mr::device_memory_resource* mr          = args[4];
  auto bboxes = [&] {
    try {
      // expansion radius is fixed at 0.0 for polygons
      return cuspatial::polygon_bounding_boxes(
        *offsets_of_polys, *offsets_of_rings, *xs, *ys, 0.0, mr);
    } catch (std::exception const& e) { throw Napi::Error::New(args.Env(), e.what()); }
  }();
  auto env    = args.Env();
  auto names  = Napi::Array::New(env, 4);
  char const* labels[] = {"x_min", "y_min", "x_max", "y_max"};
  for (uint32_t i = 0; i < 4; ++i) { names.Set(i, labels[i]); }
  auto output = Napi::Object::New(env);
  output.Set("names", names);
  output.Set("table", Table::New(env, std::move(bboxes)));
  return output;
}
// Computes the minimum bounding box of each polyline, expanded on all sides
// by the given radius.
// args: [0] polyline offsets, [1] point x, [2] point y, [3] expansion radius,
//       [4] device memory resource for the output allocations.
// Returns a JS object {names, table}; `names` labels the four output columns.
Napi::Value compute_polyline_bounding_boxes(CallbackArgs const& args) {
  Column::wrapper_t offsets           = args[0];
  Column::wrapper_t xs                = args[1];
  Column::wrapper_t ys                = args[2];
  double expansion_radius             = args[3];
  rmm::mr::device_memory_resource* mr = args[4];
  auto bboxes = [&] {
    try {
      return cuspatial::linestring_bounding_boxes(*offsets, *xs, *ys, expansion_radius, mr);
    } catch (std::exception const& e) { throw Napi::Error::New(args.Env(), e.what()); }
  }();
  auto env    = args.Env();
  auto names  = Napi::Array::New(env, 4);
  char const* labels[] = {"x_min", "y_min", "x_max", "y_max"};
  for (uint32_t i = 0; i < 4; ++i) { names.Set(i, labels[i]); }
  auto output = Napi::Object::New(env);
  output.Set("names", names);
  output.Set("table", Table::New(env, std::move(bboxes)));
  return output;
}
// Projects longitude/latitude columns to cartesian x/y relative to an origin.
// args: [0] origin longitude, [1] origin latitude, [2] longitude column,
//       [3] latitude column, [4] device memory resource.
// Returns a JS object with the resulting `x` and `y` columns.
Napi::Value lonlat_to_cartesian(CallbackArgs const& args) {
  double origin_lon                   = args[0];
  double origin_lat                   = args[1];
  Column::wrapper_t input_lons        = args[2];
  Column::wrapper_t input_lats        = args[3];
  rmm::mr::device_memory_resource* mr = args[4];
  auto xy = [&] {
    try {
      return cuspatial::lonlat_to_cartesian(origin_lon, origin_lat, *input_lons, *input_lats, mr);
    } catch (std::exception const& e) { throw Napi::Error::New(args.Env(), e.what()); }
  }();
  auto env = args.Env();
  auto out = Napi::Object::New(env);
  out.Set("x", Column::New(env, std::move(xy.first)));
  out.Set("y", Column::New(env, std::move(xy.second)));
  return out;
}
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cuspatial
|
rapidsai_public_repos/node/modules/cuspatial/src/quadtree.ts
|
// Copyright (c) 2021-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {
Bool8,
Column,
DataFrame,
FloatingPoint,
Int32,
Series,
Table,
Uint32,
Uint8
} from '@rapidsai/cudf';
import {MemoryResource} from '@rapidsai/rmm';
import {
createQuadtree,
findPointsInPolygons,
findPolylineNearestToEachPoint,
findQuadtreeAndBoundingBoxIntersections
} from './addon';
import {
BoundingBoxes,
polygonBoundingBoxes,
Polygons,
polylineBoundingBoxes,
Polylines
} from './geometry';
/**
 * Column schema of the DataFrame backing a Quadtree: one row per quadtree
 * node (either an internal quad or a leaf).
 */
type QuadtreeSchema = {
  /** Uint32 quad node keys */
  key: Uint32,
  /** Uint8 level for each quadtree node */
  level: Uint8,
  /** Boolean indicating whether a node is a quad or leaf */
  is_quad: Bool8,
  /**
   * If this is a non-leaf quadrant (i.e. `is_quad` is `true`), this is the number of children in
   * the non-leaf quadrant.
   *
   * Otherwise this is the number of points contained in the leaf quadrant.
   */
  length: Uint32,
  /**
   * If this is a non-leaf quadrant (i.e. `is_quad` is `true`), this is the position of the non-leaf
   * quadrant's first child.
   *
   * Otherwise this column's value is the position of the leaf quadrant's first point.
   */
  offset: Uint32,
};
/**
 * A GPU point quadtree over a set of (x, y) points, with spatial-join helpers
 * for testing the indexed points against polygons and polylines.
 */
export class Quadtree<T extends FloatingPoint> {
  /**
   * @summary Construct a quadtree from a set of points for a given area-of-interest bounding box.
   *
   * @note Swaps `xMin` and `xMax` if `xMin > xMax`
   * @note Swaps `yMin` and `yMax` if `yMin > yMax`
   *
   * @param options Object of quadtree options
   * @param options.x Column of x-coordinates for each point
   * @param options.y Column of y-coordinates for each point
   * @param options.xMin The lower-left x-coordinate of the area of interest bounding box
   * @param options.xMax The upper-right x-coordinate of the area of interest bounding box
   * @param options.yMin The lower-left y-coordinate of the area of interest bounding box
   * @param options.yMax The upper-right y-coordinate of the area of interest bounding box
   * @param options.scale Scale to apply to each point's distance from ``(x_min, y_min)``
   * @param options.maxDepth Maximum quadtree depth in range [0, 15)
   * @param options.minSize Minimum number of points for a non-leaf quadtree node
   * @param options.memoryResource Optional resource to use for output device memory allocations.
   * @returns Quadtree
   */
  static new<T extends Series<FloatingPoint>>(options: {
    x: T,
    y: T,
    xMin: number,
    xMax: number,
    yMin: number,
    yMax: number,
    scale: number,
    maxDepth: number,
    minSize: number,
    memoryResource?: MemoryResource
  }) {
    const x = options.x._col;
    const y = options.y._col;
    // clamp minSize to at least 1 (treats undefined/0 as 1)
    const minSize = Math.max(1, options.minSize || 0);
    const {xMin, xMax, yMin, yMax, scale, maxDepth} = normalizeQuadtreeOptions(options);
    const {keyMap, names, table} = createQuadtree<T['type']>(
      x, y, xMin, xMax, yMin, yMax, scale, maxDepth, minSize, options.memoryResource);
    // the native binding returns the node columns positionally; `names` labels them
    return new Quadtree<T['type']>({
      x,
      y,
      keyMap,
      xMin,
      xMax,
      yMin,
      yMax,
      scale,
      maxDepth,
      minSize,
      quadtree: new DataFrame({
        [names[0]]: table.getColumnByIndex<Uint32>(0),
        [names[1]]: table.getColumnByIndex<Uint8>(1),
        [names[2]]: table.getColumnByIndex<Bool8>(2),
        [names[3]]: table.getColumnByIndex<Uint32>(3),
        [names[4]]: table.getColumnByIndex<Uint32>(4),
      })
    });
  }
  protected constructor(options: {
    x: Column<T>,
    y: Column<T>,
    xMin: number,
    xMax: number,
    yMin: number,
    yMax: number,
    scale: number,
    maxDepth: number,
    minSize: number,
    keyMap: Column<Uint32>,
    quadtree: DataFrame<QuadtreeSchema>
  }) {
    this._x        = options.x;
    this._y        = options.y;
    this.xMin      = options.xMin;
    this.xMax      = options.xMax;
    this.yMin      = options.yMin;
    this.yMax      = options.yMax;
    this.scale     = options.scale;
    this.maxDepth  = options.maxDepth;
    this.minSize   = options.minSize;
    this._keyMap   = options.keyMap;
    this._quadtree = options.quadtree;
  }
  /** @summary The x-coordinates for each point used to construct the Quadtree. */
  protected readonly _x: Column<T>;
  /** @summary The y-coordinates for each point used to construct the Quadtree. */
  protected readonly _y: Column<T>;
  /** @summary `xMin` used to construct the Quadtree. */
  public readonly xMin: number;
  /** @summary `xMax` used to construct the Quadtree. */
  public readonly xMax: number;
  /** @summary `yMin` used to construct the Quadtree. */
  public readonly yMin: number;
  /** @summary `yMax` used to construct the Quadtree. */
  public readonly yMax: number;
  /** @summary `scale` used to construct the Quadtree. */
  public readonly scale: number;
  /** @summary `maxDepth` used to construct the Quadtree. */
  public readonly maxDepth: number;
  /** @summary `minSize` used to construct the Quadtree. */
  public readonly minSize: number;
  /** @summary A Uint32 Series of sorted keys to original point indices. */
  protected readonly _keyMap: Column<Uint32>;
  /** @summary A complete quadtree for the set of input points. */
  protected readonly _quadtree: DataFrame<QuadtreeSchema>;
  /** @summary x-coordinate for each point in their original order. */
  public get x(): Series<T> { return Series.new(this._x); }
  /** @summary y-coordinate for each point in their original order. */
  public get y(): Series<T> { return Series.new(this._y); }
  /**
   * @summary A Uint32 Series of quadtree node keys.
   */
  public get key() { return this._quadtree.get('key'); }
  /**
   * @summary A Uint8 Series of the level for each quadtree node.
   */
  public get level() { return this._quadtree.get('level'); }
  /**
   * @summary Boolean indicating whether a node is a quad or leaf.
   */
  public get isQuad() { return this._quadtree.get('is_quad'); }
  /**
   * @summary The number of children or points in each quadrant or leaf node.
   *
   * If this is a non-leaf quadrant (i.e. `isQuad` is `true`), this is the number of children in
   * the non-leaf quadrant.
   *
   * Otherwise this is the number of points contained in the leaf quadrant.
   */
  public get length() { return this._quadtree.get('length'); }
  /**
   * @summary The position of the first child or point in each quadrant or leaf node.
   *
   * If this is a non-leaf quadrant (i.e. `isQuad` is `true`), this is the position of the non-leaf
   * quadrant's first child.
   *
   * Otherwise this column's value is the position of the leaf quadrant's first point.
   */
  public get offset() { return this._quadtree.get('offset'); }
  /**
   * @summary A Uint32 Series mapping each original point index to its sorted position in the
   * Quadtree.
   */
  public get keyMap() { return Series.new(this._keyMap); }
  /**
   * @summary Point x-coordinates in the sorted order they appear in the Quadtree.
   */
  public get pointX(): Series<T> { return Series.new(this._x.gather(this._keyMap, false)); }
  /**
   * @summary Point y-coordinates in the sorted order they appear in the Quadtree.
   */
  public get pointY(): Series<T> { return Series.new(this._y.gather(this._keyMap, false)); }
  /**
   * @summary Point x and y-coordinates in the sorted order they appear in the Quadtree.
   */
  public get points() {
    // gather both coordinate columns in one pass so they stay aligned
    const remap = new Table({columns: [this._x, this._y]}).gather(this._keyMap, false);
    return new DataFrame({
      x: remap.getColumnByIndex<T>(0),
      y: remap.getColumnByIndex<T>(1),
    });
  }
  /** @ignore */
  public asTable() { return this._quadtree.asTable(); }
  /**
   * @summary Find the subset of the given polygons that contain points in the Quadtree.
   * @param polygons Series of Polygons to test.
   * @param memoryResource Optional resource used to allocate the output device memory.
   * @returns Series of each polygon that contains any points
   */
  public polygonsWithPoints<R extends Polygons<T>>(polygons: R, memoryResource?: MemoryResource) {
    return polygons.gather(this.pointInPolygon(polygons, memoryResource).get('polygon_index'));
  }
  /**
   * @summary Find the subset of points in the Quadtree contained by the given polygons.
   * @param polygons Series of Polygons to test.
   * @param memoryResource Optional resource used to allocate the output device memory.
   * @returns DataFrame x and y-coordinates of each found point
   */
  public pointsInPolygons<R extends Polygons<T>>(polygons: R, memoryResource?: MemoryResource) {
    return new DataFrame({x: this.x, y: this.y})
      .gather(this.pointInPolygon(polygons, memoryResource).get('point_index'));
  }
  /**
   * @summary Find the subset of points in the Quadtree contained by the given polygons.
   * @param polygons Series of Polygons to test.
   * @param memoryResource Optional resource used to allocate the output device memory.
   * @returns DataFrame Indices for each intersecting point and polygon pair.
   */
  public pointInPolygon<R extends Polygons<T>>(polygons: R, memoryResource?: MemoryResource) {
    // first narrow the search space via a quadtree/bbox spatial join
    const intersections =
      this.spatialJoin(polygonBoundingBoxes(polygons, memoryResource), memoryResource);
    const rings         = polygons.elements;
    const polygonPointX = rings.elements.getChild('x');
    const polygonPointY = rings.elements.getChild('y');
    const {names, table} = findPointsInPolygons(intersections.asTable(),
                                                this._quadtree.asTable(),
                                                this._keyMap,
                                                this._x,
                                                this._y,
                                                offsetsMinus1(polygons.offsets),
                                                offsetsMinus1(rings.offsets),
                                                polygonPointX._col as Column<T>,
                                                polygonPointY._col as Column<T>,
                                                memoryResource);
    return new DataFrame({
      [names[0]]: table.getColumnByIndex<Uint32>(0),
      [names[1]]: table.getColumnByIndex<Uint32>(1),
    });
  }
  /**
   * @summary Find a subset of points nearest to each given polyline.
   * @param polylines Series of Polylines to test.
   * @param expansionRadius Radius of each polyline point.
   * @param memoryResource Optional resource used to allocate the output device memory.
   * @returns DataFrame containing the closest point to each polyline.
   */
  public pointsNearestPolylines<R extends Polylines<T>>(polylines: R,
                                                        expansionRadius = 1,
                                                        memoryResource?: MemoryResource) {
    const result = this.pointToNearestPolyline(polylines, expansionRadius, memoryResource);
    return new DataFrame({x: this.x, y: this.y}).gather(result.get('point_index'));
  }
  /**
   * @summary Finds the nearest polyline to each point, and computes the distances between each
   * point/polyline pair.
   * @param polylines Series of Polylines to test.
   * @param expansionRadius Radius of each polyline point.
   * @param memoryResource Optional resource used to allocate the output device memory.
   * @returns DataFrame Indices for each point/nearest polyline pair, and distance between them.
   */
  public pointToNearestPolyline<R extends Polylines<T>>(polylines: R,
                                                        expansionRadius = 1,
                                                        memoryResource?: MemoryResource) {
    // first narrow the search space via a quadtree/bbox spatial join
    const intersections = this.spatialJoin(
      polylineBoundingBoxes(polylines, expansionRadius, memoryResource), memoryResource);
    const polylinePointX = polylines.elements.getChild('x');
    const polylinePointY = polylines.elements.getChild('y');
    const {names, table} = findPolylineNearestToEachPoint(intersections.asTable(),
                                                          this._quadtree.asTable(),
                                                          this._keyMap,
                                                          this._x,
                                                          this._y,
                                                          offsetsMinus1(polylines.offsets),
                                                          polylinePointX._col as Column<T>,
                                                          polylinePointY._col as Column<T>,
                                                          memoryResource);
    return new DataFrame({
      [names[0]]: table.getColumnByIndex<Uint32>(0),
      [names[1]]: table.getColumnByIndex<Uint32>(1),
      [names[2]]: table.getColumnByIndex<T>(2),
    });
  }
  /**
   * @summary Search a quadtree for bounding box intersections.
   * @param boundingBoxes Minimum bounding boxes for a set of polygons or polylines.
   * @param memoryResource Optional resource used to allocate the output device memory.
   * @returns DataFrame Indices for each intersecting bounding box and leaf quadrant.
   */
  public spatialJoin(boundingBoxes: BoundingBoxes<T>, memoryResource?: MemoryResource) {
    const {names, table} = findQuadtreeAndBoundingBoxIntersections(this._quadtree.asTable(),
                                                                   boundingBoxes.asTable(),
                                                                   this.xMin,
                                                                   this.xMax,
                                                                   this.yMin,
                                                                   this.yMax,
                                                                   this.scale,
                                                                   this.maxDepth,
                                                                   memoryResource);
    return new DataFrame({
      [names[0]]: table.getColumnByIndex<Uint32>(0),
      [names[1]]: table.getColumnByIndex<Uint32>(1),
    });
  }
}
/**
 * Validates and normalizes quadtree construction options: clamps `maxDepth`
 * into [0, 15], swaps min/max bounds supplied in reverse order, and raises
 * `scale` to the minimum valid value implied by the bounding box and depth.
 */
function normalizeQuadtreeOptions(options: {
  xMin: number,
  xMax: number,
  yMin: number,
  yMax: number,
  scale: number,
  maxDepth: number,
}) {
  const maxDepth = Math.min(15, Math.max(0, options.maxDepth | 0));
  const xMin     = Math.min(options.xMin, options.xMax);
  const xMax     = Math.max(options.xMin, options.xMax);
  const yMin     = Math.min(options.yMin, options.yMax);
  const yMax     = Math.max(options.yMin, options.yMax);
  // minimum valid value for the scale based on bbox and max tree depth
  const minScale = Math.max(xMax - xMin, yMax - yMin) / ((1 << maxDepth) + 2);
  const scale    = Math.max(options.scale, minScale);
  return {xMin, xMax, yMin, yMax, scale, maxDepth};
}
/**
 * Returns a view of the offsets column with its length reduced by one,
 * reusing the same underlying device buffer. cuDF list offsets have
 * `numLists + 1` entries; presumably the native cuspatial APIs expect
 * `numLists` entries — confirm against the binding layer.
 */
function offsetsMinus1(offsets: Series<Int32>) {
  return new Column({type: new Int32, data: offsets.data, length: offsets.length - 1});
}
| 0 |
rapidsai_public_repos/node/modules/cuspatial
|
rapidsai_public_repos/node/modules/cuspatial/src/geometry.ts
|
// Copyright (c) 2021-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {Column, DataFrame, FloatingPoint, Int32, List, Series, Struct} from '@rapidsai/cudf';
import {MemoryResource} from '@rapidsai/rmm';
import * as arrow from 'apache-arrow';
import {computePolygonBoundingBoxes, computePolylineBoundingBoxes} from './addon';
/** @summary Minimum bounding boxes, one row per polygon or polyline. */
export type BoundingBoxes<T extends FloatingPoint> =
  DataFrame<{x_min: T, y_min: T, x_max: T, y_max: T}>;
/** @summary A single (x, y) coordinate pair. */
export type Point<T extends FloatingPoint> = Struct<{x: T, y: T}>;
/** @summary A Series of (x, y) points. */
export type Points<T extends FloatingPoint> = Series<Point<T>>;
/** @summary A polyline: a list of points. */
export type Polyline<T extends FloatingPoint> = List<Point<T>>;
/** @summary A Series of polylines. */
export type Polylines<T extends FloatingPoint> = Series<Polyline<T>>;
/** @summary A polygon: a list of rings, each a list of points. */
export type Polygon<T extends FloatingPoint> = List<Polyline<T>>;
/** @summary A Series of polygons. */
export type Polygons<T extends FloatingPoint> = Series<Polygon<T>>;
/**
 * @summary Builds a Struct<{x, y}> Series of points from separate x and y
 * coordinate Series.
 * @param x x-coordinates
 * @param y y-coordinates
 * @returns Series of (x, y) point structs
 */
export function makePoints<T extends Series<FloatingPoint>>(x: T, y: T): Points<T['type']> {
  return Series.new({
    type: new Struct([
      arrow.Field.new('x', x.type),
      arrow.Field.new('y', y.type),
    ]),
    children: [x, y]
  });
}
/**
 * @summary Builds a List<Point> Series of polylines from a points Series and
 * Int32 offsets delimiting each polyline.
 * @param points flattened (x, y) vertices of all polylines
 * @param offsets Int32 offsets into `points`, one entry per polyline plus one
 * @returns Series of polylines
 */
export function makePolylines<T extends FloatingPoint>(points: Points<T>,
                                                       offsets: Series<Int32>): Polylines<T> {
  return Series.new({
    children: [offsets, points],
    type: new List(arrow.Field.new('points', points.type)),
  });
}
/**
 * @summary Computes the minimum bounding box of each polyline, expanded on
 * all sides by `expansionRadius`.
 * @param polylines Series of polylines to measure
 * @param expansionRadius amount to expand each box (default 1)
 * @param memoryResource optional resource for output device memory allocations
 * @returns DataFrame with `x_min`, `y_min`, `x_max`, and `y_max` columns
 */
export function polylineBoundingBoxes<T extends FloatingPoint>(
  polylines: Polylines<T>, expansionRadius = 1, memoryResource?: MemoryResource) {
  const vertices       = polylines.elements;
  const {names, table} = computePolylineBoundingBoxes(offsetsMinus1(polylines.offsets),
                                                      vertices.getChild('x')._col as Column<T>,
                                                      vertices.getChild('y')._col as Column<T>,
                                                      expansionRadius,
                                                      memoryResource);
  // the native binding returns the four bbox columns positionally; `names` labels them
  return new DataFrame({
    [names[0]]: table.getColumnByIndex<T>(0),
    [names[1]]: table.getColumnByIndex<T>(1),
    [names[2]]: table.getColumnByIndex<T>(2),
    [names[3]]: table.getColumnByIndex<T>(3),
  });
}
/**
 * @summary Builds a List<List<Point>> Series of polygons from a rings Series
 * and Int32 offsets delimiting each polygon's rings.
 * @param rings the rings of all polygons (as polylines)
 * @param offsets Int32 offsets into `rings`, one entry per polygon plus one
 * @returns Series of polygons
 */
export function makePolygons<T extends FloatingPoint>(rings: Polylines<T>,
                                                      offsets: Series<Int32>): Polygons<T> {
  return Series.new({
    children: [offsets, rings],
    type: new List(arrow.Field.new('rings', rings.type)),
  });
}
/**
 * @summary Computes the minimum bounding box of each polygon.
 * @param polygons Series of polygons to measure
 * @param memoryResource optional resource for output device memory allocations
 * @returns DataFrame with `x_min`, `y_min`, `x_max`, and `y_max` columns
 */
export function polygonBoundingBoxes<T extends FloatingPoint>(polygons: Polygons<T>,
                                                              memoryResource?: MemoryResource) {
  const rings          = polygons.elements;
  const vertices       = rings.elements;
  const {names, table} = computePolygonBoundingBoxes(offsetsMinus1(polygons.offsets),
                                                     offsetsMinus1(rings.offsets),
                                                     vertices.getChild('x')._col as Column<T>,
                                                     vertices.getChild('y')._col as Column<T>,
                                                     memoryResource);
  // the native binding returns the four bbox columns positionally; `names` labels them
  return new DataFrame({
    [names[0]]: table.getColumnByIndex<T>(0),
    [names[1]]: table.getColumnByIndex<T>(1),
    [names[2]]: table.getColumnByIndex<T>(2),
    [names[3]]: table.getColumnByIndex<T>(3),
  });
}
/**
 * Returns a view of the offsets column with its length reduced by one,
 * reusing the same underlying device buffer. cuDF list offsets have
 * `numLists + 1` entries; presumably the native cuspatial APIs expect
 * `numLists` entries — confirm against the binding layer.
 */
function offsetsMinus1(offsets: Series<Int32>) {
  return new Column({type: new Int32, data: offsets.data, length: offsets.length - 1});
}
| 0 |
rapidsai_public_repos/node/modules/cuspatial
|
rapidsai_public_repos/node/modules/cuspatial/src/quadtree.cpp
|
// Copyright (c) 2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <node_cuspatial/quadtree.hpp>
#include <node_cudf/column.hpp>
#include <node_cudf/table.hpp>
#include <node_rmm/utilities/napi_to_cpp.hpp>
#include <cuspatial/error.hpp>
#include <cuspatial/point_quadtree.hpp>
#include <cuspatial/spatial_join.hpp>
#include <nv_node/utilities/args.hpp>
namespace nv {
// Builds a point quadtree over an area-of-interest bounding box.
// args: [0] x, [1] y, [2..5] x_min/x_max/y_min/y_max, [6] scale,
//       [7] max_depth, [8] min_size, [9] device memory resource.
// Returns {keyMap, names, table}: `keyMap` maps sorted point order back to
// the original point indices; `table` holds the quadtree node columns.
Napi::Value create_quadtree(CallbackArgs const& args) {
  Column::wrapper_t xs                = args[0];
  Column::wrapper_t ys                = args[1];
  double x_min                        = args[2];
  double x_max                        = args[3];
  double y_min                        = args[4];
  double y_max                        = args[5];
  double scale                        = args[6];
  int8_t max_depth                    = args[7];
  cudf::size_type min_size            = args[8];
  rmm::mr::device_memory_resource* mr = args[9];
  auto key_map_and_tree = [&] {
    try {
      return cuspatial::quadtree_on_points(
        *xs, *ys, x_min, x_max, y_min, y_max, scale, max_depth, min_size, mr);
    } catch (std::exception const& e) { throw Napi::Error::New(args.Env(), e.what()); }
  }();
  auto env   = args.Env();
  auto names = Napi::Array::New(env, 5);
  char const* labels[] = {"key", "level", "is_quad", "length", "offset"};
  for (uint32_t i = 0; i < 5; ++i) { names.Set(i, labels[i]); }
  auto output = Napi::Object::New(env);
  output.Set("names", names);
  output.Set("table", Table::New(env, std::move(key_map_and_tree.second)));
  output.Set("keyMap", Column::New(env, std::move(key_map_and_tree.first)));
  return output;
}
// Intersects quadtree quadrants with polygon/polyline bounding boxes.
// args: [0] quadtree table, [1] bbox table, [2..5] x_min/x_max/y_min/y_max,
//       [6] scale, [7] max_depth, [8] device memory resource.
// NOTE(review): the join produces (bounding-box index, quadrant offset)
// pairs; the "polygon_index"/"point_index" labels here mirror what the JS
// side currently expects — confirm the semantics before renaming.
Napi::Value quadtree_bounding_box_intersections(CallbackArgs const& args) {
  Table::wrapper_t quadtree           = args[0];
  Table::wrapper_t poly_bbox          = args[1];
  double x_min                        = args[2];
  double x_max                        = args[3];
  double y_min                        = args[4];
  double y_max                        = args[5];
  double scale                        = args[6];
  int8_t max_depth                    = args[7];
  rmm::mr::device_memory_resource* mr = args[8];
  auto pairs = [&] {
    try {
      return cuspatial::join_quadtree_and_bounding_boxes(
        *quadtree, *poly_bbox, x_min, x_max, y_min, y_max, scale, max_depth, mr);
    } catch (std::exception const& e) { throw Napi::Error::New(args.Env(), e.what()); }
  }();
  auto env   = args.Env();
  auto names = Napi::Array::New(env, 2);
  names.Set(0u, "polygon_index");
  names.Set(1u, "point_index");
  auto output = Napi::Object::New(env);
  output.Set("names", names);
  output.Set("table", Table::New(env, std::move(pairs)));
  return output;
}
// Tests which points fall inside which polygons, limited to the candidate
// pairs produced by the quadtree/bbox spatial join.
// args: [0] intersections table, [1] quadtree table, [2] sorted point
//       indices, [3] x, [4] y, [5] polygon offsets, [6] ring offsets,
//       [7] polygon vertex x, [8] polygon vertex y, [9] memory resource.
// Returns {names: ['polygon_index', 'point_index'], table}.
Napi::Value find_points_in_polygons(CallbackArgs const& args) {
  Table::wrapper_t intersections      = args[0];
  Table::wrapper_t quadtree           = args[1];
  Column::wrapper_t point_indices     = args[2];
  Column::wrapper_t x                 = args[3];
  Column::wrapper_t y                 = args[4];
  Column::wrapper_t polygon_offsets   = args[5];
  Column::wrapper_t ring_offsets      = args[6];
  Column::wrapper_t polygon_points_x  = args[7];
  Column::wrapper_t polygon_points_y  = args[8];
  rmm::mr::device_memory_resource* mr = args[9];
  // translate any libcuspatial exception into a JS error
  auto result = [&]() {
    try {
      return cuspatial::quadtree_point_in_polygon(*intersections,
                                                  *quadtree,
                                                  *point_indices,
                                                  *x,
                                                  *y,
                                                  *polygon_offsets,
                                                  *ring_offsets,
                                                  *polygon_points_x,
                                                  *polygon_points_y,
                                                  mr);
    } catch (std::exception const& e) { throw Napi::Error::New(args.Env(), e.what()); }
  }();
  auto output = Napi::Object::New(args.Env());
  auto names  = Napi::Array::New(args.Env(), 2);
  names.Set(0u, "polygon_index");
  names.Set(1u, "point_index");
  output.Set("names", names);
  output.Set("table", Table::New(args.Env(), std::move(result)));
  return output;
}
// Finds the nearest polyline to each point and the distance between them,
// limited to the candidate pairs produced by the quadtree/bbox spatial join.
// args: [0] intersections table, [1] quadtree table, [2] sorted point
//       indices, [3] x, [4] y, [5] polyline offsets, [6] polyline vertex x,
//       [7] polyline vertex y, [8] memory resource.
// Returns {names: ['point_index', 'polyline_index', 'distance'], table}.
Napi::Value find_polyline_nearest_to_each_point(CallbackArgs const& args) {
  Table::wrapper_t intersections      = args[0];
  Table::wrapper_t quadtree           = args[1];
  Column::wrapper_t point_indices     = args[2];
  Column::wrapper_t x                 = args[3];
  Column::wrapper_t y                 = args[4];
  Column::wrapper_t polyline_offsets  = args[5];
  Column::wrapper_t polyline_points_x = args[6];
  Column::wrapper_t polyline_points_y = args[7];
  rmm::mr::device_memory_resource* mr = args[8];
  // translate any libcuspatial exception into a JS error
  auto result = [&]() {
    try {
      return cuspatial::quadtree_point_to_nearest_linestring(*intersections,
                                                             *quadtree,
                                                             *point_indices,
                                                             *x,
                                                             *y,
                                                             *polyline_offsets,
                                                             *polyline_points_x,
                                                             *polyline_points_y,
                                                             mr);
    } catch (std::exception const& e) { throw Napi::Error::New(args.Env(), e.what()); }
  }();
  auto output = Napi::Object::New(args.Env());
  auto names  = Napi::Array::New(args.Env(), 3);
  names.Set(0u, "point_index");
  names.Set(1u, "polyline_index");
  names.Set(2u, "distance");
  output.Set("names", names);
  output.Set("table", Table::New(args.Env(), std::move(result)));
  return output;
}
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cuspatial
|
rapidsai_public_repos/node/modules/cuspatial/src/addon.cpp
|
// Copyright (c) 2021-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "node_cuspatial/geometry.hpp"
#include "node_cuspatial/quadtree.hpp"
#include <nv_node/addon.hpp>
// N-API addon entry point for @rapidsai/cuspatial: registers the quadtree and
// geometry bindings as instance methods on an env-local addon object.
struct rapidsai_cuspatial : public nv::EnvLocalAddon, public Napi::Addon<rapidsai_cuspatial> {
  rapidsai_cuspatial(Napi::Env const& env, Napi::Object exports) : nv::EnvLocalAddon(env, exports) {
    DefineAddon(
      exports,
      {InstanceMethod("init", &rapidsai_cuspatial::InitAddon),
       InstanceValue("_cpp_exports", _cpp_exports.Value()),
       InstanceMethod<&rapidsai_cuspatial::create_quadtree>("createQuadtree"),
       InstanceMethod<&rapidsai_cuspatial::quadtree_bounding_box_intersections>(
         "findQuadtreeAndBoundingBoxIntersections"),
       InstanceMethod<&rapidsai_cuspatial::find_points_in_polygons>("findPointsInPolygons"),
       InstanceMethod<&rapidsai_cuspatial::find_polyline_nearest_to_each_point>(
         "findPolylineNearestToEachPoint"),
       InstanceMethod<&rapidsai_cuspatial::compute_polygon_bounding_boxes>(
         "computePolygonBoundingBoxes"),
       InstanceMethod<&rapidsai_cuspatial::compute_polyline_bounding_boxes>(
         "computePolylineBoundingBoxes"),
       InstanceMethod<&rapidsai_cuspatial::lonlat_to_cartesian>("lonLatToCartesian")
      });
  }

 private:
  // thin member-function trampolines forwarding to the free functions in nv::
  Napi::Value create_quadtree(Napi::CallbackInfo const& info) { return nv::create_quadtree(info); }
  Napi::Value quadtree_bounding_box_intersections(Napi::CallbackInfo const& info) {
    return nv::quadtree_bounding_box_intersections(info);
  }
  Napi::Value find_points_in_polygons(Napi::CallbackInfo const& info) {
    return nv::find_points_in_polygons(info);
  }
  Napi::Value find_polyline_nearest_to_each_point(Napi::CallbackInfo const& info) {
    return nv::find_polyline_nearest_to_each_point(info);
  }
  Napi::Value compute_polygon_bounding_boxes(Napi::CallbackInfo const& info) {
    return nv::compute_polygon_bounding_boxes(info);
  }
  Napi::Value compute_polyline_bounding_boxes(Napi::CallbackInfo const& info) {
    return nv::compute_polyline_bounding_boxes(info);
  }
  Napi::Value lonlat_to_cartesian(Napi::CallbackInfo const& info) {
    return nv::lonlat_to_cartesian(info);
  }
};
NODE_API_ADDON(rapidsai_cuspatial);
| 0 |
rapidsai_public_repos/node/modules/cuspatial
|
rapidsai_public_repos/node/modules/cuspatial/src/addon.ts
|
// Copyright (c) 2021-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* eslint-disable @typescript-eslint/unbound-method */
import {addon as CORE} from '@rapidsai/core';
import {addon as CUDA} from '@rapidsai/cuda';
import {addon as CUDF} from '@rapidsai/cudf';
import {addon as RMM} from '@rapidsai/rmm';
// Load the native addon, initialize its env-local state with the C++ exports
// of the RAPIDS modules it depends on, then re-export its bound functions
// with the types declared in `./node_cuspatial.d.ts`.
export const {
  createQuadtree,
  findQuadtreeAndBoundingBoxIntersections,
  computePolygonBoundingBoxes,
  computePolylineBoundingBoxes,
  findPointsInPolygons,
  findPolylineNearestToEachPoint,
  lonLatToCartesian,
  _cpp_exports,
} = require('bindings')('rapidsai_cuspatial.node').init(CORE, CUDA, RMM, CUDF) as
  typeof import('./node_cuspatial');
| 0 |
rapidsai_public_repos/node/modules/cuspatial/src
|
rapidsai_public_repos/node/modules/cuspatial/src/node_cuspatial/geometry.hpp
|
// Copyright (c) 2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <nv_node/utilities/args.hpp>
#include <napi.h>
namespace nv {

/**
 * @brief Compute the minimum bounding-boxes for a set of polygons.
 *
 * @param args CallbackArgs JavaScript arguments list.
 * @return Object with a `names` array and the bounding-box `table`.
 */
Napi::Value compute_polygon_bounding_boxes(CallbackArgs const& args);

/**
 * @brief Compute the minimum bounding-boxes for a set of polylines.
 *
 * @param args CallbackArgs JavaScript arguments list.
 * @return Object with a `names` array and the bounding-box `table`.
 */
Napi::Value compute_polyline_bounding_boxes(CallbackArgs const& args);

/**
 * @brief Convert lon/lat coordinate columns into cartesian coordinates.
 *
 * @param args CallbackArgs JavaScript arguments list.
 * @return Object with the resulting `x` and `y` columns.
 */
Napi::Value lonlat_to_cartesian(CallbackArgs const& args);

}  // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cuspatial/src
|
rapidsai_public_repos/node/modules/cuspatial/src/node_cuspatial/quadtree.hpp
|
// Copyright (c) 2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <nv_node/utilities/args.hpp>
#include <napi.h>
namespace nv {

/**
 * @brief Construct a quadtree from a set of points for a given area-of-interest bounding box.
 *
 * @param args CallbackArgs JavaScript arguments list.
 * @return Object with a `keyMap` column, a `names` array, and the quadtree node `table`.
 */
Napi::Value create_quadtree(CallbackArgs const& args);

/**
 * @brief Search a quadtree for polygon or polyline bounding box intersections.
 *
 * @param args CallbackArgs JavaScript arguments list.
 * @return Object with a `names` array and a `table` of intersection pairs.
 */
Napi::Value quadtree_bounding_box_intersections(CallbackArgs const& args);

/**
 * @brief Test whether the specified points are inside any of the specified polygons.
 *
 * @param args CallbackArgs JavaScript arguments list.
 * @return Object with a `names` array and a `table` of (polygon, point) index pairs.
 */
Napi::Value find_points_in_polygons(CallbackArgs const& args);

/**
 * @brief Finds the nearest polyline to each point in a quadrant, and computes the distances between
 * each point and polyline.
 *
 * @param args CallbackArgs JavaScript arguments list.
 * @return Object with a `names` array and a `table` of (point, polyline, distance) rows.
 */
Napi::Value find_polyline_nearest_to_each_point(CallbackArgs const& args);

}  // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cuspatial
|
rapidsai_public_repos/node/modules/cuspatial/test/utils.ts
|
// Copyright (c) 2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {DataFrame, Float64, Int32, Series} from '@rapidsai/cudf';
import {
makePoints,
makePolygons,
makePolylines,
} from '@rapidsai/cuspatial';
/** Four single-ring test polygons built from the {@link testPolylines} rings. */
export function testPolygons() {
  return makePolygons(testPolylines(), Series.new({type: new Int32, data: [0, 1, 2, 3, 4]}));
}
/** Four test polylines over the 19 vertices from {@link testPolyPoints}. */
export function testPolylines() {
  return makePolylines(testPolyPoints(), Series.new({type: new Int32, data: [0, 4, 10, 14, 19]}));
}
/**
 * 19 (x, y) vertices forming four closed rings — each ring repeats its first
 * vertex as its last.
 */
export function testPolyPoints() {
  const xs = Series.new({
    type: new Float64,
    data: new Float64Array([
      // ring 1
      2.488450,
      1.333584,
      3.460720,
      2.488450,
      // ring 2
      5.039823,
      5.561707,
      7.103516,
      7.190674,
      5.998939,
      5.039823,
      // ring 3
      5.998939,
      5.573720,
      6.703534,
      5.998939,
      // ring 4
      2.088115,
      1.034892,
      2.415080,
      3.208660,
      2.088115
    ]),
  });
  const ys = Series.new({
    type: new Float64,
    data: new Float64Array([
      // ring 1
      5.856625,
      5.008840,
      4.586599,
      5.856625,
      // ring 2
      4.229242,
      1.825073,
      1.503906,
      4.025879,
      5.653384,
      4.229242,
      // ring 3
      1.235638,
      0.197808,
      0.086693,
      1.235638,
      // ring 4
      4.541529,
      3.530299,
      2.896937,
      3.745936,
      4.541529
    ]),
  });
  return makePoints(
    xs,
    ys,
  );
}
// Returns the 71 test points (columns `x` and `y`) that the quadtree tests
// build their quadtree over. Values look random but are fixed so the expected
// quadtree layout and spatial-join results in the tests stay deterministic.
export function testPoints() {
  return new DataFrame({
    x: Series.new({
      type: new Float64,
      data: new Float64Array([
        1.9804558865545805, 0.1895259128530169, 1.2591725716781235, 0.8178039499335275,
        0.48171647380517046, 1.3890664414691907, 0.2536015260915061, 3.1907684812039956,
        3.028362149164369, 3.918090468102582, 3.710910700915217, 3.0706987088385853,
        3.572744183805594, 3.7080407833612004, 3.70669993057843, 3.3588457228653024,
        2.0697434332621234, 2.5322042870739683, 2.175448214220591, 2.113652420701984,
        2.520755151373394, 2.9909779614491687, 2.4613232527836137, 4.975578758530645,
        4.07037627210835, 4.300706849071861, 4.5584381091040616, 4.822583857757069,
        4.849847745942472, 4.75489831780737, 4.529792124514895, 4.732546857961497,
        3.7622247877537456, 3.2648444465931474, 3.01954722322135, 3.7164018490892348,
        3.7002781846945347, 2.493975723955388, 2.1807636574967466, 2.566986568683904,
        2.2006520196663066, 2.5104987015171574, 2.8222482218882474, 2.241538022180476,
        2.3007438625108882, 6.0821276168848994, 6.291790729917634, 6.109985464455084,
        6.101327777646798, 6.325158445513714, 6.6793884701899, 6.4274219368674315,
        6.444584786789386, 7.897735998643542, 7.079453687660189, 7.430677191305505,
        7.5085184104988, 7.886010001346151, 7.250745898479374, 7.769497359206111,
        1.8703303641352362, 1.7015273093278767, 2.7456295127617385, 2.2065031771469,
        3.86008672302403, 1.9143371250907073, 3.7176098065039747, 0.059011873032214,
        3.1162712022943757, 2.4264509160270813, 3.154282922203257
      ]),
    }),
    y: Series.new({
      type: new Float64,
      data: new Float64Array([
        1.3472225743317712, 0.5431061133894604, 0.1448705855995005, 0.8138440641113271,
        1.9022922214961997, 1.5177694304735412, 1.8762161698642947, 0.2621847215928189,
        0.027638405909631958, 0.3338651960183463, 0.9937713340192049, 0.9376313558467103,
        0.33184908855075124, 0.09804238103130436, 0.7485845679979923, 0.2346381514128677,
        1.1809465376402173, 1.419555755682142, 1.2372448404986038, 1.2774712415624014,
        1.902015274420646, 1.2420487904041893, 1.0484414482621331, 0.9606291981013242,
        1.9486902798139454, 0.021365525588281198, 1.8996548860019926, 0.3234041700489503,
        1.9531893897409585, 0.7800065259479418, 1.942673409259531, 0.5659923375279095,
        2.8709552313924487, 2.693039435509084, 2.57810040095543, 2.4612194182614333,
        2.3345952955903906, 3.3999020934055837, 3.2296461832828114, 3.6607732238530897,
        3.7672478678985257, 3.0668114607133137, 3.8159308233351266, 3.8812819070357545,
        3.6045900851589048, 2.5470532680258002, 2.983311357415729, 2.2235950639628523,
        2.5239201807166616, 2.8765450351723674, 2.5605928243991434, 2.9754616970668213,
        2.174562817047202, 3.380784914178574, 3.063690547962938, 3.380489849365283,
        3.623862886287816, 3.538128217886674, 3.4154469467473447, 3.253257011908445,
        4.209727933188015, 7.478882372510933, 7.474216636277054, 6.896038613284851,
        7.513564222799629, 6.885401350515916, 6.194330707468438, 5.823535317960799,
        6.789029097334483, 5.188939408363776, 5.788316610960881
      ]),
    })
  });
}
| 0 |
rapidsai_public_repos/node/modules/cuspatial
|
rapidsai_public_repos/node/modules/cuspatial/test/quadtree-tests.ts
|
// Copyright (c) 2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import '@rapidsai/cudf/test/jest-extensions';
import {setDefaultAllocator} from '@rapidsai/cuda';
import {Float32, Float64, FloatingPoint} from '@rapidsai/cudf';
import {Quadtree} from '@rapidsai/cuspatial';
import {DeviceBuffer} from '@rapidsai/rmm';
import {testPoints, testPolygons, testPolylines} from './utils';
// Route all device allocations in these tests through RMM DeviceBuffers.
setDefaultAllocator((byteLength: number) => new DeviceBuffer(byteLength));

// Floating-point dtypes the parameterized constructor test runs against.
const floatingPointTypes = [
  ['Float32', new Float32],  //
  ['Float64', new Float64]
] as [string, FloatingPoint][];
describe('Quadtree', () => {
  test.each(floatingPointTypes)(
    '`new` constructs a quadtree from points and a bounding box (%s)', (_, type) => {
      const points   = testPoints().castAll(type);
      // All three tests build the quadtree with the same bounds/options:
      // an 8x8 bounding box, unit scale, depth limit 3, and at least 12
      // points per leaf node.
      const quadtree = Quadtree.new({
        x: points.get('x'),
        y: points.get('y'),
        xMin: 0,
        xMax: 8,
        yMin: 0,
        yMax: 8,
        scale: 1,
        maxDepth: 3,
        minSize: 12,
      });
      // Expected per-node columns of the flattened quadtree (keys, levels,
      // quad/leaf flags, point counts, and offsets). Values were captured
      // from a known-good run — NOTE(review): presumably matching libcuspatial's
      // own quadtree tests; verify against upstream if they ever drift.
      expect(quadtree.key.data.toArray())
        .toEqualTypedArray(new Uint32Array([0, 1, 2, 0, 1, 3, 4, 7, 5, 6, 13, 14, 28, 31]));
      expect(quadtree.level.data.toArray())
        .toEqualTypedArray(new Uint8Array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2]));
      expect(quadtree.isQuad.data.toArray())
        .toEqualTypedArray(new Uint8ClampedArray([1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0]));
      expect(quadtree.length.data.toArray())
        .toEqualTypedArray(new Uint32Array([3, 2, 11, 7, 2, 2, 9, 2, 9, 7, 5, 8, 8, 7]));
      expect(quadtree.offset.data.toArray())
        .toEqualTypedArray(new Uint32Array([3, 6, 60, 0, 8, 10, 36, 12, 7, 16, 23, 28, 45, 53]));
      // The quadtree reorders the input points; pointX/pointY must equal the
      // original points gathered through the quadtree's keyMap.
      const remapped = points.gather(quadtree.keyMap);
      expect(quadtree.pointX.data.toArray()).toEqualTypedArray(remapped.get('x').data.toArray());
      expect(quadtree.pointY.data.toArray()).toEqualTypedArray(remapped.get('y').data.toArray());
    });
  test(`point in polygon`, () => {
    const points   = testPoints();
    const quadtree = Quadtree.new({
      x: points.get('x'),
      y: points.get('y'),
      xMin: 0,
      xMax: 8,
      yMin: 0,
      yMax: 8,
      scale: 1,
      maxDepth: 3,
      minSize: 12,
    });
    // Each row pairs a containing polygon index with a contained point index.
    const polygonAndPointIdxs = quadtree.pointInPolygon(testPolygons());
    expect(polygonAndPointIdxs.get('polygon_index').data.toArray())
      .toEqualTypedArray(
        new Uint32Array([3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 3]));
    expect(polygonAndPointIdxs.get('point_index').data.toArray())
      .toEqualTypedArray(new Uint32Array(
        [28, 29, 30, 31, 32, 33, 34, 35, 45, 46, 47, 48, 49, 50, 51, 52, 54, 62, 60]));
  });
  test(`point to nearest polyline`, () => {
    const points   = testPoints();
    const quadtree = Quadtree.new({
      x: points.get('x'),
      y: points.get('y'),
      xMin: 0,
      xMax: 8,
      yMin: 0,
      yMax: 8,
      scale: 1,
      maxDepth: 3,
      minSize: 12,
    });
    // For every input point: the nearest test polyline (expansion radius 2)
    // and the distance to it. One row per input point, hence 71 entries.
    const polylinePointPairsAndDistances = quadtree.pointToNearestPolyline(testPolylines(), 2);
    expect(polylinePointPairsAndDistances.get('point_index').data.toArray())
      .toEqualTypedArray(new Uint32Array([
        0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 17,
        18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
        36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
        54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70
      ]));
    expect(polylinePointPairsAndDistances.get('polyline_index').data.toArray())
      .toEqualTypedArray(new Uint32Array([
        3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 3, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
      ]));
    expect(polylinePointPairsAndDistances.get('distance').data.toArray())
      .toEqualTypedArray(new Float64Array([
        3.0675562686570932,  2.5594501016565698,  2.9849608928964071,  1.7103652150920774,
        1.8293181280383963,  1.6095070428899729,  1.681412227243898,   2.3838209461314879,
        2.5510398428020409,  1.6612106150272572,  2.0255119347250292,  2.0660867596957564,
        2.005460353737949,   1.8683447535522375,  1.9465658908648766,  2.215180472008103,
        1.7503944159063249,  1.4820166799617225,  1.6769023397521503,  1.6472789467219351,
        1.0005181046076022,  1.7522309916961678,  1.8490738879835735,  1.0018961233717569,
        0.76002760100291122, 0.65931355999132091, 1.2482129257770731,  1.3229005055827028,
        0.28581819228716798, 0.20466187296772376, 0.41061901127492934, 0.56618357460517321,
        0.046292709584059538, 0.16663093663041179, 0.44953247369220306, 0.56675685520587671,
        0.8426949387264755,  1.2851826443010033,  0.7615641155638555,  0.97842040913621187,
        0.91796378078050755, 1.4311654461101424,  0.96461369875795078, 0.66847988653443491,
        0.98348202146010699, 0.66173276971965733, 0.86233789031448094, 0.50195678903916696,
        0.6755886291567379,  0.82530249944765133, 0.46037120394920633, 0.72651648874084795,
        0.52218906793095576, 0.72892093000338909, 0.077921089704128393, 0.26215098141130333,
        0.33153993710577778, 0.71176747526132511, 0.081119666144327182, 0.60516346789266895,
        0.088508309264124049, 1.5127004224070386, 0.38943741327066272, 0.48717099143018805,
        1.1781283344854494,  1.8030436222567465,  1.0769747770485747,  1.181276832710481,
        1.1240715558969043,  1.6379084234284416,  2.1510078772519496
      ]));
  });
});
| 0 |
rapidsai_public_repos/node/modules/cuspatial
|
rapidsai_public_repos/node/modules/cuspatial/test/tsconfig.json
|
{
"extends": "../tsconfig.json",
"include": [
"../src/**/*.ts",
"../test/**/*.ts"
],
"compilerOptions": {
"target": "esnext",
"module": "commonjs",
"allowJs": true,
"importHelpers": false,
"noEmitHelpers": false,
"noEmitOnError": false,
"sourceMap": false,
"inlineSources": false,
"inlineSourceMap": false,
"downlevelIteration": false,
"baseUrl": "../",
"paths": {
"@rapidsai/cuspatial": ["src/index"],
"@rapidsai/cuspatial/*": ["src/*"],
"@rapidsai/cudf/test/*": ["../../cudf/test/*"]
}
}
}
| 0 |
rapidsai_public_repos/node/modules/cuspatial
|
rapidsai_public_repos/node/modules/cuspatial/test/spatial-tests.ts
|
// Copyright (c) 2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import '@rapidsai/cudf/test/jest-extensions';
import {setDefaultAllocator} from '@rapidsai/cuda';
import {Series} from '@rapidsai/cudf';
import {convertLonLatToCartesian} from '@rapidsai/cuspatial';
import {DeviceBuffer} from '@rapidsai/rmm';
setDefaultAllocator((byteLength: number) => new DeviceBuffer(byteLength));
describe('Spatial', () => {
test(`convertLonLatToCartesian`, () => {
const seriesX = Series.new([-120.0, -120.0]);
const seriesY = Series.new([48.0, 49.0]);
const result = convertLonLatToCartesian(1.0, 1.0, seriesX, seriesY);
expect(result.getChild('y').toArray())
.toMatchObject({'0': -5222.222222222223, '1': -5333.333333333334});
expect(result.getChild('x').toArray())
.toMatchObject({'0': 12233.92375289575, '1': 12184.804692381627});
});
});
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/cugraph/package.json
|
{
"name": "@rapidsai/cugraph",
"version": "22.12.2",
"description": "cuGraph - NVIDIA RAPIDS Graph Analytics Library",
"license": "Apache-2.0",
"main": "index.js",
"types": "build/js",
"author": "NVIDIA, Inc. (https://nvidia.com/)",
"maintainers": [
"Paul Taylor <[email protected]>"
],
"homepage": "https://github.com/rapidsai/node/tree/main/modules/cugraph#readme",
"bugs": {
"url": "https://github.com/rapidsai/node/issues"
},
"repository": {
"type": "git",
"url": "git+https://github.com/rapidsai/node.git"
},
"scripts": {
"install": "npx rapidsai-install-native-module",
"clean": "rimraf build doc compile_commands.json",
"doc": "rimraf doc && typedoc --options typedoc.js",
"test": "node -r dotenv/config node_modules/.bin/jest -c jest.config.js",
"build": "yarn tsc:build && yarn cpp:build",
"build:debug": "yarn tsc:build && yarn cpp:build:debug",
"compile": "yarn tsc:build && yarn cpp:compile",
"compile:debug": "yarn tsc:build && yarn cpp:compile:debug",
"rebuild": "yarn tsc:build && yarn cpp:rebuild",
"rebuild:debug": "yarn tsc:build && yarn cpp:rebuild:debug",
"cpp:clean": "npx cmake-js clean -O build/Release",
"cpp:clean:debug": "npx cmake-js clean -O build/Debug",
"cpp:build": "npx cmake-js build -g -O build/Release",
"cpp:build:debug": "npx cmake-js build -g -D -O build/Debug",
"cpp:compile": "npx cmake-js compile -g -O build/Release",
"postcpp:compile": "npx rapidsai-merge-compile-commands",
"cpp:compile:debug": "npx cmake-js compile -g -D -O build/Debug",
"postcpp:compile:debug": "npx rapidsai-merge-compile-commands",
"cpp:configure": "npx cmake-js configure -g -O build/Release",
"postcpp:configure": "npx rapidsai-merge-compile-commands",
"cpp:configure:debug": "npx cmake-js configure -g -D -O build/Debug",
"postcpp:configure:debug": "npx rapidsai-merge-compile-commands",
"cpp:rebuild": "npx cmake-js rebuild -g -O build/Release",
"postcpp:rebuild": "npx rapidsai-merge-compile-commands",
"cpp:rebuild:debug": "npx cmake-js rebuild -g -D -O build/Debug",
"postcpp:rebuild:debug": "npx rapidsai-merge-compile-commands",
"cpp:reconfigure": "npx cmake-js reconfigure -g -O build/Release",
"postcpp:reconfigure": "npx rapidsai-merge-compile-commands",
"cpp:reconfigure:debug": "npx cmake-js reconfigure -g -D -O build/Debug",
"postcpp:reconfigure:debug": "npx rapidsai-merge-compile-commands",
"tsc:clean": "rimraf build/js",
"tsc:build": "yarn tsc:clean && tsc -p ./tsconfig.json",
"tsc:watch": "yarn tsc:clean && tsc -p ./tsconfig.json -w",
"dev:cpack:enabled": "echo $npm_package_name"
},
"dependencies": {
"@rapidsai/cudf": "~22.12.2"
},
"devDependencies": {
"@rapidsai/cuml": "~22.12.2"
},
"files": [
"LICENSE",
"README.md",
"index.js",
"package.json",
"CMakeLists.txt",
"src/node_cugraph",
"build/js"
]
}
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/cugraph/index.js
|
// Copyright (c) 2020, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
module.exports = require('./build/js/index');
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/cugraph/jest.config.js
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
try {
require('dotenv').config();
} catch (e) {}
module.exports = {
'verbose': true,
'testEnvironment': 'node',
'maxWorkers': process.env.PARALLEL_LEVEL || 1,
'globals': {'ts-jest': {'diagnostics': false, 'tsconfig': 'test/tsconfig.json'}},
'rootDir': './',
'roots': ['<rootDir>/test/'],
'moduleFileExtensions': ['js', 'ts', 'tsx'],
'coverageReporters': ['lcov'],
'coveragePathIgnorePatterns': ['test\\/.*\\.(ts|tsx|js)$', '/node_modules/'],
'transform': {'^.+\\.jsx?$': 'ts-jest', '^.+\\.tsx?$': 'ts-jest'},
'transformIgnorePatterns':
['/build/(js|Debug|Release)/*$', '/node_modules/(?!web-stream-tools).+\\.js$'],
'testRegex': '(.*(-|\\.)(test|spec)s?)\\.(ts|tsx|js)$',
'preset': 'ts-jest',
'testMatch': null,
'moduleNameMapper': {
'^@rapidsai\/cugraph(.*)': '<rootDir>/src/$1',
'^\.\.\/(Debug|Release)\/(rapidsai_cugraph.node)$': '<rootDir>/build/$1/$2',
}
};
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/cugraph/CMakeLists.txt
|
#=============================================================================
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
cmake_minimum_required(VERSION 3.24.1 FATAL_ERROR)

# Emit compile_commands.json for clangd/IDE tooling (symlinked into the source dir below).
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

# Clear any inherited library output directory (normal and cached) so the
# generated .node binary lands in the cmake-js build directory.
unset(CMAKE_LIBRARY_OUTPUT_DIRECTORY)
unset(CMAKE_LIBRARY_OUTPUT_DIRECTORY CACHE)

option(NODE_RAPIDS_USE_SCCACHE "Enable caching compilation results with sccache" ON)

###################################################################################################
# - cmake modules ---------------------------------------------------------------------------------

# Ask node to locate the shared CMake modules shipped with @rapidsai/core.
execute_process(COMMAND node -p
                "require('@rapidsai/core').cmake_modules_path"
                WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
                OUTPUT_VARIABLE NODE_RAPIDS_CMAKE_MODULES_PATH
                OUTPUT_STRIP_TRAILING_WHITESPACE)

include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/cmake_policies.cmake")

# Version comes from package.json via the npm-provided environment variable.
project(rapidsai_cugraph VERSION $ENV{npm_package_version} LANGUAGES C CXX)

# NOTE(review): the native cugraph addon is skipped entirely on aarch64 —
# presumably unsupported by an upstream dependency; confirm before changing.
if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
  return()
endif()
# Resolve the on-disk locations of the sibling @rapidsai modules whose sources
# and prebuilt .node binaries this addon compiles and links against. Each
# iteration sets NODE_RAPIDS_<PKG>_MODULE_PATH (CORE/CUDA/RMM/CUDF), exactly as
# the previous four duplicated execute_process blocks did.
foreach(pkg core cuda rmm cudf)
  string(TOUPPER "${pkg}" pkg_upper)
  execute_process(COMMAND node -p
                  "require('path').dirname(require.resolve('@rapidsai/${pkg}'))"
                  WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
                  OUTPUT_VARIABLE NODE_RAPIDS_${pkg_upper}_MODULE_PATH
                  OUTPUT_STRIP_TRAILING_WHITESPACE)
endforeach()
unset(pkg_upper)
# Shared toolchain/helper modules from @rapidsai/core.
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureCXX.cmake")
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureCUDA.cmake")
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureNapi.cmake")
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/install_utils.cmake")

# When reusing local dependency build dirs, skip configuring libcudf here and
# rely on the already-built artifacts instead.
if(NOT DEFINED ENV{NODE_RAPIDS_USE_LOCAL_DEPS_BUILD_DIRS})
  include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureCUDF.cmake")
endif()
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureCUGRAPH.cmake")
###################################################################################################
# - rapidsai_cugraph target ---------------------------------------------------------------------------

# Collect the addon's C++ and CUDA sources. CONFIGURE_DEPENDS makes the build
# re-run the glob when files are added or removed, so new sources are picked up
# without a manual reconfigure (previously they were silently missed).
file(GLOB_RECURSE NODE_CUGRAPH_CPP_FILES CONFIGURE_DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp")
file(GLOB_RECURSE NODE_CUGRAPH_CUDA_FILES CONFIGURE_DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/src/*.cu")
list(APPEND NODE_CUGRAPH_SRC_FILES ${NODE_CUGRAPH_CPP_FILES} ${NODE_CUGRAPH_CUDA_FILES})

add_library(${PROJECT_NAME} SHARED ${NODE_CUGRAPH_SRC_FILES} ${CMAKE_JS_SRC})

# Node native addons are plain shared libraries named "<name>.node" with no
# "lib" prefix. RPATH=$ORIGIN lets the addon locate the sibling .node binaries
# it links against next to itself at load time.
set_target_properties(${PROJECT_NAME}
                      PROPERTIES PREFIX ""
                                 SUFFIX ".node"
                                 BUILD_RPATH "\$ORIGIN"
                                 INSTALL_RPATH "\$ORIGIN"
                                 CXX_STANDARD 17
                                 CXX_STANDARD_REQUIRED ON
                                 CUDA_STANDARD 17
                                 CUDA_STANDARD_REQUIRED ON
                                 NO_SYSTEM_FROM_IMPORTED ON
                                 POSITION_INDEPENDENT_CODE ON
                                 INTERFACE_POSITION_INDEPENDENT_CODE ON
)

# Per-language flags assembled by the Configure* modules above.
target_compile_options(${PROJECT_NAME}
                       PRIVATE "$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:C>:${NODE_RAPIDS_CMAKE_C_FLAGS}>>"
                               "$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:CXX>:${NODE_RAPIDS_CMAKE_CXX_FLAGS}>>"
                               "$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:CUDA>:${NODE_RAPIDS_CMAKE_CUDA_FLAGS}>>"
)

target_compile_definitions(${PROJECT_NAME}
                           PUBLIC "$<$<COMPILE_LANGUAGE:CXX>:CUDA_API_PER_THREAD_DEFAULT_STREAM>"
                                  "$<$<COMPILE_LANGUAGE:CUDA>:CUDA_API_PER_THREAD_DEFAULT_STREAM>"
)
# Choose how to consume libcudf: prefer the imported cudf::cudf target when the
# ConfigureCUDF step provided one; otherwise fall back to the static library and
# headers inside the sibling @rapidsai/cudf module's build tree.
if(NOT TARGET cudf::cudf)
  set(LIBCUDF_LIBRARY "${NODE_RAPIDS_CUDF_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/_deps/cudf-build/libcudf.a")
  list(APPEND LIBCUDF_INCLUDE_DIRS "${NODE_RAPIDS_CUDF_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/_deps/cudf-build/include")
  list(APPEND LIBCUDF_INCLUDE_DIRS "${NODE_RAPIDS_CUDF_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/_deps/cudf-src/cpp/include")
else()
  set(LIBCUDF_LIBRARY cudf::cudf)
  set(LIBCUDF_INCLUDE_DIRS "")
endif()
# Headers from this addon, the sibling @rapidsai modules' sources, libcudf,
# rapids core, and N-API.
target_include_directories(${PROJECT_NAME}
                           PUBLIC "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src>"
                                  "$<BUILD_INTERFACE:${NODE_RAPIDS_CUDF_MODULE_PATH}/src>"
                                  "$<BUILD_INTERFACE:${NODE_RAPIDS_RMM_MODULE_PATH}/src>"
                                  "$<BUILD_INTERFACE:${NODE_RAPIDS_CUDA_MODULE_PATH}/src>"
                                  "$<BUILD_INTERFACE:${RAPIDS_CORE_INCLUDE_DIR}>"
                                  "$<BUILD_INTERFACE:${LIBCUDF_INCLUDE_DIRS}>"
                                  "$<BUILD_INTERFACE:${NAPI_INCLUDE_DIRS}>"
)

# Link against libcugraph, libcudf (target or static fallback chosen above),
# and the prebuilt .node addons of the sibling modules so their exported
# symbols resolve at load time.
target_link_libraries(${PROJECT_NAME}
                      PUBLIC ${CMAKE_JS_LIB}
                             cugraph::cugraph
                             "${LIBCUDF_LIBRARY}"
                             "${NODE_RAPIDS_CUDF_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_cudf.node"
                             "${NODE_RAPIDS_RMM_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_rmm.node"
                             "${NODE_RAPIDS_CUDA_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_cuda.node"
                             "${NODE_RAPIDS_CORE_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_core.node")

# Arch-specific helper targets and install/packaging rules shared across modules.
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/cuda_arch_helpers.cmake")
generate_arch_specific_custom_targets(
  NAME ${PROJECT_NAME}
  DEPENDENCIES "cudf::cudf"
               "cugraph::cugraph"
               "cugraph-ops::cugraph-ops++"
)
generate_install_rules(
  NAME ${PROJECT_NAME}
  CUDA_ARCHITECTURES ${CMAKE_CUDA_ARCHITECTURES})

# Create a symlink to compile_commands.json for the llvm-vs-code-extensions.vscode-clangd plugin
execute_process(COMMAND
  ${CMAKE_COMMAND} -E create_symlink
  ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
  ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json)
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/cugraph/README.md
|
# <div align="left"><img src="https://rapids.ai/assets/images/rapids_logo.png" width="90px"/> node-rapids cuGraph - GPU Graph Analytics</div>
### Installation
`npm install @rapidsai/cugraph`
### About
These are the JS bindings for [cuGraph](https://github.com/rapidsai/cugraph), a collection of GPU-accelerated graph algorithms that process data found in GPU DataFrames.
For the detailed node-cuGraph API, see our [API Documentation](https://rapidsai.github.io/node/modules/cugraph_src.html).
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/cugraph/tsconfig.json
|
{
"include": ["src"],
"exclude": ["node_modules"],
"compilerOptions": {
"baseUrl": "./",
"paths": {
"@rapidsai/cugraph": ["src/index"],
"@rapidsai/cugraph/*": ["src/*"]
},
"target": "ESNEXT",
"module": "commonjs",
"outDir": "./build/js",
/* Decorators */
"experimentalDecorators": false,
/* Basic stuff */
"moduleResolution": "node",
"skipLibCheck": true,
"skipDefaultLibCheck": true,
"lib": ["dom", "esnext", "esnext.asynciterable"],
/* Control what is emitted */
"declaration": true,
"declarationMap": true,
"noEmitOnError": true,
"removeComments": false,
"downlevelIteration": true,
/* Create inline sourcemaps with sources */
"sourceMap": false,
"inlineSources": true,
"inlineSourceMap": true,
/* The most restrictive settings possible */
"strict": true,
"importHelpers": true,
"noEmitHelpers": true,
"noImplicitAny": true,
"noUnusedLocals": true,
"noImplicitReturns": true,
"allowUnusedLabels": false,
"noUnusedParameters": true,
"allowUnreachableCode": false,
"noFallthroughCasesInSwitch": true,
"forceConsistentCasingInFileNames": true
}
}
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/cugraph/LICENSE
|
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------
include/visit_struct/visit_struct.hpp (modified): BSL 1.0
Boost Software License - Version 1.0 - August 17th, 2003
Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:
The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
| 0 |
rapidsai_public_repos/node/modules
|
rapidsai_public_repos/node/modules/cugraph/typedoc.js
|
// Typedoc configuration: generates the API reference docs for this package
// into the `doc/` directory, starting from the public entry point
// `src/index.ts`. Private/protected members and external (node_modules)
// symbols are excluded from the generated documentation.
module.exports = {
entryPoints: ['src/index.ts'],
out: 'doc',
name: '@rapidsai/cugraph',
tsconfig: 'tsconfig.json',
excludePrivate: true,
excludeProtected: true,
excludeExternals: true,
};
| 0 |
rapidsai_public_repos/node/modules/cugraph
|
rapidsai_public_repos/node/modules/cugraph/.vscode/launch.json
|
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"compounds": [
{
"name": "Debug Tests (TS and C++)",
"configurations": [
"Debug Tests (launch gdb)",
// "Debug Tests (launch lldb)",
"Debug Tests (attach node)",
]
}
],
"configurations": [
{
"name": "Debug Tests (TS only)",
"type": "node",
"request": "launch",
"cwd": "${workspaceFolder}",
"console": "integratedTerminal",
"internalConsoleOptions": "neverOpen",
"program": "${workspaceFolder}/node_modules/.bin/jest",
"skipFiles": [
"<node_internals>/**",
"${workspaceFolder}/node_modules/**"
],
"env": {
"NODE_NO_WARNINGS": "1",
"NODE_ENV": "production",
"READABLE_STREAM": "disable",
},
"args": [
"--verbose",
"--runInBand",
"-c", "jest.config.js",
"${input:TEST_FILE}"
]
},
// {
// "name": "Debug Tests (launch lldb)",
// // hide the individual configurations from the debug dropdown list
// "presentation": { "hidden": true },
// "type": "lldb",
// "request": "launch",
// "stdio": null,
// "cwd": "${workspaceFolder}",
// "preLaunchTask": "cpp:ensure:debug:build",
// "env": {
// "NODE_DEBUG": "1",
// "NODE_NO_WARNINGS": "1",
// "NODE_ENV": "production",
// "READABLE_STREAM": "disable",
// },
// "stopOnEntry": false,
// "terminal": "console",
// "program": "${input:NODE_BINARY}",
// "initCommands": [
// "settings set target.disable-aslr false",
// ],
// "sourceLanguages": ["cpp", "cuda", "javascript"],
// "args": [
// "--inspect=9229",
// "--expose-internals",
// "${workspaceFolder}/node_modules/.bin/jest",
// "--verbose",
// "--runInBand",
// "-c",
// "jest.config.js",
// "${input:TEST_FILE}"
// ],
// },
{
"name": "Debug Tests (launch gdb)",
// hide the individual configurations from the debug dropdown list
"presentation": { "hidden": true },
"type": "cppdbg",
"request": "launch",
"stopAtEntry": false,
"externalConsole": false,
"cwd": "${workspaceFolder}",
"envFile": "${workspaceFolder}/.env",
"MIMode": "gdb",
"miDebuggerPath": "/usr/bin/gdb",
"setupCommands": [
{
"description": "Enable pretty-printing for gdb",
"text": "-enable-pretty-printing",
"ignoreFailures": true
}
],
"program": "${input:NODE_BINARY}",
"environment": [
{ "name": "NODE_DEBUG", "value": "1" },
{ "name": "NODE_NO_WARNINGS", "value": "1" },
{ "name": "NODE_ENV", "value": "production" },
{ "name": "READABLE_STREAM", "value": "disable" },
],
"args": [
"--inspect=9229",
"--expose-internals",
"${workspaceFolder}/node_modules/.bin/jest",
"--verbose",
"--runInBand",
"-c",
"jest.config.js",
"${input:TEST_FILE}"
],
},
{
"name": "Debug Tests (attach node)",
"type": "node",
"request": "attach",
// hide the individual configurations from the debug dropdown list
"presentation": { "hidden": true },
"port": 9229,
"timeout": 60000,
"cwd": "${workspaceFolder}",
"skipFiles": [
"<node_internals>/**",
"${workspaceFolder}/node_modules/**"
],
},
],
"inputs": [
{
"type": "command",
"id": "NODE_BINARY",
"command": "shellCommand.execute",
"args": {
"description": "path to node",
"command": "which node",
"useFirstResult": true,
}
},
{
"type": "command",
"id": "TEST_FILE",
"command": "shellCommand.execute",
"args": {
"cwd": "${workspaceFolder}/modules/cugraph",
"description": "Select a file to debug",
"command": "./node_modules/.bin/jest --listTests | sed -r \"s@$PWD/test/@@g\"",
}
},
],
}
| 0 |
rapidsai_public_repos/node/modules/cugraph
|
rapidsai_public_repos/node/modules/cugraph/.vscode/tasks.json
|
{
"version": "2.0.0",
"tasks": [
{
"type": "shell",
"label": "Rebuild node_cugraph TS and C++ (slow)",
"group": { "kind": "build", "isDefault": true, },
"command": "if [[ \"${input:CMAKE_BUILD_TYPE}\" == \"Release\" ]]; then yarn rebuild; else yarn rebuild:debug; fi",
"problemMatcher": [
"$tsc",
{
"owner": "cuda",
"fileLocation": ["relative", "${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}"],
"pattern": {
"file": 1,
"line": 2,
"severity": 3,
"message": 4,
"regexp": "^(.*)\\((\\d+)\\):\\s+(error|warning|note|info):\\s+(.*)$"
}
},
{
"owner": "cpp",
"fileLocation": ["relative", "${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}"],
"pattern": {
"file": 1,
"line": 2,
"severity": 4,
"message": 5,
"regexp": "^(.*):(\\d+):(\\d+):\\s+(error|warning|note|info):\\s+(.*)$"
}
},
],
},
{
"type": "npm",
"group": "build",
"label": "Recompile node_cugraph TS (fast)",
"script": "tsc:build",
"detail": "yarn tsc:build",
"problemMatcher": ["$tsc"],
},
{
"type": "shell",
"group": "build",
"label": "Recompile node_cugraph C++ (fast)",
"command": "ninja -C ${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}",
"problemMatcher": [
{
"owner": "cuda",
"fileLocation": ["relative", "${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}"],
"pattern": {
"file": 1,
"line": 2,
"severity": 3,
"message": 4,
"regexp": "^(.*)\\((\\d+)\\):\\s+(error|warning|note|info):\\s+(.*)$"
}
},
{
"owner": "cpp",
"fileLocation": ["relative", "${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}"],
"pattern": {
"file": 1,
"line": 2,
"severity": 4,
"message": 5,
"regexp": "^(.*):(\\d+):(\\d+):\\s+(error|warning|note|info):\\s+(.*)$"
}
},
],
},
],
"inputs": [
{
"type": "pickString",
"default": "Release",
"id": "CMAKE_BUILD_TYPE",
"options": ["Release", "Debug"],
"description": "C++ Build Type",
}
]
}
| 0 |
rapidsai_public_repos/node/modules/cugraph
|
rapidsai_public_repos/node/modules/cugraph/notebooks/Hypergraph.ipynb
|
var {Series, DataFrame} = require("@rapidsai/cudf")
var {hypergraphDirect} = require("@rapidsai/cugraph")var log = new DataFrame({
src_ip: ['128.0.0.1', '128.0.0.1'],
dest_ip: ['172.0.0.2', '172.0.0.3'],
vuln: ['cve-123', 'cve-123']
})var h = hypergraphDirect(log)console.log(h.nodes.toString())console.log(h.edges.toString())console.log(h.entities.toString())
| 0 |
rapidsai_public_repos/node/modules/cugraph
|
rapidsai_public_repos/node/modules/cugraph/src/graph.ts
|
// Copyright (c) 2022-2023, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {Float32Buffer, MemoryView} from '@rapidsai/cuda';
import {DataFrame, DataType, Float32, Int32, scope, Series} from '@rapidsai/cudf';
import {DeviceBuffer} from '@rapidsai/rmm';
import {Graph as CUGraph} from './addon';
import {ForceAtlas2Options, SpectralClusteringOptions} from './node_cugraph';
import {renumberEdges, renumberNodes} from './renumber';
/** @summary Options controlling Graph construction. */
export interface GraphOptions {
// Whether edges are directed; treated as `true` when omitted.
directed?: boolean;
}
/**
 * @summary A graph whose node values have been renumbered to dense Int32 ids,
 * wrapping the native cuGraph addon `Graph`. The native handle is created
 * lazily from the renumbered edge list on first use (see the `graph` getter).
 */
export class Graph<T extends DataType = any> {
/**
 * @summary Construct a Graph from edge endpoint Series, renumbering node
 * values to dense Int32 ids.
 * @param src Source endpoint of each edge.
 * @param dst Destination endpoint of each edge.
 * @param weights Edge weights; defaults to a constant 1.0 per edge.
 * @param options Graph options; `directed` defaults to `true`.
 */
public static fromEdgeList<T extends Series<DataType>>(
src: T,
dst: T,
weights = Series.sequence({type: new Float32, size: src.length, init: 1, step: 0}),
options: GraphOptions = {directed: true}) {
const nodes = renumberNodes(src, dst);
const edges = renumberEdges(src, dst, weights, nodes);
return new Graph<T['type']>(nodes, edges, options);
}
// Protected: instances are created via the fromEdgeList factories.
protected constructor(nodes: DataFrame<{id: Int32, node: T}>,
edges: DataFrame<{id: Int32, src: Int32, dst: Int32, weight: Float32}>,
options: GraphOptions = {directed: true}) {
this._edges = edges;
this._nodes = nodes;
this._directed = options?.directed ?? true;
}
// Renumbered node table: dense Int32 id paired with the original node value.
declare protected _nodes: DataFrame<{id: Int32, node: T}>;
// Renumbered edge table: edge id, Int32 src/dst node ids, Float32 weight.
declare protected _edges: DataFrame<{id: Int32, src: Int32, dst: Int32, weight: Float32}>;
declare protected _directed: boolean;
// Lazily-created native cuGraph handle (see the `graph` getter).
declare protected _graph: CUGraph;
// Create (once) and return the native graph built from the renumbered edges.
protected get graph() {
return this._graph || (this._graph = new CUGraph({
src: this._edges.get('src')._col,
dst: this._edges.get('dst')._col,
weight: this._edges.get('weight')._col,
directed: this._directed,
}));
}
/**
* @summary The number of edges in this Graph
*/
public get numEdges() { return this.graph.numEdges(); }
/**
* @summary The number of nodes in this Graph
*/
public get numNodes() { return this.graph.numNodes(); }
// The original (un-renumbered) node values.
public get nodes() { return this._nodes.drop(['id']); }
// The edge list with src/dst mapped back to their original node values,
// ordered ascending by edge id.
public get edges() {
// Join one endpoint column ('src' or 'dst') back against the node table to
// recover the original node values for that endpoint.
const unnumber = (typ: 'src'|'dst') => {
const id = this._edges.get(typ);
const eid = this._edges.get('id');
const lhs = new DataFrame({id, eid, weight: this._edges.get('weight')});
const rhs = this._nodes.rename({node: typ});
return lhs.join({on: ['id'], other: rhs});
};
return scope(() => unnumber('src') //
.join({on: ['eid'], other: unnumber('dst')})
.sortValues({eid: {ascending: true}}),
[this])
.rename({eid: 'id'})
.select(['id', 'src', 'dst', 'weight']);
}
// The renumbered node ids only.
public get nodeIds() { return this._nodes.select(['id']); }
// The renumbered edge ids with their Int32 endpoints.
public get edgeIds() { return this._edges.select(['id', 'src', 'dst']); }
// Collapse duplicate (src, dst) edges into a DedupedEdgesGraph
// (duplicate edge weights are summed; see DedupedEdgesGraph.fromEdgeList).
public dedupeEdges() {
const src = this.edges.get('src');
const dst = this.edges.get('dst');
const weight = this.edges.get('weight');
return DedupedEdgesGraph.fromEdgeList(src, dst, weight, {directed: this._directed});
}
/**
* @summary Compute the total number of edges incident to a vertex (both in and out edges).
*/
public degree() {
return new DataFrame({vertex: this._nodes.get('id')._col, degree: this.graph.degree()});
}
/**
* @summary ForceAtlas2 is a continuous graph layout algorithm for handy network visualization.
*
* @note Peak memory allocation occurs at 30*V.
*
* @param {ForceAtlas2Options} options
*
* @returns {Float32Buffer} The new positions.
*/
public forceAtlas2(options: ForceAtlas2Options<any> = {positions: undefined}) {
const {numNodes} = this;
let positions: Float32Buffer|void = undefined;
if (options.positions && typeof options.positions === 'object') {
// Accept either a MemoryView (use its backing buffer) or a raw buffer/Memory.
positions = new Float32Buffer(
options.positions instanceof MemoryView && options.positions.buffer || options.positions);
if (positions.length < numNodes * 2) {
// reallocate new positions and copy over old X/Y positions
// (layout is planar: all X values first, then all Y values)
const p = new Float32Buffer(
new DeviceBuffer(numNodes * 2 * Float32Buffer.BYTES_PER_ELEMENT, options.memoryResource));
if (positions.length > 0) {
const pn = positions.length / 2;
const sx = positions.subarray(0, Math.min(numNodes, pn));
const sy = positions.subarray(pn, pn + Math.min(numNodes, pn));
p.copyFrom(sx, 0, 0).copyFrom(sy, 0, numNodes);
}
positions = p;
}
}
return new Float32Buffer(this.graph.forceAtlas2({...options, positions: positions?.buffer}));
}
}
/** @summary Options for {@link DedupedEdgesGraph.computeClusters}. */
export interface ClusteringOptions extends SpectralClusteringOptions {
// Which spectral method to use to compute the clustering.
type: 'balanced_cut'|'modularity_maximization';
}
/** @summary Options for {@link DedupedEdgesGraph.analyzeClustering}. */
export interface AnalyzeClusteringOptions {
// The number of clusters present in `cluster`.
num_clusters: number;
// Per-vertex cluster assignments (as produced by computeClusters).
cluster: Series<Int32>;
// Which score to compute for the given clustering.
type: 'modularity'|'edge_cut'|'ratio_cut';
}
/**
 * @summary A {@link Graph} whose edge list has been deduplicated: parallel
 * edges between the same (src, dst) pair are collapsed into a single edge
 * whose weight is the sum of the duplicates' weights.
 */
export class DedupedEdgesGraph<T extends DataType = any> extends Graph<T> {
  /**
   * @summary Construct a DedupedEdgesGraph from (possibly duplicated) edge lists.
   * @param src Source endpoint of each edge.
   * @param dst Destination endpoint of each edge.
   * @param weights Edge weights; defaults to a constant 1.0 per edge.
   *   Weights of duplicate (src, dst) pairs are summed.
   * @param options Graph options; `directed` defaults to `true`.
   */
  public static fromEdgeList<T extends Series<DataType>>(
    src: T,
    dst: T,
    weights = Series.sequence({type: new Float32, size: src.length, init: 1, step: 0}),
    options: GraphOptions = {directed: true}) {
    return scope(() => {
      // Keep the smallest original edge id per unique (src, dst) pair.
      const ids = new DataFrame({src, dst, id: Series.sequence({size: src.length})})
                    .groupBy({by: ['src', 'dst'], index_key: 'src_dst'})
                    .min();
      // Sum the weights of duplicate edges.
      const weight = new DataFrame({src, dst, weights: weights.cast(new Float32)})
                       .groupBy({by: ['src', 'dst'], index_key: 'src_dst'})
                       .sum();
      const edges = ids.join({on: ['src_dst'], other: weight}).sortValues({id: {ascending: true}});
      const dd_src = edges.get('src_dst').getChild('src') as T;
      const dd_dst = edges.get('src_dst').getChild('dst') as T;
      const rn_nodes = renumberNodes(dd_src, dd_dst);
      const rn_edges = renumberEdges(dd_src, dd_dst, edges.get('weights'), rn_nodes);
      return new DedupedEdgesGraph<T['type']>(rn_nodes, rn_edges, options);
    }, [src, dst, weights]);
  }
  /**
   * @summary Compute a clustering/partitioning of this graph using either the spectral balanced cut
   * method, or the spectral modularity maximization method.
   *
   * @see https://en.wikipedia.org/wiki/Cluster_analysis
   * @see https://en.wikipedia.org/wiki/Spectral_clustering
   *
   * @param {ClusteringOptions} options Options for the clustering method
   */
  public computeClusters(options: ClusteringOptions) {
    // Merge in the solver parameters without mutating the caller's `options`
    // object. (The previous implementation Object.assign'd into `options`,
    // leaking these values back to the caller.) The fixed parameters below
    // intentionally take precedence, matching the original behavior.
    const opts = {
      ...options,
      num_eigen_vecs: Math.min(2, options.num_clusters),
      evs_tolerance: 0.00001,
      evs_max_iter: 100,
      kmean_tolerance: 0.00001,
      kmean_max_iter: 100,
    };
    const cluster = (() => {
      switch (opts.type) {
        case 'balanced_cut': return this.graph.spectralBalancedCutClustering(opts);
        case 'modularity_maximization':
          return this.graph.spectralModularityMaximizationClustering(opts);
        default: throw new Error(`Unrecognized clustering type "${opts.type as string}"`);
      }
    })();
    return new DataFrame({vertex: this._nodes.get('id')._col, cluster});
  }
  /**
   * @summary Compute a score for a given partitioning/clustering. The assumption is
   * that `options.clustering` is the results from a call to {@link computeClusters} and
   * contains columns named `vertex` and `cluster`.
   *
   * @param {AnalyzeClusteringOptions} options
   *
   * @returns {number} The computed clustering score
   */
  public analyzeClustering(options: AnalyzeClusteringOptions) {
    switch (options.type) {
      case 'edge_cut':
        return this.graph.analyzeEdgeCutClustering(options.num_clusters, options.cluster._col);
      case 'ratio_cut':
        return this.graph.analyzeRatioCutClustering(options.num_clusters, options.cluster._col);
      case 'modularity':
        return this.graph.analyzeModularityClustering(options.num_clusters, options.cluster._col);
      default: throw new Error(`Unrecognized clustering type "${options.type as string}"`);
    }
  }
}
| 0 |
rapidsai_public_repos/node/modules/cugraph
|
rapidsai_public_repos/node/modules/cugraph/src/graph.cpp
|
// Copyright (c) 2020-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <node_cugraph/graph.hpp>
#include <node_cugraph/utilities/error.hpp>
#include <node_cudf/table.hpp>
#include <node_cudf/utilities/dtypes.hpp>
#include <node_cuda/utilities/error.hpp>
#include <node_cuda/utilities/napi_to_cpp.hpp>
#include <cugraph/legacy/functions.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/filling.hpp>
#include <cudf/reduction.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/device_buffer.hpp>
#include <napi.h>
namespace nv {
namespace {
// Extract a required Column-typed property `name` from the JS options object
// `opts`, validating that it exists, is a Column instance, and has the cudf
// type id `type`. Throws a JS error (NODE_CUGRAPH_THROW/EXPECT) on violation.
template <cudf::type_id type>
Column::wrapper_t get_col(NapiToCPP::Object opts, std::string const& name) {
auto env = opts.Env();
auto val = opts.Get(name);
// Missing/null/undefined property -> required-argument error.
if (val.IsEmpty() || val.IsNull() || val.IsUndefined()) {
NODE_CUGRAPH_THROW("Graph requires `" + name + "` to be a Column");
}
NODE_CUGRAPH_EXPECT(Column::IsInstance(val), "Graph requires `" + name + "` to be a Column", env);
Column::wrapper_t col = val.As<Napi::Object>();
// Enforce the expected element type; the message names the required type.
NODE_CUGRAPH_EXPECT(col->type().id() == type,
"Graph requires `" + name + "` to be a Column of " +
cudf::type_dispatcher(cudf::data_type{type}, cudf::type_to_name{}),
env);
return col;
}
// Convert a COO (edge-list) graph to CSR form via cugraph::coo_to_csr,
// wrapping the resulting offsets/indices/edge-data device buffers as
// JS-owned cudf Columns. Returns (offsets, indices, weights).
// NOTE(review): src/dst are read as edge_t rather than vertex_t; both are
// int32 at the only call site (csr_view), so this is currently equivalent.
template <typename vertex_t, typename edge_t, typename weight_t>
std::tuple<Column::wrapper_t, Column::wrapper_t, Column::wrapper_t> coo_to_csr(
Napi::Env env,
Column::wrapper_t const& src,
Column::wrapper_t const& dst,
Column::wrapper_t const& e_weights,
int32_t const num_nodes,
int32_t const num_edges) {
auto csr = cugraph::coo_to_csr(cugraph::legacy::GraphCOOView<vertex_t, edge_t, weight_t>(
src->mutable_view().begin<edge_t>(),
dst->mutable_view().begin<edge_t>(),
e_weights->mutable_view().begin<weight_t>(),
num_nodes,
num_edges))
->release();
// Wrap a released rmm::device_buffer as a new cudf Column of `type_id`;
// the element count is inferred from the buffer's byte size.
auto csr_col = [&](cudf::type_id type_id, rmm::device_buffer& data) {
auto type = cudf::data_type{type_id};
auto size = data.size() / cudf::size_of(type);
return Column::New(env, std::make_unique<cudf::column>(type, size, std::move(data)));
};
auto offsets = csr_col(cudf::type_to_id<edge_t>(), *csr.offsets.release());
auto indices = csr_col(cudf::type_to_id<vertex_t>(), *csr.indices.release());
auto a_weights = csr_col(cudf::type_to_id<weight_t>(), *csr.edge_data.release());
return std::make_tuple(offsets, indices, a_weights);
}
} // namespace
// Register the JS `Graph` class: defines the instance methods exposed to
// JavaScript and returns the class constructor function.
Napi::Function Graph::Init(Napi::Env const& env, Napi::Object exports) {
return DefineClass(
env,
"Graph",
{
InstanceMethod<&Graph::num_edges>("numEdges"),
InstanceMethod<&Graph::num_nodes>("numNodes"),
InstanceMethod<&Graph::force_atlas2>("forceAtlas2"),
InstanceMethod<&Graph::degree>("degree"),
InstanceMethod<&Graph::spectral_modularity_maximization_clustering>(
"spectralModularityMaximizationClustering"),
InstanceMethod<&Graph::spectral_balanced_cut_clustering>("spectralBalancedCutClustering"),
InstanceMethod<&Graph::analyze_modularity_clustering>("analyzeModularityClustering"),
InstanceMethod<&Graph::analyze_edge_cut_clustering>("analyzeEdgeCutClustering"),
InstanceMethod<&Graph::analyze_ratio_cut_clustering>("analyzeRatioCutClustering"),
});
}
// Construct a new JS-owned Graph instance from src/dst Columns.
// NOTE(review): the Graph constructor reads args[0] as an options object with
// `src`/`dst`/`weight` properties; confirm EnvLocalObjectWrap<Graph>::New
// forwards these two Columns in a form the constructor accepts.
Graph::wrapper_t Graph::New(Napi::Env const& env,
Column::wrapper_t const& src,
Column::wrapper_t const& dst) {
return EnvLocalObjectWrap<Graph>::New(env, src, dst);
}
// Initialize from a JS options object: { src, dst, weight, directed? }.
// `src`/`dst` must be INT32 Columns and `weight` a FLOAT32 Column; all three
// are retained via persistent references for the lifetime of this Graph.
Graph::Graph(CallbackArgs const& args) : EnvLocalObjectWrap<Graph>(args) {
NapiToCPP::Object opts = args[0];
directed_edges_ = opts.Get("directed").ToBoolean();
src_ = Napi::Persistent(get_col<cudf::type_id::INT32>(opts, "src"));
dst_ = Napi::Persistent(get_col<cudf::type_id::INT32>(opts, "dst"));
e_weights_ = Napi::Persistent(get_col<cudf::type_id::FLOAT32>(opts, "weight"));
}
// Lazily compute and cache the node count as 1 + the maximum vertex id seen
// in either endpoint column (assumes 0-based vertex ids — the JS layer
// renumbers nodes to dense Int32 ids before constructing the native Graph).
int32_t Graph::num_nodes() {
if (!node_count_computed_) {
node_count_ =
1 + std::max(src_.Value()->minmax().second->get_value().ToNumber().Int32Value(), //
dst_.Value()->minmax().second->get_value().ToNumber().Int32Value());
node_count_computed_ = true;
}
return node_count_;
}
// Lazily compute and cache the edge count. Directed graphs count every row;
// undirected graphs count only rows with src >= dst. NOTE(review): this
// presumes undirected edge lists carry both directions of each edge — confirm.
int32_t Graph::num_edges() {
if (!edge_count_computed_) {
auto const& src = *src_.Value();
auto const& dst = *dst_.Value();
edge_count_ = directed_edges_ ? src.size() : src[src >= dst]->size();
edge_count_computed_ = true;
}
return edge_count_;
}
}
// Build a non-owning COO (edge-list) view over the retained src/dst/weight
// Columns, suitable for passing to legacy cugraph algorithms.
cugraph::legacy::GraphCOOView<int32_t, int32_t, float> Graph::coo_view() {
return cugraph::legacy::GraphCOOView<int32_t, int32_t, float>(
src_.Value()->mutable_view().begin<int32_t>(),
dst_.Value()->mutable_view().begin<int32_t>(),
e_weights_.Value()->mutable_view().begin<float>(),
num_nodes(),
num_edges());
}
// Build (once) and cache the CSR form of the graph, then return a non-owning
// view over the cached offsets/indices/weights Columns.
cugraph::legacy::GraphCSRView<int32_t, int32_t, float> Graph::csr_view() {
// Empty offsets_ reference means the CSR conversion has not run yet.
if (offsets_.IsEmpty()) {
auto csr = coo_to_csr<int32_t, int32_t, float>(
Env(), src_.Value(), dst_.Value(), e_weights_.Value(), num_nodes(), num_edges());
// Retain the CSR Columns for the lifetime of this Graph.
offsets_ = Napi::Persistent(std::move(std::get<0>(csr)));
indices_ = Napi::Persistent(std::move(std::get<1>(csr)));
a_weights_ = Napi::Persistent(std::move(std::get<2>(csr)));
}
return cugraph::legacy::GraphCSRView<int32_t, int32_t, float>(
offsets_.Value()->mutable_view().begin<int32_t>(),
indices_.Value()->mutable_view().begin<int32_t>(),
a_weights_.Value()->mutable_view().begin<float>(),
num_nodes(),
num_edges());
}
// JS-facing wrapper for Graph.prototype.numNodes().
Napi::Value Graph::num_nodes(Napi::CallbackInfo const& info) {
return Napi::Value::From(info.Env(), num_nodes());
}
// JS-facing wrapper for Graph.prototype.numEdges().
Napi::Value Graph::num_edges(Napi::CallbackInfo const& info) {
return Napi::Value::From(info.Env(), num_edges());
}
// JS-facing wrapper: compute each vertex's IN+OUT degree into a newly
// allocated INT32 Column of length num_nodes().
Napi::Value Graph::degree(Napi::CallbackInfo const& info) {
auto degree = Column::zeros(info.Env(), cudf::type_id::INT32, num_nodes());
coo_view().degree(degree->mutable_view().begin<int32_t>(),
cugraph::legacy::DegreeDirection::IN_PLUS_OUT);
return degree;
}
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cugraph
|
rapidsai_public_repos/node/modules/cugraph/src/index.ts
|
// Copyright (c) 2020-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
export * as addon from './addon';
export {DedupedEdgesGraph, Graph} from './graph';
export {hypergraph, hypergraphDirect} from './hypergraph';
export {renumberEdges, renumberNodes} from './renumber';
| 0 |
rapidsai_public_repos/node/modules/cugraph
|
rapidsai_public_repos/node/modules/cugraph/src/renumber.ts
|
// Copyright (c) 2021-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {DataFrame, DataType, Float32, Int32, scope, Series} from '@rapidsai/cudf';
// A renumbered node table: each distinct node value paired with its dense Int32 id.
type Nodes<TNode extends DataType> = DataFrame<{id: Int32, node: TNode}>;
// An edge table carrying the original row index (`idx`), both endpoints, and weight.
type Edges<TSource extends DataType, TTarget extends DataType> =
DataFrame<{idx: Int32, src: TSource, dst: TTarget, weight: Float32}>;
// Build the unique-node table for a graph: the distinct values appearing in
// `src` or `dst`, each labeled with a dense Int32 id, sorted ascending by id.
export function renumberNodes<TNode extends DataType>(src: Series<TNode>, dst: Series<TNode>) {
return scope(() => {
// Distinct node values drawn from both endpoint columns.
const node = src.concat(dst).unique();
// encodeLabels assigns each distinct value an integer label.
const id = node.encodeLabels(undefined, new Int32);
return new DataFrame<{id: Int32, node: TNode}>({id, node: node as any}) //
.sortValues({id: {ascending: true}});
}, [src, dst]);
}
// Renumber an edge list's endpoints to the dense Int32 ids in `nodes`,
// preserving the original edge order as the `id` column of the result.
export function renumberEdges<TNode extends DataType>(
src: Series<TNode>, dst: Series<TNode>, weight: Series<Float32>, nodes: Nodes<TNode>) {
return scope(() => {
// Row index used to restore the original edge order after the joins below.
const idx = Series.sequence({size: src.length});
const edges = new DataFrame<{idx: Int32, src: TNode, dst: TNode, weight: Float32}>(
{idx, src: src as any, dst: dst as any, weight});
return renumberTargets(renumberSources(edges, nodes), nodes)
.sortValues({idx: {ascending: true}})
.rename({idx: 'id'})
.select(['id', 'src', 'dst', 'weight']);
}, [src, dst]);
}
/**
 * Replace the `src` column of `edges` with the Int32 node ids from `nodes`,
 * preserving the original row order via the `idx` column.
 */
function renumberSources<TSource extends DataType, TTarget extends DataType>(
  edges: Edges<TSource, TTarget>, nodes: Nodes<TSource>) {
  return scope(() => {
    // Look up each source endpoint's id by joining on the node value.
    const withNode = edges.assign({node: edges.get('src')});
    const joined   = withNode.join({other: nodes, on: ['node']});
    // Restore the original edge order, then take the id column as the new src.
    const srcIds = joined.sortValues({idx: {ascending: true}}).get('id');
    return edges.assign({src: srcIds});
  }, [edges, nodes]);
}
/**
 * Replace the `dst` column of `edges` with the Int32 node ids from `nodes`,
 * preserving the original row order via the `idx` column.
 */
function renumberTargets<TSource extends DataType, TTarget extends DataType>(
  edges: Edges<Int32, TTarget>, nodes: Nodes<TSource>) {
  return scope(() => {
    // Look up each destination endpoint's id by joining on the node value.
    const withNode = edges.assign({node: edges.get('dst')});
    const joined   = withNode.join({other: nodes, on: ['node']});
    // Restore the original edge order, then take the id column as the new dst.
    const dstIds = joined.sortValues({idx: {ascending: true}}).get('id');
    return edges.assign({dst: dstIds});
  }, [edges, nodes]);
}
| 0 |
rapidsai_public_repos/node/modules/cugraph
|
rapidsai_public_repos/node/modules/cugraph/src/node_cugraph.ts
|
// Copyright (c) 2021-2023, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {Memory, MemoryData, MemoryView} from '@rapidsai/cuda';
import {Column, Float32, Int32} from '@rapidsai/cudf';
import {DeviceBuffer, MemoryResource} from '@rapidsai/rmm';
/** @ignore */
export declare const _cpp_exports: any;
/** @summary Type declarations for the native cuGraph addon's Graph class. */
export declare class Graph {
/**
 * @summary Construct a native Graph from renumbered Int32 edge endpoints
 * and Float32 edge weights. `directed` is treated as truthy when omitted.
 */
constructor(props: {
src: Column<Int32>,
dst: Column<Int32>,
weight: Column<Float32>,
directed?: boolean,
});
/**
* @summary The number of edges in this Graph
*/
numEdges(): number;
/**
* @summary The number of nodes in this Graph
*/
numNodes(): number;
/**
* @summary ForceAtlas2 is a continuous graph layout algorithm for handy network visualization.
*
* @note Peak memory allocation occurs at 30*V.
*
* @param {ForceAtlas2Options} options
*
* @returns {DeviceBuffer} The new positions.
*/
forceAtlas2<T extends ForceAtlas2Options<Memory>>(options: T): T['positions'];
forceAtlas2<T extends ForceAtlas2Options<MemoryView>>(options: T): T['positions'];
forceAtlas2<T extends ForceAtlas2Options<MemoryData|DeviceBuffer|void>>(options: T): DeviceBuffer;
/**
* @summary Compute the total number of edges incident to a vertex (both in and out edges).
*/
degree(): Column<Int32>;
/**
* @summary Compute a clustering/partitioning of the given graph using the spectral modularity
* maximization method.
*
* @param {SpectralClusteringOptions} options
*/
spectralModularityMaximizationClustering(options: SpectralClusteringOptions): Column<Int32>;
/**
* @summary Compute a clustering/partitioning of the given graph using the spectral balanced cut
* method.
*
* @param {SpectralClusteringOptions} options
*/
spectralBalancedCutClustering(options: SpectralClusteringOptions): Column<Int32>;
/**
* @summary Compute the modularity score for a given partitioning/clustering. The assumption is
* that "clustering" is the results from a call from a special clustering algorithm and contains
* columns named "vertex" and "cluster".
*
* @param {number} num_clusters The number of clusters.
* @param {Column<Int32>} clusters The Column of cluster ids.
*
* @returns {number} The computed modularity score
*/
analyzeModularityClustering(num_clusters: number, clusters: Column<Int32>): number;
/**
* @summary Compute the edge cut score for a partitioning/clustering The assumption is that
* "clustering" is the results from a call from a special clustering algorithm and contains
* columns named "vertex" and "cluster".
*
* @param {number} num_clusters The number of clusters.
* @param {Column<Int32>} clusters The Column of cluster ids.
*
* @returns {number} The computed edge cut score
*/
analyzeEdgeCutClustering(num_clusters: number, clusters: Column<Int32>): number;
/**
* @summary Compute the ratio cut score for a partitioning/clustering.
*
* @param {number} num_clusters The number of clusters.
* @param {Column<Int32>} clusters The Column of cluster ids.
*
* @returns {number} The computed ratio cut score
*/
analyzeRatioCutClustering(num_clusters: number, clusters: Column<Int32>): number;
}
/**
 * Options controlling the ForceAtlas2 layout computation.
 *
 * @typeParam TPositions The type of the optional initial-positions buffer.
 */
export interface ForceAtlas2Options<TPositions = void> {
  /**
   * Optional buffer of initial vertex positions.
   */
  positions: TPositions;
  /**
   * The maximum number of levels/iterations of the Force Atlas algorithm. When specified the
   * algorithm will terminate after no more than the specified number of iterations. No error occurs
   * when the algorithm terminates in this manner. Good short-term quality can be achieved with
   * 50-100 iterations. Above 1000 iterations is discouraged.
   */
  numIterations?: number;
  /**
   * Distributes attraction along outbound edges. Hubs attract less and thus are pushed to the
   * borders.
   */
  outboundAttraction?: boolean;
  /**
   * Switch Force Atlas model from lin-lin to lin-log. Makes clusters more tight.
   */
  linLogMode?: boolean;
  /**
   * Prevent nodes from overlapping.
   */
  // preventOverlap?: boolean, ///< not implemented in cuGraph yet
  /**
   * How much influence you give to the edges weight. 0 is "no influence" and 1 is "normal".
   */
  edgeWeightInfluence?: number;
  /**
   * How much swinging you allow. Above 1 discouraged. Lower gives less speed and more precision.
   */
  jitterTolerance?: number;
  /**
   * Float between 0 and 1. Tradeoff for speed (1) vs accuracy (0).
   */
  barnesHutTheta?: number;
  /**
   * How much repulsion you want. More makes a more sparse graph. Switching from regular mode to
   * LinLog mode needs a readjustment of the scaling parameter.
   */
  scalingRatio?: number;
  /**
   * Sets a force that attracts the nodes that are distant from the center more. It is so strong
   * that it can sometimes dominate other forces.
   */
  strongGravityMode?: boolean;
  /**
   * Attracts nodes to the center. Prevents islands from drifting away.
   */
  gravity?: number;
  /**
   * Output convergence info at each iteration.
   */
  verbose?: boolean;
  /**
   * Optional memory resource used to allocate the output positions buffer.
   */
  memoryResource?: MemoryResource;
}
/**
 * Options for the spectral clustering algorithms
 * (`spectralBalancedCutClustering` / `spectralModularityMaximizationClustering`).
 */
export interface SpectralClusteringOptions {
  /**
   * @summary Specifies the number of clusters to find
   */
  num_clusters: number;
  /**
   * @summary Specifies the number of eigenvectors to use. Must be less than or equal to
   * `num_clusters`. Default is 2.
   */
  num_eigen_vecs?: number;
  /**
   * @summary Specifies the tolerance to use in the eigensolver. Default is 0.00001.
   */
  evs_tolerance?: number;
  /**
   * @summary Specifies the maximum number of iterations for the eigensolver. Default is 100.
   */
  evs_max_iter?: number;
  /**
   * @summary Specifies the tolerance to use in the k-means solver. Default is 0.00001.
   */
  kmean_tolerance?: number;
  /**
   * @summary Specifies the maximum number of iterations for the k-means solver. Default is 100.
   */
  kmean_max_iter?: number;
}
| 0 |
rapidsai_public_repos/node/modules/cugraph
|
rapidsai_public_repos/node/modules/cugraph/src/hypergraph.ts
|
// Copyright (c) 2021-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {
Categorical,
DataFrame,
Int32,
Series,
StringSeries,
TypeMap,
Utf8String
} from '@rapidsai/cudf';
import {Graph} from './graph';
/**
 * Options shared by {@link hypergraph} and {@link hypergraphDirect}.
 */
export type HypergraphBaseProps<T extends TypeMap = any> = {
  /** An optional sequence of column names to process (defaults to all columns). */
  columns?: readonly(keyof T&string)[],
  /** If True, do not include null values in the graph. */
  dropNulls?: boolean;
  /**
   Dictionary mapping column names to distinct categories. If the same
   value appears in columns mapped to the same category, the transform will
   generate one node for it, instead of one for each column.
   */
  categories?: {[key: string]: string};
  /** If True, exclude each row's attributes from its edges (default: False) */
  dropEdgeAttrs?: boolean;
  /** A sequence of column names not to transform into nodes. */
  skip?: readonly(keyof T&string)[],
  /** The delimiter to use when joining column names, categories, and ids. */
  delim?: string;
  /** The name to use as the node id column in the graph and node DataFrame. */
  nodeId?: string;
  /** The name to use as the event id column in the graph and node DataFrames. */
  eventId?: string;
  /** The name to use as the category column in the graph and DataFrames */
  category?: string;
  /** The name to use as the edge type column in the graph and edge DataFrame */
  edgeType?: string;
  /** The name to use as the node type column in the graph and node DataFrames. */
  nodeType?: string;
}
/** Options specific to {@link hypergraph}. */
export type HypergraphProps<T extends TypeMap = any> = HypergraphBaseProps<T>&{
  /** The name to use as the attribute-node id column in the edge DataFrame. */
  attribId?: string;
}
/** Options specific to {@link hypergraphDirect}. */
export type HypergraphDirectProps<T extends TypeMap = any> = HypergraphBaseProps<T>&{
  /** Select column pairs instead of making all edges. */
  edgeShape?: {[key: string]: string[]};
  /** The name to use as the source column in the graph and edge DataFrame. */
  source?: string;
  /** The name to use as the target column in the graph and edge DataFrame. */
  target?: string;
}
/** The components produced by {@link hypergraph} / {@link hypergraphDirect}. */
export type HypergraphReturn = {
  /** A DataFrame of found entity and hyper node attributes. */
  nodes: DataFrame,
  /** A DataFrame of edge attributes. */
  edges: DataFrame,
  /** Graph of the found entity nodes, hyper nodes, and edges. */
  graph: Graph,
  /** A DataFrame of hyper node (per-row) attributes; empty for direct graphs. */
  events: DataFrame,
  /** A DataFrame of the found entity node attributes. */
  entities: DataFrame,
}
/**
* Creates a hypergraph out of the given dataframe, returning the graph components as dataframes.
*
* The transform reveals relationships between the rows and unique values. This transform is useful
* for lists of events, samples, relationships, and other structured high-dimensional data. The
* transform creates a node for every row, and turns a row's column entries into node attributes.
* Every unique value within a column is also turned into a node.
*
* Edges are added to connect a row's nodes to each of its column nodes. Nodes are given the
* attribute specified by ``nodeType`` that corresponds to the originating column name, or if a row
* ``eventId``.
*
* Consider a list of events. Each row represents a distinct event, and each column some metadata
* about an event. If multiple events have common metadata, they will be transitively connected
* through those metadata values. Conversely, if an event has unique metadata, the unique metadata
* will turn into nodes that only have connections to the event node. For best results, set
* ``eventId`` to a row's unique ID, ``skip`` to all non-categorical columns (or ``columns`` to all
* categorical columns), and ``categories`` to group columns with the same kinds of values.
*/
export function hypergraph<T extends TypeMap = any>(values: DataFrame<T>, {
columns = values.names,
dropNulls = true,
categories = {},
dropEdgeAttrs = false,
skip = [],
delim = '::',
nodeId = 'node_id',
eventId = 'event_id',
attribId = 'attrib_id',
category = 'category',
edgeType = 'edge_type',
nodeType = 'node_type',
}: HypergraphProps<T> = {}): HypergraphReturn {
const computed_columns = _compute_columns(columns, skip);
const initial_events =
_create_events(values, computed_columns, delim, dropNulls, eventId, nodeType);
const entities = _create_entity_nodes(
initial_events, computed_columns, dropNulls, categories, delim, nodeId, category, nodeType);
const edges = _create_hyper_edges(initial_events,
computed_columns,
dropNulls,
categories,
dropEdgeAttrs,
delim,
eventId,
attribId,
category,
edgeType,
nodeType);
const events = _create_hyper_nodes(initial_events, nodeId, eventId, category, nodeType);
const nodes = entities.concat(events);
const graph = Graph.fromEdgeList(edges.get(attribId), edges.get(eventId));
return {nodes, edges, events, entities, graph};
}
/**
* Creates a hypergraph out of the given dataframe, returning the graph components as dataframes.
*
* The transform reveals relationships between the rows and unique values. This transform is useful
* for lists of events, samples, relationships, and other structured high-dimensional data. The
* transform creates a node for every row, and turns a row's column entries into node attributes.
* Every unique value within a column is also turned into a node.
*
* Edges are added to connect a row's nodes to one another. Nodes are given the attribute specified
* by ``nodeType`` that corresponds to the originating column name, or if a row ``eventId``.
*
* Consider a list of events. Each row represents a distinct event, and each column some metadata
* about an event. If multiple events have common metadata, they will be transitively connected
* through those metadata values. Conversely, if an event has unique metadata, the unique metadata
* will turn into nodes that only have connections to the event node. For best results, set
* ``eventId`` to a row's unique ID, ``skip`` to all non-categorical columns (or ``columns`` to all
* categorical columns), and ``categories`` to group columns with the same kinds of values.
*/
export function hypergraphDirect<T extends TypeMap = any>(values: DataFrame<T>, {
  columns = values.names,
  categories = {},
  dropNulls = true,
  edgeShape = {},
  dropEdgeAttrs = false,
  skip = [],
  delim = '::',
  source = 'src',
  target = 'dst',
  nodeId = 'node_id',
  eventId = 'event_id',
  category = 'category',
  edgeType = 'edge_type',
  nodeType = 'node_type',
  // Fix: type the props against `T` (was the untyped `HypergraphDirectProps`),
  // so `columns`/`skip` are checked against the DataFrame's column names like
  // in `hypergraph()` above. Backward compatible for all existing callers.
}: HypergraphDirectProps<T> = {}): HypergraphReturn {
  // Columns to turn into nodes: the requested columns minus `skip`, sorted.
  const computed_columns = _compute_columns(columns, skip);
  // One row per event, tagged with an event id and node type.
  const initial_events =
    _create_events(values, computed_columns, delim, dropNulls, eventId, nodeType);
  // One entity node per distinct value in each selected column.
  const entities = _create_entity_nodes(
    initial_events, computed_columns, dropNulls, categories, delim, nodeId, category, nodeType);
  // Direct edges between column-value nodes (no hyper nodes in this variant).
  const edges = _create_direct_edges(initial_events,
                                     computed_columns,
                                     dropNulls,
                                     categories,
                                     edgeShape,
                                     dropEdgeAttrs,
                                     delim,
                                     source,
                                     target,
                                     eventId,
                                     category,
                                     edgeType,
                                     nodeType);
  // Direct graphs have no hyper nodes, so `events` is empty and nodes == entities.
  const events = new DataFrame({});
  const nodes  = entities;
  const graph  = Graph.fromEdgeList(edges.get(source), edges.get(target));
  return {nodes, edges, events, entities, graph};
}
// Return the subset of `columns` not listed in `skip`, in sorted order.
function _compute_columns<T extends TypeMap = any>(columns: readonly(keyof T & string)[],
                                                   skip: readonly(keyof T & string)[]) {
  const kept: string[] = columns.filter((name) => !skip.includes(name));
  return kept.sort();
}
// Build the per-event DataFrame: the selected columns plus a prefixed event id
// column and a constant 'event' node-type column.
function _create_events(values: DataFrame,
                        columns: string[],
                        delim: string,
                        dropNulls: boolean,
                        eventId: string,
                        nodeType: string) {
  const fields: {[key: string]: any} = {};
  columns.forEach((name) => { fields[name] = values.get(name); });
  // Synthesize a sequential event id when the input has no such column.
  if (!(eventId in fields)) {
    fields[eventId] = Series.sequence({type: new Int32, init: 0, step: 1, size: values.numRows});
  }
  fields[eventId]  = _prepend_str(fields[eventId], eventId, delim);
  fields[nodeType] = _scalar_init('event', fields[eventId].length);
  // When keeping nulls, make them explicit 'null' strings so they become nodes.
  if (!dropNulls) {
    for (const name of columns) {
      const col = fields[name];
      if (col instanceof StringSeries) { fields[name] = col.replaceNulls('null'); }
    }
  }
  return new DataFrame(fields);
}
// Build one node per distinct value in each selected column, then concatenate
// and de-duplicate by node id (first occurrence wins).
function _create_entity_nodes(events: DataFrame,
                              columns: string[],
                              dropNulls: boolean,
                              categories: {[key: string]: string},
                              delim: string,
                              nodeId: string,
                              category: string,
                              nodeType: string) {
  const perColumn: DataFrame[] = [];
  for (const name of columns) {
    const cat  = name in categories ? categories[name] : name;
    let uniques = events.get(name).unique();
    if (dropNulls) { uniques = uniques.dropNulls(); }
    if (uniques.length == 0) { continue; }
    perColumn.push(new DataFrame({
      [name]: uniques,
      [nodeId]: _prepend_str(uniques, cat, delim),
      [category]: _scalar_init(cat, uniques.length),
      [nodeType]: _scalar_init(name, uniques.length),
    }));
  }
  return new DataFrame()
    .concat(...perColumn)
    .dropDuplicates('first', true, true, [nodeId])
    .select(columns.concat([nodeId, nodeType, category]));
}
// Turn each event row into a hyper node: copy every event column, then tag the
// rows with a node type, a constant 'event' category, and reuse the event id
// as the node id.
function _create_hyper_nodes(
  events: DataFrame, nodeId: string, eventId: string, category: string, nodeType: string) {
  const fields: {[key: string]: any} = {};
  events.names.forEach((name) => { fields[name] = events.get(name); });
  fields[nodeType] = _scalar_init(eventId, events.numRows);
  fields[category] = _scalar_init('event', events.numRows);
  fields[nodeId]   = fields[eventId];
  return new DataFrame(fields);
}
// Build the hyper-edge list: one edge per (event, column value) pair, linking
// the event's hyper node (eventId) to the attribute node (attribId).
function _create_hyper_edges(events: DataFrame,
                             columns: string[],
                             dropNulls: boolean,
                             categories: {[key: string]: string},
                             dropEdgeAttrs: boolean,
                             delim: string,
                             eventId: string,
                             attribId: string,
                             category: string,
                             edgeType: string,
                             nodeType: string) {
  const hasCategories       = Object.keys(categories).length > 0;
  const attrs               = events.names.filter((name) => name != nodeType);
  const pieces: DataFrame[] = [];
  for (const name of columns) {
    const cat      = name in categories ? categories[name] : name;
    const selected = dropEdgeAttrs ? [eventId, name] : [eventId, ...attrs];
    let df: DataFrame =
      dropNulls ? events.select(selected).dropNulls(0, 1, [name]) : events.select(selected);
    if (df.numRows == 0) { continue; }
    df = df.assign({
      [edgeType]: _scalar_init(cat, df.numRows),
      [attribId]: _prepend_str(df.get(name), cat, delim),
    });
    if (hasCategories) { df = df.assign({[category]: _scalar_init(name, df.numRows)}); }
    pieces.push(df);
  }
  const cols = [eventId, edgeType, attribId];
  if (hasCategories) { cols.push(category); }
  if (!dropEdgeAttrs) { cols.push(...attrs); }
  return new DataFrame().concat(...pieces).select(cols);
}
// Build direct (pairwise) edges between column-value nodes, following the
// requested `edgeShape` (or all later-column pairs when no shape is given).
//
// Fix: the original sorted the caller's `edgeShape` arrays in place via
// `edgeShape[key1].sort()` (and filled defaults into the caller's object);
// we now work on a local copy so caller-supplied options are never mutated.
function _create_direct_edges(events: DataFrame,
                              columns: string[],
                              dropNulls: boolean,
                              categories: {[key: string]: string},
                              edgeShape: {[key: string]: string[]},
                              dropEdgeAttrs: boolean,
                              delim: string,
                              source: string,
                              target: string,
                              eventId: string,
                              category: string,
                              edgeType: string,
                              nodeType: string) {
  // Local, mutation-safe copy of the edge shape.
  const shape: {[key: string]: string[]} = {};
  if (Object.keys(edgeShape).length == 0) {
    // Default: connect every column to every later column.
    columns.forEach((value, index) => shape[value] = columns.slice(index + 1));
  } else {
    for (const key of Object.keys(edgeShape)) { shape[key] = [...edgeShape[key]]; }
  }
  const edge_attrs           = events.names.filter(name => name != nodeType);
  const edge_dfs: DataFrame[] = [];
  for (const key1 of Object.keys(shape).sort()) {
    const cat1 = key1 in categories ? categories[key1] : key1;
    for (const key2 of shape[key1].sort()) {
      const cat2 = key2 in categories ? categories[key2] : key2;
      const fs   = dropEdgeAttrs ? [eventId, key1, key2] : [eventId, ...edge_attrs];
      let df: DataFrame =
        dropNulls ? events.select(fs).dropNulls(0, 2, [key1, key2]) : events.select(fs);
      if (df.numRows == 0) { continue; }
      if (Object.keys(categories).length > 0) {
        df = df.assign({[category]: _scalar_init(key1 + delim + key2, df.numRows)});
      }
      df = df.assign({
        [edgeType]: _scalar_init(cat1 + delim + cat2, df.numRows),
        [source]: _prepend_str(df.get(key1), cat1, delim),
        [target]: _prepend_str(df.get(key2), cat2, delim),
      });
      edge_dfs.push(df);
    }
  }
  const cols = [eventId, edgeType, source, target];
  if (Object.keys(categories).length > 0) { cols.push(category); }
  if (!dropEdgeAttrs) { cols.push(...edge_attrs); }
  return new DataFrame().concat(...edge_dfs).select(cols);
}
// Prefix every value of `series` with `val + delim` (e.g. 'col' + '::' turns
// 'x' into 'col::x'). Operates on the categorical dictionary so the prefix is
// applied once per distinct value rather than once per row.
function _prepend_str(series: Series, val: string, delim: string) {
  const prefix = val + delim;
  // Cast to a string categorical: `codes` index into the distinct `categories`.
  const suffix = series.cast(new Categorical(new Utf8String));
  const codes = suffix.codes;
  // Nulls become the literal string 'null'; replaceSlice(prefix, 0, 0)
  // presumably inserts `prefix` at position 0 of each category string —
  // TODO confirm against the Column.replaceSlice API.
  const categories =
    Series.new(suffix.categories.replaceNulls('null')._col.replaceSlice(prefix, 0, 0));
  // Rebuild a categorical series from the original codes + prefixed categories.
  return Series.new({type: suffix.type, length: codes.length, children: [codes, categories]});
}
// Build a Utf8String Series of length `size` where every element equals `val`
// (gathers index 0 of a one-element series `size` times).
function _scalar_init(val: string, size: number): Series<Utf8String> {
  const zeros = Series.sequence({size, step: 0});
  return Series.new([val]).gather(zeros, false);
}
| 0 |
rapidsai_public_repos/node/modules/cugraph
|
rapidsai_public_repos/node/modules/cugraph/src/addon.cpp
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "node_cugraph/graph.hpp"
#include <nv_node/addon.hpp>
// Node.js addon entry point for the cuGraph bindings. Exposes:
//  - "init": called from JS with the other RAPIDS addons' C++ exports,
//  - "_cpp_exports": this addon's C++ exports for sibling addons,
//  - "Graph": the wrapped nv::Graph class constructor.
struct rapidsai_cugraph : public nv::EnvLocalAddon, public Napi::Addon<rapidsai_cugraph> {
  rapidsai_cugraph(Napi::Env const& env, Napi::Object exports) : nv::EnvLocalAddon(env, exports) {
    DefineAddon(exports,
                {
                  InstanceMethod("init", &rapidsai_cugraph::InitAddon),
                  InstanceValue("_cpp_exports", _cpp_exports.Value()),
                  InstanceValue("Graph", InitClass<nv::Graph>(env, exports)),
                });
  }
};

NODE_API_ADDON(rapidsai_cugraph);
| 0 |
rapidsai_public_repos/node/modules/cugraph
|
rapidsai_public_repos/node/modules/cugraph/src/addon.ts
|
// Copyright (c) 2020-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* eslint-disable @typescript-eslint/no-redeclare */

import {addon as CORE} from '@rapidsai/core';
import {addon as CUDA} from '@rapidsai/cuda';
import {addon as CUDF} from '@rapidsai/cudf';
import {addon as RMM} from '@rapidsai/rmm';

// Load the native addon and initialize it with the other RAPIDS addons'
// C++ exports; the result is typed via the ambient declarations in
// ./node_cugraph.
export const {
  Graph,
  _cpp_exports,
} = require('bindings')('rapidsai_cugraph.node').init(CORE, CUDA, RMM, CUDF) as
  typeof import('./node_cugraph');

// Re-export the instance type alongside the constructor value above.
export type Graph = import('./node_cugraph').Graph;
| 0 |
rapidsai_public_repos/node/modules/cugraph/src
|
rapidsai_public_repos/node/modules/cugraph/src/node_cugraph/graph.hpp
|
// Copyright (c) 2020-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <node_cugraph/cugraph/legacy/graph.hpp>
#include <node_cudf/column.hpp>
#include <node_rmm/device_buffer.hpp>
#include <node_rmm/memory_resource.hpp>
#include <nv_node/objectwrap.hpp>
#include <nv_node/utilities/args.hpp>
#include <napi.h>
namespace nv {
// JavaScript wrapper around a cuGraph (legacy) graph built from an edge list,
// convertible on demand to COO and CSR views.
struct Graph : public EnvLocalObjectWrap<Graph> {
  /**
   * @brief Initialize and export the Graph JavaScript constructor and prototype.
   *
   * @param env The active JavaScript environment.
   * @param exports The exports object to decorate.
   * @return Napi::Function The Graph constructor function.
   */
  static Napi::Function Init(Napi::Env const& env, Napi::Object exports);

  /**
   * @brief Construct a new Graph instance from C++.
   *
   * @param src The source node indices for edges
   * @param dst The destination node indices for edges
   */
  static wrapper_t New(Napi::Env const& env,
                       Column::wrapper_t const& src,
                       Column::wrapper_t const& dst);

  /**
   * @brief Construct a new Graph instance from JavaScript.
   */
  Graph(CallbackArgs const& args);

  /**
   * @brief Get the number of edges in the graph
   *
   */
  int32_t num_edges();

  /**
   * @brief Get the number of nodes in the graph
   *
   */
  int32_t num_nodes();

  /**
   * @brief Get a non-owning view of the Graph in COO format
   *
   */
  cugraph::legacy::GraphCOOView<int32_t, int32_t, float> coo_view();

  /**
   * @brief Get a non-owning view of the Graph in CSR format
   *
   */
  cugraph::legacy::GraphCSRView<int32_t, int32_t, float> csr_view();

  /**
   * @brief Conversion operator to get a non-owning view of the Graph in COO format
   *
   */
  inline operator cugraph::legacy::GraphCOOView<int32_t, int32_t, float>() { return coo_view(); }

  /**
   * @brief Conversion operator to get a non-owning view of the Graph in CSR format
   *
   */
  inline operator cugraph::legacy::GraphCSRView<int32_t, int32_t, float>() { return csr_view(); }

 private:
  // Whether the edge list represents a directed graph.
  bool directed_edges_{false};
  // Cached counts; the *_computed_ flags suggest lazy computation in
  // num_edges()/num_nodes() (implemented elsewhere) — see graph.cpp.
  cudf::size_type edge_count_{};
  bool edge_count_computed_{false};
  cudf::size_type node_count_{};
  bool node_count_computed_{false};

  // edge list columns
  Napi::Reference<Column::wrapper_t> src_;
  Napi::Reference<Column::wrapper_t> dst_;
  Napi::Reference<Column::wrapper_t> e_weights_;

  // adjacency list columns
  Napi::Reference<Column::wrapper_t> offsets_;
  Napi::Reference<Column::wrapper_t> indices_;
  Napi::Reference<Column::wrapper_t> a_weights_;

  // JS-facing method implementations (bound in Init).
  Napi::Value num_edges(Napi::CallbackInfo const& info);
  Napi::Value num_nodes(Napi::CallbackInfo const& info);
  Napi::Value degree(Napi::CallbackInfo const& info);

  // layout/force_atlas2.cpp
  Napi::Value force_atlas2(Napi::CallbackInfo const& info);

  // community/spectral_clustering.cpp
  Napi::Value spectral_balanced_cut_clustering(Napi::CallbackInfo const& info);
  Napi::Value spectral_modularity_maximization_clustering(Napi::CallbackInfo const& info);
  Napi::Value analyze_modularity_clustering(Napi::CallbackInfo const& info);
  Napi::Value analyze_edge_cut_clustering(Napi::CallbackInfo const& info);
  Napi::Value analyze_ratio_cut_clustering(Napi::CallbackInfo const& info);
};
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cugraph/src/node_cugraph
|
rapidsai_public_repos/node/modules/cugraph/src/node_cugraph/cugraph/algorithms.hpp
|
// Copyright (c) 2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

// CUDA_TRY / CHECK_CUDA appear to be defined both by this project's headers
// and by cuGraph's; undefine them around the cuGraph include so neither
// definition leaks into (or out of) this wrapper — TODO confirm the exact
// colliding headers.
#ifdef CUDA_TRY
#undef CUDA_TRY
#endif

#ifdef CHECK_CUDA
#undef CHECK_CUDA
#endif

#include <cugraph/algorithms.hpp>

// Undo any definitions cuGraph's header introduced.
#ifdef CHECK_CUDA
#undef CHECK_CUDA
#endif

#ifdef CUDA_TRY
#undef CUDA_TRY
#endif
| 0 |
rapidsai_public_repos/node/modules/cugraph/src/node_cugraph/cugraph
|
rapidsai_public_repos/node/modules/cugraph/src/node_cugraph/cugraph/legacy/graph.hpp
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

// Same macro hygiene as node_cugraph/cugraph/algorithms.hpp: strip CUDA_TRY /
// CHECK_CUDA around the cuGraph include so conflicting definitions don't leak
// in either direction.
#ifdef CUDA_TRY
#undef CUDA_TRY
#endif

#ifdef CHECK_CUDA
#undef CHECK_CUDA
#endif

#include <cugraph/legacy/graph.hpp>

#ifdef CHECK_CUDA
#undef CHECK_CUDA
#endif

#ifdef CUDA_TRY
#undef CUDA_TRY
#endif
| 0 |
rapidsai_public_repos/node/modules/cugraph/src/node_cugraph
|
rapidsai_public_repos/node/modules/cugraph/src/node_cugraph/utilities/error.hpp
|
// Copyright (c) 2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#ifdef CHECK_CUDA
#undef CHECK_CUDA
#endif
#include <cugraph/utilities/error.hpp>
#ifdef CHECK_CUDA
#undef CHECK_CUDA
#endif
#include <napi.h>
namespace nv {

/**
 * @brief Build a cugraph::logic_error with a "cuGraph failure" message that
 * includes the throwing file and line.
 */
inline cugraph::logic_error cugraphError(std::string const& message,
                                         std::string const& file,
                                         uint32_t line) {
  return cugraph::logic_error("cuGraph failure:\n" + message + "\n    at " + file + ":" +
                              std::to_string(line));
}

/**
 * @brief Build a Napi::Error (for throwing into JavaScript) carrying the same
 * formatted message as the C++ overload above.
 */
inline Napi::Error cugraphError(std::string const& message,
                                std::string const& file,
                                uint32_t line,
                                Napi::Env const& env) {
  return Napi::Error::New(env, cugraphError(message, file, line).what());
}

}  // namespace nv
// Assert `expr`; on failure throw a formatted cuGraph error. An optional
// trailing Napi::Env argument selects the Napi::Error overload (throws into
// JS) instead of the C++ cugraph::logic_error overload.
#ifndef NODE_CUGRAPH_EXPECT
#define NODE_CUGRAPH_EXPECT(expr, message, ...)                                          \
  do {                                                                                   \
    if (!(expr)) NAPI_THROW(nv::cugraphError(message, __FILE__, __LINE__, ##__VA_ARGS__)); \
  } while (0)
#endif

// Unconditionally throw a formatted cuGraph error (same optional Napi::Env
// trailing argument as NODE_CUGRAPH_EXPECT).
#ifndef NODE_CUGRAPH_THROW
#define NODE_CUGRAPH_THROW(message, ...) \
  NAPI_THROW(nv::cugraphError(message, __FILE__, __LINE__, ##__VA_ARGS__))
#endif
/**
 * @brief Error checking macro for CUDA runtime API functions.
 *
 * Invokes a CUDA runtime API function call, if the call does not return
 * cudaSuccess, invokes cudaGetLastError() to clear the error and throws an
 * exception detailing the CUDA error that occurred.
 *
 * Fix: the original passed the raw `status` (a cudaError_t enum) to
 * NODE_CUGRAPH_THROW, but nv::cugraphError takes std::string and an enum has
 * no implicit conversion to it, so any expansion of this macro failed to
 * compile. Convert the status to its human-readable description instead
 * (cudaGetErrorString comes from the same runtime header as the
 * cudaGetLastError already used here).
 **/
#ifndef NODE_CUGRAPH_TRY
#define NODE_CUGRAPH_TRY(expr, ...)                                    \
  do {                                                                 \
    cudaError_t const status = (expr);                                 \
    if (status != cudaSuccess) {                                       \
      cudaGetLastError();                                              \
      NODE_CUGRAPH_THROW(cudaGetErrorString(status), ##__VA_ARGS__);   \
    }                                                                  \
  } while (0)
#endif
| 0 |
rapidsai_public_repos/node/modules/cugraph/src
|
rapidsai_public_repos/node/modules/cugraph/src/layout/force_atlas2.cpp
|
// Copyright (c) 2021-2023, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <node_cugraph/cugraph/algorithms.hpp>
#include <node_cugraph/graph.hpp>
#include <node_cudf/utilities/buffer.hpp>
#include <node_rmm/device_buffer.hpp>
namespace nv {
namespace {
// Read `opt` as an int, falling back to `default_val` when it isn't a number.
int get_int(NapiToCPP const& opt, int const default_val) {
  if (opt.IsNumber()) { return opt.operator int(); }
  return default_val;
}
// Read `opt` as a bool, falling back to `default_val` when it isn't a boolean.
bool get_bool(NapiToCPP const& opt, bool const default_val) {
  if (opt.IsBoolean()) { return opt.operator bool(); }
  return default_val;
}
// Read `opt` as a float, falling back to `default_val` when it isn't a number.
float get_float(NapiToCPP const& opt, float const default_val) {
  if (opt.IsNumber()) { return opt.operator float(); }
  return default_val;
}
} // namespace
// JS entry point for the ForceAtlas2 layout. Accepts an options object (see
// ForceAtlas2Options in the TS declarations); returns a device buffer of node
// positions laid out as [x0..xN-1, y0..yN-1] (see y_positions = x_positions +
// num_nodes() below). If the caller supplied a positions buffer it is updated
// in place and returned; otherwise a fresh DeviceBuffer is allocated.
Napi::Value Graph::force_atlas2(Napi::CallbackInfo const& info) {
  auto env = info.Env();

  CallbackArgs const args{info};

  NapiToCPP::Object options = args[0];

  // Allocate from the caller-provided memory resource if given, else the
  // current per-environment default.
  auto mr = MemoryResource::IsInstance(options.Get("memoryResource"))
              ? MemoryResource::wrapper_t(options.Get("memoryResource"))
              : MemoryResource::Current(env);

  // Option extraction with defaults (see the helpers above).
  auto max_iter              = get_int(options.Get("numIterations"), 1);
  auto outbound_attraction   = get_bool(options.Get("outboundAttraction"), true);
  auto lin_log_mode          = get_bool(options.Get("linLogMode"), false);
  auto prevent_overlapping   = get_bool(options.Get("preventOverlap"), false);
  auto edge_weight_influence = get_float(options.Get("edgeWeightInfluence"), 1.0);
  auto jitter_tolerance      = get_float(options.Get("jitterTolerance"), 1.0);
  auto barnes_hut_theta      = get_float(options.Get("barnesHutTheta"), 0.5);
  auto scaling_ratio         = get_float(options.Get("scalingRatio"), 2.0);
  auto strong_gravity_mode   = get_bool(options.Get("strongGravityMode"), false);
  auto gravity               = get_float(options.Get("gravity"), 1.0);
  auto verbose               = get_bool(options.Get("verbose"), false);

  // Element (float) offset into a caller-supplied buffer view; x/y stay null
  // unless the caller provided initial positions.
  size_t positions_offset{0};
  float* x_positions{nullptr};
  float* y_positions{nullptr};
  Napi::Object positions;
  bool positions_is_device_memory_wrapper{false};

  // Duck-typing: a raw device allocation exposes a numeric "ptr".
  auto is_device_memory = [&](Napi::Value const& data) -> bool {
    return data.IsObject() and                            //
           data.As<Napi::Object>().Has("ptr") and         //
           data.As<Napi::Object>().Get("ptr").IsNumber();
  };

  // A typed-array-like wrapper holds such an allocation under "buffer".
  auto is_device_memory_wrapper = [&](Napi::Value const& data) -> bool {
    return data.IsObject() and                                 //
           data.As<Napi::Object>().Has("buffer") and           //
           data.As<Napi::Object>().Get("buffer").IsObject() and  //
           is_device_memory(data.As<Napi::Object>().Get("buffer"));
  };

  // NOTE(review): positions_offset is applied as a float-element offset here,
  // but "byteOffset" below reads a byte count — confirm units match.
  auto get_device_memory_ptr = [&](Napi::Object const& buffer) -> float* {
    return reinterpret_cast<float*>(buffer.Get("ptr").ToNumber().Int64Value()) + positions_offset;
  };

  if (options.Has("positions") && options.Get("positions").IsObject()) {
    positions = options.Get("positions");
    if (is_device_memory_wrapper(positions)) {
      positions_is_device_memory_wrapper = true;
      // Honor a view's offset into its underlying buffer.
      if (positions.Has("byteOffset")) {
        auto val = positions.Get("byteOffset");
        if (val.IsNumber()) {
          positions_offset = val.ToNumber().Int64Value();
        } else if (val.IsBigInt()) {
          bool lossless{false};
          positions_offset = val.As<Napi::BigInt>().Uint64Value(&lossless);
        }
      }
      positions = positions.Get("buffer").ToObject();
    } else if (!is_device_memory(positions)) {
      // Host data (e.g. an Array): copy it into a device buffer of float32.
      positions = data_to_devicebuffer(env, positions, cudf::data_type{cudf::type_id::FLOAT32}, mr);
    }
    // Layout is x-block then y-block within the same allocation.
    x_positions = get_device_memory_ptr(positions);
    y_positions = x_positions + num_nodes();
  } else {
    // No initial positions: allocate 2 floats per node (x block + y block).
    positions =
      DeviceBuffer::New(env,
                        std::make_unique<rmm::device_buffer>(
                          num_nodes() * 2 * sizeof(float), rmm::cuda_stream_default, *mr));
  }

  auto graph = this->coo_view();

  try {
    // NOTE(review): the bare `true` presumably maps to a flag in
    // cugraph::force_atlas2's signature (e.g. barnes_hut optimization) —
    // confirm against the cuGraph API.
    cugraph::force_atlas2({rmm::cuda_stream_default},
                          graph,
                          get_device_memory_ptr(positions),
                          max_iter,
                          x_positions,
                          y_positions,
                          outbound_attraction,
                          lin_log_mode,
                          prevent_overlapping,
                          edge_weight_influence,
                          jitter_tolerance,
                          true,
                          barnes_hut_theta,
                          scaling_ratio,
                          strong_gravity_mode,
                          gravity,
                          verbose);
  } catch (std::exception const& e) { throw Napi::Error::New(info.Env(), e.what()); }

  // Return the caller's original wrapper when one was given, so views are
  // preserved; otherwise return the (possibly new) buffer.
  return positions_is_device_memory_wrapper ? options.Get("positions").As<Napi::Object>()
                                            : positions;
}
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cugraph/src
|
rapidsai_public_repos/node/modules/cugraph/src/community/spectral_clustering.cpp
|
// Copyright (c) 2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <node_cugraph/cugraph/algorithms.hpp>
#include <node_cugraph/graph.hpp>
#include <node_cudf/column.hpp>
namespace nv {
// JS entry point: spectral balanced-cut clustering. Takes a
// SpectralClusteringOptions object; returns a Column<Int32> of per-node
// cluster assignments.
// NOTE(review): the TS options are optional but every field is read
// unconditionally here — presumably defaults are filled in by a JS wrapper;
// confirm.
Napi::Value Graph::spectral_balanced_cut_clustering(Napi::CallbackInfo const& info) {
  NapiToCPP::Object opts = info[0];

  int32_t num_clusters   = opts.Get("num_clusters");
  int32_t num_eigen_vecs = opts.Get("num_eigen_vecs");
  float evs_tolerance    = opts.Get("evs_tolerance");
  int32_t evs_max_iter   = opts.Get("evs_max_iter");
  float kmean_tolerance  = opts.Get("kmean_tolerance");
  int32_t kmean_max_iter = opts.Get("kmean_max_iter");

  // Output column: one INT32 cluster id per node, written in place below.
  auto cluster = Column::zeros(info.Env(), cudf::type_id::INT32, num_nodes());

  try {
    cugraph::ext_raft::balancedCutClustering(csr_view(),
                                             num_clusters,
                                             num_eigen_vecs,
                                             evs_tolerance,
                                             evs_max_iter,
                                             kmean_tolerance,
                                             kmean_max_iter,
                                             cluster->mutable_view().begin<int32_t>());
  } catch (std::exception const& e) { throw Napi::Error::New(info.Env(), e.what()); }

  return cluster;
}
// JS entry point: spectral modularity-maximization clustering. Same option
// handling and output shape as spectral_balanced_cut_clustering above.
Napi::Value Graph::spectral_modularity_maximization_clustering(Napi::CallbackInfo const& info) {
  NapiToCPP::Object opts = info[0];

  int32_t num_clusters   = opts.Get("num_clusters");
  int32_t num_eigen_vecs = opts.Get("num_eigen_vecs");
  float evs_tolerance    = opts.Get("evs_tolerance");
  int32_t evs_max_iter   = opts.Get("evs_max_iter");
  float kmean_tolerance  = opts.Get("kmean_tolerance");
  int32_t kmean_max_iter = opts.Get("kmean_max_iter");

  // Output column: one INT32 cluster id per node, written in place below.
  auto cluster = Column::zeros(info.Env(), cudf::type_id::INT32, num_nodes());

  try {
    cugraph::ext_raft::spectralModularityMaximization(csr_view(),
                                                      num_clusters,
                                                      num_eigen_vecs,
                                                      evs_tolerance,
                                                      evs_max_iter,
                                                      kmean_tolerance,
                                                      kmean_max_iter,
                                                      cluster->mutable_view().begin<int32_t>());
  } catch (std::exception const& e) { throw Napi::Error::New(info.Env(), e.what()); }

  return cluster;
}
// Scores an existing cluster assignment with cuGraph's modularity metric.
// info[0]: cluster count; info[1]: INT32 Column of per-vertex cluster ids.
// Returns the score as a JS number; native errors become Napi::Error.
Napi::Value Graph::analyze_modularity_clustering(Napi::CallbackInfo const& info) {
  auto env                           = info.Env();
  int32_t const n_clusters           = info[0].ToNumber();
  Column::wrapper_t const assignment = info[1].ToObject();
  float modularity{};
  try {
    cugraph::ext_raft::analyzeClustering_modularity(
      csr_view(), n_clusters, assignment->view().begin<int32_t>(), &modularity);
  } catch (std::exception const& e) { throw Napi::Error::New(env, e.what()); }
  return Napi::Number::New(env, modularity);
}
// Scores an existing cluster assignment with cuGraph's edge-cut metric.
// info[0]: cluster count; info[1]: INT32 Column of per-vertex cluster ids.
// Returns the score as a JS number; native errors become Napi::Error.
Napi::Value Graph::analyze_edge_cut_clustering(Napi::CallbackInfo const& info) {
  auto env                           = info.Env();
  int32_t const n_clusters           = info[0].ToNumber();
  Column::wrapper_t const assignment = info[1].ToObject();
  float edge_cut{};
  try {
    cugraph::ext_raft::analyzeClustering_edge_cut(
      csr_view(), n_clusters, assignment->view().begin<int32_t>(), &edge_cut);
  } catch (std::exception const& e) { throw Napi::Error::New(env, e.what()); }
  return Napi::Number::New(env, edge_cut);
}
// Scores an existing cluster assignment with cuGraph's ratio-cut metric.
// info[0]: cluster count; info[1]: INT32 Column of per-vertex cluster ids.
// Returns the score as a JS number; native errors become Napi::Error.
Napi::Value Graph::analyze_ratio_cut_clustering(Napi::CallbackInfo const& info) {
  auto env                           = info.Env();
  int32_t const n_clusters           = info[0].ToNumber();
  Column::wrapper_t const assignment = info[1].ToObject();
  float ratio_cut{};
  try {
    cugraph::ext_raft::analyzeClustering_ratio_cut(
      csr_view(), n_clusters, assignment->view().begin<int32_t>(), &ratio_cut);
  } catch (std::exception const& e) { throw Napi::Error::New(env, e.what()); }
  return Napi::Number::New(env, ratio_cut);
}
} // namespace nv
| 0 |
rapidsai_public_repos/node/modules/cugraph
|
rapidsai_public_repos/node/modules/cugraph/test/renumber-test.ts
|
// Copyright (c) 2021-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {setDefaultAllocator} from '@rapidsai/cuda';
import {Series} from '@rapidsai/cudf';
import {renumberNodes} from '@rapidsai/cugraph';
import {CudaMemoryResource, DeviceBuffer} from '@rapidsai/rmm';
// Route all device allocations made by these tests through one shared
// CUDA memory resource.
const mr = new CudaMemoryResource();
setDefaultAllocator((byteLength: number) => new DeviceBuffer(byteLength, mr));
test('renumberNodes strings', () => {
  // Four IP-address labels; dst is src rotated left by one (a 4-node cycle).
  const ips = [
    '192.168.1.1',
    '172.217.5.238',
    '216.228.121.209',
    '192.16.31.23',
  ];
  const src = Series.new(ips);
  const dst = Series.new([...ips.slice(1), ips[0]]);
  const df  = renumberNodes(src, dst);
  // Each distinct label appears exactly once in the node column…
  expect([...df.get('node')].sort()).toEqual([...ips].sort());
  // …and ids are assigned densely from zero.
  expect([...df.get('id')]).toEqual([0, 1, 2, 3]);
});
test('renumberNodes numeric', () => {
  // Four numeric labels; dst is src rotated left by one (a 4-node cycle).
  const nums = [10, 20, 30, 40];
  const src  = Series.new(nums);
  const dst  = Series.new([...nums.slice(1), nums[0]]);
  const df   = renumberNodes(src, dst);
  // Each distinct value appears exactly once in the node column…
  expect([...df.get('node')].sort()).toEqual([...nums].sort());
  // …and ids are assigned densely from zero.
  expect([...df.get('id')]).toEqual([0, 1, 2, 3]);
});
| 0 |
rapidsai_public_repos/node/modules/cugraph
|
rapidsai_public_repos/node/modules/cugraph/test/tsconfig.json
|
{
"extends": "../tsconfig.json",
"include": [
"../src/**/*.ts",
"../test/**/*.ts"
],
"compilerOptions": {
"target": "esnext",
"module": "commonjs",
"allowJs": true,
"importHelpers": false,
"noEmitHelpers": false,
"noEmitOnError": false,
"sourceMap": false,
"inlineSources": false,
"inlineSourceMap": false,
"downlevelIteration": false,
"baseUrl": "../",
"paths": {
"@rapidsai/cugraph": ["src/index"],
"@rapidsai/cugraph/*": ["src/*"]
}
}
}
| 0 |
rapidsai_public_repos/node/modules/cugraph
|
rapidsai_public_repos/node/modules/cugraph/test/hypergraph-tests.ts
|
// Copyright (c) 2021-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {setDefaultAllocator} from '@rapidsai/cuda';
import {DataFrame, Int32, Series} from '@rapidsai/cudf';
import {hypergraph, hypergraphDirect} from '@rapidsai/cugraph';
import {CudaMemoryResource, DeviceBuffer} from '@rapidsai/rmm';
// Route all device allocations made by these tests through one shared
// CUDA memory resource.
const mr = new CudaMemoryResource();
setDefaultAllocator((byteLength: number) => new DeviceBuffer(byteLength, mr));
// 3-row frame with 4 columns (one of them non-ASCII) used by the
// non-direct hypergraph tests below.
const simple_df = new DataFrame({
id: Series.new(['a', 'b', 'c']),
a1: Series.new([1, 2, 3]).cast(new Int32),
a2: Series.new(['red', 'blue', 'green']),
'π': Series.new(['Γ¦ski ΔΛmΕjΔ', 'π', 's']),
});
// 3-row frame whose columns share values across columns ('b' appears in both
// bb and cc) — exercises category collapsing in the direct tests.
const hyper_df = new DataFrame({
aa: Series.new([0, 1, 2]).cast(new Int32),
bb: Series.new(['a', 'b', 'c']),
cc: Series.new(['b', '0', '1']),
});
// Default (non-direct) mode: one edge per (row, column) pair, so the 3-row,
// 4-column simple_df yields 12 entities/edges, plus 3 synthetic event rows
// (nodes = 12 + 3 = 15).
test('hyperedges', () => {
const h = hypergraph(simple_df);
expect('entities' in h);
expect(h.entities.numRows).toBe(12);
expect('nodes' in h);
expect(h.nodes.numRows).toBe(15);
expect('edges' in h);
expect(h.edges.numRows).toBe(12);
expect('events' in h);
expect(h.events.numRows).toBe(3);
expect('graph' in h);
const edges = h.edges;
// Event ids cycle per row, repeated once per column.
expect([...edges.get('event_id')]).toEqual([
'event_id::0',
'event_id::1',
'event_id::2',
'event_id::0',
'event_id::1',
'event_id::2',
'event_id::0',
'event_id::1',
'event_id::2',
'event_id::0',
'event_id::1',
'event_id::2',
]);
expect([
...edges.get('edge_type')
]).toEqual(['a1', 'a1', 'a1', 'a2', 'a2', 'a2', 'id', 'id', 'id', 'π', 'π', 'π']);
// Attribute ids are namespaced as '<column>::<value>'.
expect([...edges.get('attrib_id')]).toEqual([
'a1::1',
'a1::2',
'a1::3',
'a2::red',
'a2::blue',
'a2::green',
'id::a',
'id::b',
'id::c',
'π::Γ¦ski ΔΛmΕjΔ',
'π::π',
'π::s',
]);
// Original row attributes are carried onto every edge of that row.
expect([
...edges.get('id')
]).toEqual(['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c']);
expect([...edges.get('a1')]).toEqual([1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3]);
expect([...edges.get('a2')]).toEqual([
'red',
'blue',
'green',
'red',
'blue',
'green',
'red',
'blue',
'green',
'red',
'blue',
'green'
]);
expect([...edges.get('π')]).toEqual([
'Γ¦ski ΔΛmΕjΔ',
'π',
's',
'Γ¦ski ΔΛmΕjΔ',
'π',
's',
'Γ¦ski ΔΛmΕjΔ',
'π',
's',
'Γ¦ski ΔΛmΕjΔ',
'π',
's'
]);
});
// Direct mode links attribute values to each other (no synthetic event nodes).
test('hyperedges_direct', () => {
const h = hypergraphDirect(hyper_df);
expect('edges' in h);
expect(h.edges.numRows).toBe(9);
expect('nodes' in h);
expect(h.nodes.numRows).toBe(9);
});
// Putting all columns in the same category collapses equal values across
// columns into one node (9 -> 6 nodes for hyper_df).
test('hyperedges_direct_categories', () => {
const h = hypergraphDirect(
hyper_df,
{categories: {aa: 'N', bb: 'N', cc: 'N'}},
);
expect('edges' in h);
expect(h.edges.numRows).toBe(9);
expect('nodes' in h);
expect(h.nodes.numRows).toBe(6);
});
// edgeShape restricts which column pairs produce edges; the edge count scales
// with the number of (source column -> destination column) pairs requested.
test('hyperedges_direct_manual_shaping', () => {
const h1 = hypergraphDirect(
hyper_df,
{edgeShape: {'aa': ['cc'], 'cc': ['cc']}},
);
expect('edges' in h1);
expect(h1.edges.numRows).toBe(6);
const h2 = hypergraphDirect(
hyper_df,
{edgeShape: {'aa': ['cc', 'bb', 'aa'], 'cc': ['cc']}},
);
expect('edges' in h2);
expect(h2.edges.numRows).toBe(12);
});
// dropEdgeAttrs: true omits the original row attribute columns from the edge
// table; only the 3 selected columns over 3 rows remain (9 edges/entities).
test('drop_edge_attrs', () => {
const h = hypergraph(simple_df, {columns: ['id', 'a1', 'π'], dropEdgeAttrs: true});
expect('entities' in h);
expect(h.entities.numRows).toBe(9);
expect('nodes' in h);
expect(h.nodes.numRows).toBe(12);
expect('edges' in h);
expect(h.edges.numRows).toBe(9);
expect('events' in h);
expect(h.events.numRows).toBe(3);
expect('graph' in h);
const edges = h.edges;
expect([...edges.get('event_id')]).toEqual([
'event_id::0',
'event_id::1',
'event_id::2',
'event_id::0',
'event_id::1',
'event_id::2',
'event_id::0',
'event_id::1',
'event_id::2',
]);
expect([
...edges.get('edge_type')
]).toEqual(['a1', 'a1', 'a1', 'id', 'id', 'id', 'π', 'π', 'π']);
expect([...edges.get('attrib_id')]).toEqual([
'a1::1',
'a1::2',
'a1::3',
'id::a',
'id::b',
'id::c',
'π::Γ¦ski ΔΛmΕjΔ',
'π::π',
'π::s',
]);
});
// Same flag in direct mode: edges carry only src/dst/edge_type columns, and
// edge_type encodes the '<src column>::<dst column>' pair.
test('drop_edge_attrs_direct', () => {
const h = hypergraphDirect(
simple_df,
{columns: ['id', 'a1', 'π'], edgeShape: {'id': ['a1'], 'a1': ['π']}, dropEdgeAttrs: true});
expect('entities' in h);
expect(h.entities.numRows).toBe(9);
expect('nodes' in h);
expect(h.nodes.numRows).toBe(9);
expect('edges' in h);
expect(h.edges.numRows).toBe(6);
expect('events' in h);
expect(h.events.numRows).toBe(0);
expect('graph' in h);
const edges = h.edges;
expect([...edges.get('event_id')]).toEqual([
'event_id::0',
'event_id::1',
'event_id::2',
'event_id::0',
'event_id::1',
'event_id::2',
]);
expect([
...edges.get('edge_type')
]).toEqual(['a1::π', 'a1::π', 'a1::π', 'id::a1', 'id::a1', 'id::a1']);
expect([...edges.get('src')]).toEqual(['a1::1', 'a1::2', 'a1::3', 'id::a', 'id::b', 'id::c']);
expect([
...edges.get('dst')
]).toEqual(['π::Γ¦ski ΔΛmΕjΔ', 'π::π', 'π::s', 'a1::1', 'a1::2', 'a1::3']);
});
// `skip` excludes a column from the hypergraph entirely; `dropNulls`
// controls whether null cells still produce nodes/edges.
test('skip_hyper', () => {
const df = new DataFrame({
a: Series.new(['a', null, 'b']),
b: Series.new(['a', 'b', 'c']),
c: Series.new([1, 2, 3]).cast(new Int32),
});
const h = hypergraph(df, {skip: ['c'], dropNulls: false});
expect(h.graph.numNodes).toBe(9);
expect(h.graph.numEdges).toBe(6);
});
// With dropNulls the single null in column `a` removes one node and one edge
// relative to skip_hyper above.
test('skip_dropNulls_hyper', () => {
const df = new DataFrame({
a: Series.new(['a', null, 'b']),
b: Series.new(['a', 'b', 'c']),
c: Series.new([1, 2, 3]).cast(new Int32),
});
const h = hypergraph(df, {skip: ['c'], dropNulls: true});
expect(h.graph.numNodes).toBe(8);
expect(h.graph.numEdges).toBe(5);
});
test('skip_direct', () => {
const df = new DataFrame({
a: Series.new(['a', null, 'b']),
b: Series.new(['a', 'b', 'c']),
c: Series.new([1, 2, 3]).cast(new Int32),
});
const h = hypergraphDirect(df, {skip: ['c'], dropNulls: false});
expect(h.graph.numNodes).toBe(6);
expect(h.graph.numEdges).toBe(3);
});
test('skip_dropNulls_direct', () => {
const df = new DataFrame({
a: Series.new(['a', null, 'b']),
b: Series.new(['a', 'b', 'c']),
c: Series.new([1, 2, 3]).cast(new Int32),
});
const h = hypergraphDirect(df, {skip: ['c'], dropNulls: true});
expect(h.graph.numNodes).toBe(4);
expect(h.graph.numEdges).toBe(2);
});
test('dropNulls_hyper', () => {
const df = new DataFrame({
a: Series.new(['a', null, 'c']),
i: Series.new([1, 2, null]).cast(new Int32),
});
const h = hypergraph(df, {dropNulls: true});
expect(h.graph.numNodes).toBe(7);
expect(h.graph.numEdges).toBe(4);
});
test('dropNulls_direct', () => {
const df = new DataFrame({
a: Series.new(['a', null, 'a']),
i: Series.new([1, 1, null]).cast(new Int32),
});
const h = hypergraphDirect(df, {dropNulls: true});
expect(h.graph.numNodes).toBe(2);
expect(h.graph.numEdges).toBe(1);
});
// Rows containing a null never form an (x, y) hyperedge, whether attributes
// are dropped or kept — both variants yield the same 5 hits.
test('skip_skip_null_hyperedge', () => {
const df = new DataFrame({
x: Series.new(['a', 'b', 'c']),
y: Series.new(['aa', null, 'cc']),
});
const expected_hits = ['a', 'b', 'c', 'aa', 'cc'];
const skip_attr_h_edges = hypergraph(df, {dropEdgeAttrs: true}).edges;
expect(skip_attr_h_edges.numRows).toBe(expected_hits.length);
const default_h_edges = hypergraph(df).edges;
expect(default_h_edges.numRows).toBe(expected_hits.length);
});
| 0 |
rapidsai_public_repos/node/modules/cugraph
|
rapidsai_public_repos/node/modules/cugraph/test/graph-tests.ts
|
// Copyright (c) 2020-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {setDefaultAllocator} from '@rapidsai/cuda';
import {Series} from '@rapidsai/cudf';
import {DedupedEdgesGraph, Graph} from '@rapidsai/cugraph';
import {CudaMemoryResource, DeviceBuffer} from '@rapidsai/rmm';
// Route all device allocations made by these tests through one shared
// CUDA memory resource.
const mr = new CudaMemoryResource();
setDefaultAllocator((byteLength: number) => new DeviceBuffer(byteLength, mr));
// Exercises the Graph API on a 4-node, 4-edge directed cycle of IP labels.
describe('Graph', () => {
  const src = Series.new(['192.168.1.1', '172.217.5.238', '216.228.121.209', '192.16.31.23']);
  const dst = Series.new(['172.217.5.238', '216.228.121.209', '192.16.31.23', '192.168.1.1']);
  const graph = Graph.fromEdgeList(src, dst);
  test('numNodes', () => { expect(graph.numNodes).toEqual(4); });
  // Bug fix: this test previously re-asserted `numNodes`; a test named
  // "numEdges" must check the edge count (4 edges in the cycle).
  test('numEdges', () => { expect(graph.numEdges).toEqual(4); });
  test(`nodes`, () => {
    // The node list is the distinct union of sources and destinations.
    const nodes = graph.nodes;
    const expected = src.concat(dst).unique();
    expect([...nodes.get('node')]).toEqual([...expected]);
  });
  test(`edges`, () => {
    const edges = graph.edges;
    expect([...edges.get('id')]).toEqual([0, 1, 2, 3]);
    expect([...edges.get('src')]).toEqual([...src]);
    expect([...edges.get('dst')]).toEqual([...dst]);
    // Edge lists built without weights default every weight to 1.0.
    expect([...edges.get('weight')]).toEqual([1.0, 1.0, 1.0, 1.0]);
  });
  test(`nodeIds`, () => {
    const ids = graph.nodeIds.get('id');
    expect([...ids]).toEqual([0, 1, 2, 3]);
  });
  test(`edgeIds`, () => {
    const edgeIds = graph.edgeIds;
    expect([...edgeIds.get('id')]).toEqual([0, 1, 2, 3]);
    expect([...edgeIds.get('src')]).toEqual([2, 0, 3, 1]);
    expect([...edgeIds.get('dst')]).toEqual([0, 3, 1, 2]);
  });
  test(`degree`, () => {
    // In a directed 4-cycle every vertex has total degree 2 (in 1 + out 1).
    const degree = graph.degree();
    expect([...degree.get('vertex')]).toEqual([0, 1, 2, 3]);
    expect([...degree.get('degree')]).toEqual([2, 2, 2, 2]);
  });
  test(`dedupeEdges`, () => {
    const src = Series.new(['1', '2', '2', '3', '4', '1']);
    const dst = Series.new(['2', '3', '3', '4', '1', '2']);
    const graph = Graph.fromEdgeList(src, dst);
    expect(graph.numNodes).toBe(4);
    expect(graph.numEdges).toBe(6);
    // Deduping collapses the repeated 1->2 and 2->3 edges: 6 -> 4 edges.
    const dd_graph = graph.dedupeEdges();
    expect(dd_graph).toBeInstanceOf(DedupedEdgesGraph);
    expect(dd_graph.numNodes).toBe(4);
    expect(dd_graph.numEdges).toBe(4);
  });
});
// Exercises DedupedEdgesGraph built from a 6-edge list containing two
// duplicated edges; after dedup it is a 4-node, 4-edge cycle.
describe('DedupedEdgesGraph', () => {
  const src = Series.new(['1', '2', '2', '3', '4', '1']);
  const dst = Series.new(['2', '3', '3', '4', '1', '2']);
  const dd_src = Series.new(['1', '2', '3', '4']);
  const dd_dst = Series.new(['2', '3', '4', '1']);
  const graph = DedupedEdgesGraph.fromEdgeList(src, dst);
  test('numNodes', () => { expect(graph.numNodes).toEqual(4); });
  // Bug fix: this test previously re-asserted `numNodes`; a test named
  // "numEdges" must check the deduplicated edge count (4).
  test('numEdges', () => { expect(graph.numEdges).toEqual(4); });
  test(`nodes`, () => {
    const nodes = graph.nodes;
    const expected = dd_src.concat(dd_dst).unique();
    expect([...nodes.get('node')]).toEqual([...expected]);
  });
  test(`edges`, () => {
    const edges = graph.edges;
    expect([...edges.get('id')]).toEqual([0, 1, 2, 3]);
    expect([...edges.get('src')]).toEqual([...dd_src]);
    expect([...edges.get('dst')]).toEqual([...dd_dst]);
    // Duplicated input edges accumulate weight: 1->2 and 2->3 appear twice.
    expect([...edges.get('weight')]).toEqual([2.0, 2.0, 1.0, 1.0]);
  });
  test(`nodeIds`, () => {
    const ids = graph.nodeIds.get('id');
    expect([...ids]).toEqual([0, 1, 2, 3]);
  });
  test(`edgeIds`, () => {
    const edgeIds = graph.edgeIds;
    expect([...edgeIds.get('id')]).toEqual([0, 1, 2, 3]);
    expect([...edgeIds.get('src')]).toEqual([0, 1, 2, 3]);
    expect([...edgeIds.get('dst')]).toEqual([1, 2, 3, 0]);
  });
  test(`degree`, () => {
    const degree = graph.degree();
    expect([...degree.get('vertex')]).toEqual([0, 1, 2, 3]);
    expect([...degree.get('degree')]).toEqual([2, 2, 2, 2]);
  });
});
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.