repo_id
stringlengths
21
96
file_path
stringlengths
31
155
content
stringlengths
1
92.9M
__index_level_0__
int64
0
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/patches/thrust_transform_iter_with_reduce_by_key.diff
diff --git a/thrust/iterator/transform_input_output_iterator.h b/thrust/iterator/transform_input_output_iterator.h index f512a36..a5f725d 100644 --- a/thrust/iterator/transform_input_output_iterator.h +++ b/thrust/iterator/transform_input_output_iterator.h @@ -102,6 +102,8 @@ template <typename InputFunction, typename OutputFunction, typename Iterator> /*! \endcond */ + transform_input_output_iterator() = default; + /*! This constructor takes as argument a \c Iterator an \c InputFunction and an * \c OutputFunction and copies them to a new \p transform_input_output_iterator * diff --git a/thrust/iterator/transform_output_iterator.h b/thrust/iterator/transform_output_iterator.h index 66fb46a..4a68cb5 100644 --- a/thrust/iterator/transform_output_iterator.h +++ b/thrust/iterator/transform_output_iterator.h @@ -104,6 +104,8 @@ template <typename UnaryFunction, typename OutputIterator> /*! \endcond */ + transform_output_iterator() = default; + /*! This constructor takes as argument an \c OutputIterator and an \c * UnaryFunction and copies them to a new \p transform_output_iterator *
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/patches/thrust_faster_scan_compile_times.diff
diff --git a/dependencies/cub/cub/device/dispatch/dispatch_radix_sort.cuh b/dependencies/cub/cub/device/dispatch/dispatch_radix_sort.cuh index b188c75f..3f36656f 100644 --- a/dependencies/cub/cub/device/dispatch/dispatch_radix_sort.cuh +++ b/dependencies/cub/cub/device/dispatch/dispatch_radix_sort.cuh @@ -736,7 +736,7 @@ struct DeviceRadixSortPolicy /// SM60 (GP100) - struct Policy600 : ChainedPolicy<600, Policy600, Policy500> + struct Policy600 : ChainedPolicy<600, Policy600, Policy600> { enum { PRIMARY_RADIX_BITS = (sizeof(KeyT) > 1) ? 7 : 5, // 6.9B 32b keys/s (Quadro P100) diff --git a/dependencies/cub/cub/device/dispatch/dispatch_reduce.cuh b/dependencies/cub/cub/device/dispatch/dispatch_reduce.cuh index e0470ccb..6a0c2ed6 100644 --- a/dependencies/cub/cub/device/dispatch/dispatch_reduce.cuh +++ b/dependencies/cub/cub/device/dispatch/dispatch_reduce.cuh @@ -280,7 +280,7 @@ struct DeviceReducePolicy }; /// SM60 - struct Policy600 : ChainedPolicy<600, Policy600, Policy350> + struct Policy600 : ChainedPolicy<600, Policy600, Policy600> { // ReducePolicy (P100: 591 GB/s @ 64M 4B items; 583 GB/s @ 256M 1B items) typedef AgentReducePolicy< diff --git a/dependencies/cub/cub/device/dispatch/dispatch_scan.cuh b/dependencies/cub/cub/device/dispatch/dispatch_scan.cuh index c2d04588..ac2d10e0 100644 --- a/dependencies/cub/cub/device/dispatch/dispatch_scan.cuh +++ b/dependencies/cub/cub/device/dispatch/dispatch_scan.cuh @@ -177,7 +177,7 @@ struct DeviceScanPolicy }; /// SM600 - struct Policy600 : ChainedPolicy<600, Policy600, Policy520> + struct Policy600 : ChainedPolicy<600, Policy600, Policy600> { typedef AgentScanPolicy< 128, 15, ///< Threads per block, items per thread
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/patches/thrust_faster_sort_compile_times.diff
diff --git a/dependencies/cub/cub/block/block_merge_sort.cuh b/dependencies/cub/cub/block/block_merge_sort.cuh index 4769df36..d86d6342 100644 --- a/dependencies/cub/cub/block/block_merge_sort.cuh +++ b/dependencies/cub/cub/block/block_merge_sort.cuh @@ -91,7 +91,7 @@ __device__ __forceinline__ void SerialMerge(KeyT *keys_shared, KeyT key1 = keys_shared[keys1_beg]; KeyT key2 = keys_shared[keys2_beg]; -#pragma unroll +#pragma unroll 1 for (int item = 0; item < ITEMS_PER_THREAD; ++item) { bool p = (keys2_beg < keys2_end) && @@ -383,7 +383,7 @@ public: // KeyT max_key = oob_default; - #pragma unroll + #pragma unroll 1 for (int item = 1; item < ITEMS_PER_THREAD; ++item) { if (ITEMS_PER_THREAD * linear_tid + item < valid_items) @@ -407,7 +407,7 @@ public: // each thread has sorted keys // merge sort keys in shared memory // - #pragma unroll + #pragma unroll 1 for (int target_merged_threads_number = 2; target_merged_threads_number <= NUM_THREADS; target_merged_threads_number *= 2) diff --git a/dependencies/cub/cub/thread/thread_sort.cuh b/dependencies/cub/cub/thread/thread_sort.cuh index 5d486789..b42fb5f0 100644 --- a/dependencies/cub/cub/thread/thread_sort.cuh +++ b/dependencies/cub/cub/thread/thread_sort.cuh @@ -83,10 +83,10 @@ StableOddEvenSort(KeyT (&keys)[ITEMS_PER_THREAD], { constexpr bool KEYS_ONLY = std::is_same<ValueT, NullType>::value; - #pragma unroll + #pragma unroll 1 for (int i = 0; i < ITEMS_PER_THREAD; ++i) { - #pragma unroll + #pragma unroll 1 for (int j = 1 & i; j < ITEMS_PER_THREAD - 1; j += 2) { if (compare_op(keys[j + 1], keys[j]))
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/patches/thrust_disable_64bit_dispatching.diff
diff --git a/thrust/system/cuda/detail/dispatch.h b/thrust/system/cuda/detail/dispatch.h index d0e3f94..76774b0 100644 --- a/thrust/system/cuda/detail/dispatch.h +++ b/thrust/system/cuda/detail/dispatch.h @@ -32,9 +32,8 @@ status = call arguments; \ } \ else { \ - auto THRUST_PP_CAT2(count, _fixed) = static_cast<thrust::detail::int64_t>(count); \ - status = call arguments; \ - } + throw std::runtime_error("THRUST_INDEX_TYPE_DISPATCH 64-bit count is unsupported in libcudf"); \ + } /** * Dispatch between 32-bit and 64-bit index based versions of the same algorithm @@ -52,10 +51,8 @@ status = call arguments; \ } \ else { \ - auto THRUST_PP_CAT2(count1, _fixed) = static_cast<thrust::detail::int64_t>(count1); \ - auto THRUST_PP_CAT2(count2, _fixed) = static_cast<thrust::detail::int64_t>(count2); \ - status = call arguments; \ - } + throw std::runtime_error("THRUST_DOUBLE_INDEX_TYPE_DISPATCH 64-bit count is unsupported in libcudf"); \ + } /** * Dispatch between 32-bit and 64-bit index based versions of the same algorithm * implementation. This version allows using different token sequences for callables
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/patches/cugraph.patch
diff --git a/cpp/cmake/thirdparty/get_nccl.cmake b/cpp/cmake/thirdparty/get_nccl.cmake index 118ae37..2b04d1f 100644 --- a/cpp/cmake/thirdparty/get_nccl.cmake +++ b/cpp/cmake/thirdparty/get_nccl.cmake @@ -23,16 +23,13 @@ function(find_and_configure_nccl) rapids_find_generate_module(NCCL HEADER_NAMES nccl.h LIBRARY_NAMES nccl + BUILD_EXPORT_SET cugraph-exports ) # Currently NCCL has no CMake build-system so we require # it built and installed on the machine already - rapids_find_package(NCCL REQUIRED) + rapids_find_package(NCCL REQUIRED BUILD_EXPORT_SET cugraph-exports) endfunction() find_and_configure_nccl() - - - -
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/ConfigureOpenGL.cmake
#============================================================================= # Copyright (c) 2020, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #============================================================================= include_guard(GLOBAL) # include(FindOpenGL REQUIRED) find_package(OpenGL REQUIRED EGL OpenGL) if(NOT OPENGL_FOUND) message(FATAL_ERROR "OpenGL not found") endif() message(STATUS "OpenGL libraries: " ${OPENGL_LIBRARIES}) message(STATUS "OpenGL includes: " ${OPENGL_INCLUDE_DIR}) message(STATUS "OPENGL_egl_LIBRARY: " ${OPENGL_egl_LIBRARY}) message(STATUS "OPENGL_EGL_INCLUDE_DIRS: " ${OPENGL_EGL_INCLUDE_DIRS})
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/ConfigureNapi.cmake
#============================================================================= # Copyright (c) 2020, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #============================================================================= include_guard(GLOBAL) execute_process(COMMAND node -p "require('node-addon-api').include.replace(/\"/g, '')" WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" OUTPUT_VARIABLE NAPI_INCLUDE_DIR OUTPUT_STRIP_TRAILING_WHITESPACE) list(APPEND NAPI_INCLUDE_DIRS ${CMAKE_JS_INC}) list(APPEND NAPI_INCLUDE_DIRS ${NAPI_INCLUDE_DIR}) list(APPEND NODE_RAPIDS_CMAKE_C_FLAGS -DNAPI_EXPERIMENTAL -DNAPI_CPP_EXCEPTIONS -DNODE_ADDON_API_DISABLE_DEPRECATED) list(APPEND NODE_RAPIDS_CMAKE_CXX_FLAGS -DNAPI_EXPERIMENTAL -DNAPI_CPP_EXCEPTIONS -DNODE_ADDON_API_DISABLE_DEPRECATED) list(APPEND NODE_RAPIDS_CMAKE_CUDA_FLAGS -DNAPI_EXPERIMENTAL -DNAPI_CPP_EXCEPTIONS -DNODE_ADDON_API_DISABLE_DEPRECATED) message(STATUS "CMAKE_JS_INC: ${CMAKE_JS_INC}") message(STATUS "NAPI_INCLUDE_DIR: ${NAPI_INCLUDE_DIR}") message(STATUS "NAPI_INCLUDE_DIRS: ${NAPI_INCLUDE_DIRS}") message(STATUS "NODE_RAPIDS_CMAKE_C_FLAGS: ${NODE_RAPIDS_CMAKE_C_FLAGS}") message(STATUS "NODE_RAPIDS_CMAKE_CXX_FLAGS: ${NODE_RAPIDS_CMAKE_CXX_FLAGS}") message(STATUS "NODE_RAPIDS_CMAKE_CUDA_FLAGS: ${NODE_RAPIDS_CMAKE_CUDA_FLAGS}")
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/ConfigureRMM.cmake
#============================================================================= # Copyright (c) 2020-2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #============================================================================= include_guard(GLOBAL) function(find_and_configure_rmm) include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_cpm.cmake) include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_version.cmake) include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/ConfigureThrust.cmake) _get_rapidsai_module_version(rmm VERSION) _set_thrust_dir_if_exists() _set_package_dir_if_exists(rmm rmm) if(NOT TARGET rmm::rmm) _get_major_minor_version(${VERSION} MAJOR_AND_MINOR) _get_update_disconnected_state(rmm ${VERSION} UPDATE_DISCONNECTED) CPMFindPackage(NAME rmm # EXCLUDE_FROM_ALL TRUE VERSION ${VERSION} GIT_REPOSITORY https://github.com/rapidsai/rmm.git GIT_TAG branch-${MAJOR_AND_MINOR} GIT_SHALLOW TRUE ${UPDATE_DISCONNECTED} OPTIONS "BUILD_TESTS OFF" "BUILD_BENCHMARKS OFF" "DISABLE_DEPRECATION_WARNING ${DISABLE_DEPRECATION_WARNINGS}") endif() set(rmm_VERSION "${rmm_VERSION}" PARENT_SCOPE) endfunction() find_and_configure_rmm()
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/ConfigureArrow.cmake
# ============================================================================= # Copyright (c) 2020-2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except # in compliance with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License # is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing permissions and limitations under # the License. # ============================================================================= include_guard(GLOBAL) # Finding arrow is far more complex than it should be, and as a result we violate multiple linting # rules aiming to limit complexity. Since all our other CMake scripts conform to expectations # without undue difficulty, disabling those rules for just this function is our best approach for # now. The spacing between this comment, the cmake-lint directives, and the function docstring is # necessary to prevent cmake-format from trying to combine the lines. # cmake-lint: disable=R0912,R0913,R0915 # This function finds arrow and sets any additional necessary environment variables. 
function(find_and_configure_arrow VERSION BUILD_STATIC ENABLE_S3 ENABLE_ORC ENABLE_PYTHON ENABLE_PARQUET ) include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_cpm.cmake) string(TOLOWER "${CMAKE_BUILD_TYPE}" _build_type) _clean_build_dirs_if_not_fully_built(arrow "${_build_type}/libarrow") _clean_build_dirs_if_not_fully_built(arrow "${_build_type}/libparquet") _clean_build_dirs_if_not_fully_built(arrow "${_build_type}/libarrow_cuda") _clean_build_dirs_if_not_fully_built(arrow "${_build_type}/libarrow_dataset") _set_package_dir_if_exists(Arrow arrow) _set_package_dir_if_exists(Parquet arrow) _set_package_dir_if_exists(ArrowCUDA arrow) _set_package_dir_if_exists(ArrowDataset arrow) set(ARROW_BUILD_SHARED ON) set(ARROW_BUILD_STATIC OFF) if(NOT ARROW_ARMV8_ARCH) set(ARROW_ARMV8_ARCH "armv8-a") endif() if(NOT ARROW_SIMD_LEVEL) set(ARROW_SIMD_LEVEL "NONE") endif() if(BUILD_STATIC) set(ARROW_BUILD_STATIC ON) set(ARROW_BUILD_SHARED OFF) # Turn off CPM using `find_package` so we always download and make sure we get proper static # library # set(CPM_DOWNLOAD_ALL TRUE) endif() set(ARROW_PYTHON_OPTIONS "") if(ENABLE_PYTHON) list(APPEND ARROW_PYTHON_OPTIONS "ARROW_PYTHON ON") # Arrow's logic to build Boost from source is busted, so we have to get it from the system. list(APPEND ARROW_PYTHON_OPTIONS "BOOST_SOURCE SYSTEM") list(APPEND ARROW_PYTHON_OPTIONS "ARROW_DEPENDENCY_SOURCE AUTO") endif() set(ARROW_PARQUET_OPTIONS "") if(ENABLE_PARQUET) # Arrow's logic to build Boost from source is busted, so we have to get it from the system. list(APPEND ARROW_PARQUET_OPTIONS "BOOST_SOURCE SYSTEM") list(APPEND ARROW_PARQUET_OPTIONS "Thrift_SOURCE BUNDLED") list(APPEND ARROW_PARQUET_OPTIONS "ARROW_DEPENDENCY_SOURCE AUTO") endif() # Set this so Arrow correctly finds the CUDA toolkit when the build machine does not have the CUDA # driver installed. This must be an env var. 
set(ENV{CUDA_LIB_PATH} "${CUDAToolkit_LIBRARY_DIR}/stubs") include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_cpm.cmake) include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_version.cmake) # Set this so Arrow doesn't add `-Werror` to # CMAKE_CXX_FLAGS when CMAKE_BUILD_TYPE=Debug set(BUILD_WARNING_LEVEL "PRODUCTION") set(BUILD_WARNING_LEVEL "PRODUCTION" PARENT_SCOPE) set(BUILD_WARNING_LEVEL "PRODUCTION" CACHE STRING "" FORCE) _get_update_disconnected_state(Arrow ${VERSION} UPDATE_DISCONNECTED) find_package(OpenSSL REQUIRED) rapids_cpm_find( Arrow ${VERSION} GLOBAL_TARGETS arrow_shared arrow_static parquet_shared parquet_static arrow_cuda_shared arrow_cuda_static arrow_dataset_shared arrow_dataset_static CPM_ARGS ${UPDATE_DISCONNECTED} # EXCLUDE_FROM_ALL TRUE GIT_REPOSITORY https://github.com/apache/arrow.git GIT_TAG apache-arrow-${VERSION} GIT_SHALLOW TRUE SOURCE_SUBDIR cpp OPTIONS "CMAKE_VERBOSE_MAKEFILE ON" "CUDA_USE_STATIC_CUDA_RUNTIME ON" "ARROW_IPC ON" "ARROW_CUDA ON" "ARROW_DATASET ON" "ARROW_WITH_BACKTRACE ON" "ARROW_CXXFLAGS -w" "ARROW_JEMALLOC OFF" "ARROW_S3 ${ENABLE_S3}" "ARROW_ORC ${ENABLE_ORC}" # e.g. 
needed by blazingsql-io ${ARROW_PARQUET_OPTIONS} "ARROW_PARQUET ${ENABLE_PARQUET}" ${ARROW_PYTHON_OPTIONS} # Arrow modifies CMake's GLOBAL RULE_LAUNCH_COMPILE unless this is off "ARROW_USE_CCACHE OFF" "ARROW_POSITION_INDEPENDENT_CODE ON" "ARROW_ARMV8_ARCH ${ARROW_ARMV8_ARCH}" "ARROW_SIMD_LEVEL ${ARROW_SIMD_LEVEL}" "ARROW_BUILD_STATIC ${ARROW_BUILD_STATIC}" "ARROW_BUILD_SHARED ${ARROW_BUILD_SHARED}" "ARROW_DEPENDENCY_USE_SHARED ${ARROW_BUILD_SHARED}" "ARROW_BOOST_USE_SHARED ${ARROW_BUILD_SHARED}" "ARROW_BROTLI_USE_SHARED ${ARROW_BUILD_SHARED}" "ARROW_GFLAGS_USE_SHARED ${ARROW_BUILD_SHARED}" "ARROW_GRPC_USE_SHARED ${ARROW_BUILD_SHARED}" "ARROW_PROTOBUF_USE_SHARED ${ARROW_BUILD_SHARED}" "ARROW_ZSTD_USE_SHARED ${ARROW_BUILD_SHARED}" "xsimd_SOURCE AUTO" ) set(ARROW_FOUND TRUE) set(ARROW_LIBRARIES "") # Arrow_ADDED: set if CPM downloaded Arrow from Github Arrow_DIR: set if CPM found Arrow on the # system/conda/etc. if(Arrow_ADDED OR Arrow_DIR) if(BUILD_STATIC) list(APPEND ARROW_LIBRARIES arrow_static) list(APPEND ARROW_LIBRARIES arrow_cuda_static) list(APPEND ARROW_LIBRARIES parquet_static) list(APPEND ARROW_LIBRARIES arrow_dataset_static) else() list(APPEND ARROW_LIBRARIES arrow_shared) list(APPEND ARROW_LIBRARIES arrow_cuda_shared) list(APPEND ARROW_LIBRARIES parquet_shared) list(APPEND ARROW_LIBRARIES arrow_dataset_shared) endif() if(Arrow_DIR) # Set this to enable `find_package(ArrowCUDA)` set(ArrowCUDA_DIR "${Arrow_DIR}") find_package(Arrow REQUIRED QUIET) find_package(ArrowCUDA REQUIRED QUIET) if(ENABLE_PARQUET) if(NOT Parquet_DIR) # Set this to enable `find_package(Parquet)` set(Parquet_DIR "${Arrow_DIR}") endif() find_package(Parquet REQUIRED QUIET) # Set this to enable `find_package(ArrowDataset)` set(ArrowDataset_DIR "${Arrow_DIR}") find_package(ArrowDataset REQUIRED QUIET) endif() elseif(Arrow_ADDED) # Copy these files so we can avoid adding paths in Arrow_BINARY_DIR to # target_include_directories. That defeats ccache. 
file(INSTALL "${Arrow_BINARY_DIR}/src/arrow/util/config.h" DESTINATION "${Arrow_SOURCE_DIR}/cpp/src/arrow/util" ) file(INSTALL "${Arrow_BINARY_DIR}/src/arrow/gpu/cuda_version.h" DESTINATION "${Arrow_SOURCE_DIR}/cpp/src/arrow/gpu" ) if(ENABLE_PARQUET) file(INSTALL "${Arrow_BINARY_DIR}/src/parquet/parquet_version.h" DESTINATION "${Arrow_SOURCE_DIR}/cpp/src/parquet" ) endif() # # This shouldn't be necessary! # # Arrow populates INTERFACE_INCLUDE_DIRECTORIES for the `arrow_static` and `arrow_shared` # targets in FindArrow and FindArrowCUDA respectively, so for static source-builds, we have to # do it after-the-fact. # # This only works because we know exactly which components we're using. Don't forget to update # this list if we add more! # foreach(ARROW_LIBRARY ${ARROW_LIBRARIES}) target_include_directories( ${ARROW_LIBRARY} INTERFACE "$<BUILD_INTERFACE:${Arrow_SOURCE_DIR}/cpp/src>" "$<BUILD_INTERFACE:${Arrow_SOURCE_DIR}/cpp/src/generated>" "$<BUILD_INTERFACE:${Arrow_SOURCE_DIR}/cpp/thirdparty/hadoop/include>" "$<BUILD_INTERFACE:${Arrow_SOURCE_DIR}/cpp/thirdparty/flatbuffers/include>" ) endforeach() endif() else() set(ARROW_FOUND FALSE) message(FATAL_ERROR "CUDF: Arrow library not found or downloaded.") endif() if(Arrow_ADDED) set(arrow_code_string [=[ if (TARGET cudf::arrow_shared AND (NOT TARGET arrow_shared)) add_library(arrow_shared ALIAS cudf::arrow_shared) endif() if (TARGET arrow_shared AND (NOT TARGET cudf::arrow_shared)) add_library(cudf::arrow_shared ALIAS arrow_shared) endif() if (TARGET cudf::arrow_static AND (NOT TARGET arrow_static)) add_library(arrow_static ALIAS cudf::arrow_static) endif() if (TARGET arrow_static AND (NOT TARGET cudf::arrow_static)) add_library(cudf::arrow_static ALIAS arrow_static) endif() if (NOT TARGET arrow::flatbuffers) add_library(arrow::flatbuffers INTERFACE IMPORTED) endif() if (NOT TARGET arrow::hadoop) add_library(arrow::hadoop INTERFACE IMPORTED) endif() ]=] ) if(ENABLE_PARQUET) string( APPEND arrow_code_string [=[ 
find_package(Boost) if (NOT TARGET Boost::headers) add_library(Boost::headers INTERFACE IMPORTED) endif() ]=] ) endif() if(NOT TARGET xsimd) string( APPEND arrow_code_string " if(NOT TARGET xsimd) add_library(xsimd INTERFACE IMPORTED) target_include_directories(xsimd INTERFACE \"${Arrow_BINARY_DIR}/xsimd_ep/src/xsimd_ep-install/include\") endif() " ) endif() set(PROJECT_BINARY_DIR_prev "${PROJECT_BINARY_DIR}") set(PROJECT_BINARY_DIR "${Arrow_BINARY_DIR}") rapids_export( BUILD Arrow VERSION ${VERSION} EXPORT_SET arrow_targets GLOBAL_TARGETS arrow_shared arrow_static NAMESPACE cudf:: FINAL_CODE_BLOCK arrow_code_string ) set(arrow_cuda_code_string [=[ if (TARGET cudf::arrow_cuda_shared AND (NOT TARGET arrow_cuda_shared)) add_library(arrow_cuda_shared ALIAS cudf::arrow_cuda_shared) endif() if (TARGET arrow_cuda_shared AND (NOT TARGET cudf::arrow_cuda_shared)) add_library(cudf::arrow_cuda_shared ALIAS arrow_cuda_shared) endif() if (TARGET cudf::arrow_cuda_static AND (NOT TARGET arrow_cuda_static)) add_library(arrow_cuda_static ALIAS cudf::arrow_cuda_static) endif() if (TARGET arrow_cuda_static AND (NOT TARGET cudf::arrow_cuda_static)) add_library(cudf::arrow_cuda_static ALIAS arrow_cuda_static) endif() ]=] ) rapids_export( BUILD ArrowCUDA VERSION ${VERSION} EXPORT_SET arrow_cuda_targets GLOBAL_TARGETS arrow_cuda_shared arrow_cuda_static NAMESPACE cudf:: FINAL_CODE_BLOCK arrow_cuda_code_string ) if(ENABLE_PARQUET) set(arrow_dataset_code_string [=[ if (TARGET cudf::arrow_dataset_shared AND (NOT TARGET arrow_dataset_shared)) add_library(arrow_dataset_shared ALIAS cudf::arrow_dataset_shared) endif() if (TARGET arrow_dataset_shared AND (NOT TARGET cudf::arrow_dataset_shared)) add_library(cudf::arrow_dataset_shared ALIAS arrow_dataset_shared) endif() if (TARGET cudf::arrow_dataset_static AND (NOT TARGET arrow_dataset_static)) add_library(arrow_dataset_static ALIAS cudf::arrow_dataset_static) endif() if (TARGET arrow_dataset_static AND (NOT TARGET cudf::arrow_dataset_static)) 
add_library(cudf::arrow_dataset_static ALIAS arrow_dataset_static) endif() ]=] ) rapids_export( BUILD ArrowDataset VERSION ${VERSION} EXPORT_SET arrow_dataset_targets GLOBAL_TARGETS arrow_dataset_shared arrow_dataset_static NAMESPACE cudf:: FINAL_CODE_BLOCK arrow_dataset_code_string ) set(parquet_code_string [=[ if("${THRIFT_CMAKE_DIR}" STREQUAL "") set(THRIFT_CMAKE_DIR "${CMAKE_CURRENT_LIST_DIR}/thrift_ep-install/lib/cmake/thrift") endif() if(EXISTS ${THRIFT_CMAKE_DIR}/thriftTargets.cmake AND (NOT TARGET thrift::thrift)) include("${THRIFT_CMAKE_DIR}/thriftTargets.cmake") endif() if (TARGET cudf::parquet_shared AND (NOT TARGET parquet_shared)) add_library(parquet_shared ALIAS cudf::parquet_shared) endif() if (TARGET parquet_shared AND (NOT TARGET cudf::parquet_shared)) add_library(cudf::parquet_shared ALIAS parquet_shared) endif() if (TARGET cudf::parquet_static AND (NOT TARGET parquet_static)) add_library(parquet_static ALIAS cudf::parquet_static) endif() if (TARGET parquet_static AND (NOT TARGET cudf::parquet_static)) add_library(cudf::parquet_static ALIAS parquet_static) endif() ]=] ) rapids_export( BUILD Parquet VERSION ${VERSION} EXPORT_SET parquet_targets GLOBAL_TARGETS parquet_shared parquet_static NAMESPACE cudf:: FINAL_CODE_BLOCK parquet_code_string ) endif() set(PROJECT_BINARY_DIR "${PROJECT_BINARY_DIR_prev}") # We generate the arrow-config and arrowcuda-config files when we built arrow locally, so always # do `find_dependency` rapids_export_package(BUILD Arrow ${PROJECT_NAME}-exports) rapids_export_package(INSTALL Arrow ${PROJECT_NAME}-exports) # We have to generate the find_dependency(ArrowCUDA) ourselves since we need to specify # ArrowCUDA_DIR to be where Arrow was found, since Arrow packages ArrowCUDA.config in a # non-standard location rapids_export_package(BUILD ArrowCUDA ${PROJECT_NAME}-exports) if(ENABLE_PARQUET) rapids_export_package(BUILD Parquet ${PROJECT_NAME}-exports) rapids_export_package(BUILD ArrowDataset ${PROJECT_NAME}-exports) endif() 
include("${rapids-cmake-dir}/export/find_package_root.cmake") rapids_export_find_package_root(BUILD Arrow "${Arrow_BINARY_DIR}" ${PROJECT_NAME}-exports) rapids_export_find_package_root(BUILD ArrowCUDA "${Arrow_BINARY_DIR}" ${PROJECT_NAME}-exports) if(ENABLE_PARQUET) rapids_export_find_package_root(BUILD Parquet "${Arrow_BINARY_DIR}" ${PROJECT_NAME}-exports) rapids_export_find_package_root(BUILD ArrowDataset "${Arrow_BINARY_DIR}" ${PROJECT_NAME}-exports) endif() endif() set(ARROW_FOUND ${ARROW_FOUND} PARENT_SCOPE ) set(ARROW_LIBRARIES ${ARROW_LIBRARIES} PARENT_SCOPE ) if(NOT ("${arrow_code_string}" STREQUAL "")) cmake_language(EVAL CODE "${arrow_code_string}") endif() if(NOT ("${arrow_cuda_code_string}" STREQUAL "")) cmake_language(EVAL CODE "${arrow_cuda_code_string}") endif() if(NOT ("${arrow_dataset_code_string}" STREQUAL "")) cmake_language(EVAL CODE "${arrow_dataset_code_string}") endif() if(NOT ("${parquet_code_string}" STREQUAL "")) cmake_language(EVAL CODE "${parquet_code_string}") endif() endfunction() set(CUDF_VERSION_Arrow 9.0.0) find_and_configure_arrow( ${CUDF_VERSION_Arrow} ON # BUILD_STATIC OFF # ENABLE_S3 OFF # ENABLE_ORC OFF # ENABLE_PYTHON ON # ENABLE_PARQUET )
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/get_nccl.cmake
#============================================================================= # Copyright (c) 2022-2023, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #============================================================================= include_guard(GLOBAL) function(find_and_configure_nccl) if((NOT TARGET NCCL::NCCL) AND (NOT TARGET nccl::nccl)) rapids_find_generate_module(NCCL HEADER_NAMES nccl.h LIBRARY_NAMES nccl ) # Currently NCCL has no CMake build-system so we require # it built and installed on the machine already rapids_find_package(NCCL REQUIRED) endif() if (TARGET nccl::nccl AND (NOT TARGET NCCL::NCCL)) add_library(NCCL::NCCL ALIAS nccl::nccl) endif() if (TARGET NCCL::NCCL AND (NOT TARGET nccl::nccl)) add_library(nccl::nccl ALIAS NCCL::NCCL) endif() endfunction() find_and_configure_nccl()
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/ConfigureOpenGLEW.cmake
#============================================================================= # Copyright (c) 2020-2022, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #============================================================================= include_guard(GLOBAL) function(find_and_configure_glew) set(options "") set(oneValueArgs VERSION USE_STATIC EXPORT_SET) set(multiValueArgs "") cmake_parse_arguments(PKG "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) if(PKG_USE_STATIC) set(GLEW_USE_STATIC_LIBS ON) set(GLEW_USE_SHARED_LIBS OFF) set(GLEW_LIBRARY libglew_static) else() set(GLEW_USE_SHARED_LIBS ON) set(GLEW_USE_STATIC_LIBS OFF) set(GLEW_LIBRARY libglew_shared) endif() include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_cpm.cmake) _set_package_dir_if_exists(${GLEW_LIBRARY} glew) if(NOT TARGET ${GLEW_LIBRARY}) rapids_cpm_find(glew ${PKG_VERSION} GLOBAL_TARGETS ${GLEW_LIBRARY} BUILD_EXPORT_SET ${PKG_EXPORT_SET} CPM_ARGS GIT_REPOSITORY https://github.com/Perlmint/glew-cmake.git GIT_TAG glew-cmake-${PKG_VERSION} GIT_SHALLOW TRUE GIT_CONFIG "advice.detachedhead=false" OPTIONS "ONLY_LIBS 0" # Ignore glew's missing VERSION "CMAKE_POLICY_DEFAULT_CMP0048 NEW" "glew-cmake_BUILD_MULTI_CONTEXT OFF" "glew-cmake_BUILD_SINGLE_CONTEXT ON" "glew-cmake_BUILD_SHARED ${GLEW_USE_SHARED_LIBS}" "glew-cmake_BUILD_STATIC ${GLEW_USE_STATIC_LIBS}" ) endif() if(glew_ADDED) install(TARGETS ${GLEW_LIBRARY} EXPORT glew-exports) rapids_export( BUILD glew VERSION ${PKG_VERSION} EXPORT_SET 
glew-exports GLOBAL_TARGETS ${GLEW_LIBRARY} FINAL_CODE_BLOCK "" ) rapids_export_package(BUILD glew ${PKG_EXPORT_SET}) include("${rapids-cmake-dir}/export/find_package_root.cmake") rapids_export_find_package_root(BUILD glew [=[${CMAKE_CURRENT_LIST_DIR}]=] ${PKG_EXPORT_SET}) endif() # add_compile_definitions(GLEW_EGL) target_compile_definitions(${GLEW_LIBRARY} PUBLIC GLEW_EGL) if(PKG_USE_STATIC) set_target_properties(${GLEW_LIBRARY} PROPERTIES POSITION_INDEPENDENT_CODE ON INTERFACE_POSITION_INDEPENDENT_CODE ON) endif() set(glew_VERSION "${glew_VERSION}" PARENT_SCOPE) set(GLEW_LIBRARY "${GLEW_LIBRARY}" PARENT_SCOPE) endfunction()
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/ConfigureCUGRAPH.cmake
#============================================================================= # Copyright (c) 2020-2022, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #============================================================================= include_guard(GLOBAL) function(find_and_configure_cugraph) include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_cpm.cmake) include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_nccl.cmake) include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_version.cmake) include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/ConfigureCUGRAPHOPS.cmake) _get_rapidsai_module_version(cugraph VERSION) _clean_build_dirs_if_not_fully_built(cugraph libcugraph) _set_thrust_dir_if_exists() _set_package_dir_if_exists(cuco cuco) _set_package_dir_if_exists(raft raft) _set_package_dir_if_exists(cugraph cugraph) _set_package_dir_if_exists(cuhornet cuhornet) _set_package_dir_if_exists(cugraph-ops cugraph-ops) if(NOT TARGET cugraph::cugraph) _get_major_minor_version(${VERSION} MAJOR_AND_MINOR) _get_update_disconnected_state(cugraph ${VERSION} UPDATE_DISCONNECTED) CPMFindPackage(NAME cugraph VERSION ${VERSION} GIT_REPOSITORY https://github.com/rapidsai/cugraph.git GIT_TAG branch-${MAJOR_AND_MINOR} # EXCLUDE_FROM_ALL TRUE GIT_SHALLOW TRUE ${UPDATE_DISCONNECTED} SOURCE_SUBDIR cpp OPTIONS "BUILD_TESTS OFF" "BUILD_BENCHMARKS OFF" "BUILD_SHARED_LIBS OFF" "CUDA_STATIC_RUNTIME ON" "BUILD_CUGRAPH_MG_TESTS OFF" PATCH_COMMAND patch --reject-file=- -p1 -N < 
${CMAKE_CURRENT_LIST_DIR}/../patches/cugraph.patch || true ) endif() if(NOT TARGET cugraph::cuHornet AND (NOT DEFINED ENV{NODE_RAPIDS_USE_LOCAL_DEPS_BUILD_DIRS})) set(cuhornet_SOURCE_DIR "${CPM_BINARY_CACHE}/cuhornet-src") if (EXISTS "${cuhornet_SOURCE_DIR}") add_library(cugraph::cuHornet IMPORTED INTERFACE GLOBAL) target_include_directories(cugraph::cuHornet INTERFACE "${cuhornet_SOURCE_DIR}/hornet/include" "${cuhornet_SOURCE_DIR}/hornetsnest/include" "${cuhornet_SOURCE_DIR}/xlib/include" "${cuhornet_SOURCE_DIR}/primitives" ) endif() endif() include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/link_utils.cmake) _statically_link_cuda_toolkit_libs(cugraph::cugraph) set(cugraph_VERSION "${cugraph_VERSION}" PARENT_SCOPE) endfunction() find_and_configure_cugraph()
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/ConfigureOpenBLAS.cmake
#=============================================================================
# Copyright 2022-2023 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)

# Find or build OpenBLAS via rapids_cpm_find, export it as BLAS::BLAS, and
# propagate the usual FindBLAS/FindLAPACK result variables to the caller.
# Keyword args: VERSION, REPOSITORY, BRANCH, PINNED_TAG, EXCLUDE_FROM_ALL.
function(find_or_configure_OpenBLAS)

    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_cpm.cmake)
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_version.cmake)

    # NOTE(review): `${options}` and `${multiValueArgs}` are never set in this
    # function, so they expand to empty — harmless, but worth confirming this
    # is intentional rather than a lost declaration.
    set(oneValueArgs VERSION REPOSITORY BRANCH PINNED_TAG EXCLUDE_FROM_ALL)
    cmake_parse_arguments(PKG "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})

    # Default to a 32-bit-integer (LP64) OpenBLAS build.
    set(INTERFACE64 OFF)
    set(BLA_VENDOR OpenBLAS)
    set(BLA_SIZEOF_INTEGER 4)
    set(BLAS_name "OpenBLAS")
    set(SUFFIX64_UNDERSCORE "")

    # TODO: should we find (or build) 64-bit BLAS?
    # Deliberately disabled (FALSE AND ...): flipping to TRUE on 64-bit hosts
    # would select the ILP64 interface and the "_64"-suffixed target/library.
    if(FALSE AND (CMAKE_SIZEOF_VOID_P EQUAL 8))
        set(INTERFACE64 ON)
        set(BLA_SIZEOF_INTEGER 8)
        set(BLAS_name "OpenBLAS64")
        set(SUFFIX64_UNDERSCORE "_64")
    endif()

    set(BLAS_target "openblas${SUFFIX64_UNDERSCORE}")

    set(FIND_PKG_ARGS ${PKG_VERSION}
        GLOBAL_TARGETS ${BLAS_target}
        BUILD_EXPORT_SET ${PROJECT_NAME}-exports
        INSTALL_EXPORT_SET ${PROJECT_NAME}-exports)

    # A BRANCH argument overrides the pinned tag.
    if(PKG_BRANCH)
        set(PKG_PINNED_TAG "${PKG_BRANCH}")
    endif()

    # OpenBLAS's own CMake is old-style; temporarily relax CMP0048 (project()
    # VERSION handling) and enforce CMP0054 (quoted if() args) while it
    # configures, then restore the caller's policy defaults afterwards.
    cmake_policy(GET CMP0048 CMP0048_orig)
    cmake_policy(GET CMP0054 CMP0054_orig)
    set(CMAKE_POLICY_DEFAULT_CMP0048 OLD)
    set(CMAKE_POLICY_DEFAULT_CMP0054 NEW)

    _get_update_disconnected_state(BLAS ${PKG_VERSION} UPDATE_DISCONNECTED)

    rapids_cpm_find(BLAS ${FIND_PKG_ARGS}
        CPM_ARGS
        GIT_REPOSITORY ${PKG_REPOSITORY}
        GIT_TAG ${PKG_PINNED_TAG}
        GIT_SHALLOW TRUE
        EXCLUDE_FROM_ALL ${PKG_EXCLUDE_FROM_ALL}
        OPTIONS "USE_CUDA 1"
                "C_LAPACK ON"
                "USE_THREAD ON"
                "NUM_PARALLEL 32"
                "BUILD_TESTING OFF"
                "BUILD_WITHOUT_CBLAS OFF"
                "BUILD_WITHOUT_LAPACK OFF"
                "INTERFACE64 ${INTERFACE64}"
                "USE_OPENMP ${OpenMP_FOUND}"
                "SUFFIX64_UNDERSCORE ${SUFFIX64_UNDERSCORE}")

    set(CMAKE_POLICY_DEFAULT_CMP0048 ${CMP0048_orig})
    set(CMAKE_POLICY_DEFAULT_CMP0054 ${CMP0054_orig})

    # Only when CPM actually added (built) OpenBLAS do we need to wire up
    # includes and export rules ourselves.
    if(BLAS_ADDED AND (TARGET ${BLAS_target}))

        # Ensure we export the name of the actual target, not an alias target
        get_target_property(BLAS_aliased_target ${BLAS_target} ALIASED_TARGET)
        if(TARGET ${BLAS_aliased_target})
            set(BLAS_target ${BLAS_aliased_target})
        endif()

        # Make an BLAS::BLAS alias target
        if(NOT TARGET BLAS::BLAS)
            add_library(BLAS::BLAS ALIAS ${BLAS_target})
        endif()

        # Set build INTERFACE_INCLUDE_DIRECTORIES appropriately
        get_target_property(BLAS_include_dirs ${BLAS_target} INCLUDE_DIRECTORIES)
        target_include_directories(${BLAS_target} PUBLIC
            $<BUILD_INTERFACE:${BLAS_BINARY_DIR}> # lapack[e] etc. include paths
            $<BUILD_INTERFACE:${BLAS_include_dirs}> # contains openblas_config.h
            $<BUILD_INTERFACE:${CMAKE_BINARY_DIR}> # contains cblas.h and f77blas.h
            $<BUILD_INTERFACE:${CMAKE_BINARY_DIR}/generated>
        )

        # Code block appended to the generated config so downstream consumers
        # also get the BLAS::BLAS alias.
        string(JOIN "\n" code_string
            "if(NOT TARGET BLAS::BLAS)"
            "  add_library(BLAS::BLAS ALIAS ${BLAS_target})"
            "endif()"
        )

        install(EXPORT "${BLAS_name}Targets"
                FILE ${BLAS_name}Targets.cmake
                NAMESPACE ${BLAS_name}
                DESTINATION "${BLAS_BINARY_DIR}")

        export(EXPORT "${BLAS_name}Targets"
               NAMESPACE ${BLAS_name}
               FILE "${BLAS_BINARY_DIR}/${BLAS_name}Targets.cmake")

        # Generate openblas-config.cmake in build dir
        rapids_export(BUILD BLAS
                      VERSION ${PKG_VERSION}
                      EXPORT_SET "${BLAS_name}Targets"
                      GLOBAL_TARGETS ${BLAS_target}
                      FINAL_CODE_BLOCK code_string)

        # Do `CPMFindPackage(BLAS)` in build dir
        rapids_export_package(BUILD BLAS ${PROJECT_NAME}-exports
                              VERSION ${PKG_VERSION}
                              GLOBAL_TARGETS ${BLAS_target})

        # Tell cmake where it can find the generated blas-config.cmake
        include("${rapids-cmake-dir}/export/find_package_root.cmake")
        rapids_export_find_package_root(BUILD BLAS [=[${CMAKE_CURRENT_LIST_DIR}]=] ${PROJECT_NAME}-exports)
    endif()

    # Publish BLAS/LAPACK result variables to the caller's scope.
    set(BLAS_FOUND TRUE PARENT_SCOPE)
    set(BLAS_ADDED ${BLAS_ADDED} PARENT_SCOPE)
    set(BLAS_BINARY_DIR ${BLAS_BINARY_DIR} PARENT_SCOPE)
    set(BLAS_SOURCE_DIR ${BLAS_SOURCE_DIR} PARENT_SCOPE)

    set(LAPACK_FOUND TRUE PARENT_SCOPE)
    set(LAPACK_ADDED ${BLAS_ADDED} PARENT_SCOPE)
    set(LAPACK_ROOT ${BLAS_BINARY_DIR} PARENT_SCOPE)
    set(LAPACK_BINARY_DIR ${BLAS_BINARY_DIR} PARENT_SCOPE)
    set(LAPACK_SOURCE_DIR ${BLAS_SOURCE_DIR} PARENT_SCOPE)

    # Re-run FindBLAS against the just-configured build dir so the standard
    # BLAS_* variables (libraries, linker flags) get filled in.
    set(BLA_VENDOR OpenBLAS)
    set(BLAS_ROOT ${BLAS_BINARY_DIR})
    find_package(BLAS REQUIRED)

    set(BLAS_DIR ${BLAS_DIR} PARENT_SCOPE)
    set(BLAS_FOUND ${BLAS_FOUND} PARENT_SCOPE)
    set(BLAS_VERSION ${BLAS_VERSION} PARENT_SCOPE)
    set(BLAS_LIBRARIES ${BLAS_LIBRARIES} PARENT_SCOPE)
    set(BLAS_LIBRARY ${BLAS_LIBRARY} PARENT_SCOPE)
    set(BLAS_LINKER_FLAGS ${BLAS_LINKER_FLAGS} PARENT_SCOPE)
endfunction()

if(NOT DEFINED OPENBLAS_VERSION)
    # Before v0.3.18, OpenBLAS throws CMake errors when configuring
    set(OPENBLAS_VERSION "0.3.20")
endif()

if(NOT DEFINED OPENBLAS_BRANCH)
    set(OPENBLAS_BRANCH "")
endif()

if(NOT DEFINED OPENBLAS_TAG)
    set(OPENBLAS_TAG v${OPENBLAS_VERSION})
endif()

if(NOT DEFINED OPENBLAS_REPOSITORY)
    set(OPENBLAS_REPOSITORY https://github.com/xianyi/OpenBLAS.git)
endif()

find_or_configure_OpenBLAS(VERSION ${OPENBLAS_VERSION}
                           REPOSITORY ${OPENBLAS_REPOSITORY}
                           BRANCH ${OPENBLAS_BRANCH}
                           PINNED_TAG ${OPENBLAS_TAG}
                           EXCLUDE_FROM_ALL ${EXCLUDE_OPENBLAS_FROM_ALL}
)
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/get_cpm.cmake
#=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================

# Bootstrap CPM + rapids-cmake for node-rapids builds: points CPM's source and
# binary caches at directories reported by the @rapidsai/core npm package, and
# defines small helpers used by the Configure*.cmake modules. Short-circuit if
# the caches are already configured (this file is included from many modules).
if(DEFINED CPM_SOURCE_CACHE AND (DEFINED ENV{CPM_SOURCE_CACHE}) AND (DEFINED CPM_DOWNLOAD_VERSION) AND (DEFINED CPM_DOWNLOAD_LOCATION))
  if(DEFINED ENV{NODE_RAPIDS_USE_LOCAL_DEPS_BUILD_DIRS})
    message(VERBOSE "get_cpm: CPM already loaded")
    return()
  endif()
  if(DEFINED CPM_BINARY_CACHE AND (DEFINED ENV{CPM_BINARY_CACHE}))
    message(VERBOSE "get_cpm: CPM already loaded")
    return()
  endif()
endif()

if (NOT DEFINED ENV{NODE_RAPIDS_USE_LOCAL_DEPS_BUILD_DIRS})
  # Ask the @rapidsai/core JS package where the shared CPM source cache lives.
  execute_process(COMMAND node -p "require('@rapidsai/core').cpm_source_cache_path"
                  WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
                  OUTPUT_VARIABLE NODE_RAPIDS_CPM_SOURCE_CACHE
                  OUTPUT_STRIP_TRAILING_WHITESPACE)
  set(CPM_SOURCE_CACHE "${NODE_RAPIDS_CPM_SOURCE_CACHE}")
  set(ENV{CPM_SOURCE_CACHE} "${NODE_RAPIDS_CPM_SOURCE_CACHE}")
  message(VERBOSE "get_cpm: Using CPM source cache: $ENV{CPM_SOURCE_CACHE}")
  # Binary cache is segregated per build type (Release/Debug/...).
  execute_process(COMMAND node -p "require('@rapidsai/core').cpm_binary_cache_path"
                  WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
                  OUTPUT_VARIABLE NODE_RAPIDS_CPM_BINARY_CACHE
                  OUTPUT_STRIP_TRAILING_WHITESPACE)
  set(CPM_BINARY_CACHE "${NODE_RAPIDS_CPM_BINARY_CACHE}/${CMAKE_BUILD_TYPE}")
  set(ENV{CPM_BINARY_CACHE} "${CPM_BINARY_CACHE}")
  message(VERBOSE "get_cpm: Using CPM BINARY cache: $ENV{CPM_BINARY_CACHE}")
  # Route FetchContent's checkouts/builds into the shared binary cache too.
  message(VERBOSE "get_cpm: Using CMake FetchContent base dir: ${CPM_BINARY_CACHE}")
  set(FETCHCONTENT_BASE_DIR "${CPM_BINARY_CACHE}" CACHE STRING "" FORCE)
endif()

# Point Thrust_ROOT/Thrust_DIR at a Thrust checkout inside the CPM source
# cache (if one exists) so find_package(Thrust) resolves without a download.
function(_set_thrust_dir_if_exists)
  if(Thrust_ROOT)
    message(STATUS "get_cpm: Thrust_ROOT is '${Thrust_ROOT}'")
    return()
  endif()
  if (NOT DEFINED ENV{NODE_RAPIDS_USE_LOCAL_DEPS_BUILD_DIRS})
    file(GLOB _thrust_srcs "${CPM_SOURCE_CACHE}/thrust/*/thrust" LIST_DIRECTORIES TRUE)
    foreach(_thrust_src IN LISTS _thrust_srcs)
      if(_thrust_src AND (EXISTS "${_thrust_src}/cmake"))
        message(STATUS "get_cpm: setting Thrust_ROOT to '${_thrust_src}/cmake'")
        set(Thrust_DIR "${_thrust_src}/cmake" PARENT_SCOPE)
        set(Thrust_ROOT "${_thrust_src}/cmake" PARENT_SCOPE)
        break()
      else()
        # Fall-back value only used to make the log message readable.
        if(NOT _thrust_src)
          set(_thrust_src "thrust/cmake")
        endif()
        message(STATUS "get_cpm: not setting Thrust_ROOT because '${_thrust_src}' does not exist")
      endif()
    endforeach()
  endif()
endfunction()

# Point <pkg>_ROOT/<pkg>_DIR at a cached CPM build dir for `dir` when present,
# so the subsequent find_package(<pkg>) hits the cache.
function(_set_package_dir_if_exists pkg dir)
  if (NOT DEFINED ENV{NODE_RAPIDS_USE_LOCAL_DEPS_BUILD_DIRS})
    set(_build_dir "${CPM_BINARY_CACHE}/${dir}-build")
    if(EXISTS "${_build_dir}")
      message(STATUS "get_cpm: setting ${pkg}_ROOT to '${_build_dir}'")
      set(${pkg}_DIR "${_build_dir}" PARENT_SCOPE)
      set(${pkg}_ROOT "${_build_dir}" PARENT_SCOPE)
    else()
      message(STATUS "get_cpm: not setting ${pkg}_ROOT because '${_build_dir}' does not exist")
    endif()
  endif()
endfunction()

# Remove a dependency's cached build/subbuild dirs unless its final static or
# shared library artifact exists — i.e. force a rebuild of half-built deps.
function(_clean_build_dirs_if_not_fully_built dir libname)
  if (NOT DEFINED ENV{NODE_RAPIDS_USE_LOCAL_DEPS_BUILD_DIRS})
    if (EXISTS "${CPM_BINARY_CACHE}/${dir}-build/${libname}.a")
      message(STATUS "get_cpm: not clearing shared build dirs since '${CPM_BINARY_CACHE}/${dir}-build/${libname}.a' exists")
    elseif (EXISTS "${CPM_BINARY_CACHE}/${dir}-build/${libname}.so")
      message(STATUS "get_cpm: not clearing shared build dirs since '${CPM_BINARY_CACHE}/${dir}-build/${libname}.so' exists")
    else()
      file(REMOVE_RECURSE "${CPM_BINARY_CACHE}/${dir}-build")
      file(REMOVE_RECURSE "${CPM_BINARY_CACHE}/${dir}-subbuild")
      message(STATUS "get_cpm: clearing shared build dirs since '${CPM_BINARY_CACHE}/${dir}-build/${libname}.(a|so)' does not exist")
    endif()
  endif()
endfunction()

# Convert a target's INTERFACE_INCLUDE_DIRECTORIES into SYSTEM include dirs
# (suppresses third-party header warnings for consumers).
function(_set_interface_include_dirs_as_system target)
  # Operate on the real target behind an alias, if any.
  get_target_property(_real ${target} ALIASED_TARGET)
  if (NOT TARGET ${_real})
    set(_real ${target})
  endif()
  if (TARGET ${_real})
    get_target_property(normal_includes ${target} INTERFACE_INCLUDE_DIRECTORIES)
    get_target_property(system_includes ${target} INTERFACE_SYSTEM_INCLUDE_DIRECTORIES)
    if (normal_includes)
      if (NOT system_includes)
        set(system_includes ${normal_includes})
      else()
        list(APPEND system_includes ${normal_includes})
      endif()
      set_property(TARGET ${_real} PROPERTY INTERFACE_INCLUDE_DIRECTORIES "")
      target_include_directories(${_real} SYSTEM INTERFACE ${system_includes})
    endif()
  endif()
endfunction()

# Extract "MAJOR.MINOR" from a semver string; passes non-matching input through.
function(_get_major_minor_version version out_var)
  if(${version} MATCHES [=[([0-9]+)\.([0-9]+)\.([0-9]+)]=])
    set(${out_var} "${CMAKE_MATCH_1}.${CMAKE_MATCH_2}" PARENT_SCOPE)
  else()
    set(${out_var} "${version}" PARENT_SCOPE)
  endif()
endfunction()

function(_get_update_disconnected_state target version out_var)
  # We only want to set `UPDATE_DISCONNECTED` while
  # the GIT tag hasn't moved from the last time we cloned
  set(cpm_${target}_disconnect_update "UPDATE_DISCONNECTED TRUE")
  # NOTE(review): combining CACHE with PARENT_SCOPE in one set() is not a
  # documented signature — confirm which of the two is intended here.
  set(cpm_${target}_CURRENT_VERSION ${version} CACHE STRING "version of ${target} we checked out" PARENT_SCOPE)
  # NOTE(review): this compares the caller-scope `VERSION` variable, not the
  # `version` parameter (CMake vars are case-sensitive). It works because every
  # caller happens to pass a variable named VERSION — verify this is deliberate.
  if(NOT VERSION VERSION_EQUAL cpm_${target}_CURRENT_VERSION)
    set(cpm_${target}_CURRENT_VERSION ${version} CACHE STRING "version of ${target} we checked out" FORCE PARENT_SCOPE)
    set(cpm_${target}_disconnect_update "")
  endif()
  # NOTE(review): this returns the *name* `cpm_<target>_disconnect_update`, not
  # its value — callers splice ${out_var} directly into CPMFindPackage args, so
  # it looks like `${cpm_${target}_disconnect_update}` was intended; confirm.
  set(${out_var} cpm_${target}_disconnect_update PARENT_SCOPE)
endfunction()

# Download the matching rapids-cmake branch and initialize CPM with the
# node-rapids version override file.
include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_version.cmake)

_get_rapidsai_module_version(rapids-cmake rapids-cmake-version)
_get_major_minor_version(${rapids-cmake-version} rapids-cmake-version)

file(DOWNLOAD https://raw.githubusercontent.com/rapidsai/rapids-cmake/branch-${rapids-cmake-version}/RAPIDS.cmake
     ${CMAKE_BINARY_DIR}/RAPIDS.cmake)
include(${CMAKE_BINARY_DIR}/RAPIDS.cmake)

include(rapids-export)
include(rapids-cmake)
include(rapids-find)
include(rapids-cpm)

execute_process(COMMAND node -p "require('@rapidsai/core').cmake_modules_path"
                WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
                OUTPUT_VARIABLE NODE_RAPIDS_CMAKE_MODULES_PATH
                OUTPUT_STRIP_TRAILING_WHITESPACE)

rapids_cpm_init(OVERRIDE "${NODE_RAPIDS_CMAKE_MODULES_PATH}/../versions.json")
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/ConfigureCUDA.cmake
#=============================================================================
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)

# Selects the CUDA architectures to compile for, then enables the CUDA language.
#
# If `CMAKE_CUDA_ARCHITECTURES` is not defined, build for all supported architectures. If
# `CMAKE_CUDA_ARCHITECTURES` is set to an empty string (""), build for only the current
# architecture. If `CMAKE_CUDA_ARCHITECTURES` is specified by the user, use user setting.
# The CUDAARCHS environment variable takes precedence over the CMake variable.
#
# This needs to be run before enabling the CUDA language due to the default initialization behavior
# of `CMAKE_CUDA_ARCHITECTURES`, https://gitlab.kitware.com/cmake/cmake/-/issues/21302
set(NODE_RAPIDS_CMAKE_BUILD_FOR_ALL_CUDA_ARCHS FALSE)
set(NODE_RAPIDS_CMAKE_BUILD_FOR_DETECTED_ARCHS FALSE)
if(DEFINED ENV{CUDAARCHS})
  if("$ENV{CUDAARCHS}" STREQUAL "")
    # If CUDAARCHS is <empty_string>, auto-detect current GPU arch
    set(NODE_RAPIDS_CMAKE_BUILD_FOR_DETECTED_ARCHS TRUE)
    # BUGFIX: message previously ended with a lone opening quote (`= '`);
    # it now prints the empty value as `= ''` like the CMake-variable branch.
    message(STATUS "Auto-detecting GPU architecture because the CUDAARCHS environment variable = ''")
  elseif("$ENV{CUDAARCHS}" STREQUAL "ALL")
    # If CUDAARCHS is "ALL," build for all supported archs
    set(NODE_RAPIDS_CMAKE_BUILD_FOR_ALL_CUDA_ARCHS TRUE)
    message(STATUS "Building all supported GPU architectures because the CUDAARCHS environment variable = 'ALL'")
  else()
    # Use the current value of the CUDAARCHS env var
    set(CMAKE_CUDA_ARCHITECTURES "$ENV{CUDAARCHS}")
    message(STATUS "Using GPU architectures from CUDAARCHS env var: $ENV{CUDAARCHS}")
  endif()
elseif(DEFINED CMAKE_CUDA_ARCHITECTURES)
  if(CMAKE_CUDA_ARCHITECTURES STREQUAL "")
    # If CMAKE_CUDA_ARCHITECTURES is <empty_string>, auto-detect current GPU arch
    set(NODE_RAPIDS_CMAKE_BUILD_FOR_DETECTED_ARCHS TRUE)
    message(STATUS "Auto-detecting GPU architecture because CMAKE_CUDA_ARCHITECTURES = ''")
  elseif(CMAKE_CUDA_ARCHITECTURES STREQUAL "ALL")
    # If CMAKE_CUDA_ARCHITECTURES is "ALL," build for all supported archs
    set(NODE_RAPIDS_CMAKE_BUILD_FOR_ALL_CUDA_ARCHS TRUE)
    message(STATUS "Building all supported GPU architectures because CMAKE_CUDA_ARCHITECTURES = 'ALL'")
  else()
    # Use the current value of CMAKE_CUDA_ARCHITECTURES
    message(STATUS "Using GPU architectures defined in CMAKE_CUDA_ARCHITECTURES: ${CMAKE_CUDA_ARCHITECTURES}")
  endif()
else()
  # Fall-back to auto-detecting the current GPU architecture
  set(NODE_RAPIDS_CMAKE_BUILD_FOR_DETECTED_ARCHS TRUE)
  message(STATUS "Auto-detecting GPU architectures because CUDAARCHS env var is not defined, and CMAKE_CUDA_ARCHITECTURES was not specified.")
endif()

# Build the list of supported architectures
set(SUPPORTED_CUDA_ARCHITECTURES "60" "70" "75" "80" "86")

find_package(CUDAToolkit REQUIRED)

# CMake < 3.20 has a bug in FindCUDAToolkit where it won't properly detect the CUDAToolkit version
# when find_package(CUDAToolkit) occurs before enable_language(CUDA)
if(NOT DEFINED CUDAToolkit_VERSION AND CMAKE_CUDA_COMPILER)
  execute_process(COMMAND ${CMAKE_CUDA_COMPILER} "--version" OUTPUT_VARIABLE NVCC_OUT)
  if(NVCC_OUT MATCHES [=[ V([0-9]+)\.([0-9]+)\.([0-9]+)]=])
    set(CUDAToolkit_VERSION_MAJOR "${CMAKE_MATCH_1}")
    set(CUDAToolkit_VERSION_MINOR "${CMAKE_MATCH_2}")
    set(CUDAToolkit_VERSION_PATCH "${CMAKE_MATCH_3}")
    set(CUDAToolkit_VERSION "${CMAKE_MATCH_1}.${CMAKE_MATCH_2}.${CMAKE_MATCH_3}")
  endif()
  unset(NVCC_OUT)
endif()

# Prune architectures the installed toolkit cannot compile for:
# sm_86 needs CUDA >= 11.2, sm_80 needs 11, sm_75 needs 10, sm_70 needs 9.
if(CUDAToolkit_VERSION_MAJOR EQUAL 11 AND CUDAToolkit_VERSION_MINOR LESS 2)
  list(REMOVE_ITEM SUPPORTED_CUDA_ARCHITECTURES "86")
endif()
if(CUDAToolkit_VERSION_MAJOR LESS 11)
  list(REMOVE_ITEM SUPPORTED_CUDA_ARCHITECTURES "86")
  list(REMOVE_ITEM SUPPORTED_CUDA_ARCHITECTURES "80")
endif()
if(CUDAToolkit_VERSION_MAJOR LESS 10)
  list(REMOVE_ITEM SUPPORTED_CUDA_ARCHITECTURES "75")
endif()
if(CUDAToolkit_VERSION_MAJOR LESS 9)
  list(REMOVE_ITEM SUPPORTED_CUDA_ARCHITECTURES "70")
endif()

if(NODE_RAPIDS_CMAKE_BUILD_FOR_ALL_CUDA_ARCHS)
  set(CMAKE_CUDA_ARCHITECTURES ${SUPPORTED_CUDA_ARCHITECTURES})
  # CMake architecture list entry of "80" means to build compute and sm. What we want is for the
  # newest arch only to build that way while the rest built only for sm.
  list(POP_BACK CMAKE_CUDA_ARCHITECTURES latest_arch)
  list(TRANSFORM CMAKE_CUDA_ARCHITECTURES APPEND "-real")
  list(APPEND CMAKE_CUDA_ARCHITECTURES ${latest_arch})
elseif(NODE_RAPIDS_CMAKE_BUILD_FOR_DETECTED_ARCHS)
  # Auto-detect available GPU compute architectures
  execute_process(COMMAND node -p "require('@rapidsai/core').cmake_modules_path"
                  WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
                  OUTPUT_VARIABLE NODE_RAPIDS_CMAKE_MODULES_PATH
                  OUTPUT_STRIP_TRAILING_WHITESPACE)
  include(${NODE_RAPIDS_CMAKE_MODULES_PATH}/EvalGpuArchs.cmake)
  evaluate_gpu_archs(CMAKE_CUDA_ARCHITECTURES)
  # "-real" = generate SASS only (no embedded PTX) for the detected archs.
  list(TRANSFORM CMAKE_CUDA_ARCHITECTURES APPEND "-real")
endif()

message(STATUS "BUILD_FOR_DETECTED_ARCHS: ${NODE_RAPIDS_CMAKE_BUILD_FOR_DETECTED_ARCHS}")
message(STATUS "BUILD_FOR_ALL_CUDA_ARCHS: ${NODE_RAPIDS_CMAKE_BUILD_FOR_ALL_CUDA_ARCHS}")
message(STATUS "CMAKE_CUDA_ARCHITECTURES: ${CMAKE_CUDA_ARCHITECTURES}")

# Override the cached version from enable_language(CUDA)
set(CMAKE_CUDA_ARCHITECTURES "${CMAKE_CUDA_ARCHITECTURES}" CACHE STRING "" FORCE)

# Link the CUDA runtime statically into the addon.
# set(BLA_STATIC ON)
set(CUDA_STATIC_RUNTIME ON)
set(CUDA_USE_STATIC_CUDA_RUNTIME ON)
set(CMAKE_CUDA_RUNTIME_LIBRARY STATIC)

# Enable the CUDA language
enable_language(CUDA)

# Accumulate nvcc flags; consumers splice NODE_RAPIDS_CMAKE_CUDA_FLAGS into their targets.
list(APPEND NODE_RAPIDS_CMAKE_CUDA_FLAGS -Werror=cross-execution-space-call)
list(APPEND NODE_RAPIDS_CMAKE_CUDA_FLAGS --expt-extended-lambda --expt-relaxed-constexpr)
list(APPEND NODE_RAPIDS_CMAKE_CUDA_FLAGS -Xcompiler=-Wall,-Werror,-Wno-error=deprecated-declarations)
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/ConfigureCXX.cmake
#=============================================================================
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)

###################################################################################################
# - CMake properties ------------------------------------------------------------------------------

if(UNIX AND NOT APPLE)
  set(LINUX TRUE)
endif()

# Optionally wrap every compile command with sccache for caching.
if(NODE_RAPIDS_USE_SCCACHE)
  find_program(SCCACHE_PROGRAM_PATH sccache)
  if(SCCACHE_PROGRAM_PATH)
    message(STATUS "Using sccache: ${SCCACHE_PROGRAM_PATH}")
    set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${SCCACHE_PROGRAM_PATH}")
    if(DEFINED ENV{SCCACHE_DIR})
      message(STATUS "Using sccache directory: $ENV{SCCACHE_DIR}")
      # BUGFIX: read the SCCACHE_DIR *environment* variable — the guard above
      # checks ENV{SCCACHE_DIR}, and no plain CMake variable `SCCACHE_DIR` is
      # ever set, so the previous `${SCCACHE_DIR}` always expanded to empty.
      set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "SCCACHE_DIR=$ENV{SCCACHE_DIR} ${SCCACHE_PROGRAM_PATH}")
    endif(DEFINED ENV{SCCACHE_DIR})
  endif(SCCACHE_PROGRAM_PATH)
endif(NODE_RAPIDS_USE_SCCACHE)

# Ask the @rapidsai/core npm package for its C++ include dir.
execute_process(COMMAND node -p "require('@rapidsai/core').cpp_core_include_path"
                WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
                OUTPUT_VARIABLE RAPIDS_CORE_INCLUDE_DIR
                OUTPUT_STRIP_TRAILING_WHITESPACE)

message(STATUS "RAPIDS core include: ${RAPIDS_CORE_INCLUDE_DIR}")

###################################################################################################
# - compiler options ------------------------------------------------------------------------------

# Compiler flags accumulate in NODE_RAPIDS_CMAKE_{C,CXX,CUDA}_FLAGS lists (spliced into targets
# by the consumers of this module); the global CMAKE_*_FLAGS strings are cleared so flags are
# applied per-target only.
list(APPEND NODE_RAPIDS_CMAKE_C_FLAGS ${CMAKE_C_FLAGS})
list(APPEND NODE_RAPIDS_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
list(APPEND NODE_RAPIDS_CMAKE_CUDA_FLAGS ${CMAKE_CUDA_FLAGS})

unset(CMAKE_C_FLAGS)
unset(CMAKE_C_FLAGS CACHE)
unset(CMAKE_CXX_FLAGS)
unset(CMAKE_CXX_FLAGS CACHE)
unset(CMAKE_CUDA_FLAGS)
unset(CMAKE_CUDA_FLAGS CACHE)

if(CMAKE_COMPILER_IS_GNUCXX)
  option(NODE_RAPIDS_CMAKE_CXX11_ABI "Enable the GLIBCXX11 ABI" ON)
  list(APPEND NODE_RAPIDS_CMAKE_CXX_FLAGS -Wall -Werror -Wno-unknown-pragmas -Wno-error=deprecated-declarations)
  if(NODE_RAPIDS_CMAKE_CXX11_ABI)
    message(STATUS "Enabling the GLIBCXX11 ABI")
  else()
    message(STATUS "Disabling the GLIBCXX11 ABI")
    list(APPEND NODE_RAPIDS_CMAKE_C_FLAGS -D_GLIBCXX_USE_CXX11_ABI=0)
    list(APPEND NODE_RAPIDS_CMAKE_CXX_FLAGS -D_GLIBCXX_USE_CXX11_ABI=0)
    list(APPEND NODE_RAPIDS_CMAKE_CUDA_FLAGS -Xcompiler=-D_GLIBCXX_USE_CXX11_ABI=0)
  endif(NODE_RAPIDS_CMAKE_CXX11_ABI)
  # always colors
  # BUGFIX: these previously appended to CMAKE_C_FLAGS/CMAKE_CXX_FLAGS/CMAKE_CUDA_FLAGS —
  # the very variables unset above — so the color flags were silently dropped. Append to
  # the NODE_RAPIDS_CMAKE_*_FLAGS lists like every other flag in this file.
  list(APPEND NODE_RAPIDS_CMAKE_C_FLAGS -fdiagnostics-color=always)
  list(APPEND NODE_RAPIDS_CMAKE_CXX_FLAGS -fdiagnostics-color=always)
  list(APPEND NODE_RAPIDS_CMAKE_CUDA_FLAGS -Xcompiler=-fdiagnostics-color=always)
endif(CMAKE_COMPILER_IS_GNUCXX)

# Platform-identification defines.
if(WIN32)
  list(APPEND NODE_RAPIDS_CMAKE_C_FLAGS -D_WIN32)
  list(APPEND NODE_RAPIDS_CMAKE_CXX_FLAGS -D_WIN32)
  list(APPEND NODE_RAPIDS_CMAKE_CUDA_FLAGS -D_WIN32)
elseif(LINUX)
  list(APPEND NODE_RAPIDS_CMAKE_C_FLAGS -D__linux__)
  list(APPEND NODE_RAPIDS_CMAKE_CXX_FLAGS -D__linux__)
  list(APPEND NODE_RAPIDS_CMAKE_CUDA_FLAGS -D__linux__)
elseif(APPLE)
  list(APPEND NODE_RAPIDS_CMAKE_C_FLAGS -D__APPLE__)
  list(APPEND NODE_RAPIDS_CMAKE_CXX_FLAGS -D__APPLE__)
  list(APPEND NODE_RAPIDS_CMAKE_CUDA_FLAGS -D__APPLE__)
endif()

if(DISABLE_DEPRECATION_WARNINGS)
  list(APPEND NODE_RAPIDS_CMAKE_C_FLAGS -Wno-deprecated-declarations)
  list(APPEND NODE_RAPIDS_CMAKE_CXX_FLAGS -Wno-deprecated-declarations)
  list(APPEND NODE_RAPIDS_CMAKE_CUDA_FLAGS -Xcompiler=-Wno-deprecated-declarations)
endif(DISABLE_DEPRECATION_WARNINGS)

# Enable -fPIC for all libs
set(CMAKE_POSITION_INDEPENDENT_CODE ON)

# https://cmake.org/cmake/help/latest/variable/CMAKE_FIND_PACKAGE_TARGETS_GLOBAL.html#variable:CMAKE_FIND_PACKAGE_TARGETS_GLOBAL
set(CMAKE_FIND_PACKAGE_TARGETS_GLOBAL ON)
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/ConfigureCUML.cmake
#=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)

# Locate or build libcuml++ via CPM, pinned to the RAPIDS release branch
# matching this module's version. Exposes `cuml::cuml++` and sets
# `cuml_VERSION` in the caller's scope.
function(find_and_configure_cuml)

    # Helper modules: CPM/caching setup, NCCL, version lookup, and cumlprims_mg.
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_cpm.cmake)
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_nccl.cmake)
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_version.cmake)
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/ConfigureCUMLPRIMS.cmake)

    _get_rapidsai_module_version(cuml VERSION)

    # Drop stale shared build dirs so a half-built libcuml++ isn't reused.
    _clean_build_dirs_if_not_fully_built(cuml libcuml++)

    # Point *_ROOT/*_DIR at previously cached CPM build dirs when present.
    _set_thrust_dir_if_exists()
    _set_package_dir_if_exists(cuml cuml)
    _set_package_dir_if_exists(raft raft)
    _set_package_dir_if_exists(faiss faiss)
    _set_package_dir_if_exists(Treelite cuml)
    _set_package_dir_if_exists(GPUTreeShap cuml)
    _set_package_dir_if_exists(cumlprims_mg cumlprims_mg)

    if(NOT TARGET cuml::cuml)
        _get_major_minor_version(${VERSION} MAJOR_AND_MINOR)
        # Only pass UPDATE_DISCONNECTED while the pinned version is unchanged.
        _get_update_disconnected_state(cuml ${VERSION} UPDATE_DISCONNECTED)
        CPMFindPackage(NAME cuml
            VERSION ${VERSION}
            # EXCLUDE_FROM_ALL TRUE
            GIT_REPOSITORY https://github.com/rapidsai/cuml.git
            GIT_TAG branch-${MAJOR_AND_MINOR}
            GIT_SHALLOW TRUE
            ${UPDATE_DISCONNECTED}
            SOURCE_SUBDIR cpp
            # Static single-node + multi-GPU C++ library, no tests/benchmarks,
            # with FAISS/Treelite/cumlprims_mg linked statically.
            OPTIONS "WITH_UCX ON"
                    "SINGLEGPU OFF"
                    "CUDA_STATIC_RUNTIME ON"
                    "BUILD_TESTS OFF"
                    "BUILD_BENCHMARKS OFF"
                    "DISABLE_OPENMP OFF"
                    "DETECT_CONDA_ENV OFF"
                    "ENABLE_CUMLPRIMS_MG ON"
                    "BUILD_SHARED_LIBS OFF"
                    "BUILD_CUML_MG_TESTS OFF"
                    "BUILD_CUML_MG_BENCH OFF"
                    "BUILD_CUML_STD_COMMS ON"
                    "BUILD_CUML_MPI_COMMS ON"
                    "BUILD_CUML_TESTS OFF"
                    "BUILD_CUML_BENCH OFF"
                    "BUILD_PRIMS_TESTS OFF"
                    "BUILD_CUML_EXAMPLES OFF"
                    "BUILD_CUML_C_LIBRARY OFF"
                    "BUILD_CUML_CPP_LIBRARY ON"
                    "BUILD_CUML_PRIMS_BENCH OFF"
                    "RAFT_USE_FAISS_STATIC ON"
                    "CUML_USE_FAISS_STATIC ON"
                    "CUML_USE_TREELITE_STATIC ON"
                    "CUML_EXPORT_TREELITE_LINKAGE ON"
                    "CUML_USE_CUMLPRIMS_MG_STATIC ON"
            # Local patch applied best-effort: rejects are discarded (-N skips
            # already-applied hunks) and `|| true` keeps configure going.
            PATCH_COMMAND patch --reject-file=- -p1 -N < ${CMAKE_CURRENT_LIST_DIR}/../patches/cuml.patch || true
        )
    endif()

    # Statically link the CUDA toolkit libs into the cuml++ target.
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/link_utils.cmake)
    _statically_link_cuda_toolkit_libs(cuml::cuml++)

    # Export the resolved version to the caller.
    set(cuml_VERSION "${cuml_VERSION}" PARENT_SCOPE)
endfunction()

find_and_configure_cuml()
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/cuda_arch_helpers.cmake
#=============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)

# Resolve a dependency target's static-library path and derive:
#   ${out}       - a target name safe for use in custom-target names ("::" -> "_")
#   ${out_a}     - the library's file path (IMPORTED_LOCATION_<CONFIG> if set,
#                  otherwise a $<TARGET_FILE:...> generator expression)
#   ${out_a_all} - the same path with a ".all" suffix, used as the temporary
#                  name for the un-pruned fat binary during arch-specific relinks
function(_get_lib_location_info dep out out_a out_a_all)
    set(location_prop "IMPORTED_LOCATION_${CMAKE_BUILD_TYPE}")
    string(TOUPPER "${location_prop}" location_prop)
    get_target_property(loc ${dep} ${location_prop})
    if(loc)
        string(REPLACE "\.a" "\.a\.all" loc_all "${loc}")
    else()
        # Not an imported target — fall back to generator expressions.
        set(loc "$<TARGET_FILE:${dep}>")
        set(loc_all "$<TARGET_FILE:${dep}>.all")
    endif()
    string(REPLACE "::" "_" dep_ "${dep}")
    set(${out} "${dep_}" PARENT_SCOPE)
    set(${out_a} "${loc}" PARENT_SCOPE)
    set(${out_a_all} "${loc_all}" PARENT_SCOPE)
endfunction()

# Emit the chain of custom targets that produces `${NAME}_${ARCH}.node`:
# each dependency's fat .a is set aside as `.a.all`, nvprune rewrites the .a
# down to a single architecture, the addon is relinked and renamed, and the
# fat .a files are restored. The targets are serialized via `depends_on`
# (read from and returned to the caller's scope) — ordering is load-bearing.
function(_generate_arch_specific_custom_target)
    set(options "")
    set(oneValueArgs "NAME" "ARCH")
    set(multiValueArgs "DEPENDENCIES")
    cmake_parse_arguments(_ "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})

    set(arch "${__ARCH}")
    set(name "${__NAME}")
    set(deps "${__DEPENDENCIES}")
    set(name_all "${name}_all")
    set(name_arch "${name}_${arch}")
    # NOTE(review): these three lists are initialized but never appended to or
    # read below — possibly leftovers from an earlier implementation; confirm.
    set(nv_prune_commands)
    set(do_rename_commands)
    set(un_rename_commands)

    # First invocation anchors the chain on the addon target itself.
    if(NOT depends_on)
        set(depends_on "${name}")
    endif()

    # Phase 1: for each dependency, stash the fat archive and prune it to ${arch}.
    foreach(dep IN LISTS deps)
        if(TARGET ${dep})
            _get_lib_location_info(${dep} dep dep_a dep_a_all)
            if (dep_a)
                # Rename `${dep_a}` to `${dep_a_all}`
                add_custom_target("${dep}_${arch}_rename_a_to_all_a" ALL
                    COMMAND ${CMAKE_COMMAND} -E rename "${dep_a}" "${dep_a_all}"
                    DEPENDS "${depends_on}"
                    VERBATIM COMMAND_EXPAND_LISTS)
                set(depends_on "${dep}_${arch}_rename_a_to_all_a")
                # Run nvprune to remove archs that aren't ${arch}
                add_custom_target("${dep}_${arch}_nvprune" ALL
                    COMMAND nvprune -gencode=arch=compute_${arch},code=[sm_${arch}] -o "${dep_a}" "${dep_a_all}"
                    DEPENDS "${depends_on}"
                    VERBATIM COMMAND_EXPAND_LISTS)
                set(depends_on "${dep}_${arch}_nvprune")
            endif()
        endif()
    endforeach()

    # Phase 2: set the fat addon aside, relink against the pruned archives,
    # keep the arch-specific result, and restore the fat addon.
    add_custom_target("${name_arch}" ALL
        # Rename `${name}.node` to `${name_all}.node`
        COMMAND ${CMAKE_COMMAND} -E rename ${name}.node ${name_all}.node
        # Relink arch-specific `${name}.node`
        COMMAND ${CMAKE_COMMAND} --build ${CMAKE_CURRENT_BINARY_DIR} --target ${name}.node
        # Rename arch-specific `${name}.node` to `${name_arch}.node`
        COMMAND ${CMAKE_COMMAND} -E rename ${name}.node ${name_arch}.node
        # Rename `${name_all}.node` back to `${name}.node`
        COMMAND ${CMAKE_COMMAND} -E rename ${name_all}.node ${name}.node
        VERBATIM COMMAND_EXPAND_LISTS
        DEPENDS "${depends_on}"
        BYPRODUCTS "${name_arch}.node"
    )
    set(depends_on "${name_arch}")

    # Phase 3: restore each dependency's fat archive for the next arch.
    foreach(dep IN LISTS deps)
        if(TARGET ${dep})
            _get_lib_location_info(${dep} dep dep_a dep_a_all)
            if (dep_a)
                # Rename `${dep_a_all}` to `${dep_a}`
                add_custom_target("${dep}_${arch}_rename_all_a_to_a" ALL
                    COMMAND ${CMAKE_COMMAND} -E rename "${dep_a_all}" "${dep_a}"
                    VERBATIM
                    DEPENDS "${depends_on}"
                    COMMAND_EXPAND_LISTS)
                set(depends_on "${dep}_${arch}_rename_all_a_to_a")
            endif()
        endif()
    endforeach()

    # Hand the chain tail back so the next arch serializes after this one.
    set(depends_on "${depends_on}" PARENT_SCOPE)
endfunction()

# Public entry point: for every architecture the addon target `NAME` was
# compiled for, generate the rename/prune/relink target chain that produces
# one `${NAME}_${arch}.node` per architecture.
function(generate_arch_specific_custom_targets)
    set(options "")
    set(oneValueArgs "NAME")
    set(multiValueArgs "DEPENDENCIES")
    cmake_parse_arguments(_ "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})

    set(name ${__NAME})
    set(deps ${__DEPENDENCIES})
    set(depends_on "")

    get_target_property(cuda_archs ${name} CUDA_ARCHITECTURES)

    foreach(arch IN LISTS cuda_archs)
        # Strip CMake's "-real"/"-virtual" suffixes to get the bare SM number.
        if(arch MATCHES "^(.*)-(real|virtual)$")
            set(arch "${CMAKE_MATCH_1}")
        endif()
        _generate_arch_specific_custom_target(
            NAME "${name}" ARCH "${arch}" DEPENDENCIES ${deps})
    endforeach()
endfunction()
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/install_utils.cmake
#=============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)

# Generate CPack/install/export rules for a node addon.
#
# Keyword arguments:
#   NAME               - addon name; also the default export target and the
#                        CPack component name
#   GLOBAL_TARGETS     - targets to install/export (defaults to NAME)
#   CUDA_ARCHITECTURES - arch list; each entry contributes a prebuilt
#                        `<NAME>_<arch>.node` file to the install set
function(generate_install_rules)
    set(options "")
    set(oneValueArgs NAME)
    set(multiValueArgs GLOBAL_TARGETS CUDA_ARCHITECTURES)
    cmake_parse_arguments(_ "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})

    set(addon_name "${__NAME}")
    set(export_targets ${__GLOBAL_TARGETS})
    set(arch_list ${__CUDA_ARCHITECTURES})
    set(arch_libs)

    # Without explicit targets, export the addon target itself.
    if(NOT export_targets)
        set(export_targets ${addon_name})
    endif()

    # One arch-specific `.node` artifact per architecture; drop CMake's
    # "-real"/"-virtual" suffixes to recover the bare SM number.
    foreach(arch IN LISTS arch_list)
        string(REGEX REPLACE "-(real|virtual)$" "" arch "${arch}")
        list(APPEND arch_libs "${CMAKE_CURRENT_BINARY_DIR}/${addon_name}_${arch}.node")
    endforeach()

    include(CPack)
    include(GNUInstallDirs)
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_cpm.cmake)

    rapids_cmake_install_lib_dir(install_lib_dir)

    set(CMAKE_INSTALL_DEFAULT_COMPONENT_NAME ${addon_name})

    # install target
    install(TARGETS ${export_targets}
            DESTINATION ${install_lib_dir}
            EXPORT ${addon_name}-exports)

    if(arch_libs)
        install(FILES ${arch_libs} TYPE LIB)
    endif()

    set(export_doc "")
    set(export_final_code "")

    rapids_export(
        BUILD ${addon_name}
        EXPORT_SET ${addon_name}-exports
        GLOBAL_TARGETS ${export_targets}
        NAMESPACE rapids::
        DOCUMENTATION export_doc
        FINAL_CODE_BLOCK export_final_code
    )
endfunction()
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/ConfigureCUMLPRIMS.cmake
#=============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)

# Locate or build the (private) cumlprims_mg multi-GPU primitives library via
# CPM. Exposes `cumlprims_mg::cumlprims_mg` and sets `cumlprims_mg_VERSION`
# in the caller's scope. OpenMP is a hard requirement.
function(find_and_configure_cumlprims_mg)

    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_cpm.cmake)
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_version.cmake)
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/ConfigureRAFT.cmake)

    _get_rapidsai_module_version(cumlprims_mg VERSION)

    # Drop stale shared build dirs so a half-built libcumlprims_mg isn't reused.
    _clean_build_dirs_if_not_fully_built(cumlprims_mg libcumlprims_mg)

    _set_package_dir_if_exists(cumlprims_mg cumlprims_mg)

    find_package(OpenMP REQUIRED)

    if(NOT TARGET cumlprims_mg::cumlprims_mg)
        _get_major_minor_version(${VERSION} MAJOR_AND_MINOR)
        # Only pass UPDATE_DISCONNECTED while the pinned version is unchanged.
        _get_update_disconnected_state(cumlprims_mg ${VERSION} UPDATE_DISCONNECTED)
        # NOTE: SSH clone URL — cumlprims_mg is a private repo, so builds
        # require GitHub SSH credentials with access to it.
        CPMFindPackage(NAME cumlprims_mg
            VERSION ${VERSION}
            # EXCLUDE_FROM_ALL TRUE
            GIT_REPOSITORY "[email protected]:rapidsai/cumlprims_mg.git"
            GIT_TAG branch-${MAJOR_AND_MINOR}
            GIT_SHALLOW TRUE
            ${UPDATE_DISCONNECTED}
            SOURCE_SUBDIR cpp
            OPTIONS "BUILD_TESTS OFF"
                    "BUILD_BENCHMARKS OFF"
                    "DETECT_CONDA_ENV OFF"
                    "BUILD_SHARED_LIBS OFF")
    endif()

    # Statically link the CUDA toolkit libs into the cumlprims_mg target.
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/link_utils.cmake)
    _statically_link_cuda_toolkit_libs(cumlprims_mg::cumlprims_mg)

    # Export the resolved version to the caller.
    set(cumlprims_mg_VERSION "${cumlprims_mg_VERSION}" PARENT_SCOPE)
endfunction()

find_and_configure_cumlprims_mg()
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/ConfigureCUDF.cmake
#=============================================================================
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)

# Fetches and configures libcudf (plus its RMM/Arrow/nvcomp dependencies) as a
# static library via CPM for the node-rapids native addons.
function(find_and_configure_cudf)

    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_cpm.cmake)
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_version.cmake)
    # cuDF depends on RMM and Arrow; configure those first.
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/ConfigureRMM.cmake)
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/ConfigureArrow.cmake)

    _get_rapidsai_module_version(cudf VERSION)

    # Remove stale CPM build dirs so an interrupted build is redone from scratch.
    _clean_build_dirs_if_not_fully_built(cudf libcudf)
    _clean_build_dirs_if_not_fully_built(nvcomp libnvcomp)

    # Point find_package at existing CPM checkouts, if any.
    _set_thrust_dir_if_exists()
    _set_package_dir_if_exists(cudf cudf)
    _set_package_dir_if_exists(cuco cuco)
    _set_package_dir_if_exists(dlpack dlpack)
    _set_package_dir_if_exists(jitify jitify)
    _set_package_dir_if_exists(nvcomp nvcomp)
    # All Arrow components resolve out of the single "arrow" build dir.
    _set_package_dir_if_exists(Arrow arrow)
    _set_package_dir_if_exists(Parquet arrow)
    _set_package_dir_if_exists(ArrowCUDA arrow)
    _set_package_dir_if_exists(ArrowDataset arrow)

    if(NOT TARGET cudf::cudf)
        _get_major_minor_version(${VERSION} MAJOR_AND_MINOR)
        _get_update_disconnected_state(cudf ${VERSION} UPDATE_DISCONNECTED)
        CPMFindPackage(NAME     cudf
            VERSION             ${VERSION}
            # EXCLUDE_FROM_ALL  TRUE
            GIT_REPOSITORY      https://github.com/rapidsai/cudf.git
            GIT_TAG             branch-${MAJOR_AND_MINOR}
            GIT_SHALLOW         TRUE
            ${UPDATE_DISCONNECTED}
            SOURCE_SUBDIR       cpp
            OPTIONS             "BUILD_TESTS OFF"
                                "BUILD_BENCHMARKS OFF"
                                "BUILD_SHARED_LIBS OFF"
                                "JITIFY_USE_CACHE ON"
                                "BOOST_SOURCE SYSTEM"
                                "Thrift_SOURCE BUNDLED"
                                "CUDA_STATIC_RUNTIME ON"
                                "CUDF_USE_ARROW_STATIC ON"
                                "CUDF_ENABLE_ARROW_S3 OFF"
                                # "CUDF_ENABLE_ARROW_S3 ON"
                                "CUDF_ENABLE_ARROW_ORC OFF"
                                "CUDF_ENABLE_ARROW_PYTHON OFF"
                                "CUDF_ENABLE_ARROW_PARQUET ON"
                                # "ARROW_DEPENDENCY_SOURCE AUTO"
                                "DISABLE_DEPRECATION_WARNING ON"
                                "CUDF_USE_PROPRIETARY_NVCOMP OFF"
                                "CUDF_USE_PER_THREAD_DEFAULT_STREAM ON")
    endif()

    set(cudf_VERSION "${cudf_VERSION}" PARENT_SCOPE)
    set(ARROW_LIBRARIES ${ARROW_LIBRARIES} PARENT_SCOPE)

    # nvcomp is pulled in by cudf. Relocate its outputs into nvcomp_ROOT and
    # write an exact-version config file so later find_package(nvcomp) calls
    # resolve to this copy.
    _set_package_dir_if_exists(nvcomp nvcomp)
    find_package(nvcomp)
    set_target_properties(nvcomp
        PROPERTIES ARCHIVE_OUTPUT_DIRECTORY "${nvcomp_ROOT}"
                   LIBRARY_OUTPUT_DIRECTORY "${nvcomp_ROOT}")

    include(CMakePackageConfigHelpers)
    write_basic_package_version_file(
        ${nvcomp_ROOT}/nvcomp-config-version.cmake
        VERSION 2.3
        COMPATIBILITY ExactVersion)

    # Rewrite CUDA::<lib> to CUDA::<lib>_static in the link interfaces.
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/link_utils.cmake)
    _statically_link_cuda_toolkit_libs(cudf::cudf)
    _statically_link_cuda_toolkit_libs(cudf::cudftestutil)

endfunction()

find_and_configure_cudf()
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/ConfigureBlazingSQL.cmake
#=============================================================================
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)

# Fetches and configures the BlazingSQL io and engine libraries (static) via
# CPM, builds the Calcite algebra jars with Maven, and copies those jars next
# to the build output.
function(find_and_configure_blazingsql)

    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_cpm.cmake)
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_version.cmake)
    # BlazingSQL builds on cuDF; configure it (and its dependencies) first.
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/ConfigureCUDF.cmake)

    _get_rapidsai_module_version(blazingsql-io VERSION)

    # Remove stale CPM build dirs so an interrupted build is redone from scratch.
    _clean_build_dirs_if_not_fully_built(absl absl/base/libabsl_base)
    _clean_build_dirs_if_not_fully_built(blazingsql-io libblazingsql-io)
    _clean_build_dirs_if_not_fully_built(blazingsql-engine libblazingsql-engine)

    # Point find_package at existing CPM checkouts, if any.
    _set_thrust_dir_if_exists()
    _set_package_dir_if_exists(absl absl)
    _set_package_dir_if_exists(cudf cudf)
    _set_package_dir_if_exists(cuco cuco)
    _set_package_dir_if_exists(dlpack dlpack)
    _set_package_dir_if_exists(jitify jitify)
    _set_package_dir_if_exists(nvcomp nvcomp)
    _set_package_dir_if_exists(blazingsql-io blazingsql-io)
    _set_package_dir_if_exists(blazingsql-engine blazingsql-engine)

    if(NOT TARGET blazingdb::blazingsql-io)
        _get_major_minor_version(${VERSION} MAJOR_AND_MINOR)
        _get_update_disconnected_state(blazingsql-io ${VERSION} UPDATE_DISCONNECTED)
        # NOTE: pulled from a fork that carries the rapids-cmake build changes.
        CPMFindPackage(NAME     blazingsql-io
            VERSION             ${VERSION}
            # EXCLUDE_FROM_ALL  TRUE
            GIT_REPOSITORY      https://github.com/trxcllnt/blazingsql.git
            GIT_TAG             fea/rapids-cmake-${MAJOR_AND_MINOR}
            GIT_SHALLOW         TRUE
            ${UPDATE_DISCONNECTED}
            SOURCE_SUBDIR       io
            OPTIONS             # "S3_SUPPORT ON"
                                "S3_SUPPORT OFF"
                                "GCS_SUPPORT OFF"
                                "BUILD_TESTS OFF"
                                "BUILD_BENCHMARKS OFF"
                                "BUILD_SHARED_LIBS OFF"
                                # "ARROW_DEPENDENCY_SOURCE AUTO"
                                "BLAZINGSQL_IO_BUILD_ARROW_ORC OFF"
                                "BLAZINGSQL_IO_USE_ARROW_STATIC ON"
                                "BLAZINGSQL_IO_BUILD_ARROW_PYTHON OFF"
        )
    endif()

    set(blazingsql-io_VERSION "${blazingsql-io_VERSION}" PARENT_SCOPE)

    _get_rapidsai_module_version(blazingsql-engine VERSION)

    if(NOT TARGET blazingdb::blazingsql-engine)
        _get_major_minor_version(${VERSION} MAJOR_AND_MINOR)
        _get_update_disconnected_state(blazingsql-engine ${VERSION} UPDATE_DISCONNECTED)
        CPMFindPackage(NAME     blazingsql-engine
            VERSION             ${VERSION}
            # EXCLUDE_FROM_ALL  TRUE
            GIT_REPOSITORY      https://github.com/trxcllnt/blazingsql.git
            GIT_TAG             fea/rapids-cmake-${MAJOR_AND_MINOR}
            GIT_SHALLOW         TRUE
            ${UPDATE_DISCONNECTED}
            SOURCE_SUBDIR       engine
            OPTIONS             "BUILD_TESTS OFF"
                                "BUILD_BENCHMARKS OFF"
                                "BUILD_SHARED_LIBS OFF"
                                # "S3_SUPPORT ON"
                                "S3_SUPPORT OFF"
                                "GCS_SUPPORT OFF"
                                "MYSQL_SUPPORT OFF"
                                "SQLITE_SUPPORT OFF"
                                "POSTGRESQL_SUPPORT OFF"
                                "CUDA_STATIC_RUNTIME ON"
                                # "ARROW_DEPENDENCY_SOURCE AUTO"
                                "BLAZINGSQL_ENGINE_USE_ARROW_STATIC ON"
                                "DISABLE_DEPRECATION_WARNING ON"
                                "BLAZINGSQL_IO_USE_ARROW_STATIC ON"
                                "BLAZINGSQL_IO_BUILD_ARROW_ORC OFF"
                                "BLAZINGSQL_IO_BUILD_ARROW_PYTHON OFF"
                                "BLAZINGSQL_ENGINE_ENABLE_DEBUG_UTILS OFF"
                                "BLAZINGSQL_ENGINE_BUILD_ARROW_ORC OFF"
                                "BLAZINGSQL_ENGINE_BUILD_ARROW_PYTHON OFF"
                                "BLAZINGSQL_ENGINE_WITH_PYTHON_ERRORS OFF"
        )
    endif()

    set(blazingsql-engine_VERSION "${blazingsql-engine_VERSION}" PARENT_SCOPE)

    # Arrow bundles Thrift when Thrift_SOURCE=BUNDLED; import those targets
    # from the Arrow build tree if nothing else has defined thrift::thrift.
    if (NOT TARGET thrift::thrift)
        if (NOT DEFINED ENV{NODE_RAPIDS_USE_LOCAL_DEPS_BUILD_DIRS})
            if (EXISTS "${CPM_BINARY_CACHE}/arrow-build/thrift_ep-install/lib/cmake/thrift/ThriftConfig.cmake")
                include("${CPM_BINARY_CACHE}/arrow-build/thrift_ep-install/lib/cmake/thrift/thriftTargets.cmake")
            endif()
        endif()
    endif()

    # When the engine was freshly added by CPM (not found pre-built), build
    # the Calcite algebra jars with Maven and stage them in the binary dir.
    if (blazingsql-engine_ADDED)
        execute_process(COMMAND mvn clean install --quiet
                                    -f pom.xml
                                    -Dmaven.test.skip=true
                                    -Dmaven.repo.local=${blazingsql-engine_BINARY_DIR}/blazing-protocol-mvn/
                        WORKING_DIRECTORY "${blazingsql-engine_SOURCE_DIR}/algebra")
        configure_file("${blazingsql-engine_SOURCE_DIR}/algebra/blazingdb-calcite-application/target/BlazingCalcite.jar"
                       "${blazingsql-engine_BINARY_DIR}/blazingsql-algebra.jar"
                       COPYONLY)
        configure_file("${blazingsql-engine_SOURCE_DIR}/algebra/blazingdb-calcite-core/target/blazingdb-calcite-core.jar"
                       "${blazingsql-engine_BINARY_DIR}/blazingsql-algebra-core.jar"
                       COPYONLY)
    endif()

    # If the engine was found pre-built, derive its build dir so the staged
    # jars from a previous configure can still be located.
    if(NOT blazingsql-engine_BINARY_DIR)
        set(blazingsql-engine_BINARY_DIR "${CPM_BINARY_CACHE}/blazingsql-engine-build")
        if(DEFINED ENV{NODE_RAPIDS_USE_LOCAL_DEPS_BUILD_DIRS})
            set(blazingsql-engine_BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/_deps/blazingsql-engine-build")
        endif()
    endif()

    # Copy the jars next to the addon's build output for runtime loading.
    configure_file("${blazingsql-engine_BINARY_DIR}/blazingsql-algebra.jar"
                   "${CMAKE_CURRENT_BINARY_DIR}/blazingsql-algebra.jar"
                   COPYONLY)
    configure_file("${blazingsql-engine_BINARY_DIR}/blazingsql-algebra-core.jar"
                   "${CMAKE_CURRENT_BINARY_DIR}/blazingsql-algebra-core.jar"
                   COPYONLY)

    # Rewrite CUDA::<lib> to CUDA::<lib>_static in the link interfaces.
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/link_utils.cmake)
    _statically_link_cuda_toolkit_libs(blazingdb::blazingsql-io)
    _statically_link_cuda_toolkit_libs(blazingdb::blazingsql-engine)

endfunction()

find_and_configure_blazingsql()
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/ConfigureCUSPATIAL.cmake
#=============================================================================
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)

# Fetches and configures libcuspatial as a static library via CPM.
function(find_and_configure_cuspatial)

    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_cpm.cmake)
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_version.cmake)
    # cuSpatial builds on cuDF; configure it (and its dependencies) first.
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/ConfigureCUDF.cmake)

    _get_rapidsai_module_version(cuspatial VERSION)

    # Remove stale CPM build dirs so an interrupted build is redone from scratch.
    _clean_build_dirs_if_not_fully_built(cuspatial libcuspatial)

    # Point find_package at existing CPM checkouts, if any.
    _set_thrust_dir_if_exists()
    _set_package_dir_if_exists(cudf cudf)
    _set_package_dir_if_exists(cuco cuco)
    _set_package_dir_if_exists(dlpack dlpack)
    _set_package_dir_if_exists(jitify jitify)
    _set_package_dir_if_exists(nvcomp nvcomp)
    _set_package_dir_if_exists(cuspatial cuspatial)

    if(NOT TARGET cuspatial::cuspatial)
        _get_major_minor_version(${VERSION} MAJOR_AND_MINOR)
        _get_update_disconnected_state(cuspatial ${VERSION} UPDATE_DISCONNECTED)
        CPMFindPackage(NAME     cuspatial
            VERSION             ${VERSION}
            # EXCLUDE_FROM_ALL  TRUE
            GIT_REPOSITORY      https://github.com/rapidsai/cuspatial.git
            GIT_TAG             branch-${MAJOR_AND_MINOR}
            GIT_SHALLOW         TRUE
            ${UPDATE_DISCONNECTED}
            SOURCE_SUBDIR       cpp
            OPTIONS             "BUILD_TESTS OFF"
                                "BUILD_BENCHMARKS OFF"
                                "BUILD_SHARED_LIBS OFF"
                                "CUDA_STATIC_RUNTIME ON"
                                "PER_THREAD_DEFAULT_STREAM ON"
                                "DISABLE_DEPRECATION_WARNING ON")
    endif()

    # Rewrite CUDA::<lib> to CUDA::<lib>_static in the link interface.
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/link_utils.cmake)
    _statically_link_cuda_toolkit_libs(cuspatial::cuspatial)

    set(cuspatial_VERSION "${cuspatial_VERSION}" PARENT_SCOPE)
endfunction()

find_and_configure_cuspatial()
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/cmake_policies.cmake
#=============================================================================
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)

# Opt in to NEW behavior for a handful of CMake policies. Each policy is both
# set for the current scope and exported via CMAKE_POLICY_DEFAULT_<policy> so
# that sub-projects added later (e.g. via CPM/add_subdirectory) inherit the
# same behavior. Every policy is guarded by if(POLICY ...) so this file stays
# compatible with older CMake versions that don't know the policy yet.

if(POLICY CMP0072)
    # Set OpenGL_GL_PREFERENCE to "GLVND"
    # https://cmake.org/cmake/help/latest/policy/CMP0072.html#policy:CMP0072
    cmake_policy(SET CMP0072 NEW)
    set(CMAKE_POLICY_DEFAULT_CMP0072 NEW)
endif()

if(POLICY CMP0102)
    # empty cache variable sets
    # https://cmake.org/cmake/help/latest/policy/CMP0102.html#policy:CMP0102
    cmake_policy(SET CMP0102 NEW)
    set(CMAKE_POLICY_DEFAULT_CMP0102 NEW)
endif()

if(POLICY CMP0124)
    # unset loop variables
    # https://cmake.org/cmake/help/latest/policy/CMP0124.html#policy:CMP0124
    cmake_policy(SET CMP0124 NEW)
    set(CMAKE_POLICY_DEFAULT_CMP0124 NEW)
endif()

if(POLICY CMP0126)
    # make set(CACHE) command not remove normal variable of the same name from the current scope
    # https://cmake.org/cmake/help/latest/policy/CMP0126.html
    cmake_policy(SET CMP0126 NEW)
    set(CMAKE_POLICY_DEFAULT_CMP0126 NEW)
endif()
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/ConfigureThrust.cmake
#=============================================================================
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)

# Fetches a pinned Thrust version via CPM, applying the rapids-cmake patch set
# (e.g. default-constructible transform iterators, faster scan compile times)
# through the generated PATCH_COMMAND.
#
# VERSION: the Thrust git tag to check out (e.g. "1.17.2"). find_package is
#          queried with "${VERSION}.0" since Thrust's CMake version carries a
#          trailing tweak digit.
function(find_and_configure_thrust VERSION)

    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_cpm.cmake)

    # Reuse a previously-downloaded Thrust checkout if present.
    _set_thrust_dir_if_exists()

    find_package(Thrust "${VERSION}.0" EXACT QUIET)

    if(NOT Thrust_FOUND)
      _get_update_disconnected_state(Thrust ${VERSION} UPDATE_DISCONNECTED)

      # Let rapids-cmake assemble the patch command for this Thrust version.
      include("${rapids-cmake-dir}/cpm/detail/generate_patch_command.cmake")
      rapids_cpm_generate_patch_command(Thrust ${VERSION} patch_command)
      message(STATUS "Thrust patch command: ${patch_command}")

      CPMAddPackage(NAME      Thrust
          VERSION             "${VERSION}.0"
          # EXCLUDE_FROM_ALL  TRUE
          GIT_REPOSITORY      https://github.com/NVIDIA/thrust.git
          GIT_TAG             ${VERSION}
          GIT_SHALLOW         TRUE
          ${UPDATE_DISCONNECTED}
          PATCH_COMMAND       ${patch_command}
      )
    endif()

    # Record (and force-overwrite) the checked-out version for other modules.
    set(CPM_THRUST_CURRENT_VERSION "${VERSION}.0" CACHE STRING "version of thrust we checked out" FORCE)
endfunction()

find_and_configure_thrust(1.17.2)
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/get_version.cmake
#=============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)

# Resolves the RAPIDS version to use for module `pkg` and stores it in the
# caller's `out_var_`. Resolution order (first match wins):
#   1. <pkg>_VERSION           -- per-package override
#   2. RAPIDS_VERSION          -- repo-wide override variable
#   3. ENV{RAPIDS_VERSION}     -- environment override; also cached into the
#                                 caller's RAPIDS_VERSION for later lookups
#   4. "22.12.00"              -- hard-coded default for this branch
function(_get_rapidsai_module_version pkg out_var_)
    # Start from the branch default and let overrides replace it.
    set(version_ "22.12.00")
    if(DEFINED ${pkg}_VERSION)
        set(version_ "${${pkg}_VERSION}")
    elseif(DEFINED RAPIDS_VERSION)
        set(version_ "${RAPIDS_VERSION}")
    elseif(DEFINED ENV{RAPIDS_VERSION})
        set(version_ "$ENV{RAPIDS_VERSION}")
        # Promote the env value to a variable so subsequent calls hit case 2.
        set(RAPIDS_VERSION "${version_}" PARENT_SCOPE)
    endif()
    set(${out_var_} "${version_}" PARENT_SCOPE)
endfunction()
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/EvalGpuArchs.cmake
#=============================================================================
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)

# Unset this first in case it's set to <empty_string>
unset(CMAKE_CUDA_ARCHITECTURES CACHE)

# Enable CUDA so we can invoke nvcc
enable_language(CUDA)

# Detects the compute capabilities of the GPUs on this machine by compiling
# and running a tiny CUDA program with `nvcc --run`, and stores the resulting
# semicolon-separated list (e.g. "70;86") in the caller's `gpu_archs` variable.
# If no device is usable, the program prints ${SUPPORTED_CUDA_ARCHITECTURES}
# (expanded into the generated source at configure time -- assumed to be set
# by the including scope) as the fallback list.
function(evaluate_gpu_archs gpu_archs)
  set(eval_file ${PROJECT_BINARY_DIR}/eval_gpu_archs.cu)
  set(eval_exe ${PROJECT_BINARY_DIR}/eval_gpu_archs)
  set(error_file ${PROJECT_BINARY_DIR}/eval_gpu_archs.stderr.log)
  # The detection program: one "<major><minor>" entry per distinct device
  # architecture; a std::set both de-duplicates and sorts them.
  file(WRITE ${eval_file}
    "
#include <cstdio>
#include <set>
#include <string>
using namespace std;
int main(int argc, char** argv) {
  set<string> archs;
  int nDevices;
  if((cudaGetDeviceCount(&nDevices) == cudaSuccess) && (nDevices > 0)) {
    for(int dev=0;dev<nDevices;++dev) {
      char buff[32];
      cudaDeviceProp prop;
      if(cudaGetDeviceProperties(&prop, dev) != cudaSuccess) continue;
      sprintf(buff, \"%d%d\", prop.major, prop.minor);
      archs.insert(buff);
    }
  }
  if(archs.empty()) {
    printf(\"${SUPPORTED_CUDA_ARCHITECTURES}\");
  } else {
    bool first = true;
    for(const auto& arch : archs) {
      printf(first ? \"%s\" : \";%s\", arch.c_str());
      first = false;
    }
  }
  printf(\"\\n\");
  return 0;
}
")
  # `--run` compiles and immediately executes; stdout is the arch list.
  # NOTE(review): a failed nvcc invocation leaves __gpu_archs empty (errors go
  # to ${error_file}) -- callers see an empty list rather than a hard error.
  execute_process(
    COMMAND ${CUDAToolkit_NVCC_EXECUTABLE}
      -std=c++11
      -o ${eval_exe}
      --run
      ${eval_file}
    OUTPUT_VARIABLE __gpu_archs
    OUTPUT_STRIP_TRAILING_WHITESPACE
    ERROR_FILE ${error_file})
  message("Auto detection of GPU architectures: ${__gpu_archs}")
  set(${gpu_archs} ${__gpu_archs} PARENT_SCOPE)
endfunction(evaluate_gpu_archs)
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/ConfigureRAFT.cmake
#=============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)

# Fetches and configures RAFT (with its distance and nn components, which pull
# in FAISS) as static libraries via CPM.
function(find_and_configure_raft)

    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_cpm.cmake)
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_version.cmake)
    # RAFT depends on RMM; configure it first.
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/ConfigureRMM.cmake)
    # include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/ConfigureOpenBLAS.cmake)

    _get_rapidsai_module_version(raft VERSION)

    # Remove stale CPM build dirs so an interrupted build is redone from scratch.
    _clean_build_dirs_if_not_fully_built(raft libraft_nn)
    _clean_build_dirs_if_not_fully_built(raft libraft_distance)

    # Point find_package at existing CPM checkouts, if any.
    _set_thrust_dir_if_exists()
    _set_package_dir_if_exists(rmm rmm)
    _set_package_dir_if_exists(raft raft)
    # _set_package_dir_if_exists(BLAS blas)
    _set_package_dir_if_exists(faiss faiss)

    if(NOT TARGET raft::raft)
        _get_major_minor_version(${VERSION} MAJOR_AND_MINOR)
        _get_update_disconnected_state(raft ${VERSION} UPDATE_DISCONNECTED)
        CPMFindPackage(NAME       raft
            VERSION               ${VERSION}
            # EXCLUDE_FROM_ALL    TRUE
            GIT_REPOSITORY        https://github.com/rapidsai/raft.git
            GIT_TAG               branch-${MAJOR_AND_MINOR}
            GIT_SHALLOW           TRUE
            SOURCE_SUBDIR         cpp
            FIND_PACKAGE_ARGUMENTS "COMPONENTS distance nn"
            ${UPDATE_DISCONNECTED}
            OPTIONS               "BUILD_TESTS OFF"
                                  # "BLA_VENDOR OpenBLAS"
                                  "BUILD_SHARED_LIBS OFF"
                                  "CUDA_STATIC_RUNTIME ON"
                                  "RAFT_USE_FAISS_STATIC ON"
                                  "RAFT_COMPILE_LIBRARIES ON")
    endif()

    # Make these -isystem so -Werror doesn't fail their builds
    _set_interface_include_dirs_as_system(faiss::faiss)

    # Rewrite CUDA::<lib> to CUDA::<lib>_static in the link interfaces.
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/link_utils.cmake)
    _statically_link_cuda_toolkit_libs(raft::raft)
    _statically_link_cuda_toolkit_libs(faiss::faiss)

    set(raft_VERSION "${raft_VERSION}" PARENT_SCOPE)
endfunction()

find_and_configure_raft()
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/ConfigureCUGRAPHOPS.cmake
#=============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)

# Fetches and configures the cugraph-ops library as a static library via CPM.
function(find_and_configure_cugraph_ops)

    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_cpm.cmake)
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_version.cmake)
    # cugraph-ops builds on RAFT; configure it first.
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/ConfigureRAFT.cmake)

    _get_rapidsai_module_version(cugraph-ops VERSION)

    # Remove stale CPM build dirs so an interrupted build is redone from scratch.
    _clean_build_dirs_if_not_fully_built(cugraph-ops libcugraph-ops++)

    # Point find_package at existing CPM checkouts, if any.
    _set_thrust_dir_if_exists()
    _set_package_dir_if_exists(raft raft)
    _set_package_dir_if_exists(cugraph-ops cugraph-ops)

    if(NOT TARGET cugraph-ops::cugraph-ops++)
        _get_major_minor_version(${VERSION} MAJOR_AND_MINOR)
        _get_update_disconnected_state(cugraph-ops ${VERSION} UPDATE_DISCONNECTED)
        # NOTE: cloned over SSH -- requires access to the private cugraph-ops repo.
        CPMFindPackage(NAME     cugraph-ops
            VERSION             ${VERSION}
            # EXCLUDE_FROM_ALL  TRUE
            GIT_REPOSITORY      "[email protected]:rapidsai/cugraph-ops.git"
            GIT_TAG             branch-${MAJOR_AND_MINOR}
            GIT_SHALLOW         TRUE
            ${UPDATE_DISCONNECTED}
            SOURCE_SUBDIR       cpp
            OPTIONS             "DETECT_CONDA_ENV OFF"
                                "BUILD_SHARED_LIBS OFF"
                                "CUDA_STATIC_RUNTIME ON"
                                "BUILD_CUGRAPH_OPS_CPP_TESTS OFF")
    endif()

    # Rewrite CUDA::<lib> to CUDA::<lib>_static in the link interface.
    # Fix: pass the namespaced target (consistent with the other Configure
    # modules). When cugraph-ops is imported via find_package only
    # `cugraph-ops::cugraph-ops++` exists, so the previous bare `cugraph-ops++`
    # name failed the helper's if(TARGET ...) guard and silently did nothing;
    # when built from source the helper resolves the alias to the real target.
    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/link_utils.cmake)
    _statically_link_cuda_toolkit_libs(cugraph-ops::cugraph-ops++)

    set(cugraph-ops_VERSION "${cugraph-ops_VERSION}" PARENT_SCOPE)
endfunction()

find_and_configure_cugraph_ops()
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/link_utils.cmake
#=============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)

# Rewrites `target`'s INTERFACE_LINK_LIBRARIES so every CUDA toolkit import
# target (CUDA::<lib>) is replaced with its static variant (CUDA::<lib>_static,
# or CUDA::cufft_static_nocallback for cufft). Alias targets are resolved to
# the real target first; non-existent targets are silently ignored.
function(_statically_link_cuda_toolkit_libs target)
  if(TARGET ${target})
    # Resolve an alias (e.g. the namespaced name of an in-tree target) to the
    # underlying target, since properties must be set on the real target.
    get_target_property(_aliased_target ${target} ALIASED_TARGET)
    if (_aliased_target)
      _statically_link_cuda_toolkit_libs(${_aliased_target})
      return()
    endif()
    get_target_property(_link_libs ${target} INTERFACE_LINK_LIBRARIES)
    foreach(_lib IN ITEMS blas cublas cublasLt cudart cufft cufftw cupti curand cusolver cusolver_lapack cusolver_metis cusparse lapack nppc nppial nppicc nppicom nppidei nppif nppig nppim nppist nppisu nppitc npps nvgraph nvrtc nvrtc_builtins)
      set(_suf "_static")
      # cufft's static archive without device callbacks has a special name.
      if(_lib STREQUAL "cufft")
        set(_suf "_static_nocallback")
      endif()
      # Text-replace CUDA::<lib> with CUDA::<lib>${_suf}. The trailing ';',
      # '>' and '"' anchors cover list separators, generator-expression
      # closers and quoted entries, and keep shorter names from matching
      # inside longer ones (e.g. "cublas" inside "cublasLt").
      # NOTE(review): an entry at the very end of the property string (with no
      # trailing separator) would not be rewritten -- appears not to occur for
      # the targets this is applied to, but confirm if adding new ones.
      string(REPLACE "CUDA::${_lib};" "CUDA::${_lib}${_suf};" _link_libs "${_link_libs}")
      string(REPLACE "CUDA::${_lib}>" "CUDA::${_lib}${_suf}>" _link_libs "${_link_libs}")
      string(REPLACE "CUDA::${_lib}\"" "CUDA::${_lib}${_suf}\"" _link_libs "${_link_libs}")
    endforeach()
    set_target_properties(${target} PROPERTIES INTERFACE_LINK_LIBRARIES "${_link_libs}")
  endif()
endfunction()
0
rapidsai_public_repos/node/modules/core/cmake
rapidsai_public_repos/node/modules/core/cmake/Modules/ConfigureOpenGLFW.cmake
#=============================================================================
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
include_guard(GLOBAL)

# Fetches and configures a GLFW build variant via CPM and exports it from the
# build tree.
#
# Keyword arguments:
#   VARIANT          suffix for the per-variant library/target names
#                    (targets: glfw3_<VARIANT> / glfw::<VARIANT>)
#   VERSION          version passed to rapids_cpm_find
#   GIT_REPO/GIT_TAG where to clone GLFW from
#   USE_SHARED_LIBS  build GLFW shared (ON) or static (OFF)
#   USE_WAYLAND      enable the Wayland backend
#   USE_EGLHEADLESS  enable the headless EGL backend
#   EXPORT_SET       rapids-cmake export set to register with
function(find_and_configure_glfw)

    set(options "")
    set(oneValueArgs VARIANT VERSION GIT_REPO GIT_TAG USE_SHARED_LIBS USE_WAYLAND USE_EGLHEADLESS EXPORT_SET)
    set(multiValueArgs "")

    cmake_parse_arguments(PKG "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})

    include(${CMAKE_CURRENT_FUNCTION_LIST_DIR}/get_cpm.cmake)

    # Each variant gets its own package name so multiple variants can coexist.
    set(GLFW_LIBRARY "glfw_${PKG_VARIANT}")

    _set_package_dir_if_exists(${GLFW_LIBRARY} ${GLFW_LIBRARY})

    rapids_cpm_find(${GLFW_LIBRARY} ${PKG_VERSION}
      GLOBAL_TARGETS     glfw3_${PKG_VARIANT} glfw::${PKG_VARIANT}
      BUILD_EXPORT_SET   ${PKG_EXPORT_SET}
      CPM_ARGS
        GIT_REPOSITORY   ${PKG_GIT_REPO}
        GIT_TAG          ${PKG_GIT_TAG}
        GIT_SHALLOW      TRUE
        GIT_CONFIG       "advice.detachedhead=false"
        OPTIONS          "GLFW_INSTALL OFF"
                         "GLFW_BUILD_DOCS OFF"
                         "GLFW_BUILD_TESTS OFF"
                         "GLFW_BUILD_EXAMPLES OFF"
                         "BUILD_SHARED_LIBS ${PKG_USE_SHARED_LIBS}"
                         "GLFW_USE_WAYLAND ${PKG_USE_WAYLAND}"
                         "GLFW_USE_EGLHEADLESS ${PKG_USE_EGLHEADLESS}"
    )

    if(${GLFW_LIBRARY}_ADDED)
        # Freshly built from source: export the target from the build tree so
        # dependent modules can find_package() it without installing.
        install(TARGETS glfw3_${PKG_VARIANT} EXPORT ${GLFW_LIBRARY}-exports)
        rapids_export(
            BUILD ${GLFW_LIBRARY}
            VERSION ${PKG_VERSION}
            EXPORT_SET ${GLFW_LIBRARY}-exports
            GLOBAL_TARGETS glfw3_${PKG_VARIANT} glfw::${PKG_VARIANT}
        )
        # NOTE(review): the package name exported here is "glew", not
        # "${GLFW_LIBRARY}" -- looks like it may be copied from a GLEW
        # configure module; confirm whether glfw consumers rely on this.
        rapids_export_package(BUILD glew ${PKG_EXPORT_SET})
        include("${rapids-cmake-dir}/export/find_package_root.cmake")
        rapids_export_find_package_root(BUILD glew [=[${CMAKE_CURRENT_LIST_DIR}]=] ${PKG_EXPORT_SET})
    endif()

    set(${GLFW_LIBRARY}_VERSION "${${GLFW_LIBRARY}_VERSION}" PARENT_SCOPE)
endfunction()
0
rapidsai_public_repos/node/modules/core
rapidsai_public_repos/node/modules/core/src/index.ts
// Copyright (c) 2020-2022, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. import * as Path from 'path'; export const modules_path = Path.resolve(__dirname, '..', '..', '..'); export const project_root_dir_path = Path.resolve(modules_path, '..'); export const cpp_core_include_path = Path.resolve(modules_path, 'core', 'include'); export const cmake_modules_path = Path.resolve(modules_path, 'core', 'cmake', 'Modules'); export const cpm_source_cache_path = Path.resolve(project_root_dir_path, '.cache', 'source'); export const cpm_binary_cache_path = Path.resolve(project_root_dir_path, '.cache', 'binary'); export {getCudaDriverVersion, getComputeCapabilities} from './addon'; export * as addon from './addon'; import {getComputeCapabilities} from './addon'; export function getArchFromComputeCapabilities() { try { const cc = new Set(typeof process.env.RAPIDSAI_GPU_ARCH !== 'undefined' ? [process.env.RAPIDSAI_GPU_ARCH] : getComputeCapabilities()); if (cc.size === 1) { switch ([...cc][0]) { case '60': return '60'; case '70': return '70'; case '75': return '75'; case '80': return '80'; case '86': return '86'; default: break; } } } catch { /**/ } return ''; }
0
rapidsai_public_repos/node/modules/core
rapidsai_public_repos/node/modules/core/src/addon.cpp
// Copyright (c) 2020-2022, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nv_node/addon.hpp> #include <nv_node/utilities/args.hpp> #include <nv_node/utilities/napi_to_cpp.hpp> #include <napi.h> #include <nvml.h> struct rapidsai_core : public nv::EnvLocalAddon, public Napi::Addon<rapidsai_core> { rapidsai_core(Napi::Env const& env, Napi::Object exports) : EnvLocalAddon(env, exports) { _after_init = Napi::Persistent(Napi::Function::New(env, [](Napi::CallbackInfo const& info) { auto env = info.Env(); if (nvmlInit_v2() != NVML_SUCCESS) { throw Napi::Error::New(env, "Failed to initialize nvml."); } })); DefineAddon( exports, { InstanceValue("_cpp_exports", _cpp_exports.Value()), InstanceMethod("init", &rapidsai_core::InitAddon), InstanceMethod<&rapidsai_core::get_cuda_driver_version>("getCudaDriverVersion"), InstanceMethod<&rapidsai_core::get_compute_capabilities>("getComputeCapabilities"), }); } Napi::Value get_cuda_driver_version(Napi::CallbackInfo const& info) { auto env = info.Env(); int32_t cuda_version{}; auto ary = Napi::Array::New(env, 2); auto res = nvmlSystemGetCudaDriverVersion(&cuda_version); if (res != NVML_SUCCESS) { throw Napi::Error::New(env, nvmlErrorString(res)); } if (cuda_version > 0) { ary.Set(0u, Napi::String::New(env, std::to_string(NVML_CUDA_DRIVER_VERSION_MAJOR(cuda_version)))); ary.Set(1u, Napi::String::New(env, std::to_string(NVML_CUDA_DRIVER_VERSION_MINOR(cuda_version)))); } else { ary.Set(0u, 
Napi::String::New(env, "")); ary.Set(1u, Napi::String::New(env, "")); } return ary; } Napi::Value get_compute_capabilities(Napi::CallbackInfo const& info) { auto env = info.Env(); nvmlDevice_t device{}; uint32_t arch_index{}; uint32_t device_count{}; int32_t major{}, minor{}; auto res = nvmlDeviceGetCount_v2(&device_count); if (res != NVML_SUCCESS) { throw Napi::Error::New(env, nvmlErrorString(res)); } std::vector<std::string> archs(device_count); for (uint32_t device_index{0}; device_index < device_count; ++device_index) { res = nvmlDeviceGetHandleByIndex_v2(device_index, &device); if (res != NVML_SUCCESS) { throw Napi::Error::New(env, nvmlErrorString(res)); } res = nvmlDeviceGetCudaComputeCapability(device, &major, &minor); if (res != NVML_SUCCESS) { throw Napi::Error::New(env, nvmlErrorString(res)); } archs[arch_index++] = std::to_string(major) + std::to_string(minor); } auto ary = Napi::Array::New(env, arch_index); arch_index = 0; for (auto const& arch : archs) { ary.Set(arch_index++, arch); } return ary; return Napi::Array::New(env, 0); } }; NODE_API_ADDON(rapidsai_core);
0
rapidsai_public_repos/node/modules/core
rapidsai_public_repos/node/modules/core/src/addon.ts
// Copyright (c) 2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Pure-JS stub used when the native module cannot be loaded; its query
// functions then report empty lists.
const stub = {
  _cpp_exports: null as any,
  getCudaDriverVersion: () => new Array<string>(),
  getComputeCapabilities: () => new Array<string>(),
};

let addon = stub;

try {
  // Prefer the real native bindings when they load and initialize cleanly.
  addon = require('bindings')('rapidsai_core.node').init() as typeof stub;
} catch {
  // native module unavailable — keep the stub
}

// eslint-disable-next-line @typescript-eslint/unbound-method
export const {
  _cpp_exports,
  getCudaDriverVersion,
  getComputeCapabilities,
} = addon;
0
rapidsai_public_repos/node/modules/core
rapidsai_public_repos/node/modules/core/test/rapids-core-tests.ts
// Placeholder spec: keeps the jest runner from failing on an empty test
// suite until real @rapidsai/core tests are added.
test('nothing', () => {});
0
rapidsai_public_repos/node/modules/core
rapidsai_public_repos/node/modules/core/test/tsconfig.json
{ "extends": "../tsconfig.json", "include": [ "../src/**/*.ts", "../test/**/*.ts" ], "compilerOptions": { "target": "esnext", "module": "commonjs", "allowJs": true, "importHelpers": false, "noEmitHelpers": false, "noEmitOnError": false, "sourceMap": false, "inlineSources": false, "inlineSourceMap": false, "downlevelIteration": false, "baseUrl": "../", "paths": { "@rapidsai/core": ["src/index"], "@rapidsai/core/*": ["src/*"] } } }
0
rapidsai_public_repos/node/modules
rapidsai_public_repos/node/modules/cuml/package.json
{ "name": "@rapidsai/cuml", "version": "22.12.2", "description": "cuML - NVIDIA RAPIDS Machine Learning Library", "license": "Apache-2.0", "main": "index.js", "types": "build/js", "author": "NVIDIA, Inc. (https://nvidia.com/)", "maintainers": [ "Paul Taylor <[email protected]>" ], "homepage": "https://github.com/rapidsai/node/tree/main/modules/cuml#readme", "bugs": { "url": "https://github.com/rapidsai/node/issues" }, "repository": { "type": "git", "url": "git+https://github.com/rapidsai/node.git" }, "scripts": { "install": "npx rapidsai-install-native-module", "clean": "rimraf build doc compile_commands.json", "doc": "rimraf doc && typedoc --options typedoc.js", "test": "node -r dotenv/config node_modules/.bin/jest -c jest.config.js", "build": "yarn tsc:build && yarn cpp:build", "build:debug": "yarn tsc:build && yarn cpp:build:debug", "compile": "yarn tsc:build && yarn cpp:compile", "compile:debug": "yarn tsc:build && yarn cpp:compile:debug", "rebuild": "yarn tsc:build && yarn cpp:rebuild", "rebuild:debug": "yarn tsc:build && yarn cpp:rebuild:debug", "cpp:clean": "npx cmake-js clean -O build/Release", "cpp:clean:debug": "npx cmake-js clean -O build/Debug", "cpp:build": "npx cmake-js build -g -O build/Release", "cpp:build:debug": "npx cmake-js build -g -D -O build/Debug", "cpp:compile": "npx cmake-js compile -g -O build/Release", "postcpp:compile": "npx rapidsai-merge-compile-commands", "cpp:compile:debug": "npx cmake-js compile -g -D -O build/Debug", "postcpp:compile:debug": "npx rapidsai-merge-compile-commands", "cpp:configure": "npx cmake-js configure -g -O build/Release --", "postcpp:configure": "npx rapidsai-merge-compile-commands", "cpp:configure:debug": "npx cmake-js configure -g -D -O build/Debug --", "postcpp:configure:debug": "npx rapidsai-merge-compile-commands", "cpp:rebuild": "npx cmake-js rebuild -g -O build/Release", "postcpp:rebuild": "npx rapidsai-merge-compile-commands", "cpp:rebuild:debug": "npx cmake-js rebuild -g -D -O build/Debug", 
"postcpp:rebuild:debug": "npx rapidsai-merge-compile-commands", "cpp:reconfigure": "npx cmake-js reconfigure -g -O build/Release", "postcpp:reconfigure": "npx rapidsai-merge-compile-commands", "cpp:reconfigure:debug": "npx cmake-js reconfigure -g -D -O build/Debug", "postcpp:reconfigure:debug": "npx rapidsai-merge-compile-commands", "tsc:clean": "rimraf build/js", "tsc:build": "yarn tsc:clean && tsc -p ./tsconfig.json", "tsc:watch": "yarn tsc:clean && tsc -p ./tsconfig.json -w", "dev:cpack:enabled": "echo $npm_package_name" }, "dependencies": { "@rapidsai/cudf": "~22.12.2" }, "files": [ "LICENSE", "README.md", "index.js", "package.json", "CMakeLists.txt", "src/node_cuml", "build/js" ] }
0
rapidsai_public_repos/node/modules
rapidsai_public_repos/node/modules/cuml/index.js
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. module.exports = require('./build/js/index');
0
rapidsai_public_repos/node/modules
rapidsai_public_repos/node/modules/cuml/jest.config.js
// Copyright (c) 2020-2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. try { require('dotenv').config(); } catch (e) {} module.exports = { 'verbose': true, 'testEnvironment': 'node', 'maxWorkers': process.env.PARALLEL_LEVEL || 1, 'globals': {'ts-jest': {'diagnostics': false, 'tsconfig': 'test/tsconfig.json'}}, 'rootDir': './', 'roots': ['<rootDir>/test/'], 'moduleFileExtensions': ['js', 'ts', 'tsx'], 'coverageReporters': ['lcov'], 'coveragePathIgnorePatterns': ['test\\/.*\\.(ts|tsx|js)$', '/node_modules/'], 'transform': {'^.+\\.jsx?$': 'ts-jest', '^.+\\.tsx?$': 'ts-jest'}, 'transformIgnorePatterns': ['/build/(js|Debug|Release)/*$', '/node_modules/(?!web-stream-tools).+\\.js$'], 'testRegex': '(.*(-|\\.)(test|spec)s?)\\.(ts|tsx|js)$', 'preset': 'ts-jest', 'testMatch': null, 'moduleNameMapper': { '^@rapidsai\/cuml(.*)': '<rootDir>/src/$1', '^\.\.\/(Debug|Release)\/(rapidsai_cuml.node)$': '<rootDir>/build/$1/$2', } };
0
rapidsai_public_repos/node/modules
rapidsai_public_repos/node/modules/cuml/CMakeLists.txt
#=============================================================================
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
cmake_minimum_required(VERSION 3.24.1 FATAL_ERROR)

# Emit compile_commands.json so clangd and other tooling can see the flags.
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

unset(CMAKE_LIBRARY_OUTPUT_DIRECTORY)
unset(CMAKE_LIBRARY_OUTPUT_DIRECTORY CACHE)

option(NODE_RAPIDS_USE_SCCACHE "Enable caching compilation results with sccache" ON)

###################################################################################################
# - cmake modules ---------------------------------------------------------------------------------

# Locate the shared node-rapids CMake modules shipped with @rapidsai/core.
execute_process(COMMAND node -p
                        "require('@rapidsai/core').cmake_modules_path"
                WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
                OUTPUT_VARIABLE NODE_RAPIDS_CMAKE_MODULES_PATH
                OUTPUT_STRIP_TRAILING_WHITESPACE)

include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/cmake_policies.cmake")

project(rapidsai_cuml
        VERSION $ENV{npm_package_version}
        LANGUAGES C CXX)

# cuML bindings are not built on aarch64; bail out early on that platform.
if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
  return()
endif()

# Resolve the install locations of the sibling node-rapids packages whose
# native modules this addon links against.
execute_process(COMMAND node -p
                        "require('path').dirname(require.resolve('@rapidsai/core'))"
                WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
                OUTPUT_VARIABLE NODE_RAPIDS_CORE_MODULE_PATH
                OUTPUT_STRIP_TRAILING_WHITESPACE)

execute_process(COMMAND node -p
                        "require('path').dirname(require.resolve('@rapidsai/cuda'))"
                WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
                OUTPUT_VARIABLE NODE_RAPIDS_CUDA_MODULE_PATH
                OUTPUT_STRIP_TRAILING_WHITESPACE)

execute_process(COMMAND node -p
                        "require('path').dirname(require.resolve('@rapidsai/rmm'))"
                WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
                OUTPUT_VARIABLE NODE_RAPIDS_RMM_MODULE_PATH
                OUTPUT_STRIP_TRAILING_WHITESPACE)

execute_process(COMMAND node -p
                        "require('path').dirname(require.resolve('@rapidsai/cudf'))"
                WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
                OUTPUT_VARIABLE NODE_RAPIDS_CUDF_MODULE_PATH
                OUTPUT_STRIP_TRAILING_WHITESPACE)

include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureCXX.cmake")
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureCUDA.cmake")
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureNapi.cmake")
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/install_utils.cmake")

# When local dependency build dirs are not provided, configure libcudf here.
if(NOT DEFINED ENV{NODE_RAPIDS_USE_LOCAL_DEPS_BUILD_DIRS})
  include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureCUDF.cmake")
endif()
include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureCUML.cmake")

###################################################################################################
# - rapidsai_cuml target ---------------------------------------------------------------------------

file(GLOB_RECURSE NODE_CUML_CPP_FILES "${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp")
file(GLOB_RECURSE NODE_CUML_CUDA_FILES "${CMAKE_CURRENT_SOURCE_DIR}/src/*.cu")

list(APPEND NODE_CUML_SRC_FILES ${NODE_CUML_CPP_FILES})
list(APPEND NODE_CUML_SRC_FILES ${NODE_CUML_CUDA_FILES})

add_library(${PROJECT_NAME} SHARED ${NODE_CUML_SRC_FILES} ${CMAKE_JS_SRC})

# Produce `rapidsai_cuml.node` (no "lib" prefix) loadable by Node.js, with
# $ORIGIN rpaths so sibling .node libraries resolve relative to this one.
set_target_properties(${PROJECT_NAME}
    PROPERTIES PREFIX                              ""
               SUFFIX                              ".node"
               BUILD_RPATH                         "\$ORIGIN"
               INSTALL_RPATH                       "\$ORIGIN"
               CXX_STANDARD                        17
               CXX_STANDARD_REQUIRED               ON
               CUDA_STANDARD                       17
               CUDA_STANDARD_REQUIRED              ON
               NO_SYSTEM_FROM_IMPORTED             ON
               POSITION_INDEPENDENT_CODE           ON
               INTERFACE_POSITION_INDEPENDENT_CODE ON
)

target_compile_options(${PROJECT_NAME}
    PRIVATE "$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:C>:${NODE_RAPIDS_CMAKE_C_FLAGS}>>"
            "$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:CXX>:${NODE_RAPIDS_CMAKE_CXX_FLAGS}>>"
            "$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:CUDA>:${NODE_RAPIDS_CMAKE_CUDA_FLAGS}>>"
)

target_compile_definitions(${PROJECT_NAME}
    PUBLIC "$<$<COMPILE_LANGUAGE:CXX>:CUDA_API_PER_THREAD_DEFAULT_STREAM>"
           "$<$<COMPILE_LANGUAGE:CUDA>:CUDA_API_PER_THREAD_DEFAULT_STREAM>"
)

# Use the imported cudf target when configured in this build; otherwise fall
# back to the static library and headers from the @rapidsai/cudf build tree.
if(TARGET cudf::cudf)
  set(LIBCUDF_LIBRARY cudf::cudf)
  set(LIBCUDF_INCLUDE_DIRS "")
else()
  set(LIBCUDF_LIBRARY "${NODE_RAPIDS_CUDF_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/_deps/cudf-build/libcudf.a")
  list(APPEND LIBCUDF_INCLUDE_DIRS "${NODE_RAPIDS_CUDF_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/_deps/cudf-build/include")
  list(APPEND LIBCUDF_INCLUDE_DIRS "${NODE_RAPIDS_CUDF_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/_deps/cudf-src/cpp/include")
endif()

target_include_directories(${PROJECT_NAME}
    PUBLIC "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src>"
           "$<BUILD_INTERFACE:${NODE_RAPIDS_CUDF_MODULE_PATH}/src>"
           "$<BUILD_INTERFACE:${NODE_RAPIDS_RMM_MODULE_PATH}/src>"
           "$<BUILD_INTERFACE:${NODE_RAPIDS_CUDA_MODULE_PATH}/src>"
           "$<BUILD_INTERFACE:${RAPIDS_CORE_INCLUDE_DIR}>"
           "$<BUILD_INTERFACE:${LIBCUDF_INCLUDE_DIRS}>"
           "$<BUILD_INTERFACE:${NAPI_INCLUDE_DIRS}>"
)

target_link_libraries(${PROJECT_NAME}
    PUBLIC ${CMAKE_JS_LIB}
           raft::raft
           raft::nn
           raft::distance
           cuml::cuml++
           "${LIBCUDF_LIBRARY}"
           "${NODE_RAPIDS_CUDF_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_cudf.node"
           "${NODE_RAPIDS_RMM_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_rmm.node"
           "${NODE_RAPIDS_CUDA_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_cuda.node"
           "${NODE_RAPIDS_CORE_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_core.node")

include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/cuda_arch_helpers.cmake")
generate_arch_specific_custom_targets(
  NAME         ${PROJECT_NAME}
  DEPENDENCIES "cudf::cudf"
               "cuml::cuml++"
               "faiss::faiss"
               "raft::raft_nn_lib"
               "raft::raft_distance_lib"
)

generate_install_rules(
  NAME ${PROJECT_NAME}
  CUDA_ARCHITECTURES ${CMAKE_CUDA_ARCHITECTURES})

# Create a symlink to compile_commands.json for the llvm-vs-code-extensions.vscode-clangd plugin
execute_process(COMMAND
    ${CMAKE_COMMAND} -E create_symlink
    ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
    ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json)
0
rapidsai_public_repos/node/modules
rapidsai_public_repos/node/modules/cuml/README.md
# <div align="left"><img src="https://rapids.ai/assets/images/rapids_logo.png" width="90px"/>&nbsp; node-rapids cuML - GPU Machine Learning Algorithms</div>

### Installation

`npm install @rapidsai/cuml`

### About

These are the js bindings for [cuML](https://github.com/rapidsai/cuml), a suite of libraries that implement machine learning algorithms and mathematical primitives that share compatible APIs with other RAPIDS projects.

For the detailed node-cuML API, see our [API Documentation](https://rapidsai.github.io/node-rapids/modules/cuml_src.html).
0
rapidsai_public_repos/node/modules
rapidsai_public_repos/node/modules/cuml/tsconfig.json
{ "include": ["src"], "exclude": ["node_modules"], "compilerOptions": { "baseUrl": "./", "paths": { "@rapidsai/cuml": ["src/index"], "@rapidsai/cuml/*": ["src/*"] }, "target": "ESNEXT", "module": "commonjs", "outDir": "./build/js", /* Decorators */ "experimentalDecorators": false, /* Basic stuff */ "moduleResolution": "node", "skipLibCheck": true, "skipDefaultLibCheck": true, "lib": ["dom", "esnext", "esnext.asynciterable"], /* Control what is emitted */ "declaration": true, "declarationMap": true, "noEmitOnError": true, "removeComments": false, "downlevelIteration": true, /* Create inline sourcemaps with sources */ "sourceMap": false, "inlineSources": true, "inlineSourceMap": true, /* The most restrictive settings possible */ "strict": true, "importHelpers": true, "noEmitHelpers": true, "noImplicitAny": true, "noUnusedLocals": true, "noImplicitReturns": true, "allowUnusedLabels": false, "noUnusedParameters": true, "allowUnreachableCode": false, "noFallthroughCasesInSwitch": true, "forceConsistentCasingInFileNames": true } }
0
rapidsai_public_repos/node/modules
rapidsai_public_repos/node/modules/cuml/LICENSE
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2019 NVIDIA Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-------------------------------------------------------------------------------- include/visit_struct/visit_struct.hpp (modified): BSL 1.0 Boost Software License - Version 1.0 - August 17th, 2003 Permission is hereby granted, free of charge, to any person or organization obtaining a copy of the software and accompanying documentation covered by this license (the "Software") to use, reproduce, display, distribute, execute, and transmit the Software, and to prepare derivative works of the Software, and to permit third-parties to whom the Software is furnished to do so, all subject to the following: The copyright notices in the Software and this entire statement, including the above license grant, this restriction and the following disclaimer, must be included in all copies of the Software, in whole or in part, and all derivative works of the Software, unless such copies or derivative works are solely in the form of machine-executable object code generated by a source language processor. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
rapidsai_public_repos/node/modules
rapidsai_public_repos/node/modules/cuml/typedoc.js
// TypeDoc configuration for the @rapidsai/cuml API reference
// (used by `yarn doc`, which emits HTML into ./doc).
module.exports = {
  entryPoints: ['src/index.ts'],
  out: 'doc',
  name: '@rapidsai/cuml',
  tsconfig: 'tsconfig.json',
  // Only document the public API surface.
  excludePrivate: true,
  excludeProtected: true,
  excludeExternals: true,
};
0
rapidsai_public_repos/node/modules/cuml
rapidsai_public_repos/node/modules/cuml/.vscode/launch.json
{ // Use IntelliSense to learn about possible attributes. // Hover to view descriptions of existing attributes. // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 "version": "0.2.0", "compounds": [ { "name": "Debug Tests (TS and C++)", "configurations": [ "Debug Tests (launch gdb)", // "Debug Tests (launch lldb)", "Debug Tests (attach node)", ] } ], "configurations": [ { "name": "Debug Tests (TS only)", "type": "node", "request": "launch", "cwd": "${workspaceFolder}", "console": "integratedTerminal", "internalConsoleOptions": "neverOpen", "program": "${workspaceFolder}/node_modules/.bin/jest", "skipFiles": [ "<node_internals>/**", "${workspaceFolder}/node_modules/**" ], "env": { "NODE_NO_WARNINGS": "1", "NODE_ENV": "production", "READABLE_STREAM": "disable", }, "args": [ "--verbose", "--runInBand", "-c", "jest.config.js", "${input:TEST_FILE}" ] }, // { // "name": "Debug Tests (launch lldb)", // // hide the individual configurations from the debug dropdown list // "presentation": { "hidden": true }, // "type": "lldb", // "request": "launch", // "stdio": null, // "cwd": "${workspaceFolder}", // "preLaunchTask": "cpp:ensure:debug:build", // "env": { // "NODE_DEBUG": "1", // "NODE_NO_WARNINGS": "1", // "NODE_ENV": "production", // "READABLE_STREAM": "disable", // }, // "stopOnEntry": false, // "terminal": "console", // "program": "${input:NODE_BINARY}", // "initCommands": [ // "settings set target.disable-aslr false", // ], // "sourceLanguages": ["cpp", "cuda", "javascript"], // "args": [ // "--inspect=9229", // "--expose-internals", // "${workspaceFolder}/node_modules/.bin/jest", // "--verbose", // "--runInBand", // "-c", // "jest.config.js", // "${input:TEST_FILE}" // ], // }, { "name": "Debug Tests (launch gdb)", // hide the individual configurations from the debug dropdown list "presentation": { "hidden": true }, "type": "cppdbg", "request": "launch", "stopAtEntry": false, "externalConsole": false, "cwd": "${workspaceFolder}", "envFile": 
"${workspaceFolder}/.env", "MIMode": "gdb", "miDebuggerPath": "/usr/bin/gdb", "setupCommands": [ { "description": "Enable pretty-printing for gdb", "text": "-enable-pretty-printing", "ignoreFailures": true } ], "program": "${input:NODE_BINARY}", "environment": [ { "name": "NODE_DEBUG", "value": "1" }, { "name": "NODE_NO_WARNINGS", "value": "1" }, { "name": "NODE_ENV", "value": "production" }, { "name": "READABLE_STREAM", "value": "disable" }, ], "args": [ "--inspect=9229", "--expose-internals", "${workspaceFolder}/node_modules/.bin/jest", "--verbose", "--runInBand", "-c", "jest.config.js", "${input:TEST_FILE}" ], }, { "name": "Debug Tests (attach node)", "type": "node", "request": "attach", // hide the individual configurations from the debug dropdown list "presentation": { "hidden": true }, "port": 9229, "timeout": 60000, "cwd": "${workspaceFolder}", "skipFiles": [ "<node_internals>/**", "${workspaceFolder}/node_modules/**" ], }, ], "inputs": [ { "type": "command", "id": "NODE_BINARY", "command": "shellCommand.execute", "args": { "description": "path to node", "command": "which node", "useFirstResult": true, } }, { "type": "command", "id": "TEST_FILE", "command": "shellCommand.execute", "args": { "cwd": "${workspaceFolder}/modules/cuml", "description": "Select a file to debug", "command": "./node_modules/.bin/jest --listTests | sed -r \"s@$PWD/test/@@g\"", } }, ], }
0
rapidsai_public_repos/node/modules/cuml
rapidsai_public_repos/node/modules/cuml/.vscode/tasks.json
{ "version": "2.0.0", "tasks": [ { "type": "shell", "label": "Rebuild node_cuml TS and C++ (slow)", "group": { "kind": "build", "isDefault": true, }, "command": "if [[ \"${input:CMAKE_BUILD_TYPE}\" == \"Release\" ]]; then yarn rebuild; else yarn rebuild:debug; fi", "problemMatcher": [ "$tsc", { "owner": "cuda", "fileLocation": ["relative", "${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}"], "pattern": { "file": 1, "line": 2, "severity": 3, "message": 4, "regexp": "^(.*)\\((\\d+)\\):\\s+(error|warning|note|info):\\s+(.*)$" } }, { "owner": "cpp", "fileLocation": ["relative", "${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}"], "pattern": { "file": 1, "line": 2, "severity": 4, "message": 5, "regexp": "^(.*):(\\d+):(\\d+):\\s+(error|warning|note|info):\\s+(.*)$" } }, ], }, { "type": "npm", "group": "build", "label": "Recompile node_cuml TS (fast)", "script": "tsc:build", "detail": "yarn tsc:build", "problemMatcher": ["$tsc"], }, { "type": "shell", "group": "build", "label": "Recompile node_cuml C++ (fast)", "command": "ninja -C ${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}", "problemMatcher": [ { "owner": "cuda", "fileLocation": ["relative", "${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}"], "pattern": { "file": 1, "line": 2, "severity": 3, "message": 4, "regexp": "^(.*)\\((\\d+)\\):\\s+(error|warning|note|info):\\s+(.*)$" } }, { "owner": "cpp", "fileLocation": ["relative", "${workspaceFolder}/build/${input:CMAKE_BUILD_TYPE}"], "pattern": { "file": 1, "line": 2, "severity": 4, "message": 5, "regexp": "^(.*):(\\d+):(\\d+):\\s+(error|warning|note|info):\\s+(.*)$" } }, ], }, ], "inputs": [ { "type": "pickString", "default": "Release", "id": "CMAKE_BUILD_TYPE", "options": ["Release", "Debug"], "description": "C++ Build Type", } ] }
0
rapidsai_public_repos/node/modules/cuml
rapidsai_public_repos/node/modules/cuml/src/umap_base.ts
// Copyright (c) 2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import {DataType, Numeric} from '@rapidsai/cudf';
import {DeviceBuffer} from '@rapidsai/rmm';

import * as CUML from './addon';
import {COOInterface} from './coo';
import {CUMLLogLevels, MetricType} from './mappings';

/**
 * UMAPParams parameters from https://docs.rapids.ai/api/cuml/stable/api.html#cuml.UMAP
 *
 * Every field is optional; unset fields fall back to the native addon's
 * defaults (see `update_params` in umap.cpp).
 */
export type UMAPParams = {
  nNeighbors?: number,
  nComponents?: number,
  nEpochs?: number,
  learningRate?: number,
  minDist?: number,
  spread?: number,
  setOpMixRatio?: number,
  localConnectivity?: number,
  repulsionStrength?: number,
  negativeSampleRate?: number,
  transformQueueSize?: number,
  verbosity?: keyof typeof CUMLLogLevels,
  a?: number,
  b?: number,
  initialAlpha?: number,
  init?: number,
  targetNNeighbors?: number,
  targetMetric?: keyof typeof MetricType,
  targetWeight?: number,
  randomState?: number,
};

/**
 * Options bag passed to the native `UMAPInterface.fit` call.
 *
 * `features` is either a flattened device buffer or a host array laid out as
 * [x1, y1, ..., x2, y2, ...] with `nFeatures` values per sample.
 */
export type FitProps<T extends Numeric = any, R extends Numeric = any> = {
  features: DeviceBuffer|(T['scalarType']|null|undefined)[],
  featuresType: T,
  nSamples: number,
  nFeatures: number,
  convertDType: boolean,
  embeddings: DeviceBuffer,
  graph: COOInterface,
  target?: DeviceBuffer|(R['scalarType']|null|undefined)[],
  targetType?: R,
  knnIndices?: DeviceBuffer,
  knnDists?: DeviceBuffer,
};

/**
 * Options bag passed to the native `UMAPInterface.refine` call; `coo` is the
 * previously computed fuzzy-simplicial-set graph to refine against.
 */
export type RefineProps<T extends Numeric = any> = {
  features: DeviceBuffer|(T['scalarType']|null|undefined)[],
  featuresType: T,
  nSamples: number,
  nFeatures: number,
  convertDType: boolean,
  embeddings: DeviceBuffer,
  coo: COOInterface
};

/**
 * Options bag passed to the native `UMAPInterface.graph` call, which builds
 * the COO graph without computing embeddings.
 */
export type GetGraphProps<T extends Numeric = any, R extends Numeric = any> = {
  features: DeviceBuffer|(T['scalarType']|null|undefined)[],
  featuresType: T,
  nSamples: number,
  nFeatures: number,
  knnIndices?: DeviceBuffer,
  knnDists?: DeviceBuffer,
  convertDType: boolean,
  target?: DeviceBuffer|(R['scalarType']|null|undefined)[],
  targetType?: R
};

/**
 * Options bag passed to the native `UMAPInterface.transform` call.
 *
 * NOTE(review): `featuresType` is typed as the broad `DataType` here while the
 * sibling prop types use the generic `T` — looks like an oversight; confirm
 * before tightening, since narrowing it is a (compile-time) breaking change.
 */
export type transformProps<T extends Numeric = any> = {
  features: DeviceBuffer|(T['scalarType']|null|undefined)[],
  featuresType: DataType,
  nSamples: number,
  nFeatures: number,
  convertDType: boolean,
  embeddings: DeviceBuffer,
  transformed: DeviceBuffer,
  knnIndices?: DeviceBuffer,
  knnDists?: DeviceBuffer,
};

/** Constructor shape of the native UMAP class exported by the addon. */
export interface UMAPConstructor {
  new(options?: UMAPParams): UMAPInterface;
}

/**
 * Surface of the native UMAP object. All readonly accessors mirror the
 * hyper-parameters held on the C++ side; the methods return raw
 * `DeviceBuffer` embeddings (or a COO graph) that the TS wrappers re-wrap.
 */
export interface UMAPInterface {
  readonly nNeighbors: number;
  readonly nComponents: number;
  readonly nEpochs: number;
  readonly learningRate: number;
  readonly minDist: number;
  readonly spread: number;
  readonly setOpMixRatio: number;
  readonly localConnectivity: number;
  readonly repulsionStrength: number;
  readonly negativeSampleRate: number;
  readonly transformQueueSize: number;
  readonly verbosity: number;
  readonly a: number;
  readonly b: number;
  readonly initialAlpha: number;
  readonly init: number;
  readonly targetNNeighbors: number;
  readonly targetMetric: number;
  readonly targetWeight: number;
  readonly randomState: number;

  fit<T extends Numeric, R extends Numeric>(options?: FitProps<T, R>): DeviceBuffer;

  transform<T extends Numeric>(options?: transformProps<T>): DeviceBuffer;

  refine<T extends Numeric>(options?: RefineProps<T>): DeviceBuffer;

  graph<T extends Numeric, R extends Numeric>(options?: GetGraphProps<T, R>): COOInterface;
}

// The concrete native implementation exposed by the compiled addon.
export const UMAPBase: UMAPConstructor = CUML.UMAP;
0
rapidsai_public_repos/node/modules/cuml
rapidsai_public_repos/node/modules/cuml/src/index.ts
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. export * as addon from './addon'; export * from './umap'; export * from './metrics'; export * from './mappings';
0
rapidsai_public_repos/node/modules/cuml
rapidsai_public_repos/node/modules/cuml/src/umap.ts
// Copyright (c) 2021-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import {
  Bool8,
  DataFrame,
  Float32,
  Int16,
  Int32,
  Int64,
  Int8,
  Integral,
  Series,
  Uint16,
  Uint32,
  Uint64,
  Uint8
} from '@rapidsai/cudf';
import {DeviceBuffer} from '@rapidsai/rmm';
import {compareTypes} from 'apache-arrow/visitor/typecomparator';

import {COO, COOInterface} from './coo';
import {CUMLLogLevels, MetricType} from './mappings';
import {UMAPBase, UMAPInterface, UMAPParams} from './umap_base';
import {dataframeToSeries, seriesToDataFrame} from './utilities/array_utils';

// Column dtypes accepted as UMAP inputs.
export type Numeric = Integral|Float32;

// Runtime dtype instances compared against input columns in `_check_type`.
const allowedTypes = [
  new Int8,
  new Int16,
  new Int32,
  new Uint8,
  new Uint16,
  new Uint32,
  new Float32,
  new Bool8,
  new Int64,
  new Uint64
];

export type outputType = 'dataframe'|'series'|'devicebuffer';

/**
 * View over a flattened embeddings DeviceBuffer that can materialize the data
 * as a Series, a DataFrame (one column per component), or the raw buffer.
 */
class Embeddings<T extends Numeric> {
  protected _embeddings: DeviceBuffer;  // flattened [nSamples * nFeatures] values
  protected _dType: T;                  // dtype used when wrapping as a Series
  protected nFeatures: number;          // number of embedding components per sample

  constructor(_embeddings: DeviceBuffer, nFeatures: number, dType: T) {
    this._embeddings = _embeddings;
    this.nFeatures   = nFeatures;
    this._dType      = dType;
  }

  /** Wrap the buffer as a single flat cuDF Series (zero-copy). */
  public asSeries() { return Series.new({type: this._dType, data: this._embeddings}); }

  /** Reshape the flat Series into a DataFrame with `nFeatures` columns. */
  public asDataFrame() { return seriesToDataFrame(this.asSeries(), this.nFeatures); }

  /** Return the underlying rmm DeviceBuffer as-is. */
  public asDeviceBuffer() { return this._embeddings; }
}

export class UMAP {
  protected _umap: UMAPInterface;
  // Empty until a fit/refine populates it (FittedUMAP receives it via ctor).
  protected _embeddings = new DeviceBuffer();

  /**
   * Initialize a UMAP object
   * @param input: UMAPParams
   *
   * @example:
   * ```typescript
   * import {UMAP} from 'rapidsai/cuml';
   *
   * const umap = new UMAP({nComponents: 2});
   * ```
   */
  constructor(input: UMAPParams = {}) { this._umap = new UMAPBase(input); }

  // Allocate a zero-filled DeviceBuffer sized nSamples * nComponents for the
  // native code to write embeddings into.
  protected _generate_embeddings<D extends Numeric>(nSamples: number, dtype: D) {
    return Series
      .sequence({
        type: dtype,
        size: nSamples * this._umap.nComponents,
        init: 0,
        step: 0,
      })
      ._col.data;
  }

  // throw runtime error if type isn't Integral | Float32
  protected _check_type<D extends Numeric>(features: D) {
    if (!allowedTypes.some((type) => compareTypes(features, type))) {
      throw new Error(
        `Expected input to be of type in [Integral, Float32] but got ${features.toString()}`);
    }
  }

  // Pick the dtype used when wrapping raw host arrays: Float32 when converting
  // (or for plain numbers), Int64 only for bigint inputs with convertDType off.
  protected _resolveType(convertDType: boolean, value: number|bigint|null|undefined) {
    return convertDType ? new Float32 : (typeof value === 'bigint') ? new Int64 : new Float32;
  }

  /**
   * Fit features into an embedded space
   *
   * @param features cuDF Series containing floats or doubles in the format [x1, y1, z1, x2, y2,
   * z2...] for features x, y & z.
   *
   * @param target cuDF Series containing target values
   * @param convertDType When set to True, the method will automatically convert the inputs to
   * float32
   *
   * @param nFeatures number of properties in the input features, if features is of the format
   * [x1,y1,x2,y2...]
   * @returns FittedUMAP object with updated embeddings
   */
  fitSeries<T extends Series<Numeric>, R extends Series<Numeric>>(features: T,
                                                                  target: null|R,
                                                                  nFeatures   = 1,
                                                                  convertDType = true) {
    // runtime type check
    this._check_type(features.type);
    const nSamples = Math.floor(features.length / nFeatures);
    let options    = {
      features: features._col.data,
      featuresType: features.type,
      nSamples: nSamples,
      nFeatures: nFeatures,
      convertDType: convertDType,
      embeddings: this._generate_embeddings(nSamples, features.type)
    };
    // Supervised UMAP: forward the target column/type only when provided.
    if (target !== null) {
      options = {...options, ...{target: target._col.data, targetType: target.type}};
    }
    // Build the fuzzy-simplicial-set graph first, then fit against it; the
    // graph is kept on the FittedUMAP so later refine() calls can reuse it.
    const graph = this._umap.graph(options);
    return new FittedUMAP(this.getUMAPParams(), this._umap.fit({...options, graph}), graph);
  }

  /**
   * Fit features into an embedded space
   * @param features Dense or sparse matrix containing floats or doubles. Acceptable dense formats:
   * cuDF DataFrame
   *
   * @param target cuDF Series containing target values
   * @param convertDType When set to True, the method will automatically convert the inputs to
   * float32
   * @returns FittedUMAP object with updated embeddings
   */
  fitDataFrame<T extends Numeric, K extends string, R extends Series<Numeric>>(
    features: DataFrame<{[P in K]: T}>, target: null|R, convertDType = true) {
    // runtime type check (validates only the first column's dtype)
    this._check_type(features.get(features.names[0]).type);
    return this.fitSeries(
      dataframeToSeries(features) as Series<T>, target, features.numColumns, convertDType);
  }

  /**
   * Fit features into an embedded space
   * @param features array containing floats or doubles in the format [x1, y1, z1, x2, y2, z2...]
   * for features x, y & z.
   *
   * ```typescript
   * // For a sample dataset of colors, with properties r,g and b:
   * features = [
   *   ...Object.values({ r: xx1, g: xx2, b: xx3 }),
   *   ...Object.values({ r: xx4, g: xx5, b: xx6 }),
   * ] // [xx1, xx2, xx3, xx4, xx5, xx6]
   * ```
   *
   * @param target array containing target values
   *
   * ```typescript
   * // For a sample dataset of colors, with properties r,g and b:
   * target = [color1, color2] // len(target) = nFeatures
   * ```
   * @param convertDType When set to True, the method will automatically convert the inputs to
   * float32
   * @param nFeatures number of properties in the input features, if features is of the format
   * [x1, y1, x2, y2...]
   *
   * @returns FittedUMAP object with updated embeddings
   */
  fitArray<T extends Series<Numeric>>(features: (number|bigint|null|undefined)[],
                                      target: (number|bigint|null|undefined)[]|null,
                                      nFeatures    = 1,
                                      convertDType = true) {
    return this.fitSeries(
      Series.new({type: this._resolveType(convertDType, features[0]), data: features}) as T,
      target == null ? null : Series.new({
        type: this._resolveType(convertDType, target[0]),
        data: target,
      }),
      nFeatures,
      convertDType);
  }

  /** Replace the native UMAP object with one constructed from `input`. */
  setUMAPParams(input: UMAPParams) { this._umap = new UMAPBase(input); }

  /** Snapshot the current hyper-parameters as a plain UMAPParams object. */
  getUMAPParams() {
    return {
      nNeighbors: this.nNeighbors,
      nComponents: this.nComponents,
      nEpochs: this.nEpochs,
      learningRate: this.learningRate,
      minDist: this.minDist,
      spread: this.spread,
      setOpMixRatio: this.setOpMixRatio,
      localConnectivity: this.localConnectivity,
      repulsionStrength: this.repulsionStrength,
      negativeSampleRate: this.negativeSampleRate,
      transformQueueSize: this.transformQueueSize,
      verbosity: this.verbosity,
      a: this.a,
      b: this.b,
      initialAlpha: this.initialAlpha,
      init: this.init,
      targetNNeighbors: this.targetNNeighbors,
      targetMetric: this.targetMetric,
      targetWeight: this.targetWeight,
      randomState: this.randomState,
    } as UMAPParams;
  }

  /**
   * @param dtype Numeric cudf DataType
   * @returns Embeddings in low-dimensional space in dtype format, which can be converted to any
   * of the following types: DataFrame, Series, DeviceBuffer.
   * ```typescript
   * getEmbeddings(new Float64).asDataFrame(); // returns DataFrame<{number: Series<Numeric>}>
   * getEmbeddings(new Int32).asSeries(); // returns Series<Numeric>
   * getEmbeddings(new UInt32).asDeviceBuffer(); //returns rmm.DeviceBuffer
   * ```
   */
  getEmbeddings<T extends Numeric>(dtype: T) {
    return new Embeddings(this._embeddings, this.nComponents, dtype);
  }

  /**
   * @returns Embeddings in low-dimensional space in float32 format, which can be converted to any
   * of the following types: DataFrame, Series, DeviceBuffer.
   * ```typescript
   * embeddings.asDataFrame(); // returns DataFrame<{number: Series<Numeric>}>
   * embeddings.asSeries(); // returns Series<Numeric>
   * embeddings.asDeviceBuffer(); //returns rmm.DeviceBuffer
   * ```
   */
  get embeddings() { return new Embeddings(this._embeddings, this.nComponents, new Float32); }

  // Hyper-parameter accessors: all forward to the native UMAP object.
  get nNeighbors(): number { return this._umap.nNeighbors; }
  get nComponents(): number { return this._umap.nComponents; }
  get nEpochs(): number { return this._umap.nEpochs; }
  get learningRate(): number { return this._umap.learningRate; }
  get minDist(): number { return this._umap.minDist; }
  get spread(): number { return this._umap.spread; }
  get setOpMixRatio(): number { return this._umap.setOpMixRatio; }
  get localConnectivity(): number { return this._umap.localConnectivity; }
  get repulsionStrength(): number { return this._umap.repulsionStrength; }
  get negativeSampleRate(): number { return this._umap.negativeSampleRate; }
  get transformQueueSize(): number { return this._umap.transformQueueSize; }
  get a(): number { return this._umap.a; }
  get b(): number { return this._umap.b; }
  get initialAlpha(): number { return this._umap.initialAlpha; }
  get init(): number { return this._umap.init; }
  get targetNNeighbors(): number { return this._umap.targetNNeighbors; }
  get targetWeight(): number { return this._umap.targetWeight; }
  get randomState(): number { return this._umap.randomState; }
  // These two map the native numeric code back to its enum name.
  get targetMetric(): string { return MetricType[this._umap.targetMetric]; }
  get verbosity(): string { return CUMLLogLevels[this._umap.verbosity]; }
}

/**
 * A UMAP model that has been fit: carries the fitted embeddings and (lazily)
 * the fuzzy-simplicial-set graph, enabling transform and refine operations.
 */
export class FittedUMAP extends UMAP {
  // COO graph produced during fit (or built lazily on first refine).
  _coo: COOInterface;

  /**
   * Construct a fitted UMAP model.
   * @param input hyper-parameters to rebuild the native UMAP object with
   * @param embeddings fitted embeddings buffer produced by a prior fit
   * @param coo fuzzy-simplicial-set graph from the fit; defaults to an empty
   *   COO, in which case `refineSeries` rebuilds it on first use
   */
  constructor(input: UMAPParams, embeddings: DeviceBuffer, coo: COOInterface = new COO()) {
    super(input);
    this._embeddings = embeddings;
    this._coo        = coo;
  }

  /**
   * Transform features into the existing embedded space and return that transformed output.
   *
   * @param convertDType When set to True, the method will automatically convert the inputs to
   * float32
   * @param nFeatures number of properties in the input features, if features is of the format
   * [x1,y1,x2,y2...]
   * @returns Transformed `features` into the existing embedded space and return an `Embeddings`
   * instance which can be converted to any of the following types: DataFrame, Series,
   * DeviceBuffer.
   * ```typescript
   * transformSeries(...).asDataFrame(); // returns DataFrame<{number: Series<Numeric>}>
   * transformSeries(...).asSeries(); // returns Series<Numeric>
   * transformSeries(...).asDeviceBuffer(); //returns rmm.DeviceBuffer
   * ```
   */
  transformSeries<T extends Series<Numeric>>(features: T, nFeatures = 1, convertDType = true) {
    // runtime type check
    this._check_type(features.type);
    const nSamples = Math.floor(features.length / nFeatures);
    const result   = this._umap.transform({
      features: features._col.data,
      featuresType: features.type,
      nSamples: nSamples,
      nFeatures: nFeatures,
      convertDType: convertDType,
      embeddings: this._embeddings,
      transformed: this._generate_embeddings(nSamples, features.type)
    });
    // Native side emits float32 when convertDType is on; otherwise the input
    // dtype is preserved.
    const dtype = (convertDType) ? new Float32 : features.type;
    return new Embeddings(result, this.nComponents, dtype);
  }

  /**
   * Transform features into the existing embedded space and return that transformed output.
   *
   * @param convertDType When set to True, the method will automatically convert the inputs to
   * float32
   * @param nFeatures number of properties in the input features, if features is of the format
   * [x1,y1,x2,y2...]
   * @returns Transformed `features` into the existing embedded space and return an `Embeddings`
   * instance which can be converted to any of the following types: DataFrame, Series, DeviceBuffer
   * ```typescript
   * transformDataFrame(...).asDataFrame(); // returns DataFrame<{number: Series<Numeric>}>
   * transformDataFrame(...).asSeries(); // returns Series<Numeric>
   * transformDataFrame(...).asDeviceBuffer(); //returns rmm.DeviceBuffer
   * ```
   */
  transformDataFrame<T extends Numeric, K extends string>(features: DataFrame<{[P in K]: T}>,
                                                          convertDType = true) {
    return this.transformSeries(
      dataframeToSeries(features) as Series<T>, features.numColumns, convertDType);
  }

  /**
   * Transform features into the existing embedded space and return that transformed output.
   *
   * @param convertDType When set to True, the method will automatically convert the inputs to
   * float32
   * @param nFeatures number of properties in the input features, if features is of the format
   * [x1,y1,x2,y2...]
   * @returns Transformed `features` into the existing embedded space and return an `Embeddings`
   * instance which can be converted to any of the following types: DataFrame, Series,
   * DeviceBuffer.
   * ```typescript
   * transformArray(...).asDataFrame(); // returns DataFrame<{number: Series<Numeric>}>
   * transformArray(...).asSeries(); // returns Series<Numeric>
   * transformArray(...).asDeviceBuffer(); //returns rmm.DeviceBuffer
   * ```
   */
  transformArray<T extends Series<Numeric>>(features: (number|bigint|null|undefined)[],
                                            nFeatures    = 1,
                                            convertDType = true) {
    return this.transformSeries(
      Series.new({type: this._resolveType(convertDType, features[0]), data: features}) as T,
      nFeatures,
      convertDType);
  }

  /**
   * Refine features into existing embedded space as base
   *
   * @param features cuDF Series containing floats or doubles in the format [x1, y1, z1, x2, y2,
   * z2...] for features x, y & z.
   *
   * @param target cuDF Series containing target values
   * @param convertDType When set to True, the method will automatically convert the inputs to
   * float32
   *
   * @param nFeatures number of properties in the input features, if features is of the format
   * [x1,y1,x2,y2...]
   */
  refineSeries<T extends Series<Numeric>, R extends Series<Numeric>>(features: T,
                                                                     target: null|R,
                                                                     nFeatures   = 1,
                                                                     convertDType = true) {
    // runtime type check
    this._check_type(features.type);
    const nSamples = Math.floor(features.length / nFeatures);
    // Lazily (re)build the graph if this model was constructed without one.
    if (this._coo.getSize() === 0) {
      const target_ = (target != null) ? {target: target._col.data, targetType: target.type} : {};
      this._coo     = this._umap.graph({
        features: features._col.data,
        featuresType: features.type,
        nSamples: nSamples,
        nFeatures: nFeatures,
        convertDType: convertDType,
        ...target_
      });
    }
    const options = {
      features: features._col.data,
      featuresType: features.type,
      nSamples: nSamples,
      nFeatures: nFeatures,
      convertDType: convertDType,
      // DeviceBuffer instances are always truthy, so the fallback is
      // effectively dead; kept for safety should _embeddings ever be null.
      embeddings: this._embeddings || this._generate_embeddings(nSamples, features.type),
      coo: this._coo
    };
    // Mutates this model in place; the refined embeddings replace the old ones.
    this._embeddings = this._umap.refine(options);
  }

  /**
   * Refine features into existing embedded space as base
   * @param features Dense or sparse matrix containing floats or doubles. Acceptable dense formats:
   * cuDF DataFrame
   *
   * @param target cuDF Series containing target values
   * @param convertDType When set to True, the method will automatically convert the inputs to
   * float32
   */
  refineDataFrame<T extends Numeric, K extends string, R extends Series<Numeric>>(
    features: DataFrame<{[P in K]: T}>, target: null|R, convertDType = true) {
    // runtime type check (validates only the first column's dtype)
    this._check_type(features.get(features.names[0]).type);
    this.refineSeries(
      dataframeToSeries(features) as Series<T>, target, features.numColumns, convertDType);
  }

  /**
   * Refine features into existing embedded space as base
   * @param features array containing floats or doubles in the format [x1, y1, z1, x2, y2, z2...]
   * for features x, y & z.
   *
   * ```typescript
   * // For a sample dataset of colors, with properties r,g and b:
   * features = [
   *   ...Object.values({ r: xx1, g: xx2, b: xx3 }),
   *   ...Object.values({ r: xx4, g: xx5, b: xx6 }),
   * ] // [xx1, xx2, xx3, xx4, xx5, xx6]
   * ```
   *
   * @param target array containing target values
   *
   * ```typescript
   * // For a sample dataset of colors, with properties r,g and b:
   * target = [color1, color2] // len(target) = nFeatures
   * ```
   * @param convertDType When set to True, the method will automatically convert the inputs to
   * float32
   * @param nFeatures number of properties in the input features, if features is of the format
   * [x1, y1, x2, y2...]
   *
   */
  refineArray<T extends Series<Numeric>>(features: (number|bigint|null|undefined)[],
                                         target: (number|bigint|null|undefined)[]|null,
                                         nFeatures    = 1,
                                         convertDType = true) {
    this.refineSeries(
      Series.new({type: this._resolveType(convertDType, features[0]), data: features}) as T,
      target == null ? null : Series.new({
        type: this._resolveType(convertDType, target[0]),
        data: target,
      }),
      nFeatures,
      convertDType);
  }
}
0
rapidsai_public_repos/node/modules/cuml
rapidsai_public_repos/node/modules/cuml/src/node_cuml.ts
// Copyright (c) 2022, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. import {DataType} from '@rapidsai/cudf'; import {DeviceBuffer} from '@rapidsai/rmm'; import {COOConstructor} from './coo'; import {UMAPConstructor} from './umap_base'; /** @ignore */ export declare const _cpp_exports: any; export declare const COO: COOConstructor; export declare const UMAP: UMAPConstructor; export declare function trustworthiness(features: any[]|DeviceBuffer, featuresType: DataType, embedding: any[]|DeviceBuffer, embeddedType: DataType, nSamples: number, nFeatures: number, nComponents: number, nNeighbors: number, batch_size: number): number;
0
rapidsai_public_repos/node/modules/cuml
rapidsai_public_repos/node/modules/cuml/src/metrics.cpp
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // todo: including the below headers with undef guards is the only way cuml builds with raft // locally // #include <node_cuml/raft/linalg/distance_type.h> #include <node_cuml/cuml/metrics/metrics.hpp> #include <node_cuml/metrics.hpp> #include <node_cuml/raft/handle.hpp> #include <node_cudf/utilities/buffer.hpp> #include <node_cudf/utilities/napi_to_cpp.hpp> #include <node_rmm/device_buffer.hpp> #include <node_cuda/utilities/error.hpp> #include <nv_node/utilities/args.hpp> #include <napi.h> namespace nv { namespace Metrics { Napi::Value trustworthiness(Napi::CallbackInfo const& info) { CallbackArgs args{info}; DeviceBuffer::wrapper_t X = data_to_devicebuffer(args.Env(), args[0], args[1]); DeviceBuffer::wrapper_t embedded = data_to_devicebuffer(args.Env(), args[2], args[3]); raft::handle_t handle; try { double result = ML::Metrics::trustworthiness_score<float, raft::distance::L2SqrtUnexpanded>( handle, static_cast<float*>(X->data()), static_cast<float*>(embedded->data()), args[4], args[5], args[6], args[7], args[8]); return Napi::Value::From(info.Env(), result); } catch (std::exception const& e) { NAPI_THROW(Napi::Error::New(info.Env(), e.what())); } } } // namespace Metrics } // namespace nv
0
rapidsai_public_repos/node/modules/cuml
rapidsai_public_repos/node/modules/cuml/src/mappings.ts
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. export enum MetricType { CATEGORICAL, EUCLIDEAN } export enum CUMLLogLevels { CUML_LEVEL_OFF, CUML_LEVEL_CRITICAL, CUML_LEVEL_ERROR, CUML_LEVEL_WARN, CUML_LEVEL_INFO, CUML_LEVEL_DEBUG, CUML_LEVEL_TRACE, }
0
rapidsai_public_repos/node/modules/cuml
rapidsai_public_repos/node/modules/cuml/src/coo.cpp
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <node_cuda/utilities/error.hpp> #include <node_cudf/utilities/buffer.hpp> #include <node_cudf/utilities/napi_to_cpp.hpp> #include <node_rmm/device_buffer.hpp> #include <node_cuml/coo.hpp> #include <napi.h> namespace nv { Napi::Function COO::Init(Napi::Env const& env, Napi::Object exports) { return DefineClass(env, "COO", {InstanceMethod<&COO::get_size>("getSize")}); } COO::wrapper_t COO::New(Napi::Env const& env, std::unique_ptr<raft::sparse::COO<float, int32_t>> coo) { auto buf = EnvLocalObjectWrap<COO>::New(env); buf->coo_ = std::move(coo); return buf; } COO::COO(CallbackArgs const& args) : EnvLocalObjectWrap<COO>(args) { this->coo_ = std::make_unique<raft::sparse::COO<float, int32_t>>(cudaStream_t{0}); } Napi::Value COO::get_size(Napi::CallbackInfo const& info) { return Napi::Value::From(info.Env(), get_size()); } } // namespace nv
0
rapidsai_public_repos/node/modules/cuml
rapidsai_public_repos/node/modules/cuml/src/coo.ts
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. import * as CUML from './addon'; export interface COOConstructor { new(): COOInterface; } export interface COOInterface { getSize(): number; } export const COO: COOConstructor = CUML.COO;
0
rapidsai_public_repos/node/modules/cuml
rapidsai_public_repos/node/modules/cuml/src/umap.cpp
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <node_rmm/device_buffer.hpp> #include <node_cuda/utilities/error.hpp> #include <node_cudf/utilities/buffer.hpp> #include <node_cudf/utilities/napi_to_cpp.hpp> #include <node_cuml/umap.hpp> #include <napi.h> namespace nv { int get_int(NapiToCPP const& opt, int const default_val) { return opt.IsNumber() ? opt.operator int() : default_val; } bool get_bool(NapiToCPP const& opt, bool const default_val) { return opt.IsBoolean() ? opt.operator bool() : default_val; } float get_float(NapiToCPP const& opt, float const default_val) { return opt.IsNumber() ? 
opt.operator float() : default_val; } Napi::Function UMAP::Init(Napi::Env const& env, Napi::Object exports) { return DefineClass(env, "UMAP", {InstanceAccessor<&UMAP::n_neighbors>("nNeighbors"), InstanceAccessor<&UMAP::n_components>("nComponents"), InstanceAccessor<&UMAP::n_epochs>("nEpochs"), InstanceAccessor<&UMAP::learning_rate>("learningRate"), InstanceAccessor<&UMAP::min_dist>("minDist"), InstanceAccessor<&UMAP::spread>("spread"), InstanceAccessor<&UMAP::set_op_mix_ratio>("setOpMixRatio"), InstanceAccessor<&UMAP::local_connectivity>("localConnectivity"), InstanceAccessor<&UMAP::repulsion_strength>("repulsionStrength"), InstanceAccessor<&UMAP::negative_sample_rate>("negativeSampleRate"), InstanceAccessor<&UMAP::transform_queue_size>("transformQueueSize"), InstanceAccessor<&UMAP::a>("a"), InstanceAccessor<&UMAP::b>("b"), InstanceAccessor<&UMAP::initial_alpha>("initialAlpha"), InstanceAccessor<&UMAP::init>("init"), InstanceAccessor<&UMAP::target_n_neighbors>("targetNNeighbors"), InstanceAccessor<&UMAP::target_weight>("targetWeight"), InstanceAccessor<&UMAP::random_state>("randomState"), InstanceAccessor<&UMAP::verbosity>("verbosity"), InstanceAccessor<&UMAP::target_metric>("targetMetric"), InstanceMethod<&UMAP::fit>("fit"), InstanceMethod<&UMAP::transform>("transform"), InstanceMethod<&UMAP::refine>("refine"), InstanceMethod<&UMAP::get_graph>("graph")}); } ML::UMAPParams update_params(NapiToCPP::Object props) { ML::UMAPParams params{}; params.n_neighbors = get_int(props.Get("nNeighbors"), 15); params.n_components = get_int(props.Get("nComponents"), 2); params.n_epochs = get_int(props.Get("nEpochs"), 0); params.learning_rate = get_float(props.Get("learningRate"), 1.0); params.min_dist = get_float(props.Get("minDist"), 0.1); params.spread = get_float(props.Get("spread"), 1.0); params.set_op_mix_ratio = get_float(props.Get("setOpMixRatio"), 1.0); params.local_connectivity = get_float(props.Get("localConnectivity"), 1.0); params.repulsion_strength = 
get_float(props.Get("repulsionStrength"), 1.0); params.negative_sample_rate = get_int(props.Get("negativeSampleRate"), 5); params.transform_queue_size = get_float(props.Get("transformQueueSize"), 4); params.verbosity = get_int(props.Get("verbosity"), 4); params.a = get_float(props.Get("a"), -1.0); params.b = get_float(props.Get("b"), -1.0); params.initial_alpha = get_float(props.Get("initialAlpha"), 1.0); params.init = get_int(props.Get("init"), 1); params.target_n_neighbors = get_int(props.Get("targetNNeighbors"), 1); params.target_metric = (get_int(props.Get("targetMetric"), 0) == 0) ? ML::UMAPParams::MetricType::CATEGORICAL : ML::UMAPParams::MetricType::EUCLIDEAN; params.target_weight = get_float(props.Get("targetWeight"), 0.5); params.random_state = get_int(props.Get("randomState"), 0); return params; } UMAP::wrapper_t UMAP::New(Napi::Env const& env) { return EnvLocalObjectWrap<UMAP>::New(env); } UMAP::UMAP(CallbackArgs const& args) : EnvLocalObjectWrap<UMAP>(args) { raft::handle_t handle; this->params_ = update_params(args[0]); ML::UMAP::find_ab(handle, &this->params_); } void UMAP::fit(DeviceBuffer::wrapper_t const& X, cudf::size_type n_samples, cudf::size_type n_features, DeviceBuffer::wrapper_t const& y, DeviceBuffer::wrapper_t const& knn_indices, DeviceBuffer::wrapper_t const& knn_dists, bool convert_dtype, DeviceBuffer::wrapper_t const& embeddings, raft::sparse::COO<float>* graph) { raft::handle_t handle; try { ML::UMAP::fit(handle, static_cast<float*>(X->data()), (y->size() != 0) ? 
static_cast<float*>(y->data()) : nullptr, n_samples, n_features, static_cast<int64_t*>(knn_indices->data()), static_cast<float*>(knn_dists->data()), &this->params_, static_cast<float*>(embeddings->data()), graph); } catch (std::exception const& e) { NAPI_THROW(Napi::Error::New(Env(), e.what())); } } COO::wrapper_t UMAP::get_graph(DeviceBuffer::wrapper_t const& X, cudf::size_type n_samples, cudf::size_type n_features, DeviceBuffer::wrapper_t const& knn_indices, DeviceBuffer::wrapper_t const& knn_dists, DeviceBuffer::wrapper_t const& y, bool convert_dtype) { raft::handle_t handle; try { auto coo = ML::UMAP::get_graph(handle, static_cast<float*>(X->data()), (y->size() != 0) ? static_cast<float*>(y->data()) : nullptr, n_samples, n_features, static_cast<int64_t*>(knn_indices->data()), static_cast<float*>(knn_dists->data()), &this->params_); return COO::New(this->Env(), std::move(coo)); } catch (std::exception const& e) { NAPI_THROW(Napi::Error::New(Env(), e.what())); } } void UMAP::refine(DeviceBuffer::wrapper_t const& X, cudf::size_type n_samples, cudf::size_type n_features, COO::wrapper_t const& coo, bool convert_dtype, DeviceBuffer::wrapper_t const& embeddings) { try { raft::handle_t handle; ML::UMAP::refine(handle, static_cast<float*>(X->data()), n_samples, n_features, coo->get_coo(), &this->params_, static_cast<float*>(embeddings->data())); } catch (std::exception const& e) { NAPI_THROW(Napi::Error::New(Env(), e.what())); } } void UMAP::transform(DeviceBuffer::wrapper_t const& X, cudf::size_type n_samples, cudf::size_type n_features, DeviceBuffer::wrapper_t const& knn_indices, DeviceBuffer::wrapper_t const& knn_dists, DeviceBuffer::wrapper_t const& orig_X, int orig_n, bool convert_dtype, DeviceBuffer::wrapper_t const& embeddings, DeviceBuffer::wrapper_t const& transformed) { raft::handle_t handle; try { ML::UMAP::transform(handle, static_cast<float*>(X->data()), n_samples, n_features, static_cast<int64_t*>(knn_indices->data()), 
static_cast<float*>(knn_dists->data()), static_cast<float*>(orig_X->data()), orig_n, static_cast<float*>(embeddings->data()), n_samples, &this->params_, static_cast<float*>(transformed->data())); } catch (std::exception const& e) { NAPI_THROW(Napi::Error::New(Env(), e.what())); } } Napi::Value UMAP::fit(Napi::CallbackInfo const& info) { CallbackArgs args{info}; NODE_CUDF_EXPECT( args[0].IsObject(), "fit constructor expects an Object of properties", args.Env()); NapiToCPP::Object props = args[0]; DeviceBuffer::wrapper_t X = data_to_devicebuffer(args.Env(), props.Get("features"), props.Get("featuresType")); DeviceBuffer::wrapper_t y = props.Has("y") ? data_to_devicebuffer(args.Env(), props.Get("target"), props.Get("targetType")) : DeviceBuffer::New(args.Env()); DeviceBuffer::wrapper_t knn_indices = props.Has("knnIndices") ? data_to_devicebuffer(args.Env(), props.Get("knnIndices"), props.Get("knnIndicesType")) : DeviceBuffer::New(args.Env()); DeviceBuffer::wrapper_t knn_dists = props.Has("knnDists") ? data_to_devicebuffer(args.Env(), props.Get("knnDists"), props.Get("knnDistsType")) : DeviceBuffer::New(args.Env()); DeviceBuffer::wrapper_t embeddings = props.Get("embeddings"); COO::wrapper_t graph = props.Get("graph"); fit(X, props.Get("nSamples"), props.Get("nFeatures"), y, knn_indices, knn_dists, props.Get("convertDType"), embeddings, graph->get_coo()); return embeddings; } Napi::Value UMAP::get_graph(Napi::CallbackInfo const& info) { CallbackArgs args{info}; NODE_CUDF_EXPECT( args[0].IsObject(), "refine constructor expects an Object of properties", args.Env()); NapiToCPP::Object props = args[0]; DeviceBuffer::wrapper_t X = data_to_devicebuffer(args.Env(), props.Get("features"), props.Get("featuresType")); DeviceBuffer::wrapper_t y = props.Has("y") ? data_to_devicebuffer(args.Env(), props.Get("target"), props.Get("targetType")) : DeviceBuffer::New(args.Env()); DeviceBuffer::wrapper_t knn_indices = props.Has("knnIndices") ? 
data_to_devicebuffer(args.Env(), props.Get("knnIndices"), props.Get("knnIndicesType")) : DeviceBuffer::New(args.Env()); DeviceBuffer::wrapper_t knn_dists = props.Has("knnDists") ? data_to_devicebuffer(args.Env(), props.Get("knnDists"), props.Get("knnDistsType")) : DeviceBuffer::New(args.Env()); return get_graph(X, props.Get("nSamples"), props.Get("nFeatures"), knn_indices, knn_dists, y, props.Get("convertDType")); } Napi::Value UMAP::refine(Napi::CallbackInfo const& info) { CallbackArgs args{info}; NODE_CUDF_EXPECT( args[0].IsObject(), "get_graph constructor expects an Object of properties", args.Env()); NapiToCPP::Object props = args[0]; DeviceBuffer::wrapper_t X = data_to_devicebuffer(args.Env(), props.Get("features"), props.Get("featuresType")); DeviceBuffer::wrapper_t embeddings = props.Get("embeddings"); refine(X, props.Get("nSamples"), props.Get("nFeatures"), props.Get("coo"), props.Get("convertDType"), embeddings); return embeddings; } Napi::Value UMAP::transform(Napi::CallbackInfo const& args) { NODE_CUDF_EXPECT( args[0].IsObject(), "transform constructor expects an Object of properties", args.Env()); NapiToCPP::Object props = args[0]; DeviceBuffer::wrapper_t X = data_to_devicebuffer(args.Env(), props.Get("features"), props.Get("featuresType")); DeviceBuffer::wrapper_t knn_indices = props.Has("knnIndices") ? data_to_devicebuffer(args.Env(), props.Get("knnIndices"), props.Get("knnIndicesType")) : DeviceBuffer::New(args.Env()); DeviceBuffer::wrapper_t knn_dists = props.Has("knnDists") ? 
data_to_devicebuffer(args.Env(), props.Get("knnDists"), props.Get("knnDistsType")) : DeviceBuffer::New(args.Env()); DeviceBuffer::wrapper_t embeddings = props.Get("embeddings"); DeviceBuffer::wrapper_t transformed = props.Get("transformed"); transform(X, props.Get("nSamples"), props.Get("nFeatures"), knn_indices, knn_dists, X, props.Get("nSamples"), props.Get("convertDType"), embeddings, transformed); return transformed; } Napi::Value UMAP::n_neighbors(Napi::CallbackInfo const& info) { return Napi::Value::From(info.Env(), params_.n_neighbors); } Napi::Value UMAP::n_components(Napi::CallbackInfo const& info) { return Napi::Value::From(info.Env(), params_.n_components); } Napi::Value UMAP::n_epochs(Napi::CallbackInfo const& info) { return Napi::Value::From(info.Env(), params_.n_epochs); } Napi::Value UMAP::learning_rate(Napi::CallbackInfo const& info) { return Napi::Value::From(info.Env(), params_.learning_rate); } Napi::Value UMAP::min_dist(Napi::CallbackInfo const& info) { return Napi::Value::From(info.Env(), params_.min_dist); } Napi::Value UMAP::spread(Napi::CallbackInfo const& info) { return Napi::Value::From(info.Env(), params_.spread); } Napi::Value UMAP::set_op_mix_ratio(Napi::CallbackInfo const& info) { return Napi::Value::From(info.Env(), params_.set_op_mix_ratio); } Napi::Value UMAP::local_connectivity(Napi::CallbackInfo const& info) { return Napi::Value::From(info.Env(), params_.local_connectivity); } Napi::Value UMAP::repulsion_strength(Napi::CallbackInfo const& info) { return Napi::Value::From(info.Env(), params_.repulsion_strength); } Napi::Value UMAP::negative_sample_rate(Napi::CallbackInfo const& info) { return Napi::Value::From(info.Env(), params_.negative_sample_rate); } Napi::Value UMAP::transform_queue_size(Napi::CallbackInfo const& info) { return Napi::Value::From(info.Env(), params_.transform_queue_size); } Napi::Value UMAP::a(Napi::CallbackInfo const& info) { return Napi::Value::From(info.Env(), params_.a); } Napi::Value 
UMAP::b(Napi::CallbackInfo const& info) { return Napi::Value::From(info.Env(), params_.b); } Napi::Value UMAP::initial_alpha(Napi::CallbackInfo const& info) { return Napi::Value::From(info.Env(), params_.initial_alpha); } Napi::Value UMAP::init(Napi::CallbackInfo const& info) { return Napi::Value::From(info.Env(), params_.init); } Napi::Value UMAP::target_n_neighbors(Napi::CallbackInfo const& info) { return Napi::Value::From(info.Env(), params_.target_n_neighbors); } Napi::Value UMAP::target_weight(Napi::CallbackInfo const& info) { return Napi::Value::From(info.Env(), params_.target_weight); } Napi::Value UMAP::random_state(Napi::CallbackInfo const& info) { return Napi::Value::From(info.Env(), params_.random_state); } Napi::Value UMAP::verbosity(Napi::CallbackInfo const& info) { return Napi::Value::From(info.Env(), params_.verbosity); } Napi::Value UMAP::target_metric(Napi::CallbackInfo const& info) { return Napi::Value::From(info.Env(), (int)(params_.target_metric)); } } // namespace nv
0
rapidsai_public_repos/node/modules/cuml
rapidsai_public_repos/node/modules/cuml/src/addon.cpp
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <node_cuml/coo.hpp> #include <node_cuml/metrics.hpp> #include <node_cuml/umap.hpp> #include <nv_node/addon.hpp> struct rapidsai_cuml : public nv::EnvLocalAddon, public Napi::Addon<rapidsai_cuml> { rapidsai_cuml(Napi::Env const& env, Napi::Object exports) : nv::EnvLocalAddon(env, exports) { DefineAddon(exports, { InstanceMethod("init", &rapidsai_cuml::InitAddon), InstanceValue("_cpp_exports", _cpp_exports.Value()), InstanceValue("COO", InitClass<nv::COO>(env, exports)), InstanceValue("UMAP", InitClass<nv::UMAP>(env, exports)), InstanceMethod("trustworthiness", &rapidsai_cuml::trustworthiness), }); } private: Napi::Value trustworthiness(Napi::CallbackInfo const& info) { return nv::Metrics::trustworthiness(info); } }; NODE_API_ADDON(rapidsai_cuml);
0
rapidsai_public_repos/node/modules/cuml
rapidsai_public_repos/node/modules/cuml/src/metrics.ts
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. import {DataFrame, Float32, Numeric, Series} from '@rapidsai/cudf'; import {DeviceBuffer} from '@rapidsai/rmm'; import * as CUML from './addon'; import {dataframeToSeries} from './utilities/array_utils'; /** * Expresses to what extent the local structure is retained in embedding. The score is defined in * the range [0, 1]. * @param features original high dimensional dataset * @param embedded low dimesional embedding * @param nFeatures Number of features in features * @param nComponents Number of features in embedded * @param nNeighbors Number of neighbors considered * @param batch_size It sets the number of samples that will be included in each batch * @returns Trustworthiness of the low-dimensional embedding */ export function trustworthinessSeries<T extends Numeric, R extends Numeric>( features: Series<T>, embedded: Series<R>, nFeatures: number, nComponents = 2, nNeighbors = 5, batch_size = 512): number { const nSamples = Math.floor(features.length / nFeatures); return CUML.trustworthiness(features.data.buffer as DeviceBuffer, features.type, embedded.data.buffer as DeviceBuffer, embedded.type, nSamples, nFeatures, nComponents, nNeighbors, batch_size); } /** * Expresses to what extent the local structure is retained in embedding. The score is defined in * the range [0, 1]. 
* @param features original high dimensional dataset * @param embedded low dimesional embedding * @param nNeighbors Number of neighbors considered * @param batch_size It sets the number of samples that will be included in each batch * @returns Trustworthiness of the low-dimensional embedding */ export function trustworthinessDataFrame<T extends Numeric, R extends Numeric, K extends string>( features: DataFrame<{[P in K]: T}>, embedded: DataFrame<{[P in number]: R}>, nNeighbors = 5, batch_size = 512): number { const nSamples = features.numRows; const nFeatures = features.numColumns; const nComponents = embedded.numColumns; return CUML.trustworthiness(dataframeToSeries(features).data.buffer as DeviceBuffer, features.get(features.names[0]).type, dataframeToSeries(embedded).data.buffer as DeviceBuffer, embedded.get(embedded.names[0]).type, nSamples, nFeatures, nComponents, nNeighbors, batch_size); } /** * Expresses to what extent the local structure is retained in embedding. The score is defined in * the range [0, 1]. * @param features original high dimensional dataset * @param embedded low dimesional embedding * @param nFeatures Number of features in features * @param nComponents Number of features in embedded * @param nNeighbors Number of neighbors considered * @param batch_size It sets the number of samples that will be included in each batch * @returns Trustworthiness of the low-dimensional embedding */ export function trustworthiness<T extends number|bigint>(features: (T|null|undefined)[], embedded: (T|null|undefined)[], nFeatures: number, nComponents = 2, nNeighbors = 5, batch_size = 512): number { const nSamples = Math.floor(features.length / nFeatures); return CUML.trustworthiness(features, new Float32, embedded, new Float32, nSamples, nFeatures, nComponents, nNeighbors, batch_size); }
0
rapidsai_public_repos/node/modules/cuml
rapidsai_public_repos/node/modules/cuml/src/addon.ts
// Copyright (c) 2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import {addon as CORE} from '@rapidsai/core';
import {addon as CUDA} from '@rapidsai/cuda';
import {addon as CUDF} from '@rapidsai/cudf';
import {addon as RMM} from '@rapidsai/rmm';

// Load the native rapidsai_cuml addon and call its `init` with the other
// RAPIDS native addons it builds on, then destructure the pieces this
// package re-exports (COO, UMAP, trustworthiness).
export const {
  _cpp_exports,
  COO,
  UMAP,
  trustworthiness,
} = require('bindings')('rapidsai_cuml.node').init(CORE, CUDA, RMM, CUDF) as
  typeof import('./node_cuml');

export default {_cpp_exports, COO, UMAP, trustworthiness};
0
rapidsai_public_repos/node/modules/cuml/src
rapidsai_public_repos/node/modules/cuml/src/node_cuml/coo.hpp
// Copyright (c) 2020-2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include <node_cuml/raft/handle.hpp> #include <node_cuml/raft/sparse/coo.hpp> #include <nv_node/objectwrap.hpp> #include <nv_node/utilities/args.hpp> #include <napi.h> #include <memory> namespace nv { struct COO : public EnvLocalObjectWrap<COO> { /** * @brief Initialize and export the COO JavaScript constructor and prototype. * * @param env The active JavaScript environment. * @param exports The exports object to decorate. * @return Napi::Function The COO constructor function. */ static Napi::Function Init(Napi::Env const& env, Napi::Object exports); /** * @brief Construct a new COO instance from an raft::sparse::COO<float, int32_t>. * * @param buffer Pointer the raft::sparse::COO<float, int32_t> to own. */ static wrapper_t New(Napi::Env const& env, std::unique_ptr<raft::sparse::COO<float, int32_t>> coo); /** * @brief Construct a new COO instance. * */ COO(CallbackArgs const& info); inline raft::sparse::COO<float, int32_t>* get_coo() { return coo_.get(); } inline int get_size() { return coo_->nnz; } private: std::unique_ptr<raft::sparse::COO<float, int32_t>> coo_; ///< Pointer to the underlying raft::sparse::COO<float, int32_t> Napi::Value get_size(Napi::CallbackInfo const& info); }; } // namespace nv
0
rapidsai_public_repos/node/modules/cuml/src
rapidsai_public_repos/node/modules/cuml/src/node_cuml/metrics.hpp
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include <napi.h> namespace nv { namespace Metrics { Napi::Value trustworthiness(Napi::CallbackInfo const& info); } // namespace Metrics } // namespace nv
0
rapidsai_public_repos/node/modules/cuml/src
rapidsai_public_repos/node/modules/cuml/src/node_cuml/umap.hpp
// Copyright (c) 2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

// TODO: including the below headers with undef guards is the only way cuml
// builds with raft locally.
#include <node_cuml/coo.hpp>
#include <node_cuml/cuml/manifold/umap.hpp>
#include <node_cuml/cuml/manifold/umapparams.hpp>

#include <node_cudf/column.hpp>
#include <nv_node/objectwrap.hpp>
#include <nv_node/utilities/args.hpp>

#include <napi.h>

namespace nv {

/**
 * @brief An owning wrapper around a cuml::manifold::UMAP
 *
 */
struct UMAP : public EnvLocalObjectWrap<UMAP> {
  /**
   * @brief Initialize and export the UMAP JavaScript constructor and prototype
   *
   * @param env The active JavaScript environment
   * @param exports The exports object to decorate
   * @return Napi::Function The UMAP constructor function
   */
  static Napi::Function Init(Napi::Env const& env, Napi::Object exports);

  /**
   * @brief Construct a new UMAP instance from C++.
   */
  static wrapper_t New(Napi::Env const& env);

  /**
   * @brief Construct a new UMAP instance from JavaScript
   *
   * @param args First argument is an options object of UMAP parameters.
   */
  UMAP(CallbackArgs const& args);

  // Fit an embedding; writes into `embeddings` and `graph`.
  void fit(DeviceBuffer::wrapper_t const& X,
           cudf::size_type n_samples,
           cudf::size_type n_features,
           DeviceBuffer::wrapper_t const& y,
           DeviceBuffer::wrapper_t const& knn_indices,
           DeviceBuffer::wrapper_t const& knn_dists,
           bool convert_dtype,
           DeviceBuffer::wrapper_t const& embeddings,
           raft::sparse::COO<float>* graph);

  // Refine an existing embedding in place using a precomputed COO graph.
  void refine(DeviceBuffer::wrapper_t const& X,
              cudf::size_type n_samples,
              cudf::size_type n_features,
              COO::wrapper_t const& coo,
              bool convert_dtype,
              DeviceBuffer::wrapper_t const& embeddings);

  // Compute only the fuzzy simplicial set graph for X.
  COO::wrapper_t get_graph(DeviceBuffer::wrapper_t const& X,
                           cudf::size_type n_samples,
                           cudf::size_type n_features,
                           DeviceBuffer::wrapper_t const& knn_indices,
                           DeviceBuffer::wrapper_t const& knn_dists,
                           DeviceBuffer::wrapper_t const& y,
                           bool convert_dtype);

  // Project new samples into an existing embedding; writes into `transformed`.
  void transform(DeviceBuffer::wrapper_t const& X,
                 cudf::size_type n_samples,
                 cudf::size_type n_features,
                 DeviceBuffer::wrapper_t const& knn_indices,
                 DeviceBuffer::wrapper_t const& knn_dists,
                 DeviceBuffer::wrapper_t const& orig_X,
                 int orig_n,
                 bool convert_dtype,
                 DeviceBuffer::wrapper_t const& embeddings,
                 DeviceBuffer::wrapper_t const& transformed);

 private:
  ML::UMAPParams params_{};  ///< cuML hyper-parameters; populated at construction.

  // JS bindings for the methods above. NOTE(review): fit_sparse and
  // transform_sparse are declared but no definition is visible in this module
  // — confirm they are implemented (or intended) elsewhere.
  Napi::Value fit(Napi::CallbackInfo const& info);
  Napi::Value refine(Napi::CallbackInfo const& info);
  Napi::Value get_graph(Napi::CallbackInfo const& info);
  Napi::Value fit_sparse(Napi::CallbackInfo const& info);
  Napi::Value transform(Napi::CallbackInfo const& info);
  Napi::Value transform_sparse(Napi::CallbackInfo const& info);

  // Read-only accessors mirroring ML::UMAPParams fields.
  Napi::Value n_neighbors(Napi::CallbackInfo const& info);
  Napi::Value n_components(Napi::CallbackInfo const& info);
  Napi::Value n_epochs(Napi::CallbackInfo const& info);
  Napi::Value learning_rate(Napi::CallbackInfo const& info);
  Napi::Value min_dist(Napi::CallbackInfo const& info);
  Napi::Value spread(Napi::CallbackInfo const& info);
  Napi::Value set_op_mix_ratio(Napi::CallbackInfo const& info);
  Napi::Value local_connectivity(Napi::CallbackInfo const& info);
  Napi::Value repulsion_strength(Napi::CallbackInfo const& info);
  Napi::Value negative_sample_rate(Napi::CallbackInfo const& info);
  Napi::Value transform_queue_size(Napi::CallbackInfo const& info);
  Napi::Value a(Napi::CallbackInfo const& info);
  Napi::Value b(Napi::CallbackInfo const& info);
  Napi::Value initial_alpha(Napi::CallbackInfo const& info);
  Napi::Value init(Napi::CallbackInfo const& info);
  Napi::Value target_n_neighbors(Napi::CallbackInfo const& info);
  Napi::Value target_weight(Napi::CallbackInfo const& info);
  Napi::Value target_metric(Napi::CallbackInfo const& info);
  Napi::Value verbosity(Napi::CallbackInfo const& info);
  Napi::Value random_state(Napi::CallbackInfo const& info);
};

}  // namespace nv
0
rapidsai_public_repos/node/modules/cuml/src/node_cuml/cuml
rapidsai_public_repos/node/modules/cuml/src/node_cuml/cuml/metrics/metrics.hpp
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #ifdef CUDA_TRY #undef CUDA_TRY #endif #ifdef CHECK_CUDA #undef CHECK_CUDA #endif #include <cuml/metrics/metrics.hpp> #ifdef CUDA_TRY #undef CUDA_TRY #endif #ifdef CHECK_CUDA #undef CHECK_CUDA #endif
0
rapidsai_public_repos/node/modules/cuml/src/node_cuml/cuml
rapidsai_public_repos/node/modules/cuml/src/node_cuml/cuml/manifold/umapparams.hpp
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #ifdef CUDA_TRY #undef CUDA_TRY #endif #ifdef CHECK_CUDA #undef CHECK_CUDA #endif #include <cuml/manifold/umapparams.h> #ifdef CHECK_CUDA #undef CHECK_CUDA #endif #ifdef CUDA_TRY #undef CUDA_TRY #endif
0
rapidsai_public_repos/node/modules/cuml/src/node_cuml/cuml
rapidsai_public_repos/node/modules/cuml/src/node_cuml/cuml/manifold/common.hpp
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #ifdef CUDA_TRY #undef CUDA_TRY #endif #ifdef CHECK_CUDA #undef CHECK_CUDA #endif #include <cstddef> #include <cstdint> #include <cuml/manifold/common.hpp> #ifdef CHECK_CUDA #undef CHECK_CUDA #endif #ifdef CUDA_TRY #undef CUDA_TRY #endif
0
rapidsai_public_repos/node/modules/cuml/src/node_cuml/cuml
rapidsai_public_repos/node/modules/cuml/src/node_cuml/cuml/manifold/umap.hpp
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #ifdef CUDA_TRY #undef CUDA_TRY #endif #ifdef CHECK_CUDA #undef CHECK_CUDA #endif #include <cuml/manifold/umap.hpp> #ifdef CHECK_CUDA #undef CHECK_CUDA #endif #ifdef CUDA_TRY #undef CUDA_TRY #endif
0
rapidsai_public_repos/node/modules/cuml/src/node_cuml
rapidsai_public_repos/node/modules/cuml/src/node_cuml/raft/handle.hpp
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #ifdef CUDA_TRY #undef CUDA_TRY #endif #ifdef CHECK_CUDA #undef CHECK_CUDA #endif #include <raft/handle.hpp> #ifdef CHECK_CUDA #undef CHECK_CUDA #endif #ifdef CUDA_TRY #undef CUDA_TRY #endif
0
rapidsai_public_repos/node/modules/cuml/src/node_cuml/raft
rapidsai_public_repos/node/modules/cuml/src/node_cuml/raft/sparse/coo.hpp
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #ifdef CUDA_TRY #undef CUDA_TRY #endif #ifdef CHECK_CUDA #undef CHECK_CUDA #endif #include <raft/sparse/coo.hpp> #ifdef CHECK_CUDA #undef CHECK_CUDA #endif #ifdef CUDA_TRY #undef CUDA_TRY #endif
0
rapidsai_public_repos/node/modules/cuml/src/node_cuml/raft
rapidsai_public_repos/node/modules/cuml/src/node_cuml/raft/linalg/distance_type.h
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #ifdef CUDA_TRY #undef CUDA_TRY #endif #ifdef CHECK_CUDA #undef CHECK_CUDA #endif #include <raft/linalg/distance_type.h> #ifdef CUDA_TRY #undef CUDA_TRY #endif #ifdef CHECK_CUDA #undef CHECK_CUDA #endif
0
rapidsai_public_repos/node/modules/cuml/src
rapidsai_public_repos/node/modules/cuml/src/utilities/array_utils.ts
// Copyright (c) 2021-2022, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. import { DataFrame, Int32, Numeric, scope, Series, } from '@rapidsai/cudf'; /** * convert a dataframe to a single series to replicate conversion to a matrix in an organized * for {x1,y1,x2,y2...} for df = {x:[x1,x2], y: [y1,y2]} */ export function dataframeToSeries<T extends Numeric, K extends string>( input: DataFrame<{[P in K]: T}>) { return input.interleaveColumns(); } /** * convert a series to a dataframe as per the number of components in umapparams * @param input * @param n_samples * @param nComponents * @returns DataFrame */ export function seriesToDataFrame<T extends Numeric>( input: Series<T>, nComponents: number): DataFrame<{[P in number]: T}> { const nSamples = Math.floor(input.length / nComponents); let result = new DataFrame<{[P in number]: T}>({}); for (let i = 0; i < nComponents; i++) { result = scope(() => { const indices = Series.sequence({type: new Int32, init: i, size: nSamples, step: nComponents}); return result.assign(new DataFrame({[i]: input.gather(indices)})); }, [result]); } return result; }
0
rapidsai_public_repos/node/modules/cuml
rapidsai_public_repos/node/modules/cuml/test/tsconfig.json
{ "extends": "../tsconfig.json", "include": [ "../src/**/*.ts", "../test/**/*.ts" ], "compilerOptions": { "target": "esnext", "module": "commonjs", "allowJs": true, "importHelpers": false, "noEmitHelpers": false, "noEmitOnError": false, "sourceMap": false, "inlineSources": false, "inlineSourceMap": false, "downlevelIteration": false, "baseUrl": "../", "paths": { "@rapidsai/cuml": ["src/index"], "@rapidsai/cuml/*": ["src/*"] } } }
0
rapidsai_public_repos/node/modules/cuml
rapidsai_public_repos/node/modules/cuml/test/cuml-tests.ts
import {DataFrame, Float32} from '@rapidsai/cudf'; import { trustworthiness, trustworthinessDataFrame, trustworthinessSeries, UMAP } from '@rapidsai/cuml'; const df = DataFrame.readCSV({ header: 0, sourceType: 'files', sources: [`${__dirname}/iris.csv`], dataTypes: { sepal_length: new Float32, sepal_width: new Float32, petal_length: new Float32, petal_width: new Float32, target: new Float32 } }); const X = df.drop(['target']); const y = df.get('target'); const XSeries = X.interleaveColumns(); const options = { nNeighbors: 10, minDist: 0.01, randomState: 12, targetNNeighbors: -1 }; test('fit_transform trustworthiness score (series)', () => { const umap = new UMAP(options); const t1 = umap.fitSeries(XSeries, y, 4).embeddings; const trust1 = trustworthinessSeries(XSeries, t1.asSeries(), 4); expect(trust1).toBeGreaterThan(0.97); }); test('fit_transform trustworthiness score (dataframe)', () => { const umap = new UMAP(options); const t1 = umap.fitDataFrame(X, y).embeddings; const trust1 = trustworthinessDataFrame(X, t1.asDataFrame()); expect(trust1).toBeGreaterThan(0.97); }); test('fit_transform trustworthiness score (array)', () => { const umap = new UMAP(options); const t1 = umap.fitArray([...XSeries], [...y], 4).embeddings; const trust1 = trustworthiness([...XSeries], [...t1.asSeries()], 4); expect(trust1).toBeGreaterThan(0.97); }); test('transform trustworthiness score (series)', () => { const umap = new UMAP(options); const t1 = umap.fitSeries(XSeries, y, 4).transformSeries(XSeries, 4); const score = trustworthinessSeries(XSeries, t1.asSeries(), 4); expect(score).toBeGreaterThan(0.95); }); test('transform trustworthiness score (dataframe)', () => { const umap = new UMAP(options); const t1 = umap.fitDataFrame(X, y).transformDataFrame(X); const score = trustworthinessDataFrame(X, t1.asDataFrame()); expect(score).toBeGreaterThan(0.95); }); test('transform trustworthiness score (array)', () => { const umap = new UMAP(options); const t1 = umap.fitArray([...XSeries], 
[...y], 4).transformArray([...XSeries], 4); const score = trustworthiness([...XSeries], [...t1.asSeries()], 4); expect(score).toBeGreaterThan(0.95); }); test('refine trustworthiness score (series)', () => { const umap = new UMAP({nEpochs: 3, ...options}); const fittedUmap = umap.fitSeries(XSeries, y, 4); const score = trustworthinessSeries(XSeries, fittedUmap.embeddings.asSeries(), 4); expect(score).toBeGreaterThan(0.45); // refine for (let i = 1; i < 50; i++) { fittedUmap.refineSeries(XSeries, y, 4); } const score_1 = trustworthinessSeries(XSeries, fittedUmap.embeddings.asSeries(), 4); expect(score_1).toBeGreaterThan(score); // refine further for (let i = 1; i < 50; i++) { fittedUmap.refineSeries(XSeries, y, 4); } const score_2 = trustworthinessSeries(XSeries, fittedUmap.embeddings.asSeries(), 4); expect(score_2).toBeGreaterThan(score_1); }); test('refine trustworthiness score (dataframe)', () => { const umap = new UMAP({nEpochs: 3, ...options}); const fittedUmap = umap.fitDataFrame(X, y); const score = trustworthinessDataFrame(X, fittedUmap.embeddings.asDataFrame()); expect(score).toBeGreaterThan(0.45); // refine for (let i = 1; i < 50; i++) { fittedUmap.refineDataFrame(X, y); } const score_1 = trustworthinessDataFrame(X, fittedUmap.embeddings.asDataFrame()); expect(score_1).toBeGreaterThan(score); // refine further for (let i = 1; i < 50; i++) { fittedUmap.refineDataFrame(X, y); } const score_2 = trustworthinessDataFrame(X, fittedUmap.embeddings.asDataFrame()); expect(score_2).toBeGreaterThan(score_1); }); test('transform trustworthiness score (array)', () => { const umap = new UMAP({nEpochs: 3, ...options}); const fittedUmap = umap.fitArray([...XSeries], [...y], 4); const score = trustworthiness([...XSeries], [...fittedUmap.embeddings.asSeries()], 4); expect(score).toBeGreaterThan(0.45); // refine for (let i = 1; i < 50; i++) { fittedUmap.refineArray([...XSeries], [...y], 4); } const score_1 = trustworthiness([...XSeries], [...fittedUmap.embeddings.asSeries()], 
4); expect(score_1).toBeGreaterThan(score); // refine further for (let i = 1; i < 50; i++) { fittedUmap.refineArray([...XSeries], [...y], 4); } const score_2 = trustworthiness([...XSeries], [...fittedUmap.embeddings.asSeries()], 4); expect(score_2).toBeGreaterThan(score_1); });
0
rapidsai_public_repos/node/modules/cuml
rapidsai_public_repos/node/modules/cuml/test/iris.csv
sepal_length,sepal_width,petal_length,petal_width,species 5.1,3.5,1.4,0.2,0 4.9,3.0,1.4,0.2,0 4.7,3.2,1.3,0.2,0 4.6,3.1,1.5,0.2,0 5.0,3.6,1.4,0.2,0 5.4,3.9,1.7,0.4,0 4.6,3.4,1.4,0.3,0 5.0,3.4,1.5,0.2,0 4.4,2.9,1.4,0.2,0 4.9,3.1,1.5,0.1,0 5.4,3.7,1.5,0.2,0 4.8,3.4,1.6,0.2,0 4.8,3.0,1.4,0.1,0 4.3,3.0,1.1,0.1,0 5.8,4.0,1.2,0.2,0 5.7,4.4,1.5,0.4,0 5.4,3.9,1.3,0.4,0 5.1,3.5,1.4,0.3,0 5.7,3.8,1.7,0.3,0 5.1,3.8,1.5,0.3,0 5.4,3.4,1.7,0.2,0 5.1,3.7,1.5,0.4,0 4.6,3.6,1.0,0.2,0 5.1,3.3,1.7,0.5,0 4.8,3.4,1.9,0.2,0 5.0,3.0,1.6,0.2,0 5.0,3.4,1.6,0.4,0 5.2,3.5,1.5,0.2,0 5.2,3.4,1.4,0.2,0 4.7,3.2,1.6,0.2,0 4.8,3.1,1.6,0.2,0 5.4,3.4,1.5,0.4,0 5.2,4.1,1.5,0.1,0 5.5,4.2,1.4,0.2,0 4.9,3.1,1.5,0.1,0 5.0,3.2,1.2,0.2,0 5.5,3.5,1.3,0.2,0 4.9,3.1,1.5,0.1,0 4.4,3.0,1.3,0.2,0 5.1,3.4,1.5,0.2,0 5.0,3.5,1.3,0.3,0 4.5,2.3,1.3,0.3,0 4.4,3.2,1.3,0.2,0 5.0,3.5,1.6,0.6,0 5.1,3.8,1.9,0.4,0 4.8,3.0,1.4,0.3,0 5.1,3.8,1.6,0.2,0 4.6,3.2,1.4,0.2,0 5.3,3.7,1.5,0.2,0 5.0,3.3,1.4,0.2,0 7.0,3.2,4.7,1.4,1 6.4,3.2,4.5,1.5,1 6.9,3.1,4.9,1.5,1 5.5,2.3,4.0,1.3,1 6.5,2.8,4.6,1.5,1 5.7,2.8,4.5,1.3,1 6.3,3.3,4.7,1.6,1 4.9,2.4,3.3,1.0,1 6.6,2.9,4.6,1.3,1 5.2,2.7,3.9,1.4,1 5.0,2.0,3.5,1.0,1 5.9,3.0,4.2,1.5,1 6.0,2.2,4.0,1.0,1 6.1,2.9,4.7,1.4,1 5.6,2.9,3.6,1.3,1 6.7,3.1,4.4,1.4,1 5.6,3.0,4.5,1.5,1 5.8,2.7,4.1,1.0,1 6.2,2.2,4.5,1.5,1 5.6,2.5,3.9,1.1,1 5.9,3.2,4.8,1.8,1 6.1,2.8,4.0,1.3,1 6.3,2.5,4.9,1.5,1 6.1,2.8,4.7,1.2,1 6.4,2.9,4.3,1.3,1 6.6,3.0,4.4,1.4,1 6.8,2.8,4.8,1.4,1 6.7,3.0,5.0,1.7,1 6.0,2.9,4.5,1.5,1 5.7,2.6,3.5,1.0,1 5.5,2.4,3.8,1.1,1 5.5,2.4,3.7,1.0,1 5.8,2.7,3.9,1.2,1 6.0,2.7,5.1,1.6,1 5.4,3.0,4.5,1.5,1 6.0,3.4,4.5,1.6,1 6.7,3.1,4.7,1.5,1 6.3,2.3,4.4,1.3,1 5.6,3.0,4.1,1.3,1 5.5,2.5,4.0,1.3,1 5.5,2.6,4.4,1.2,1 6.1,3.0,4.6,1.4,1 5.8,2.6,4.0,1.2,1 5.0,2.3,3.3,1.0,1 5.6,2.7,4.2,1.3,1 5.7,3.0,4.2,1.2,1 5.7,2.9,4.2,1.3,1 6.2,2.9,4.3,1.3,1 5.1,2.5,3.0,1.1,1 5.7,2.8,4.1,1.3,1 6.3,3.3,6.0,2.5,2 5.8,2.7,5.1,1.9,2 7.1,3.0,5.9,2.1,2 6.3,2.9,5.6,1.8,2 6.5,3.0,5.8,2.2,2 7.6,3.0,6.6,2.1,2 4.9,2.5,4.5,1.7,2 
7.3,2.9,6.3,1.8,2 6.7,2.5,5.8,1.8,2 7.2,3.6,6.1,2.5,2 6.5,3.2,5.1,2.0,2 6.4,2.7,5.3,1.9,2 6.8,3.0,5.5,2.1,2 5.7,2.5,5.0,2.0,2 5.8,2.8,5.1,2.4,2 6.4,3.2,5.3,2.3,2 6.5,3.0,5.5,1.8,2 7.7,3.8,6.7,2.2,2 7.7,2.6,6.9,2.3,2 6.0,2.2,5.0,1.5,2 6.9,3.2,5.7,2.3,2 5.6,2.8,4.9,2.0,2 7.7,2.8,6.7,2.0,2 6.3,2.7,4.9,1.8,2 6.7,3.3,5.7,2.1,2 7.2,3.2,6.0,1.8,2 6.2,2.8,4.8,1.8,2 6.1,3.0,4.9,1.8,2 6.4,2.8,5.6,2.1,2 7.2,3.0,5.8,1.6,2 7.4,2.8,6.1,1.9,2 7.9,3.8,6.4,2.0,2 6.4,2.8,5.6,2.2,2 6.3,2.8,5.1,1.5,2 6.1,2.6,5.6,1.4,2 7.7,3.0,6.1,2.3,2 6.3,3.4,5.6,2.4,2 6.4,3.1,5.5,1.8,2 6.0,3.0,4.8,1.8,2 6.9,3.1,5.4,2.1,2 6.7,3.1,5.6,2.4,2 6.9,3.1,5.1,2.3,2 5.8,2.7,5.1,1.9,2 6.8,3.2,5.9,2.3,2 6.7,3.3,5.7,2.5,2 6.7,3.0,5.2,2.3,2 6.3,2.5,5.0,1.9,2 6.5,3.0,5.2,2.0,2 6.2,3.4,5.4,2.3,2 5.9,3.0,5.1,1.8,2
0
rapidsai_public_repos/node/modules
rapidsai_public_repos/node/modules/sql/package.json
{ "name": "@rapidsai/sql", "version": "22.12.2", "description": "NVIDIA RAPIDS In-memory GPU accelerated SQL Queries", "license": "Apache-2.0", "main": "index.js", "types": "build/js", "author": "NVIDIA, Inc. (https://nvidia.com/)", "maintainers": [ "Paul Taylor <[email protected]>" ], "homepage": "https://github.com/rapidsai/node/tree/main/modules/sql#readme", "bugs": { "url": "https://github.com/rapidsai/node/issues" }, "repository": { "type": "git", "url": "git+https://github.com/rapidsai/node.git" }, "scripts": { "install": "npx rapidsai-install-native-module blazingsql-algebra.jar blazingsql-algebra-core.jar", "clean": "rimraf build doc compile_commands.json", "doc": "rimraf doc && typedoc --options typedoc.js", "test": "node -r dotenv/config node_modules/.bin/jest -i -c jest.config.js", "build": "yarn tsc:build && yarn cpp:build", "build:debug": "yarn tsc:build && yarn cpp:build:debug", "compile": "yarn tsc:build && yarn cpp:compile", "compile:debug": "yarn tsc:build && yarn cpp:compile:debug", "rebuild": "yarn tsc:build && yarn cpp:rebuild", "rebuild:debug": "yarn tsc:build && yarn cpp:rebuild:debug", "cpp:clean": "npx cmake-js clean -O build/Release", "cpp:clean:debug": "npx cmake-js clean -O build/Debug", "cpp:build": "npx cmake-js build -g -O build/Release", "cpp:build:debug": "npx cmake-js build -g -D -O build/Debug", "cpp:compile": "npx cmake-js compile -g -O build/Release", "postcpp:compile": "npx rapidsai-merge-compile-commands", "cpp:compile:debug": "npx cmake-js compile -g -D -O build/Debug", "postcpp:compile:debug": "npx rapidsai-merge-compile-commands", "cpp:configure": "npx cmake-js configure -g -O build/Release", "postcpp:configure": "npx rapidsai-merge-compile-commands", "cpp:configure:debug": "npx cmake-js configure -g -D -O build/Debug", "postcpp:configure:debug": "npx rapidsai-merge-compile-commands", "cpp:rebuild": "npx cmake-js rebuild -g -O build/Release", "postcpp:rebuild": "npx rapidsai-merge-compile-commands", "cpp:rebuild:debug": "npx 
cmake-js rebuild -g -D -O build/Debug", "postcpp:rebuild:debug": "npx rapidsai-merge-compile-commands", "cpp:reconfigure": "npx cmake-js reconfigure -g -O build/Release", "postcpp:reconfigure": "npx rapidsai-merge-compile-commands", "cpp:reconfigure:debug": "npx cmake-js reconfigure -g -D -O build/Debug", "postcpp:reconfigure:debug": "npx rapidsai-merge-compile-commands", "tsc:clean": "rimraf build/js", "tsc:build": "yarn tsc:clean && tsc -p ./tsconfig.json", "tsc:watch": "yarn tsc:clean && tsc -p ./tsconfig.json -w", "dev:cpack:enabled": "echo $npm_package_name" }, "dependencies": { "@rapidsai/cudf": "~22.12.2", "@types/java": "0.9.1", "java": "0.12.2", "nanoid": "3.1.31" }, "files": [ "LICENSE", "README.md", "index.js", "package.json", "CMakeLists.txt", "build/js" ] }
0
rapidsai_public_repos/node/modules
rapidsai_public_repos/node/modules/sql/index.js
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. module.exports = require('./build/js/index');
0
rapidsai_public_repos/node/modules
rapidsai_public_repos/node/modules/sql/jest.config.js
// Copyright (c) 2021-2022, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. try { require('dotenv').config(); } catch (e) {} module.exports = { 'verbose': true, 'testEnvironment': 'node', 'maxWorkers': 1, 'globals': {'ts-jest': {'diagnostics': false, 'tsconfig': 'test/tsconfig.json'}}, 'rootDir': './', 'roots': ['<rootDir>/test/'], 'moduleFileExtensions': ['js', 'ts', 'tsx'], 'coverageReporters': ['lcov'], 'coveragePathIgnorePatterns': ['test\\/.*\\.(ts|tsx|js)$', '/node_modules/'], 'transform': {'^.+\\.jsx?$': 'ts-jest', '^.+\\.tsx?$': 'ts-jest'}, 'transformIgnorePatterns': ['/build/(js|Debug|Release)/*$', '/node_modules/(?!web-stream-tools).+\\.js$'], 'testRegex': '(.*(-|\\.)(test|spec)s?)\\.(ts|tsx|js)$', 'preset': 'ts-jest', 'testMatch': null, 'moduleNameMapper': { '^@rapidsai\/sql(.*)': '<rootDir>/src/$1', '^\.\.\/(Debug|Release)\/(rapidsai_sql.node)$': '<rootDir>/build/$1/$2', } };
0
rapidsai_public_repos/node/modules
rapidsai_public_repos/node/modules/sql/CMakeLists.txt
#============================================================================= # Copyright (c) 2021-2022, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #============================================================================= cmake_minimum_required(VERSION 3.24.1 FATAL_ERROR) set(CMAKE_EXPORT_COMPILE_COMMANDS ON) unset(CMAKE_LIBRARY_OUTPUT_DIRECTORY) unset(CMAKE_LIBRARY_OUTPUT_DIRECTORY CACHE) option(NODE_RAPIDS_USE_SCCACHE "Enable caching compilation results with sccache" ON) ################################################################################################### # - cmake modules --------------------------------------------------------------------------------- execute_process(COMMAND node -p "require('@rapidsai/core').cmake_modules_path" WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" OUTPUT_VARIABLE NODE_RAPIDS_CMAKE_MODULES_PATH OUTPUT_STRIP_TRAILING_WHITESPACE) include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/cmake_policies.cmake") project(rapidsai_sql VERSION $ENV{npm_package_version} LANGUAGES C CXX) execute_process(COMMAND node -p "require('path').dirname(require.resolve('@rapidsai/core'))" WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" OUTPUT_VARIABLE NODE_RAPIDS_CORE_MODULE_PATH OUTPUT_STRIP_TRAILING_WHITESPACE) execute_process(COMMAND node -p "require('path').dirname(require.resolve('@rapidsai/cuda'))" WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" OUTPUT_VARIABLE NODE_RAPIDS_CUDA_MODULE_PATH OUTPUT_STRIP_TRAILING_WHITESPACE) 
execute_process(COMMAND node -p "require('path').dirname(require.resolve('@rapidsai/rmm'))" WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" OUTPUT_VARIABLE NODE_RAPIDS_RMM_MODULE_PATH OUTPUT_STRIP_TRAILING_WHITESPACE) execute_process(COMMAND node -p "require('path').dirname(require.resolve('@rapidsai/cudf'))" WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" OUTPUT_VARIABLE NODE_RAPIDS_CUDF_MODULE_PATH OUTPUT_STRIP_TRAILING_WHITESPACE) include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureCXX.cmake") include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureCUDA.cmake") include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureNapi.cmake") include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/ConfigureBlazingSQL.cmake") include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/install_utils.cmake") ################################################################################################### # - blazingsql_wrapper target --------------------------------------------------------------------- set(BLAZINGSQL_WRAPPER_CMAKE_C_FLAGS ${NODE_RAPIDS_CMAKE_C_FLAGS}) set(BLAZINGSQL_WRAPPER_CMAKE_CXX_FLAGS ${NODE_RAPIDS_CMAKE_CXX_FLAGS}) set(BLAZINGSQL_WRAPPER_CMAKE_CUDA_FLAGS ${NODE_RAPIDS_CMAKE_CUDA_FLAGS}) # Silence warnings list(APPEND BLAZINGSQL_WRAPPER_CMAKE_C_FLAGS -w) list(APPEND BLAZINGSQL_WRAPPER_CMAKE_CXX_FLAGS -w) list(APPEND BLAZINGSQL_WRAPPER_CMAKE_CUDA_FLAGS -w) # Remove -Werror from flags because blazingsql headers are messy list(REMOVE_ITEM BLAZINGSQL_WRAPPER_CMAKE_C_FLAGS -Werror) list(REMOVE_ITEM BLAZINGSQL_WRAPPER_CMAKE_CXX_FLAGS -Werror) list(REMOVE_ITEM BLAZINGSQL_WRAPPER_CMAKE_CUDA_FLAGS -Werror) file(GLOB_RECURSE BLAZINGSQL_WRAPPER_SRC_FILES "${CMAKE_CURRENT_SOURCE_DIR}/blazingsql/*.cpp") add_library(blazingsql_wrapper STATIC ${BLAZINGSQL_WRAPPER_SRC_FILES}) set_target_properties(blazingsql_wrapper PROPERTIES PREFIX "" BUILD_RPATH "\$ORIGIN" INSTALL_RPATH "\$ORIGIN" CXX_STANDARD 17 CXX_STANDARD_REQUIRED ON CUDA_STANDARD 17 CUDA_STANDARD_REQUIRED ON NO_SYSTEM_FROM_IMPORTED ON 
POSITION_INDEPENDENT_CODE ON INTERFACE_POSITION_INDEPENDENT_CODE ON ) target_compile_options(blazingsql_wrapper PRIVATE "$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:C>:${BLAZINGSQL_WRAPPER_CMAKE_C_FLAGS}>>" "$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:CXX>:${BLAZINGSQL_WRAPPER_CMAKE_CXX_FLAGS}>>" "$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:CUDA>:${BLAZINGSQL_WRAPPER_CMAKE_CUDA_FLAGS}>>" ) target_include_directories(blazingsql_wrapper PUBLIC "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/blazingsql>" "$<BUILD_INTERFACE:${NODE_RAPIDS_CUDF_MODULE_PATH}/src>" "$<BUILD_INTERFACE:${NODE_RAPIDS_RMM_MODULE_PATH}/src>" "$<BUILD_INTERFACE:${NODE_RAPIDS_CUDA_MODULE_PATH}/src>" "$<BUILD_INTERFACE:${RAPIDS_CORE_INCLUDE_DIR}>" "$<BUILD_INTERFACE:${NAPI_INCLUDE_DIRS}>" ) target_link_libraries(blazingsql_wrapper PRIVATE rmm::rmm cudf::cudf blazingdb::blazingsql-engine PUBLIC "${NODE_RAPIDS_CUDF_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_cudf.node" "${NODE_RAPIDS_RMM_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_rmm.node" "${NODE_RAPIDS_CUDA_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_cuda.node" "${NODE_RAPIDS_CORE_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_core.node" ) ################################################################################################### # - rapidsai_sql target --------------------------------------------------------------------------- file(GLOB_RECURSE NODE_BLAZINGSQL_SRC_FILES "${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp") add_library(${PROJECT_NAME} SHARED ${NODE_BLAZINGSQL_SRC_FILES} ${CMAKE_JS_SRC}) set_target_properties(${PROJECT_NAME} PROPERTIES PREFIX "" SUFFIX ".node" BUILD_RPATH "\$ORIGIN" INSTALL_RPATH "\$ORIGIN" CXX_STANDARD 17 CXX_STANDARD_REQUIRED ON CUDA_STANDARD 17 CUDA_STANDARD_REQUIRED ON NO_SYSTEM_FROM_IMPORTED ON POSITION_INDEPENDENT_CODE ON INTERFACE_POSITION_INDEPENDENT_CODE ON ) target_compile_options(${PROJECT_NAME} PRIVATE "$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:C>:${NODE_RAPIDS_CMAKE_C_FLAGS}>>" 
"$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:CXX>:${NODE_RAPIDS_CMAKE_CXX_FLAGS}>>" "$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:CUDA>:${NODE_RAPIDS_CMAKE_CUDA_FLAGS}>>" ) target_compile_definitions(${PROJECT_NAME} PUBLIC "$<$<COMPILE_LANGUAGE:CXX>:CUDA_API_PER_THREAD_DEFAULT_STREAM>" "$<$<COMPILE_LANGUAGE:CUDA>:CUDA_API_PER_THREAD_DEFAULT_STREAM>" ) target_include_directories(${PROJECT_NAME} PUBLIC "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src>" "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/blazingsql>" "$<BUILD_INTERFACE:${NODE_RAPIDS_CUDF_MODULE_PATH}/src>" "$<BUILD_INTERFACE:${NODE_RAPIDS_RMM_MODULE_PATH}/src>" "$<BUILD_INTERFACE:${NODE_RAPIDS_CUDA_MODULE_PATH}/src>" "$<BUILD_INTERFACE:${RAPIDS_CORE_INCLUDE_DIR}>" "$<BUILD_INTERFACE:${NAPI_INCLUDE_DIRS}>" ) target_link_libraries(${PROJECT_NAME} PUBLIC ${CMAKE_JS_LIB} blazingsql_wrapper ucp "${NODE_RAPIDS_CUDF_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_cudf.node" "${NODE_RAPIDS_RMM_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_rmm.node" "${NODE_RAPIDS_CUDA_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_cuda.node" "${NODE_RAPIDS_CORE_MODULE_PATH}/build/${CMAKE_BUILD_TYPE}/rapidsai_core.node") include("${NODE_RAPIDS_CMAKE_MODULES_PATH}/cuda_arch_helpers.cmake") generate_arch_specific_custom_targets( NAME ${PROJECT_NAME} DEPENDENCIES "cudf::cudf" "blazingdb::blazingsql-io" "blazingdb::blazingsql-engine" ) generate_install_rules( NAME ${PROJECT_NAME} GLOBAL_TARGETS ${PROJECT_NAME} blazingsql_wrapper CUDA_ARCHITECTURES ${CMAKE_CUDA_ARCHITECTURES}) install(FILES "${CMAKE_CURRENT_BINARY_DIR}/blazingsql-algebra.jar" "${CMAKE_CURRENT_BINARY_DIR}/blazingsql-algebra-core.jar" DESTINATION "lib") # Create a symlink to compile_commands.json for the llvm-vs-code-extensions.vscode-clangd plugin execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json)
0
rapidsai_public_repos/node/modules
rapidsai_public_repos/node/modules/sql/README.md
# <div align="left"><img src="https://rapids.ai/assets/images/rapids_logo.png" width="90px"/>&nbsp; node-rapids GPU accelerated SQL engine</div> ### Installation `npm install @rapidsai/sql` ### About These js bindings allow for GPU accelerated SQL queries. For example, the following snippet creates a DataFrame, then uses our SQL engine to select and query a DataFrame using the `SQLContext` module. ```javascript var { Series, DataFrame, Int32 } = require("@rapidsai/cudf"); var { SQLContext } = require("@rapidsai/sql"); var a = Series.new({type: new Int32(), data: [1, 2, 3]}); var b = Series.new({type: new Int32(), data: [4, 5, 6]}); var df = new DataFrame({'a': a, 'b': b}); var sqlContext = new SQLContext(); sqlContext.createDataFrameTable('test_table', df); await sqlContext.sql('SELECT a FROM test_table').result(); // [1, 2, 3] ``` We have also provided the `SQLCluster` module which allows one to run SQL queries on multiple GPUs. ```javascript var { Series, DataFrame } = require("@rapidsai/cudf"); var { SQLCluster } = require("@rapidsai/sql"); var a = Series.new(['foo', 'bar']); var df = new DataFrame({'a': a}); var sqlCluster = await SQLCluster.init({numWorkers: 2}); await sqlCluster.createDataFrameTable('test_table', df); await sqlCluster.sql('SELECT a FROM test_table WHERE a LIKE \'%foo%\''); // ['foo'] ``` For detailed SQL API, [follow our API Documentation](https://rapidsai.github.io/node/modules/sql_src.html).
0
rapidsai_public_repos/node/modules
rapidsai_public_repos/node/modules/sql/tsconfig.json
{ "include": ["src"], "exclude": ["node_modules"], "compilerOptions": { "baseUrl": "./", "paths": { "@rapidsai/sql": ["src/index"], "@rapidsai/sql/*": ["src/*"] }, "target": "ESNEXT", "module": "commonjs", "outDir": "./build/js", /* Decorators */ "experimentalDecorators": false, /* Basic stuff */ "moduleResolution": "node", "skipLibCheck": true, "skipDefaultLibCheck": true, "lib": ["dom", "esnext", "esnext.asynciterable"], /* Control what is emitted */ "declaration": true, "declarationMap": true, "noEmitOnError": true, "removeComments": false, "downlevelIteration": true, /* Create inline sourcemaps with sources */ "sourceMap": false, "inlineSources": true, "inlineSourceMap": true, /* The most restrictive settings possible */ "strict": true, "importHelpers": true, "noEmitHelpers": true, "noImplicitAny": true, "noUnusedLocals": true, "noImplicitReturns": true, "allowUnusedLabels": false, "noUnusedParameters": true, "allowUnreachableCode": false, "noFallthroughCasesInSwitch": true, "forceConsistentCasingInFileNames": true } }
0
rapidsai_public_repos/node/modules
rapidsai_public_repos/node/modules/sql/typedoc.js
module.exports = { entryPoints: ['src/index.ts'], out: 'doc', name: '@rapidsai/sql', tsconfig: 'tsconfig.json', excludePrivate: true, excludeProtected: true, excludeExternals: true, };
0
rapidsai_public_repos/node/modules/sql
rapidsai_public_repos/node/modules/sql/.vscode/launch.json
{ // Use IntelliSense to learn about possible attributes. // Hover to view descriptions of existing attributes. // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 "version": "0.2.0", "compounds": [ { "name": "Debug Tests (TS and C++)", "configurations": [ "Debug Tests (launch gdb)", // "Debug Tests (launch lldb)", "Debug Tests (attach node)", ] } ], "configurations": [ { "name": "Debug Tests (TS only)", "type": "node", "request": "launch", "cwd": "${workspaceFolder}", "console": "integratedTerminal", "internalConsoleOptions": "neverOpen", "program": "${workspaceFolder}/node_modules/.bin/jest", "skipFiles": [ "<node_internals>/**", "${workspaceFolder}/node_modules/**" ], "env": { "NODE_NO_WARNINGS": "1", "NODE_ENV": "production", "READABLE_STREAM": "disable", }, "args": [ "--verbose", "--runInBand", "-c", "jest.config.js", "${input:TEST_FILE}" ] }, // { // "name": "Debug Tests (launch lldb)", // // hide the individual configurations from the debug dropdown list // "presentation": { "hidden": true }, // "type": "lldb", // "request": "launch", // "stdio": null, // "cwd": "${workspaceFolder}", // "preLaunchTask": "cpp:ensure:debug:build", // "env": { // "NODE_DEBUG": "1", // "NODE_NO_WARNINGS": "1", // "NODE_ENV": "production", // "READABLE_STREAM": "disable", // }, // "stopOnEntry": false, // "terminal": "console", // "program": "${input:NODE_BINARY}", // "initCommands": [ // "settings set target.disable-aslr false", // ], // "sourceLanguages": ["cpp", "cuda", "javascript"], // "args": [ // "--inspect=9229", // "--expose-internals", // "${workspaceFolder}/node_modules/.bin/jest", // "--verbose", // "--runInBand", // "-c", // "jest.config.js", // "${input:TEST_FILE}" // ], // }, { "name": "Debug Tests (launch gdb)", // hide the individual configurations from the debug dropdown list "presentation": { "hidden": true }, "type": "cppdbg", "request": "launch", "stopAtEntry": false, "externalConsole": false, "cwd": "${workspaceFolder}", "envFile": 
"${workspaceFolder}/.env", "MIMode": "gdb", "miDebuggerPath": "/usr/bin/gdb", "setupCommands": [ { "description": "Enable pretty-printing for gdb", "text": "-enable-pretty-printing", "ignoreFailures": true } ], "program": "${input:NODE_BINARY}", "environment": [ { "name": "NODE_DEBUG", "value": "1" }, { "name": "NODE_NO_WARNINGS", "value": "1" }, { "name": "NODE_ENV", "value": "production" }, { "name": "READABLE_STREAM", "value": "disable" }, ], "args": [ "--inspect=9229", "--expose-internals", "${workspaceFolder}/node_modules/.bin/jest", "--verbose", "--runInBand", "-c", "jest.config.js", "${input:TEST_FILE}" ], }, { "name": "Debug Tests (attach node)", "type": "node", "request": "attach", // hide the individual configurations from the debug dropdown list "presentation": { "hidden": true }, "port": 9229, "timeout": 60000, "cwd": "${workspaceFolder}", "skipFiles": [ "<node_internals>/**", "${workspaceFolder}/node_modules/**" ], }, ], "inputs": [ { "type": "command", "id": "NODE_BINARY", "command": "shellCommand.execute", "args": { "description": "path to node", "command": "which node", "useFirstResult": true, } }, { "type": "command", "id": "TEST_FILE", "command": "shellCommand.execute", "args": { "cwd": "${workspaceFolder}/modules/sql", "description": "Select a file to debug", "command": "./node_modules/.bin/jest --listTests | sed -r \"s@$PWD/test/@@g\"", } }, ], }
0
rapidsai_public_repos/node/modules/sql
rapidsai_public_repos/node/modules/sql/blazingsql/graph.cpp
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "blazingsql_wrapper/graph.hpp" #include "blazingsql_wrapper/api.hpp" #include "blazingsql_wrapper/async.hpp" #include <node_cudf/table.hpp> #include <cudf/concatenate.hpp> #include <cudf/table/table.hpp> #include <cudf/table/table_view.hpp> #include <nv_node/utilities/args.hpp> #include <execution_graph/graph.h> namespace nv { namespace blazingsql { Napi::Function ExecutionGraph::Init(Napi::Env const& env, Napi::Object exports) { return DefineClass(env, "ExecutionGraph", { InstanceMethod<&ExecutionGraph::send>("sendTo"), InstanceMethod<&ExecutionGraph::start>("start"), InstanceMethod<&ExecutionGraph::result>("result"), }); } ExecutionGraph::wrapper_t ExecutionGraph::New(Napi::Env const& env, std::shared_ptr<ral::cache::graph> const& graph, nv::Wrapper<Context> const& context) { auto inst = EnvLocalObjectWrap<ExecutionGraph>::New(env, {}); inst->_graph = graph; inst->_context = Napi::Persistent(context); if (context->get_node_id() == -1) { context->set_node_id(graph->get_last_kernel()->input_cache()->get_context()->getNodeIndex( ral::communication::CommunicationData::getInstance().getSelfNode())); } return inst; } ExecutionGraph::ExecutionGraph(Napi::CallbackInfo const& info) : EnvLocalObjectWrap<ExecutionGraph>(info) {} void ExecutionGraph::start(Napi::CallbackInfo const& info) { if (!_started) { start_execute_graph(_graph); _started = true; } } Napi::Value 
ExecutionGraph::result(Napi::CallbackInfo const& info) { auto env = info.Env(); start(info); if (_fetched == false) { _fetched = true; auto task = new SQLTask(env, [this]() { auto [names, tables] = std::move(get_execute_graph_result(_graph)); return std::make_pair(std::move(names), std::move(tables)); }); _results = Napi::Persistent(task->run()); } return _results.Value(); } Napi::Value ExecutionGraph::send(Napi::CallbackInfo const& info) { auto env = info.Env(); CallbackArgs args{info}; int32_t dst_ral_id = args[0]; Napi::Array data_frames = args[1]; int32_t nonce = args[2]; auto messages = Napi::Array::New(env, data_frames.Length()); for (int i = 0; i < data_frames.Length(); ++i) { NapiToCPP::Object df = data_frames.Get(i); std::vector<std::string> names = df.Get("names"); Napi::Function asTable = df.Get("asTable"); Table::wrapper_t table = asTable.Call(df.val, {}).ToObject(); auto ctx_token = std::to_string(_graph->get_last_kernel()->input_cache()->get_context()->getContextToken()); std::string message = "broadcast_table_message_" + std::to_string(nonce) + "_" + std::to_string(i); messages[i] = message; _context.Value()->send(dst_ral_id, ctx_token, message, names, *table); } return messages; } } // namespace blazingsql } // namespace nv
0
rapidsai_public_repos/node/modules/sql
rapidsai_public_repos/node/modules/sql/blazingsql/ucpcontext.cpp
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "blazingsql_wrapper/ucpcontext.hpp" #include <nv_node/utilities/args.hpp> #include <communication/ucx_init.h> namespace nv { namespace blazingsql { Napi::Function UcpContext::Init(Napi::Env const& env, Napi::Object exports) { return DefineClass(env, "UcpContext", {}); } UcpContext::wrapper_t UcpContext::New(Napi::Env const& env) { return EnvLocalObjectWrap<UcpContext>::New(env, {}); } UcpContext::UcpContext(Napi::CallbackInfo const& info) : EnvLocalObjectWrap<UcpContext>(info) { this->_ucp_context = ral::communication::CreateUcpContext(); } } // namespace blazingsql } // namespace nv
0
rapidsai_public_repos/node/modules/sql
rapidsai_public_repos/node/modules/sql/blazingsql/cache.cpp
// Copyright (c) 2021-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "blazingsql_wrapper/cache.hpp"
#include "blazingsql_wrapper/async.hpp"

#include <nv_node/utilities/args.hpp>

#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>

#include <cache_machine/CacheMachine.h>

namespace nv {
namespace blazingsql {

// No instance methods are exposed to JavaScript; CacheMachine is used from
// C++ (Context/ExecutionGraph) only.
Napi::Function CacheMachine::Init(Napi::Env const& env, Napi::Object exports) {
  return DefineClass(env, "CacheMachine", {});
}

// Wrap an engine-owned ral::cache::CacheMachine in a JS-visible handle.
CacheMachine::wrapper_t CacheMachine::New(Napi::Env const& env,
                                          std::shared_ptr<ral::cache::CacheMachine> const& cache) {
  auto inst    = EnvLocalObjectWrap<CacheMachine>::New(env, {});
  inst->_cache = cache;
  return inst;
}

// Deep-copies `table_view` into an owned BlazingTable, attaches the routing
// metadata the transport layer expects, and inserts it under `message_id`.
void CacheMachine::add_to_cache(int32_t const& node_id,
                                int32_t const& src_ral_id,
                                int32_t const& dst_ral_id,
                                std::string const& ctx_token,
                                std::string const& message_id,
                                std::vector<std::string> const& column_names,
                                cudf::table_view const& table_view) {
  std::unique_ptr<ral::frame::BlazingTable> table =
    std::make_unique<ral::frame::BlazingTable>(table_view, column_names);
  // The view may alias caller-owned memory; take ownership before caching.
  table->ensureOwnership();

  ral::cache::MetadataDictionary metadata;
  metadata.add_value(ral::cache::RAL_ID_METADATA_LABEL, node_id);
  metadata.add_value(ral::cache::KERNEL_ID_METADATA_LABEL, std::to_string(0));  // unused, kept as 0
  metadata.add_value(ral::cache::QUERY_ID_METADATA_LABEL, ctx_token);
  metadata.add_value(ral::cache::ADD_TO_SPECIFIC_CACHE_METADATA_LABEL, "false");
  metadata.add_value(ral::cache::CACHE_ID_METADATA_LABEL, 0);  // unused, kept as 0
  metadata.add_value(ral::cache::SENDER_WORKER_ID_METADATA_LABEL, std::to_string(src_ral_id));
  metadata.add_value(ral::cache::WORKER_IDS_METADATA_LABEL, std::to_string(dst_ral_id));
  metadata.add_value(ral::cache::MESSAGE_ID, message_id);

  this->_cache->addToCache(std::move(table), message_id, true, metadata, true);
}

// Builds (but does not queue) an async task that pulls `message_id` from the
// cache and decaches it into a cudf::table. The caller runs the task.
SQLTask* CacheMachine::pull_from_cache(std::string const& message_id) {
  return new SQLTask(Env(), [this, message_id]() {
    // NOTE(review): runs on the AsyncWorker thread; pullCacheData presumably
    // blocks until a message with this id is available — confirm in engine.
    auto result   = std::move(this->_cache->pullCacheData(message_id));
    auto names    = std::move(result->names());
    auto decached = std::move(result->decache());
    auto table    = std::move(decached->releaseCudfTable());
    std::vector<std::unique_ptr<cudf::table>> tables;
    tables.reserve(1);
    tables.push_back(std::move(table));
    // Shape matches SQLTask's (names, tables) result contract.
    return std::make_pair(std::move(names), std::move(tables));
  });
}

CacheMachine::CacheMachine(Napi::CallbackInfo const& info) : EnvLocalObjectWrap<CacheMachine>(info) {}

}  // namespace blazingsql
}  // namespace nv
0
rapidsai_public_repos/node/modules/sql
rapidsai_public_repos/node/modules/sql/blazingsql/async.cpp
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "blazingsql_wrapper/async.hpp" #include <node_cudf/table.hpp> namespace nv { namespace blazingsql { SQLTask::SQLTask(Napi::Env const& env, SQLTaskCallback const& work) : AsyncWorker(env), work_(work), deferred_(Napi::Promise::Deferred::New(env)) {} Napi::Promise SQLTask::run() { if (!queued_ && (queued_ = true)) { Queue(); } return deferred_.Promise(); } void SQLTask::Execute() { std::tie(names_, tables_) = work_(); } std::vector<napi_value> SQLTask::GetResult(Napi::Env env) { size_t i{0}; auto names = Napi::Array::New(env, names_.size()); std::for_each(names_.begin(), names_.end(), [&](auto const& name) mutable { // names[i++] = name; }); auto tables = Napi::Array::New(env, tables_.size()); for (size_t i = 0; i < tables_.size(); ++i) { tables[i] = Table::New(env, std::move(tables_[i])); } return {names, tables}; } void SQLTask::OnOK() { auto res = GetResult(Env()); auto obj = Napi::Object::New(Env()); obj["names"] = res[0]; obj["tables"] = res[1]; deferred_.Resolve(obj); } void SQLTask::OnError(Napi::Error const& err) { deferred_.Reject(err.Value()); } } // namespace blazingsql } // namespace nv
0
rapidsai_public_repos/node/modules/sql
rapidsai_public_repos/node/modules/sql/blazingsql/api.cpp
// Copyright (c) 2021-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "blazingsql_wrapper/api.hpp"
#include "blazingsql_wrapper/ucpcontext.hpp"

#include <node_cudf/utilities/dtypes.hpp>

#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>

#include <engine/engine.h>
#include <engine/initialize.h>

#include <algorithm>
#include <cstdint>

namespace nv {
namespace blazingsql {

// Initialize the BlazingSQL engine for this process from a JS options object.
// Returns (id, port, worker_ids, ucp_context, transport_in, transport_out).
std::tuple<uint16_t,
           int32_t,
           std::vector<int32_t>,
           UcpContext::wrapper_t,
           std::shared_ptr<ral::cache::CacheMachine>,
           std::shared_ptr<ral::cache::CacheMachine>>
initialize(Napi::Env const& env, NapiToCPP::Object const& props) {
  // Copy `configOptions` into a string map; the engine expects Python-style
  // capitalized booleans, so "true"/"false" are rewritten to "True"/"False".
  auto config_options = [&] {
    std::map<std::string, std::string> config{};
    auto prop = props.Get("configOptions");
    if (!prop.IsNull() && prop.IsObject()) {
      auto opts = prop.As<Napi::Object>();
      auto keys = opts.GetPropertyNames();
      for (auto i = 0u; i < keys.Length(); ++i) {
        Napi::HandleScope scope(env);
        std::string name = keys.Get(i).ToString();
        config[name]     = opts.Get(name).ToString();
        if (config[name] == "true") {
          config[name] = "True";
        } else if (config[name] == "false") {
          config[name] = "False";
        }
      }
    }
    return config;
  }();

  // Multi-node setup: gather UCX metadata for each worker, if provided.
  std::vector<int32_t> worker_ids{};
  UcpContext::wrapper_t ucp_context{};
  std::vector<NodeMetaDataUCP> ucp_metadata{};
  if (UcpContext::IsInstance(props.Get("ucpContext"))) {
    ucp_context = props.Get("ucpContext").ToObject();
    if (props.Get("workersUcpInfo").IsArray()) {
      auto list = props.Get("workersUcpInfo").As<Napi::Array>();
      worker_ids.reserve(list.Length());
      ucp_metadata.reserve(list.Length());
      for (size_t i = 0; i < list.Length(); ++i) {
        NapiToCPP::Object worker = list.Get(i);
        worker_ids.push_back(worker.Get("id"));
        ucp_metadata.push_back({
          worker.Get("id").ToString(),    // std::string worker_id
          worker.Get("ip").ToString(),    // std::string ip
          0,                              // std::uintptr_t ep_handle
          0,                              // std::uintptr_t worker_handle
          *ucp_context,                   // std::uintptr_t context_handle
          worker.Get("port").ToNumber(),  // int32_t port
        });
      }
    }
  }

  uint16_t id      = props.Get("id");
  // No worker metadata means single-node mode; this process is its own worker.
  bool single_node = ucp_metadata.size() == 0;
  if (single_node) { worker_ids.push_back(id); }

  auto init_result = std::move(::initialize(id,
                                            std::to_string(id),
                                            props.Get("networkIfaceName"),
                                            props.Get("port"),
                                            ucp_metadata,
                                            single_node,
                                            config_options,
                                            props.Get("allocationMode"),
                                            props.Get("initialPoolSize"),
                                            props.Get("maximumPoolSize"),
                                            props.Get("enableLogging")));
  auto& caches        = init_result.first;
  auto& port          = init_result.second;
  // Note the swap: the engine returns (out, in); we expose (in, out).
  auto& transport_in  = caches.second;
  auto& transport_out = caches.first;
  return std::make_tuple(id,
                         port,
                         std::move(worker_ids),
                         std::move(ucp_context),
                         std::move(transport_in),
                         std::move(transport_out));
}

// Ask Calcite for the table names and relational-algebra steps of a plan.
std::tuple<std::vector<std::string>, std::vector<std::string>> get_table_scan_info(
  std::string const& logical_plan) {
  auto table_scan_info = ::getTableScanInfo(logical_plan);
  return std::make_tuple(std::move(table_scan_info.table_names),
                         std::move(table_scan_info.relational_algebra_steps));
}

// Build (but do not start) an execution graph for `query`, combining in-memory
// cudf tables (`table_views`) and file-backed tables (`schemas`).
ExecutionGraph::wrapper_t run_generate_graph(
  Napi::Env const& env,
  Wrapper<Context> const& context,
  uint32_t const& masterIndex,
  std::vector<std::string> const& worker_ids,
  std::vector<cudf::table_view> const& table_views,
  Napi::Array const& schemas,
  std::vector<std::vector<std::string>> const& column_names,
  std::vector<std::string> const& table_names,
  std::vector<std::string> const& table_scans,
  int32_t const& ctx_token,
  std::string const& query,
  std::string const& sql,
  std::string const& current_timestamp,
  std::map<std::string, std::string> const& config_options) {
  std::vector<TableSchema> table_schemas;
  std::vector<std::vector<std::string>> table_schema_cpp_arg_keys;
  std::vector<std::vector<std::string>> table_schema_cpp_arg_values;
  std::vector<std::vector<std::string>> files_all;
  std::vector<int> file_types;
  std::vector<std::vector<std::map<std::string, std::string>>> uri_values;

  table_schemas.reserve(table_views.size() + schemas.Length());
  table_schema_cpp_arg_keys.reserve(table_views.size());
  table_schema_cpp_arg_values.reserve(table_views.size());
  files_all.reserve(table_views.size());
  file_types.reserve(table_views.size());
  uri_values.reserve(table_views.size());

  // First: in-memory cudf tables. The aggregate initializers below are
  // positional; the trailing comments name each TableSchema field.
  for (std::size_t i = 0; i < table_views.size(); ++i) {
    auto table = table_views[i];
    auto names = column_names[i];
    std::vector<cudf::type_id> type_ids;
    type_ids.reserve(table.num_columns());
    for (auto const& col : table) { type_ids.push_back(col.type().id()); }
    table_schemas.push_back({
      {{table, names}},          // std::vector<ral::frame::BlazingTableView> blazingTableViews
      type_ids,                  // std::vector<cudf::type_id> types
      {},                        // std::vector<std::string> files
      {},                        // std::vector<std::string> datasource
      names,                     // std::vector<std::string> names
      {},                        // std::vector<size_t> calcite_to_file_indices
      {},                        // std::vector<bool> in_file
      ral::io::DataType::CUDF,   // int data_type
      false,                     // bool has_header_csv = false
      {cudf::table_view{}, {}},  // ral::frame::BlazingTableView metadata
      {{0}},                     // std::vector<std::vector<int>> row_groups_ids
      nullptr                    // std::shared_ptr<arrow::Table> arrow_tabl
    });
    table_schema_cpp_arg_keys.push_back({});
    table_schema_cpp_arg_values.push_back({});
    files_all.push_back({});
    file_types.push_back(ral::io::DataType::CUDF);
    uri_values.push_back({});
  }

  // Second: file-backed tables described by JS schema objects (from parse_schema).
  for (std::size_t i = 0; i < schemas.Length(); ++i) {
    NapiToCPP::Object schema                      = schemas.Get(i);
    std::vector<std::string> names                = schema.Get("names");
    std::vector<std::string> files                = schema.Get("files");
    std::vector<size_t> calcite_to_file_indicies  = schema.Get("calciteToFileIndicies");
    int file_type_int                             = schema.Get("fileType");
    ral::io::DataType file_type                   = ral::io::DataType(file_type_int);
    std::vector<int32_t> type_ints                = schema.Get("types");
    std::vector<cudf::type_id> type_ids;
    type_ids.reserve(type_ints.size());
    for (auto const& type : type_ints) { type_ids.push_back(cudf::type_id(type)); }
    bool has_header_csv = schema.Get("hasHeaderCSV");
    table_schemas.push_back({
      {},                        // std::vector<ral::frame::BlazingTableView> blazingTableViews
      type_ids,                  // std::vector<cudf::type_id> types
      files,                     // std::vector<std::string> files
      files,                     // std::vector<std::string> datasource
      names,                     // std::vector<std::string> names
      calcite_to_file_indicies,  // std::vector<size_t> calcite_to_file_indices
      {},                        // std::vector<bool> in_file
      file_type,                 // int data_type
      has_header_csv,            // bool has_header_csv = false
      {cudf::table_view{}, {}},  // ral::frame::BlazingTableView metadata
      {{0}},                     // std::vector<std::vector<int>> row_groups_ids
      nullptr                    // std::shared_ptr<arrow::Table> arrow_tabl
    });
    table_schema_cpp_arg_keys.push_back({"has_header_csv"});
    table_schema_cpp_arg_values.push_back({has_header_csv ? "True" : "False"});
    files_all.push_back(files);
    file_types.push_back(file_type);
    uri_values.push_back({});
  }

  auto result = ::runGenerateGraph(masterIndex,
                                   worker_ids,
                                   table_names,
                                   table_scans,
                                   table_schemas,
                                   table_schema_cpp_arg_keys,
                                   table_schema_cpp_arg_values,
                                   files_all,
                                   file_types,
                                   ctx_token,
                                   query,
                                   uri_values,
                                   config_options,
                                   sql,
                                   current_timestamp);

  return ExecutionGraph::New(env, result, context);
}

// Generate the physical plan string for `query` without building a graph.
std::string run_generate_physical_graph(uint32_t const& masterIndex,
                                        std::vector<std::string> const& worker_ids,
                                        int32_t const& ctx_token,
                                        std::string const& query) {
  return ::runGeneratePhysicalGraph(masterIndex, worker_ids, ctx_token, query);
}

// Probe `input` files of `file_format` and return a JS schema object with the
// shape consumed by run_generate_graph's `schemas` argument.
Napi::Value parse_schema(Napi::Env const& env,
                         std::vector<std::string> const& input,
                         std::string const& file_format,
                         bool const& ignoreMissingFiles) {
  auto table_schema = ::parseSchema(input, file_format, {}, {}, {}, ignoreMissingFiles);

  auto result = Napi::Object::New(env);

  auto files = Napi::Array::New(env, table_schema.files.size());
  for (size_t i = 0; i < table_schema.files.size(); ++i) {
    files.Set(i, Napi::String::New(env, table_schema.files[i]));
  }
  result.Set("files", files);

  result.Set("fileType", table_schema.data_type);

  // cudf type ids are surfaced to JS as Arrow DataTypes.
  auto types = Napi::Array::New(env, table_schema.types.size());
  for (size_t i = 0; i < table_schema.types.size(); ++i) {
    types.Set(i, cudf_to_arrow_type(env, cudf::data_type(table_schema.types[i])));
  }
  result.Set("types", types);

  auto names = Napi::Array::New(env, table_schema.names.size());
  for (size_t i = 0; i < table_schema.names.size(); ++i) {
    names.Set(i, Napi::String::New(env, table_schema.names[i]));
  }
  result.Set("names", names);

  auto calcite_to_file_indices = Napi::Array::New(env, table_schema.calcite_to_file_indices.size());
  for (size_t i = 0; i < table_schema.calcite_to_file_indices.size(); ++i) {
    calcite_to_file_indices.Set(i, Napi::Number::New(env, table_schema.calcite_to_file_indices[i]));
  }
  // Key spelling ("Indicies") matches the JS side; do not "fix" it here.
  result.Set("calciteToFileIndicies", calcite_to_file_indices);

  result.Set("hasHeaderCSV", Napi::Boolean::New(env, table_schema.has_header_csv));

  return result;
}

// Begin asynchronous execution of a previously generated graph.
void start_execute_graph(std::shared_ptr<ral::cache::graph> const& graph) {
  ::startExecuteGraph(graph, graph->get_context_token());
}

// Block for the graph's results, returning (column names, result tables).
std::tuple<std::vector<std::string>, std::vector<std::unique_ptr<cudf::table>>>
get_execute_graph_result(std::shared_ptr<ral::cache::graph> const& graph) {
  auto bsql_result = std::move(::getExecuteGraphResult(graph, graph->get_context_token()));
  return {std::move(bsql_result->names), std::move(bsql_result->cudfTables)};
}

}  // namespace blazingsql
}  // namespace nv
0
rapidsai_public_repos/node/modules/sql
rapidsai_public_repos/node/modules/sql/blazingsql/context.cpp
// Copyright (c) 2021-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "blazingsql_wrapper/context.hpp"
#include "blazingsql_wrapper/api.hpp"
#include "blazingsql_wrapper/async.hpp"
#include "blazingsql_wrapper/cache.hpp"

#include <node_cudf/table.hpp>
#include <nv_node/utilities/napi_to_cpp.hpp>

#include <cudf/copying.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>

#include <execution_graph/Context.h>

namespace nv {
namespace blazingsql {

namespace {

// Extract a DataFrame's column names and materialize it as a Table wrapper by
// invoking its JS `asTable()` method.
std::pair<std::vector<std::string>, Table::wrapper_t> get_names_and_table(NapiToCPP::Object df) {
  std::vector<std::string> names = df.Get("names");
  Napi::Function asTable         = df.Get("asTable");
  auto table                     = asTable.Call(df.val, {});
  return {std::move(names), table.ToObject()};
}

}  // namespace

Napi::Function Context::Init(Napi::Env const& env, Napi::Object exports) {
  return DefineClass(env,
                     "Context",
                     {
                       InstanceAccessor<&Context::get_ral_id>("id"),
                       InstanceMethod<&Context::send>("send"),
                       InstanceMethod<&Context::pull>("pull"),
                       InstanceMethod<&Context::broadcast>("broadcast"),
                       InstanceMethod<&Context::run_generate_graph>("runGenerateGraph"),
                     });
}

// Boot the engine (blazingsql::initialize) and keep persistent references to
// the UCX context and both transport caches so they outlive JS GC cycles.
Context::Context(Napi::CallbackInfo const& info) : EnvLocalObjectWrap<Context>(info) {
  auto env             = info.Env();
  auto result          = blazingsql::initialize(env, info[0]);
  this->_id            = std::get<0>(result);
  this->_port          = std::get<1>(result);
  this->_worker_ids    = std::move(std::get<2>(result));
  this->_ucp_context   = Napi::Persistent(std::get<3>(result));
  this->_transport_in  = Napi::Persistent(CacheMachine::New(env, std::get<4>(result)));
  this->_transport_out = Napi::Persistent(CacheMachine::New(env, std::get<5>(result)));
}

// JS entry: build an ExecutionGraph from DataFrames, file schemas, and the
// Calcite plan pieces produced on the JS side.
Napi::Value Context::run_generate_graph(Napi::CallbackInfo const& info) {
  auto env = info.Env();
  CallbackArgs args{info};

  Napi::Array data_frames             = args[0];
  Napi::Array schemas                 = args[1];
  std::vector<std::string> table_names = args[2];
  std::vector<std::string> table_scans = args[3];
  int32_t ctx_token                   = args[4];
  std::string query                   = args[5];
  auto config_opts_                   = args[6];
  std::string sql                     = args[7];
  std::string current_timestamp       = args[8];

  // Same boolean-capitalization rewrite as blazingsql::initialize.
  auto config_options = [&] {
    std::map<std::string, std::string> config{};
    if (!config_opts_.IsNull() && config_opts_.IsObject()) {
      auto opts = config_opts_.As<Napi::Object>();
      auto keys = opts.GetPropertyNames();
      for (auto i = 0u; i < keys.Length(); ++i) {
        std::string name = keys.Get(i).ToString();
        config[name]     = opts.Get(name).ToString();
        if (config[name] == "true") {
          config[name] = "True";
        } else if (config[name] == "false") {
          config[name] = "False";
        }
      }
    }
    return config;
  }();

  std::vector<cudf::table_view> table_views;
  std::vector<std::vector<std::string>> column_names;
  table_views.reserve(data_frames.Length());
  column_names.reserve(data_frames.Length());

  // `tables` pins each materialized Table in a JS array so the views taken
  // below remain valid for the duration of this call.
  auto tables = Napi::Array::New(env, data_frames.Length());
  for (std::size_t i = 0; i < data_frames.Length(); ++i) {
    NapiToCPP::Object df = data_frames.Get(i);
    auto [names, table]  = std::move(get_names_and_table(df));
    tables.Set(i, table);
    auto view = table->view();
    table_views.push_back(view);
    column_names.push_back(std::move(names));
  }

  std::vector<std::string> worker_ids;
  worker_ids.reserve(_worker_ids.size());
  std::transform(
    _worker_ids.begin(), _worker_ids.end(), std::back_inserter(worker_ids), [](int32_t const id) {
      return std::to_string(id);
    });

  return blazingsql::run_generate_graph(env,
                                        *this,
                                        0,
                                        worker_ids,
                                        table_views,
                                        schemas,
                                        column_names,
                                        table_names,
                                        table_scans,
                                        ctx_token,
                                        query,
                                        sql,
                                        current_timestamp,
                                        config_options);
}

Napi::Value Context::get_ral_id(Napi::CallbackInfo const& info) {
  return Napi::Value::From(info.Env(), _id);
}

// C++ entry: route a table to `dst_ral_id` via the outbound transport cache.
void Context::send(int32_t const& dst_ral_id,
                   std::string const& ctx_token,
                   std::string const& message_id,
                   std::vector<std::string> const& column_names,
                   cudf::table_view const& table_view) {
  this->_transport_out.Value()->add_to_cache(
    get_node_id(), get_ral_id(), dst_ral_id, ctx_token, message_id, column_names, table_view);
}

// C++ entry: create the async pull task for `message_id` (caller runs it).
SQLTask* Context::pull(std::string const& message_id) {
  return this->_transport_in.Value()->pull_from_cache(message_id);
}

// JS entry: send(dstRalId, ctxToken, messageId, dataFrame).
void Context::send(Napi::CallbackInfo const& info) {
  CallbackArgs args{info};
  int32_t dst_ral_id     = args[0];
  std::string ctx_token  = args[1];
  std::string message_id = args[2];
  NapiToCPP::Object df   = args[3];
  auto [names, table]    = std::move(get_names_and_table(df));
  this->send(dst_ral_id, ctx_token, message_id, names, *table);
}

// JS entry: pull(messageId) -> Promise of {names, tables}.
Napi::Value Context::pull(Napi::CallbackInfo const& info) {
  return pull(info[0].ToString())->run();
}

// JS entry: slice a DataFrame into ~equal row ranges, one per worker, and send
// each worker its slice (the local slice goes straight into the inbound cache).
// Returns the message ids, indexed by worker position.
Napi::Value Context::broadcast(Napi::CallbackInfo const& info) {
  auto env = info.Env();
  CallbackArgs args{info};
  int32_t ctx_token    = args[0];
  NapiToCPP::Object df = args[1];
  auto [names, table]  = std::move(get_names_and_table(df));

  auto const num_rows    = table->num_rows();
  auto const num_workers = _worker_ids.size();
  // Rows per slice, rounded up so all rows are covered.
  auto const num_slice_rows =
    static_cast<cudf::size_type>(ceil(static_cast<double>(num_rows) / num_workers));

  // Build the [begin, end) index pairs cudf::slice expects: the lambda emits
  // 2*num_workers indices, advancing by num_slice_rows every two entries
  // (via the integer division ++count / 2) and clamping to num_rows.
  auto slices = cudf::slice(*table, [&]() {  //
    cudf::size_type count{0};
    std::vector<cudf::size_type> indices;
    std::generate_n(std::back_inserter(indices), num_workers * 2, [&]() mutable {
      return std::min(num_rows, num_slice_rows * (++count / 2));
    });
    return indices;
  }());

  auto messages = Napi::Array::New(env, num_workers);
  // Iterate workers in reverse; each gets a distinct token ctx_token + i.
  for (int32_t i = num_workers; --i > -1;) {
    auto const id  = _worker_ids[i];
    auto const tok = std::to_string(ctx_token + i);
    auto const msg = "broadcast_table_message_" + tok;
    messages[i]    = msg;
    if (id != _id) {
      this->send(id, tok, msg, names, slices[i]);
    } else {
      // Local slice: short-circuit the network and cache it as if received.
      this->_transport_in.Value()->add_to_cache(
        get_node_id(), get_ral_id(), get_ral_id(), tok, msg, names, slices[i]);
    }
  }

  return messages;
}

}  // namespace blazingsql
}  // namespace nv
0
rapidsai_public_repos/node/modules/sql/blazingsql
rapidsai_public_repos/node/modules/sql/blazingsql/blazingsql_wrapper/graph.hpp
// Copyright (c) 2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "context.hpp"

#include <nv_node/objectwrap.hpp>

namespace ral {
namespace cache {
// Forward declaration; defined by the BlazingSQL engine.
struct graph;
}  // namespace cache
}  // namespace ral

namespace nv {
namespace blazingsql {

// JS-visible wrapper around a BlazingSQL execution graph. Exposes `start`,
// `result`, and `sendTo` to JavaScript (bound in Init).
struct ExecutionGraph : public EnvLocalObjectWrap<ExecutionGraph> {
  /**
   * @brief Initialize and export the ExecutionGraph JavaScript constructor and prototype.
   *
   * @param env The active JavaScript environment.
   * @param exports The exports object to decorate.
   * @return Napi::Function The ExecutionGraph constructor function.
   */
  static Napi::Function Init(Napi::Env const& env, Napi::Object exports);

  /**
   * @brief Construct a new ExecutionGraph instance from a ral::cache::graph.
   *
   * @param graph The shared pointer to the engine execution graph.
   * @param context The owning SQL Context.
   */
  static wrapper_t New(Napi::Env const& env,
                       std::shared_ptr<ral::cache::graph> const& graph,
                       Wrapper<Context> const& context);

  /**
   * @brief Construct a new ExecutionGraph instance from JavaScript.
   */
  ExecutionGraph(Napi::CallbackInfo const& info);

  // Implicit view of the wrapped engine graph for C++ callers.
  inline operator std::shared_ptr<ral::cache::graph>() const { return _graph; }

 private:
  bool _started{false};  // set once start() has launched graph execution
  bool _fetched{false};  // set once result() has queued the fetch task
  Napi::Reference<Napi::Promise> _results;     // cached promise returned by result()
  std::shared_ptr<ral::cache::graph> _graph;   // the wrapped engine graph
  Napi::Reference<Wrapper<Context>> _context;  // keeps the owning Context alive

  // JS-facing methods (bound in Init as "start", "sendTo", "result").
  void start(Napi::CallbackInfo const& info);
  Napi::Value send(Napi::CallbackInfo const& info);
  Napi::Value result(Napi::CallbackInfo const& info);
};

}  // namespace blazingsql
}  // namespace nv
0
rapidsai_public_repos/node/modules/sql/blazingsql
rapidsai_public_repos/node/modules/sql/blazingsql/blazingsql_wrapper/context.hpp
// Copyright (c) 2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "async.hpp"
#include "cache.hpp"
#include "ucpcontext.hpp"

#include <nv_node/objectwrap.hpp>

namespace ral {
namespace cache {
// Forward declaration; defined by the BlazingSQL engine.
struct CacheMachine;
}  // namespace cache
}  // namespace ral

namespace nv {
namespace blazingsql {

// JS-visible SQL context: owns this process's worker identity, UCX context,
// and the inbound/outbound transport caches used to exchange tables.
struct Context : public EnvLocalObjectWrap<Context> {
  /**
   * @brief Initialize and export the ContextWrapper JavaScript constructor and prototype.
   *
   * @param env The active JavaScript environment.
   * @param exports The exports object to decorate.
   * @return Napi::Function The ContextWrapper constructor function.
   */
  static Napi::Function Init(Napi::Env const& env, Napi::Object exports);

  /**
   * @brief Construct a new ContextWrapper instance from an initialized engine.
   *
   * @param ral_id This process's RAL id.
   * @param pair The ((transport_out, transport_in), port) result of engine init.
   * @param ucp_context The UCX context shared by the workers.
   * @return wrapper_t The new ContextWrapper instance
   */
  static wrapper_t New(Napi::Env const& env,
                       int32_t const& ral_id,
                       std::pair<std::pair<std::shared_ptr<ral::cache::CacheMachine>,
                                           std::shared_ptr<ral::cache::CacheMachine>>,
                                 int> const& pair,
                       UcpContext::wrapper_t const& ucp_context);

  /**
   * @brief Construct a new ContextWrapper instance from JavaScript.
   */
  Context(Napi::CallbackInfo const& info);

  inline int32_t get_ral_id() const { return _id; }
  // Node index within the execution graph's context; -1 until resolved.
  inline int32_t get_node_id() const { return _node_id; }
  inline void set_node_id(int32_t node_id) { _node_id = node_id; }

  // Route a table to `dst_ral_id` through the outbound transport cache.
  void send(int32_t const& dst_ral_id,
            std::string const& ctx_token,
            std::string const& message_id,
            std::vector<std::string> const& column_names,
            cudf::table_view const& table_view);

  // Create (but do not queue) the async task that pulls `message_id`.
  SQLTask* pull(std::string const& message_id);

 private:
  int32_t _id{};              // this worker's RAL id
  int32_t _port{};            // engine listen port from initialization
  int32_t _node_id{-1};       // node index; -1 means "not yet resolved"
  std::vector<int32_t> _worker_ids{};  // all worker ids (includes self)
  Napi::Reference<UcpContext::wrapper_t> _ucp_context;
  Napi::Reference<Wrapper<CacheMachine>> _transport_in;   // inbound tables
  Napi::Reference<Wrapper<CacheMachine>> _transport_out;  // outbound tables

  // JS-facing bindings (see Init): "id", "send", "pull", "broadcast",
  // "runGenerateGraph".
  Napi::Value get_ral_id(Napi::CallbackInfo const& info);
  void send(Napi::CallbackInfo const& info);
  Napi::Value pull(Napi::CallbackInfo const& info);
  Napi::Value broadcast(Napi::CallbackInfo const& info);
  Napi::Value run_generate_graph(Napi::CallbackInfo const& info);
};

}  // namespace blazingsql
}  // namespace nv
0
rapidsai_public_repos/node/modules/sql/blazingsql
rapidsai_public_repos/node/modules/sql/blazingsql/blazingsql_wrapper/cache.hpp
// Copyright (c) 2021-2022, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include "async.hpp" #include <nv_node/objectwrap.hpp> namespace cudf { struct table_view; } namespace ral { namespace cache { struct CacheMachine; } } // namespace ral namespace nv { namespace blazingsql { struct CacheMachine : public nv::EnvLocalObjectWrap<CacheMachine> { /** * @brief Initialize and export the CacheMachine JavaScript constructor and prototype. * * @param env The active JavaScript environment. * @param exports The exports object to decorate. * @return Napi::Function The CacheMachine constructor function. */ static Napi::Function Init(Napi::Env const& env, Napi::Object exports); /** * @brief Construct a new CacheMachine instance from a ral::cache::CacheMachine. * * @param cache The shared pointer to the CacheMachine. */ static wrapper_t New(Napi::Env const& env, std::shared_ptr<ral::cache::CacheMachine> const& cache); /** * @brief Construct a new CacheMachine instance from JavaScript. 
*/ CacheMachine(Napi::CallbackInfo const& info); inline operator std::shared_ptr<ral::cache::CacheMachine>() { return _cache; } void add_to_cache(int32_t const& node_id, int32_t const& src_ral_id, int32_t const& dst_ral_id, std::string const& ctx_token, std::string const& message_id, std::vector<std::string> const& column_names, cudf::table_view const& table_view); SQLTask* pull_from_cache(std::string const& message_id); private: std::shared_ptr<ral::cache::CacheMachine> _cache; }; } // namespace blazingsql } // namespace nv
0
rapidsai_public_repos/node/modules/sql/blazingsql
rapidsai_public_repos/node/modules/sql/blazingsql/blazingsql_wrapper/api.hpp
// Copyright (c) 2021-2022, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include "context.hpp" #include "graph.hpp" #include <nv_node/utilities/args.hpp> struct NodeMetaDataUCP; struct TableSchema; namespace cudf { struct table; struct table_view; } // namespace cudf namespace nv { namespace blazingsql { std::tuple<uint16_t, int32_t, std::vector<int32_t>, UcpContext::wrapper_t, std::shared_ptr<ral::cache::CacheMachine>, std::shared_ptr<ral::cache::CacheMachine>> initialize(Napi::Env const& env, NapiToCPP::Object const& props); std::tuple<std::vector<std::string>, std::vector<std::string>> get_table_scan_info( std::string const& logical_plan); ExecutionGraph::wrapper_t run_generate_graph( Napi::Env const& env, Wrapper<Context> const& context, uint32_t const& masterIndex, std::vector<std::string> const& worker_ids, std::vector<cudf::table_view> const& table_views, Napi::Array const& schemas, std::vector<std::vector<std::string>> const& column_names, std::vector<std::string> const& table_names, std::vector<std::string> const& table_scans, int32_t const& ctx_token, std::string const& query, std::string const& sql, std::string const& current_timestamp, std::map<std::string, std::string> const& config_options); std::string run_generate_physical_graph(uint32_t const& masterIndex, std::vector<std::string> const& worker_ids, int32_t const& ctx_token, std::string const& query); Napi::Value parse_schema(Napi::Env const& env, 
std::vector<std::string> const& input, std::string const& file_format, bool const& ignoreMissingFiles); void start_execute_graph(std::shared_ptr<ral::cache::graph> const& execution_graph); std::tuple<std::vector<std::string>, std::vector<std::unique_ptr<cudf::table>>> get_execute_graph_result(std::shared_ptr<ral::cache::graph> const& execution_graph); } // namespace blazingsql } // namespace nv
0
rapidsai_public_repos/node/modules/sql/blazingsql
rapidsai_public_repos/node/modules/sql/blazingsql/blazingsql_wrapper/async.hpp
// Copyright (c) 2021-2022, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include <napi.h> #include <memory> #include <string> namespace cudf { struct table; } namespace nv { namespace blazingsql { using SQLTaskCallback = typename std::function< std::pair<std::vector<std::string>, std::vector<std::unique_ptr<cudf::table>>>()>; struct SQLTask : public Napi::AsyncWorker { SQLTask(Napi::Env const& env, SQLTaskCallback const& work); Napi::Promise run(); protected: void Execute() override; void OnError(Napi::Error const& err) override; void OnOK() override; std::vector<napi_value> GetResult(Napi::Env env) override; private: bool queued_{false}; SQLTaskCallback work_; std::vector<std::string> names_; std::vector<std::unique_ptr<cudf::table>> tables_; Napi::Promise::Deferred deferred_; }; } // namespace blazingsql } // namespace nv
0
rapidsai_public_repos/node/modules/sql/blazingsql
rapidsai_public_repos/node/modules/sql/blazingsql/blazingsql_wrapper/ucpcontext.hpp
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include <nv_node/objectwrap.hpp> typedef struct ucp_context* ucp_context_h; namespace nv { namespace blazingsql { struct UcpContext : public EnvLocalObjectWrap<UcpContext> { /** * @brief Initialize and export the UcpContext JavaScript constructor and prototype. * * @param env The active JavaScript environment. * @param exports The exports object to decorate. * @return Napi::Function The UcpContext constructor function. */ static Napi::Function Init(Napi::Env const& env, Napi::Object exports); /** * @brief Construct a new UcpContext instance from a ral::cache::graph. * * @param cache The shared pointer to the UcpContext. */ static wrapper_t New(Napi::Env const& env); /** * @brief Construct a new UcpContext instance from JavaScript. */ UcpContext(Napi::CallbackInfo const& info); inline operator std::uintptr_t() { return reinterpret_cast<std::uintptr_t>(_ucp_context); } private: ucp_context_h _ucp_context; }; } // namespace blazingsql } // namespace nv
0
rapidsai_public_repos/node/modules/sql
rapidsai_public_repos/node/modules/sql/src/graph.ts
// Copyright (c) 2021-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/* eslint-disable @typescript-eslint/await-thenable */

import {DataFrame, Table} from '@rapidsai/cudf';

// Per-process message nonce: randomly seeded, then incremented for every
// sendTo() so each transfer gets a distinct id string.
let nonce = Math.random() * 1e3 | 0;

/**
 * Thenable wrapper around a native ExecutionGraph. Behaves as a
 * Promise<DataFrame[]>: awaiting it runs the graph (if any) and converts the
 * resulting native Tables into DataFrames. With no native graph, it resolves
 * to a single empty DataFrame.
 */
export class ExecutionGraph implements Promise<DataFrame[]> {
  constructor(private _graph?: import('./rapidsai_sql').ExecutionGraph) {}

  get[Symbol.toStringTag]() { return 'ExecutionGraph'; }

  // Promise protocol: all three delegate to the memoized result() promise.
  then<TResult1 = DataFrame[], TResult2 = never>(
    onfulfilled?: ((value: DataFrame[]) => TResult1 | PromiseLike<TResult1>)|undefined|null,
    onrejected?: ((reason: any) => TResult2 | PromiseLike<TResult2>)|undefined|
    null): Promise<TResult1|TResult2> {
    return this.result().then(onfulfilled, onrejected);
  }

  catch<TResult = never>(onrejected?: ((reason: any) => TResult | PromiseLike<TResult>)|undefined|
                         null): Promise<DataFrame[]|TResult> {
    return this.result().catch(onrejected);
  }

  finally(onfinally?: (() => void)|undefined|null): Promise<DataFrame[]> {
    return this.result().finally(onfinally);
  }

  // Memoized result promise — the graph only executes once no matter how many
  // times then()/result() is called.
  private _result: Promise<DataFrame[]>|undefined;

  // Begin executing the underlying native graph (no-op when there is none).
  start() { this._graph?.start(); }

  // Resolve to the graph's output as DataFrames. Without a native graph this
  // yields one DataFrame built from an empty Table.
  result() {
    if (!this._result) {
      this._result = (async () => {
        const {names, tables} = this._graph ? (await this._graph.result())
                                            : {names: [], tables: [new Table({})]};
        return tables.map((table: Table) => DataFrame.fromTable(table, names));
      })();
    }
    return this._result;
  }

  // Send the result DataFrames to worker `id`.
  // NOTE(review): the returned map is always empty — the messageId->DataFrame
  // assignment below is deliberately commented out. Callers receive `{}`;
  // confirm whether the in-flight bookkeeping should be restored.
  sendTo(id: number) {
    return this.then((dfs) => {
      const {_graph} = this;
      const inFlightTables: Record<string, DataFrame> = {};
      if (_graph) {
        _graph.sendTo(id, dfs, `${nonce++}`).forEach((messageId, i) => {
          // inFlightTables[messageId] = dfs[i];
        });
      }
      return inFlightTables;
    });
  }
}
0
rapidsai_public_repos/node/modules/sql
rapidsai_public_repos/node/modules/sql/src/index.ts
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. export {UcpContext} from './addon'; export {SQLCluster} from './cluster'; export {SQLContext} from './context';
0
rapidsai_public_repos/node/modules/sql
rapidsai_public_repos/node/modules/sql/src/cluster.ts
// Copyright (c) 2021-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/* eslint-disable @typescript-eslint/await-thenable */

import {Device} from '@rapidsai/cuda';
import {arrowToCUDFType, DataFrame, Series} from '@rapidsai/cudf';

import {ContextProps, parseSchema} from './addon';
import {LocalSQLWorker} from './cluster/local';
import {RemoteSQLWorker} from './cluster/remote';
import {defaultClusterConfigValues} from './config';

// Common surface shared by the in-process worker and spawned remote workers.
export interface Worker {
  readonly id: number;
  kill(): void;
  dropTable(name: string): Promise<void>;
  sql(query: string, token: number): Promise<DataFrame[]>;
  createDataFrameTable(name: string, table_id: string): Promise<void>;
  createCSVTable(name: string, paths: string[]): Promise<void>;
  createParquetTable(name: string, paths: string[]): Promise<void>;
  createORCTable(name: string, paths: string[]): Promise<void>;
  createContext(props: Omit<ContextProps, 'id'>): Promise<void>;
}

export interface ClusterProps extends ContextProps {
  ip: string;
  port: number;
  numWorkers: number;
}

// Module-level context-token counter shared by all SQLCluster instances in
// this process; each broadcast/query consumes token values from it.
let ctxToken = 0;

export class SQLCluster {
  /**
   * Initialize and return a new pool of SQLCluster workers.
   *
   * @param options options for the SQLCluster and SQLContext instance(s)
   *
   * @example
   * ```typescript
   * import {SQLCluster} from '@rapidsai/sql';
   *
   * const cluster = await Cluster.init();
   * ```
   */
  public static async init(options: Partial<ClusterProps> = {}) {
    // Worker count is capped at the number of visible CUDA devices.
    const {numWorkers = Device.numDevices, ip = '0.0.0.0', port = 4000} = options;
    const {
      networkIfaceName = 'lo',
      allocationMode   = 'cuda_memory_resource',
      initialPoolSize  = null,
      maximumPoolSize  = null,
      enableLogging    = false,
    } = options;
    const configOptions = {...defaultClusterConfigValues, ...options.configOptions};
    const cluster =
      new SQLCluster(options.id || 0, Math.min(numWorkers, Device.numDevices));
    await cluster._createContexts({
      ip,
      port,
      networkIfaceName,
      allocationMode,
      initialPoolSize,
      maximumPoolSize,
      enableLogging,
      configOptions,
    });
    return cluster;
  }

  private declare _workers: Worker[];
  private declare _worker: LocalSQLWorker;

  private constructor(id: number, numWorkers: number) {
    // Ensure spawned workers are terminated when this process exits.
    process.on('exit', this.kill.bind(this));
    process.on('beforeExit', this.kill.bind(this));
    this._worker  = new LocalSQLWorker(id);
    // Worker 0 runs in-process; the rest are spawned, each pinned to one GPU
    // via CUDA_VISIBLE_DEVICES. The array is stored REVERSED (remotes first,
    // local last) — several methods below re-reverse it to recover id order.
    this._workers =
      Array
        .from({length: numWorkers},
              (_, i) => i === 0 ? this._worker
                                : new RemoteSQLWorker(
                                    this, id + i, {...process.env, CUDA_VISIBLE_DEVICES: i}))
        .reverse();
  }

  // The local worker's SQLContext (the cluster "master" context).
  public get context() { return this._worker.context; }

  protected async _createContexts(props: {ip: string}&Omit<ContextProps, 'id'|'workersUcpInfo'>) {
    const {ip, port} = props;
    // Re-reverse to id order: each worker listens on port + its id.
    const workersUcpInfo =
      [...this._workers].reverse().map(({id}) => ({id, ip, port: port + id}));
    await Promise.all(
      this._workers.map((worker) => worker.createContext({...props, workersUcpInfo})));
  }

  /**
   * Create a SQL table to be used for future queries.
   *
   * @param tableName Name of the table when referenced in a query
   * @param input DataFrame or paths to CSV files
   *
   * @example
   * ```typescript
   * import {Series, DataFrame, Int32} from '@rapidsai/cudf';
   * import {SQLCluster} from '@rapidsai/sql';
   *
   * const a  = Series.new({type: new Int32(), data: [1, 2, 3]});
   * const b  = Series.new({type: new Int32(), data: [4, 5, 6]});
   * const df = new DataFrame({'a': a, 'b': b});
   *
   * const sqlCluster = await SQLCluster.init();
   * await sqlCluster.createTable('test_table', df);
   * ```
   */
  public async createDataFrameTable(tableName: string, input: DataFrame) {
    // Reserve one token per worker, then broadcast slices of `input`; the
    // returned ids are reversed to line up with the reversed `_workers`.
    ctxToken += this._workers.length;
    const ids =
      this.context.context.broadcast(ctxToken - this._workers.length, input).reverse();
    await Promise.all(
      this._workers.map((worker, i) => worker.createDataFrameTable(tableName, ids[i])));
  }

  /**
   * Create a SQL table from CSV file(s).
   *
   * @param tableName Name of the table when referenced in a query
   * @param filePaths array of paths to CSV file(s)
   *
   * @example
   * ```typescript
   * import {sqlCluster} from '@rapidsai/sql';
   *
   * const sqlCluster = await SQLCluster.init();
   * await sqlCluster.createCSVTable('test_table', ['test.csv']);
   * ```
   */
  public async createCSVTable(tableName: string, filePaths: string[]) {
    await this._createFileTable(tableName, filePaths, 'csv', (worker, chunkedPaths) => {
      return worker.createCSVTable(tableName, chunkedPaths);
    });
  }

  /**
   * Create a SQL table from Apache Parquet file(s).
   *
   * @param tableName Name of the table when referenced in a query
   * @param filePaths array of paths to Parquet file(s)
   *
   * @example
   * ```typescript
   * import {sqlCluster} from '@rapidsai/sql';
   *
   * const sqlCluster = await SQLCluster.init();
   * await sqlCluster.createParquetTable('test_table', ['test.parquet']);
   * ```
   */
  public async createParquetTable(tableName: string, filePaths: string[]) {
    await this._createFileTable(tableName, filePaths, 'parquet', (worker, chunkedPaths) => {
      return worker.createParquetTable(tableName, chunkedPaths);
    });
  }

  /**
   * Create a SQL table from Apache ORC file(s).
   *
   * @param tableName Name of the table when referenced in a query
   * @param filePaths array of paths to ORC file(s)
   *
   * @example
   * ```typescript
   * import {sqlCluster} from '@rapidsai/sql';
   *
   * const sqlCluster = await SQLCluster.init();
   * await sqlCluster.createORCTable('test_table', ['test.orc']);
   * ```
   */
  public async createORCTable(tableName: string, filePaths: string[]) {
    await this._createFileTable(tableName, filePaths, 'orc', (worker, chunkedPaths) => {
      return worker.createORCTable(tableName, chunkedPaths);
    });
  }

  // Split `filePath` across workers via `cb`; workers left without files get
  // an empty DataFrame (matching the parsed schema) so every worker has the
  // table registered.
  // NOTE(review): `filePath.splice(...)` consumes the CALLER's array in
  // place — a caller that reuses its paths array will find it emptied.
  // Confirm whether a defensive copy was intended.
  private async _createFileTable(
    tableName: string,
    filePath: string[],
    fileType: 'csv'|'orc'|'parquet',
    cb: (worker: Worker, chunkedPaths: string[]) => Promise<void>,
  ) {
    // TODO: This logic needs to be reworked. We split up the files among the workers.
    // There is a possibility a worker does not get a file, therefore we need to give it an
    // empty DataFrame.
    const {types, names} = parseSchema(filePath, fileType);
    // Empty DataFrame with the same column names/types as the parsed files.
    const empty = new DataFrame(names.reduce((xs: any, name: any, i: any) => ({
      ...xs,
      [name]: Series.new({type: arrowToCUDFType(types[i]), data: []}),
    }), {}));
    const chunkedPaths: string[][] = [];
    for (let i = this._workers.length; i > 0; i--) {
      // Ceil-divide the remaining paths among the remaining workers.
      chunkedPaths.push(filePath.splice(0, Math.ceil(filePath.length / i)));
    }
    await Promise.all(this._workers.slice().reverse().map((worker, i) => {
      if (chunkedPaths[i].length > 0) {
        return cb(worker, chunkedPaths[i]);
      } else {
        // No files for this worker — send it the empty placeholder table.
        ctxToken += 1;
        const message = `broadcast_table_message_${ctxToken}`;
        this.context.context.send(worker.id, ctxToken, message, empty);
        return worker.createDataFrameTable(tableName, message);
      }
    }));
  }

  /**
   * Drop a SQL table from SQLContext memory.
   *
   * @param tableName Name of the table to drop
   *
   * @example
   * ```typescript
   * import {Series, DataFrame, Int32} from '@rapidsai/cudf';
   * import {SQLCluster} from '@rapidsai/sql';
   *
   * const a  = Series.new({type: new Int32(), data: [1, 2, 3]});
   * const b  = Series.new({type: new Int32(), data: [4, 5, 6]});
   * const df = new DataFrame({'a': a, 'b': b});
   *
   * const sqlCluster = await SQLCluster.init();
   * await sqlCluster.createTable('test_table', df);
   * await sqlCluster.dropTable('test_table');
   * console.log(await sqlCluster.listTables());
   * // []
   * ```
   */
  public async dropTable(tableName: string) {
    await Promise.all(this._workers.map((worker) => worker.dropTable(tableName)));
  }

  /**
   * Query a SQL table and return the result as a DataFrame.
   *
   * @param query SQL query string
   *
   * @example
   * ```typescript
   * import {Series, DataFrame, Int32} from '@rapidsai/cudf';
   * import {SQLCluster} from '@rapidsai/sql';
   *
   * const a  = Series.new({type: new Int32(), data: [1, 2, 3]});
   * const b  = Series.new({type: new Int32(), data: [4, 5, 6]});
   * const df = new DataFrame({'a': a, 'b': b});
   *
   * const sqlCluster = await SQLCluster.init();
   * await sqlCluster.createTable('test_table', df);
   *
   * for await (const df of sqlCluster.sql('SELECT a FROM test_table')) {
   *   console.log(df.toString());
   * }
   * // a
   * // 0
   * // 1
   * // 2
   * // 3
   * ```
   */
  public async * sql(query: string) {
    const algebra = await this.explain(query);
    if (algebra.includes('LogicalValues(tuples=[[]])')) {
      // SQL returns empty result.
      return;
    }
    // All workers run the query under one shared token; results are yielded
    // as each worker finishes (Promise.race + splice), not in worker order.
    const token    = ctxToken++;
    const promises = this._workers.map((worker) => worker.sql(query, token));
    while (promises.length > 0) {
      const {dfs, idx} =
        await Promise.race(promises.map((dfs, idx) => dfs.then((dfs) => ({dfs, idx}))));
      promises.splice(idx, 1);
      yield* dfs;
    }
  }

  /**
   * Returns an array with the names of all created tables.
   *
   * @example
   * ```typescript
   * import {Series, DataFrame, Int32} from '@rapidsai/cudf';
   * import {SQLCluster} from '@rapidsai/sql';
   *
   * const a  = Series.new({type: new Int32(), data: [1, 2, 3]});
   * const df = new DataFrame({'a': a});
   *
   * const sqlCluster = await SQLCluster.init();
   * await sqlCluster.createTable('test_table', df);
   * console.log(await sqlCluster.listTables());
   * // ['test_table']
   * ```
   */
  public listTables() { return this.context.listTables(); }

  /**
   * Returns a map with column names as keys and the column data type as values.
   *
   * @example
   * ```typescript
   * import {Series, DataFrame, Int32} from '@rapidsai/cudf';
   * import {SQLCluster} from '@rapidsai/sql';
   *
   * const a  = Series.new({type: new Int32(), data: [1, 2, 3]});
   * const df = new DataFrame({'a': a});
   *
   * const sqlCluster = await SQLCluster.init();
   * await sqlCluster.createTable('test_table', df);
   * console.log(sqlCluster.describeTable('test_table'));
   * // {'a': Int32}
   * ```
   */
  public describeTable(tableName: string) { return this.context.describeTable(tableName); }

  /**
   * Returns a break down of a given query's logical relational algebra plan.
   *
   * @param sql SQL query
   * @param detail if a physical plan should be returned instead
   *
   * @example
   * ```typescript
   * import {Series, DataFrame} from '@rapidsai/cudf';
   * import {SQLCluster} from '@rapidsai/sql';
   *
   * const a  = Series.new([1, 2, 3]);
   * const df = new DataFrame({'a': a});
   *
   * const sqlCluster = await SQLCluster.init();
   * await sqlCluster.createTable('test_table', df);
   *
   * console.log(sqlCluster.explain('SELECT a FROM test_table'));
   * // BindableTableScan(table=[[main, test_table]], aliases=[[a]])
   * ```
   */
  public explain(sql: string, detail = false) { return this.context.explain(sql, detail); }

  /**
   * Sends a `SIGTERM` signal to all spawned workers. Essentially terminates all spawned workers
   * and removes any references to them.
   *
   * @example
   * ```typescript
   * import {SQLCluster} from '@rapidsai/sql';
   *
   * const sqlCluster = await SQLCluster.init();
   * sqlCluster.kill();
   * ```
   */
  public kill(): void {
    this._workers.forEach((w) => { w.kill(); });
    this._workers.length = 0;
  }
}
0
rapidsai_public_repos/node/modules/sql
rapidsai_public_repos/node/modules/sql/src/rapidsai_sql.ts
// Copyright (c) 2021-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Ambient declarations for the native @rapidsai/sql addon — the actual
// implementations live in the C++ binding.

import {DataFrame, Table} from '@rapidsai/cudf';

import type {defaultContextConfigValues} from './config';
import {ParsedSchema} from './SQLTable';

/** @ignore */
export declare const _cpp_exports: any;

// Extract [tableNames, tableScans] from a logical plan string.
export declare function getTableScanInfo(logicalPlan: string): [string[], string[]];

// Produce the physical plan string for `query` across `workerIds`.
export declare function runGeneratePhysicalGraph(
  workerIds: string[], ctxToken: number, query: string): string;

// Parse the schema of the given files.
export declare function parseSchema(input: string[],
                                    fileType: 'csv'|'orc'|'parquet'): ParsedSchema;

// UCP endpoint info for one worker (see SQLCluster._createContexts, which
// builds these as {id, ip, port: basePort + id}).
export type WorkerUcpInfo = {
  id: number;    // worker id
  ip: string;    // worker host address
  port: number;  // worker UCP listener port
}

// Construction options for a native Context.
export type ContextProps = {
  id: number;                // RAL id of this context
  port: number;              // transport port
  ucpContext?: UcpContext;   // optional UCP transport context
  networkIfaceName: string;  // network interface to bind (e.g. 'lo')
  workersUcpInfo: WorkerUcpInfo[];
  configOptions: typeof defaultContextConfigValues;
  allocationMode: string;
  initialPoolSize: number|null;
  maximumPoolSize: number|null;
  enableLogging: boolean;
};

// Native BlazingSQL execution context (see blazingsql_wrapper/context.hpp).
export declare class Context {
  constructor(props: ContextProps);
  public readonly id: number;
  broadcast(ctxToken: number, df: DataFrame): string[];
  pull(messageId: string): Promise<{names: string[], tables: Table[]}>;
  send(id: number, ctxToken: number, messageId: string, df: DataFrame): void;
  runGenerateGraph(dataframes: DataFrame[],
                   schemas: Record<string, unknown>[],
                   tableNames: string[],
                   tableScans: string[],
                   ctxToken: number,
                   query: string,
                   configOptions: Record<string, unknown>,
                   sql: string,
                   currentTimestamp: string): ExecutionGraph;
}

// Native execution graph handle returned by runGenerateGraph.
export declare class ExecutionGraph {
  constructor();
  start(): void;
  result(): Promise<{names: string[], tables: Table[]}>;
  sendTo(id: number, df: DataFrame[], nonce: string): string[];
}

// Native UCX context wrapper.
export declare class UcpContext {
  constructor();
}
0
rapidsai_public_repos/node/modules/sql
rapidsai_public_repos/node/modules/sql/src/SQLTable.ts
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. import {arrowToCUDFType, DataFrame, DataType} from '@rapidsai/cudf'; import {parseSchema} from './addon'; export interface ParsedSchema { files: string[]; fileType: number; types: DataType[]; names: string[]; calciteToFileIndicies: number[]; hasHeaderCSV: boolean; } export interface SQLTable { tableName: string; get names(): string[]; type(columnName: string): DataType; getSource(): any; } export class FileTable implements SQLTable { public tableName: string; private schema: ParsedSchema; constructor(tableName: string, input: string[], fileType: 'csv'|'orc'|'parquet') { this.tableName = tableName; this.schema = parseSchema(input, fileType); } get names(): string[] { return this.schema.names; } getSource() { return this.schema; } type(columnName: string): DataType { const idx = this.schema.names.indexOf(columnName); return arrowToCUDFType(this.schema.types[idx]); } } export class DataFrameTable implements SQLTable { public tableName: string; private df: DataFrame; constructor(tableName: string, input: DataFrame) { this.tableName = tableName; this.df = input; } get names(): string[] { return this.df.names.concat(); } getSource() { return this.df; } type(columnName: string): DataType { return this.df.get(columnName).type; } }
0
rapidsai_public_repos/node/modules/sql
rapidsai_public_repos/node/modules/sql/src/context.ts
// Copyright (c) 2021, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import {DataFrame, DataType} from '@rapidsai/cudf';

import {UcpContext} from '.';
import {
  Context,
  ContextProps,
  getTableScanInfo,
  runGeneratePhysicalGraph,
} from './addon';
import {
  ArrayList,
  BlazingSchema,
  CatalogColumnDataType,
  CatalogColumnImpl,
  CatalogDatabaseImpl,
  CatalogTableImpl,
  RelationalAlgebraGenerator
} from './algebra';
import {defaultContextConfigValues} from './config';
import {ExecutionGraph} from './graph';
import {json_plan_py} from './json_plan';
import {DataFrameTable, FileTable, SQLTable} from './SQLTable';

/**
 * A BlazingSQL execution context. Tables (file-backed or DataFrame-backed)
 * are registered in a Java/Calcite catalog so SQL can be parsed into
 * relational algebra; the resulting plan is executed via the native Context.
 */
export class SQLContext {
  public readonly context: Context;

  // Opaque handles returned by the Java bridge (Calcite catalog objects).
  declare private _db: any;
  declare private _schema: any;
  declare private _generator: any;
  declare private _ucpContext?: UcpContext;
  // Registered tables, keyed by table name.
  declare private _tables: Map<string, SQLTable>;
  declare private _configOptions: typeof defaultContextConfigValues;

  /**
   * @param options Optional native-context settings; unspecified fields fall
   * back to the defaults destructured below.
   */
  constructor(options: Partial<ContextProps> = {}) {
    // Build the Calcite catalog chain: database -> schema -> algebra generator.
    this._db        = CatalogDatabaseImpl('main');
    this._schema    = BlazingSchema(this._db);
    this._generator = RelationalAlgebraGenerator(this._schema);
    this._tables    = new Map<string, SQLTable>();

    const {
      id               = 0,
      port             = 0,
      networkIfaceName = 'lo',
      workersUcpInfo   = [],
      allocationMode   = 'cuda_memory_resource',
      initialPoolSize  = null,
      maximumPoolSize  = null,
      enableLogging    = false,
      ucpContext,
    } = options;
    this._ucpContext = ucpContext;
    // User-supplied configOptions override the library defaults.
    this._configOptions = {...defaultContextConfigValues, ...options.configOptions};
    this.context        = new Context({
      id,
      port,
      networkIfaceName,
      ucpContext,
      workersUcpInfo,
      configOptions: this._configOptions,
      allocationMode,
      initialPoolSize,
      maximumPoolSize,
      enableLogging
    });
  }

  /** The numeric id of the underlying native Context. */
  public get id() { return this.context.id; }

  /**
   * Create a SQL table from cudf.DataFrames.
   *
   * @param tableName Name of the table when referenced in a query
   * @param input cudf.DataFrame
   *
   * @example
   * ```typescript
   * import {Series, DataFrame, Int32} from '@rapidsai/cudf';
   * import {SQLContext} from '@rapidsai/sql';
   *
   * const a  = Series.new({type: new Int32(), data: [1, 2, 3]});
   * const b  = Series.new({type: new Int32(), data: [4, 5, 6]});
   * const df = new DataFrame({'a': a, 'b': b});
   *
   * const sqlContext = new SQLContext();
   * sqlContext.createDataFrameTable('test_table', df);
   * ```
   */
  createDataFrameTable(tableName: string, input: DataFrame): void {
    this._createTable(new DataFrameTable(tableName, input));
  }

  /**
   * Create a SQL table from CSV file(s).
   *
   * @param tableName Name of the table when referenced in a query
   * @param filePaths array of paths to CSV file(s)
   *
   * @example
   * ```typescript
   * import {SQLContext} from '@rapidsai/sql';
   *
   * const sqlContext = new SQLContext();
   * sqlContext.createCSVTable('test_table', ['test.csv']);
   * ```
   */
  createCSVTable(tableName: string, filePaths: string[]): void {
    this._createTable(new FileTable(tableName, filePaths, 'csv'));
  }

  /**
   * Create a SQL table from Apache Parquet file(s).
   *
   * @param tableName Name of the table when referenced in a query
   * @param filePaths array of paths to parquet file(s)
   *
   * @example
   * ```typescript
   * import {SQLContext} from '@rapidsai/sql';
   *
   * const sqlContext = new SQLContext();
   * sqlContext.createParquetTable('test_table', ['test.parquet']);
   * ```
   */
  createParquetTable(tableName: string, filePaths: string[]): void {
    this._createTable(new FileTable(tableName, filePaths, 'parquet'));
  }

  /**
   * Create a SQL table from Apache ORC file(s).
   *
   * @param tableName Name of the table when referenced in a query
   * @param filePaths array of paths to ORC file(s)
   *
   * @example
   * ```typescript
   * import {SQLContext} from '@rapidsai/sql';
   *
   * const sqlContext = new SQLContext();
   * sqlContext.createORCTable('test_table', ['test.orc']);
   * ```
   */
  createORCTable(tableName: string, filePaths: string[]): void {
    this._createTable(new FileTable(tableName, filePaths, 'orc'));
  }

  /**
   * Registers a table with the local table map and the Java/Calcite catalog,
   * then rebuilds the schema and algebra generator to pick up the change.
   */
  private _createTable(input: SQLTable): void {
    if (this._tables.has(input.tableName)) {
      // NOTE(review): re-registering an existing table name falls through
      // without removing the old catalog entry (removal below is commented
      // out) — confirm whether addTableSync tolerates duplicate names.
      // this._db.removeTableSync(input.tableName);
    }
    this._tables.set(input.tableName, input);

    // Mirror the table's columns into the Calcite catalog.
    const arr = ArrayList();
    input.names.forEach((name: string, index: number) => {
      const dataType = CatalogColumnDataType.fromTypeIdSync(input.type(name).typeId);
      const column   = CatalogColumnImpl([name, dataType, index]);
      arr.addSync(column);
    });
    const tableJava = CatalogTableImpl([input.tableName, this._db, arr]);
    this._db.addTableSync(tableJava);

    // Rebuild the schema and generator so queries can see the new table.
    this._schema    = BlazingSchema(this._db);
    this._generator = RelationalAlgebraGenerator(this._schema);
  }

  /**
   * Drop a SQL table from SQLContext memory.
   *
   * @param tableName Name of the table to drop
   * @throws Error if no table with the given name is registered.
   *
   * @example
   * ```typescript
   * import {Series, DataFrame, Int32} from '@rapidsai/cudf';
   * import {SQLContext} from '@rapidsai/sql';
   *
   * const a  = Series.new({type: new Int32(), data: [1, 2, 3]});
   * const b  = Series.new({type: new Int32(), data: [4, 5, 6]});
   * const df = new DataFrame({'a': a, 'b': b});
   *
   * const sqlContext = new SQLContext();
   * sqlContext.createDataFrameTable('test_table', df);
   * sqlContext.sql('SELECT a FROM test_table');
   * sqlContext.dropTable('test_table');
   * ```
   */
  public dropTable(tableName: string): void {
    if (!this._tables.has(tableName)) {
      throw new Error(`Unable to find table with name ${tableName} to drop from SQLContext memory`);
    }

    // Remove from the Java catalog and rebuild schema/generator, mirroring
    // the registration path in _createTable.
    this._db.removeTableSync(tableName);

    this._schema    = BlazingSchema(this._db);
    this._generator = RelationalAlgebraGenerator(this._schema);
    this._tables.delete(tableName);
  }

  /**
   * Returns an array with the names of all created tables.
   *
   * @example
   * ```typescript
   * import {Series, DataFrame, Int32} from '@rapidsai/cudf';
   * import {SQLContext} from '@rapidsai/sql';
   *
   * const a  = Series.new({type: new Int32(), data: [1, 2, 3]});
   * const df = new DataFrame({'a': a});
   *
   * const sqlContext = new SQLContext();
   * sqlContext.createDataFrameTable('test_table', df);
   * sqlContext.listTables(); // ['test_table']
   * ```
   */
  public listTables(): string[] { return [...this._tables.keys()]; }

  /**
   * Returns a map with column names as keys and the column data type as values.
   * Returns an empty map when the table is not registered.
   *
   * @example
   * ```typescript
   * import {Series, DataFrame, Int32} from '@rapidsai/cudf';
   * import {SQLContext} from '@rapidsai/sql';
   *
   * const a  = Series.new({type: new Int32(), data: [1, 2, 3]});
   * const df = new DataFrame({'a': a});
   *
   * const sqlContext = new SQLContext();
   * sqlContext.createDataFrameTable('test_table', df);
   * sqlContext.describeTable('test_table'); // {'a': Int32}
   * ```
   */
  public describeTable(tableName: string): Map<string, DataType> {
    const table = this._tables.get(tableName);
    if (table === undefined) { return new Map(); }
    return table.names.reduce(
      (m: Map<string, DataType>, name: string) => m.set(name, table.type(name)), new Map());
  }

  /**
   * Query a SQL table and return the result as a DataFrame.
   *
   * @param query SQL query string
   * @param ctxToken an optional context token used for communicating between
   * multiple nodes; defaults to a random 32-bit integer.
   *
   * @example
   * ```typescript
   * import {Series, DataFrame, Int32} from '@rapidsai/cudf';
   * import {SQLContext} from '@rapidsai/sql';
   *
   * const a  = Series.new({type: new Int32(), data: [1, 2, 3]});
   * const b  = Series.new({type: new Int32(), data: [4, 5, 6]});
   * const df = new DataFrame({'a': a, 'b': b});
   *
   * const sqlContext = new SQLContext();
   * sqlContext.createDataFrameTable('test_table', df);
   *
   * await sqlContext.sql('SELECT a FROM test_table'); // [1, 2, 3]
   * ```
   */
  // `| 0` truncates the random token to a 32-bit integer.
  public sql(query: string, ctxToken: number = Math.random() * Number.MAX_SAFE_INTEGER | 0) {
    const algebra = this.explain(query);
    if (algebra == '') { throw new Error('ERROR: Failed to parse given query'); }

    if (algebra.includes('LogicalValues(tuples=[[]])')) {
      // SQL returns an empty execution graph.
      return new ExecutionGraph();
    }

    if (algebra.includes(') OVER (')) {
      console.log(
        'WARNING: Window Functions are currently an experimental feature and not fully supported or tested');
    }

    const tableScanInfo = getTableScanInfo(algebra);
    const tableNames    = tableScanInfo[0];
    const tableScans    = tableScanInfo[1];

    const d                = new Date();
    const currentTimestamp = `${d.getFullYear()}-${d.getMonth() + 1}-${d.getDate()} ${
      d.getHours()}:${d.getMinutes()}:${d.getSeconds()}.${d.getMilliseconds()}000`;

    // Partition the referenced tables by backing source: in-memory DataFrames
    // are passed directly, file-backed tables pass their parsed schema.
    const selectedDataFrames: DataFrame[]           = [];
    const selectedSchemas: Record<string, unknown>[] = [];
    tableNames.forEach((tableName: string) => {
      const table = this._tables.get(tableName);
      if (table !== undefined) {
        if (table instanceof DataFrameTable) {
          selectedDataFrames.push(table.getSource());
        } else {
          selectedSchemas.push(table.getSource());
        }
      }
    });
    return new ExecutionGraph(this.context.runGenerateGraph(selectedDataFrames,
                                                            selectedSchemas,
                                                            tableNames,
                                                            tableScans,
                                                            ctxToken,
                                                            json_plan_py(algebra),
                                                            this._configOptions,
                                                            query,
                                                            currentTimestamp));
  }

  /**
   * Returns a break down of a given query's logical relational algebra plan.
   *
   * @param sql SQL query
   * @param detail if a physical plan should be returned instead
   *
   * @example
   * ```typescript
   * import {Series, DataFrame} from '@rapidsai/cudf';
   * import {SQLContext} from '@rapidsai/sql';
   *
   * const a  = Series.new([1, 2, 3]);
   * const df = new DataFrame({'a': a});
   *
   * const sqlContext = new SQLContext();
   * sqlContext.createDataFrameTable('test_table', df);
   *
   * sqlContext.explain('SELECT a FROM test_table'); // BindableTableScan(table=[[main,
   * test_table]], aliases=[[a]])
   * ```
   */
  public explain(sql: string, detail = false): string {
    let algebra = '';

    try {
      algebra = this._generator.getRelationalAlgebraStringSync(sql);

      if (detail == true) {
        const ctxToken = Math.random() * Number.MAX_SAFE_INTEGER;
        algebra =
          json_plan_py(runGeneratePhysicalGraph(['self'], ctxToken, json_plan_py(algebra)), 'True');
      }
    } catch (ex: any) {
      // Java exceptions from the bridge carry their message on the cause.
      throw new Error(ex.cause.getMessageSync());
    }

    return String(algebra);
  }

  /**
   * Sends a DataFrame to the cache machine.
   *
   * @param id The id of the destination SQLContext
   * @param ctxToken The token associated with the messageId
   * @param messageId The id used to pull the table on the destination SQLContext
   * @param df The DataFrame to send
   *
   * @example
   * ```typescript
   * import {Series, DataFrame} from '@rapidsai/cudf';
   * import {SQLContext} from '@rapidsai/sql';
   *
   * const a  = Series.new([1, 2, 3]);
   * const df = new DataFrame({'a': a});
   *
   * const sqlContext = new SQLContext();
   * sqlContext.send(0, 0, "message_1", df);
   * ```
   */
  public send(id: number, ctxToken: number, messageId: string, df: DataFrame) {
    this.context.send(id, ctxToken, messageId, df);
  }

  /**
   * Returns a DataFrame pulled from the cache machine.
   *
   * @param messageId The message id given when initially sending the DataFrame to the cache
   *
   * @example
   * ```typescript
   * import {Series, DataFrame} from '@rapidsai/cudf';
   * import {SQLContext} from '@rapidsai/sql';
   *
   * const a  = Series.new([1, 2, 3]);
   * const df = new DataFrame({'a': a});
   *
   * const sqlContext = new SQLContext();
   * sqlContext.send(0, 0, "message_1", df);
   * await sqlContext.pull("message_1"); // [1, 2, 3]
   * ```
   */
  async pull(messageId: string) {
    const {names, tables: [table]} = await this.context.pull(messageId);
    // Reassemble a DataFrame from the pulled table's columns, keyed by name.
    return new DataFrame(
      names.reduce((cols, name, i) => ({...cols, [name]: table.getColumnByIndex(i)}), {}));
  }
}
0
rapidsai_public_repos/node/modules/sql
rapidsai_public_repos/node/modules/sql/src/algebra.ts
// Copyright (c) 2021, NVIDIA CORPORATION. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. const java = require('java'); import * as Fs from 'fs'; import * as Path from 'path'; const NODE_DEBUG = ((<any>process.env).NODE_DEBUG || (<any>process.env).NODE_ENV === 'debug'); let moduleBasePath = Path.dirname(module.id); if (Path.basename(moduleBasePath) == 'src') { moduleBasePath = Path.dirname(moduleBasePath); moduleBasePath = Path.join(moduleBasePath, 'build', 'js'); } if (NODE_DEBUG && Fs.existsSync(Path.join(moduleBasePath, '..', 'Debug'))) { java.classpath.push(Path.join(moduleBasePath, '..', 'Debug', 'blazingsql-algebra.jar')); java.classpath.push(Path.join(moduleBasePath, '..', 'Debug', 'blazingsql-algebra-core.jar')); } else { java.classpath.push(Path.join(moduleBasePath, '..', 'Release', 'blazingsql-algebra.jar')); java.classpath.push(Path.join(moduleBasePath, '..', 'Release', 'blazingsql-algebra-core.jar')); } export function ArrayList() { return java.newInstanceSync('java.util.ArrayList'); } export const CatalogColumnDataType = java.import('com.blazingdb.calcite.catalog.domain.CatalogColumnDataType'); export function CatalogColumnImpl(args: any[]) { return java.newInstanceSync('com.blazingdb.calcite.catalog.domain.CatalogColumnImpl', ...args); } export function CatalogTableImpl(args: any[]) { return java.newInstanceSync('com.blazingdb.calcite.catalog.domain.CatalogTableImpl', ...args); } export function CatalogDatabaseImpl(name: string) { return 
java.newInstanceSync('com.blazingdb.calcite.catalog.domain.CatalogDatabaseImpl', name); } export function BlazingSchema(db: any) { return java.newInstanceSync('com.blazingdb.calcite.schema.BlazingSchema', db); } export function RelationalAlgebraGenerator(schema: any) { return java.newInstanceSync('com.blazingdb.calcite.application.RelationalAlgebraGenerator', schema); }
0
rapidsai_public_repos/node/modules/sql
rapidsai_public_repos/node/modules/sql/src/config.ts
// Copyright (c) 2021-2022, NVIDIA CORPORATION.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/**
 * Default configuration options supplied when constructing a SQL execution
 * context; user-provided options are merged on top of these defaults.
 *
 * NOTE(review): the semantics of each key are defined by the BlazingSQL
 * engine; groupings below are inferred from key names — confirm against the
 * engine's configuration documentation.
 */
export const defaultContextConfigValues = {
  // Network transport selection ('AUTO' lets the engine choose).
  PROTOCOL: 'AUTO',

  // Join / ordering / cache size thresholds (values appear to be bytes
  // unless a key name says otherwise — confirm per key).
  JOIN_PARTITION_SIZE_THRESHOLD: 400000000,
  CONCATENATING_CACHE_NUM_BYTES_TIMEOUT: 100,
  MAX_JOIN_SCATTER_MEM_OVERHEAD: 500000000,
  MAX_NUM_ORDER_BY_PARTITIONS_PER_NODE: 8,
  NUM_BYTES_PER_ORDER_BY_PARTITION: 400000000,
  MAX_DATA_LOAD_CONCAT_CACHE_BYTE_SIZE: 400000000,
  // Effectively "no limit": the max value of an unsigned 64-bit integer.
  FLOW_CONTROL_BYTES_THRESHOLD:
    // eslint-disable-next-line @typescript-eslint/no-loss-of-precision
    18446744073709551615,  // https://en.cppreference.com/w/cpp/types/numeric_limits/max
  MAX_ORDER_BY_SAMPLES_PER_NODE: 10000,

  // Memory consumption thresholds, expressed as fractions (0..1).
  BLAZING_PROCESSING_DEVICE_MEM_CONSUMPTION_THRESHOLD: 0.9,
  BLAZING_DEVICE_MEM_CONSUMPTION_THRESHOLD: 0.6,
  BLAZ_HOST_MEM_CONSUMPTION_THRESHOLD: 0.75,

  // Logging and cache directories.
  BLAZING_LOGGING_DIRECTORY: 'blazing_log',
  BLAZING_CACHE_DIRECTORY: '/tmp/',
  BLAZING_LOCAL_LOGGING_DIRECTORY: 'blazing_log',

  // Threading / scheduling knobs.
  MEMORY_MONITOR_PERIOD: 50,
  MAX_KERNEL_RUN_THREADS: 16,
  EXECUTOR_THREADS: 10,
  MAX_SEND_MESSAGE_THREADS: 20,

  // Logging verbosity and per-category toggles.
  LOGGING_LEVEL: 'trace',
  LOGGING_FLUSH_LEVEL: 'warn',
  ENABLE_GENERAL_ENGINE_LOGS: false,
  ENABLE_COMMS_LOGS: false,
  ENABLE_TASK_LOGS: false,
  ENABLE_OTHER_ENGINE_LOGS: false,
  LOGGING_MAX_SIZE_PER_FILE: 1073741824,  // 1 GB

  // Transport buffer pool sizing.
  TRANSPORT_BUFFER_BYTE_SIZE: 1048576,  // 1 MB in bytes
  TRANSPORT_POOL_NUM_BUFFERS: 1000,
  REQUIRE_ACKNOWLEDGE: false,
};

// Cluster defaults: identical to the single-context defaults except the
// transport protocol is pinned to TCP.
export const defaultClusterConfigValues = {
  ...defaultContextConfigValues, PROTOCOL: 'TCP'
};
0