repo_id | file_path | content | __index_level_0__
---|---|---|---|
rapidsai_public_repos/gha-tools
|
rapidsai_public_repos/gha-tools/tools/rapids-configure-conda-channels
|
#!/bin/bash
# A utility script that configures conda channels
# Remove nightly channels if build is a release build
if rapids-is-release-build; then
conda config --system --remove channels rapidsai-nightly
conda config --system --remove channels dask/label/dev
fi
| 0 |
rapidsai_public_repos/gha-tools
|
rapidsai_public_repos/gha-tools/tools/gpuci_conda_retry
|
#!/bin/bash
#
# gpuci_conda_retry
#
# Compatibility wrapper that forwards to the new rapids-conda-retry script
# Configurable env vars GPUCI_* are re-exported as RAPIDS_*
rapids-echo-stderr "'gpuci_conda_retry' is deprecated, please use 'rapids-conda-retry' instead"
# alias old GPUCI_ env vars to new RAPIDS_ equivalents if they are set
if [[ -v GPUCI_CONDA_RETRY_MAX ]]; then
export RAPIDS_CONDA_RETRY_MAX="${GPUCI_CONDA_RETRY_MAX}"
fi
if [[ -v GPUCI_CONDA_RETRY_SLEEP ]]; then
export RAPIDS_CONDA_RETRY_SLEEP="${GPUCI_CONDA_RETRY_SLEEP}"
fi
rapids-conda-retry "$@"
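# Usage sketch (values illustrative): legacy GPUCI_* settings still take effect.
#   GPUCI_CONDA_RETRY_MAX=5 gpuci_conda_retry install -y rapids=0.16
#   (re-exported as RAPIDS_CONDA_RETRY_MAX=5, then forwarded to rapids-conda-retry)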
| 0 |
rapidsai_public_repos/gha-tools
|
rapidsai_public_repos/gha-tools/tools/rapids-get-artifact
|
#!/bin/bash
# Downloads an artifact, extracts it to a directory, and
# echoes the resulting directory's path
# Positional Arguments:
# 1) path to an artifact (e.g. ci/cudf/pull-request/12602/aa4da21/cudf_conda_python_cuda11_38_x86_64.tar.gz)
# Example Usage:
# rapids-get-artifact ci/cudf/pull-request/12602/aa4da21/cudf_conda_python_cuda11_38_x86_64.tar.gz
set -euo pipefail
source rapids-constants
s3_dl_path="s3://${RAPIDS_DOWNLOADS_BUCKET}/${1}"
untar_dest=$(mktemp -d)
_rapids-download-from-s3 "$s3_dl_path" "$untar_dest"
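# Usage sketch: capture the echoed directory (path reuses the example above).
#   extracted_dir=$(rapids-get-artifact ci/cudf/pull-request/12602/aa4da21/cudf_conda_python_cuda11_38_x86_64.tar.gz)
#   ls "${extracted_dir}"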
| 0 |
rapidsai_public_repos/gha-tools
|
rapidsai_public_repos/gha-tools/tools/rapids-check-pr-job-dependencies
|
#!/bin/bash
# Checks whether a particular GitHub workflow job depends on all of the
# other jobs in the workflow.
#
# This is necessary since the RAPIDS branch protections are configured to require
# the "pr-builder" job to pass for all PRs, which implicitly assumes that job
# depends on all other jobs in the workflow.
set -euo pipefail
export WORKFLOW_FILE=${WORKFLOW_FILE:-".github/workflows/pr.yaml"}
export PR_BUILDER_JOB_NAME=${PR_BUILDER_JOB_NAME:-"pr-builder"}
WORKFLOW_JOBS=$(yq '((.jobs | keys | sort) - [env(PR_BUILDER_JOB_NAME)]) | join(" ")' "${WORKFLOW_FILE}")
PR_BUILDER_JOB_NEEDS=$(yq '(.jobs.[env(PR_BUILDER_JOB_NAME)].needs | sort) | join(" ")' "${WORKFLOW_FILE}")
if [ "${WORKFLOW_JOBS}" != "${PR_BUILDER_JOB_NEEDS}" ]; then
echo "'${PR_BUILDER_JOB_NAME}' is missing a dependency."
echo "Update '${WORKFLOW_FILE}' to include all other jobs for '${PR_BUILDER_JOB_NAME}'"
echo ""
echo "Workflow jobs: ${WORKFLOW_JOBS}"
echo "'${PR_BUILDER_JOB_NAME}' job dependencies: ${PR_BUILDER_JOB_NEEDS}"
exit 1
fi
echo "${PR_BUILDER_JOB_NAME} depends on all other jobs."
| 0 |
rapidsai_public_repos/gha-tools
|
rapidsai_public_repos/gha-tools/tools/rapids-conda-retry
|
#!/bin/bash
#
# rapids-conda-retry
#
# wrapper for conda that retries the command after a CondaHTTPError,
# ChecksumMismatchError, or JSONDecodeError (ideally, any conda error that
# is normally resolved by retrying)
#
# Note: "set -o pipefail" (set below) is required so the script recognizes
# failing exit codes when conda output is piped to tee
#
# Example usage:
# $ rapids-conda-retry install cudatoolkit=11.0 rapids=0.16
#
# Configurable options are set using the following env vars:
#
# RAPIDS_CONDA_EXE - override the conda executable
# Default is "conda"
#
# RAPIDS_CONDA_RETRY_MAX - set to a positive integer to set the max number of retry
# attempts (attempts after the initial try).
# Default is 3 retries
#
# RAPIDS_CONDA_RETRY_SLEEP - set to a positive integer to set the duration, in
# seconds, to wait between retries.
# Default is a 10 second sleep
#
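# Additional usage sketches (flag and env values are illustrative):
#   rapids-conda-retry --condaretry_max_retries=5 --condaretry_sleep_interval=30 install rapids=0.16
#   RAPIDS_CONDA_EXE=mamba rapids-conda-retry create -n test python=3.10
#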
set -o pipefail
export RAPIDS_SCRIPT_NAME="rapids-conda-retry"
condaretry_help="
rapids-conda-retry options:
--condaretry_max_retries=n Retry the conda command at most n times (default is 3)
--condaretry_sleep_interval=n Sleep n seconds between retries (default is 10)
ALSO rapids-conda-retry options can be set using the following env vars:
RAPIDS_CONDA_RETRY_MAX - set to a positive integer to set the max number of retry
attempts (attempts after the initial try).
Default is 3 retries
RAPIDS_CONDA_RETRY_SLEEP - set to a positive integer to set the duration, in
seconds, to wait between retries.
Default is a 10 second sleep
==========
"
max_retries=${RAPIDS_CONDA_RETRY_MAX:=3}
sleep_interval=${RAPIDS_CONDA_RETRY_SLEEP:=10}
exitcode=0
needToRetry=0
retries=0
args=""
# Temporarily set this to something else (eg. a script called "testConda" that
# prints "CondaHTTPError:" and exits with 1) for testing this script.
#condaCmd=./testConda
condaCmd=${RAPIDS_CONDA_EXE:=conda}
# Function to run conda and check output for specific retryable errors
# input variables:
# condaCmd: the command used for running conda, which accepts the args
# passed to this script
# outfile: file to tee output to for checking, likely a temp file
# output variables:
# exitcode: the exit code from running ${condaCmd} ${args}
# needToRetry: 1 if the command should be retried, 0 if it should not be
function runConda {
# shellcheck disable=SC2086
${condaCmd} ${args} 2>&1 | tee "${outfile}"
exitcode=$?
needToRetry=0
needToClean=0
retryingMsg=""
if (( exitcode != 0 )); then
# Show exit code
rapids-echo-stderr "conda returned exit code: ${exitcode}"
if grep -q CondaHTTPError: "${outfile}"; then
retryingMsg="Retrying, found 'CondaHTTPError:' in output..."
needToRetry=1
elif grep -q ChecksumMismatchError: "${outfile}"; then
retryingMsg="Retrying, found 'ChecksumMismatchError:' in output..."
needToRetry=1
elif grep -q JSONDecodeError: "${outfile}"; then
retryingMsg="Retrying, found 'JSONDecodeError:' in output..."
needToRetry=1
elif grep -q ChunkedEncodingError: "${outfile}"; then
retryingMsg="Retrying, found 'ChunkedEncodingError:' in output..."
needToRetry=1
elif grep -q CondaMultiError: "${outfile}"; then
retryingMsg="Retrying after cleaning tarball cache, found 'CondaMultiError:' in output..."
needToRetry=1
needToClean=1
elif grep -q EOFError: "${outfile}"; then
retryingMsg="Retrying, found 'EOFError:' in output..."
needToRetry=1
elif grep -q ConnectionError: "${outfile}"; then
retryingMsg="Retrying, found 'ConnectionError:' in output..."
needToRetry=1
elif grep -q "Multi-download failed" "${outfile}"; then
retryingMsg="Retrying, found 'Multi-download failed' in output..."
needToRetry=1
elif grep -q "Timeout was reached" "${outfile}"; then
retryingMsg="Retrying, found 'Timeout was reached' in output..."
needToRetry=1
elif [[ $exitcode -eq 139 ]]; then
retryingMsg="Retrying, command resulted in a segfault. This may be an intermittent failure..."
needToRetry=1
needToClean=1
else
rapids-echo-stderr "Exiting, no retryable ${RAPIDS_CONDA_EXE} errors detected: \
'ChecksumMismatchError:', \
'ChunkedEncodingError:', \
'CondaHTTPError:', \
'CondaMultiError:', \
'ConnectionError:', \
'EOFError:', \
'JSONDecodeError:', \
'Multi-download failed', \
'Timeout was reached', \
segfault exit code 139"
fi
if (( needToRetry == 1 )) && \
(( retries >= max_retries )); then
# Catch instance where we run out of retries
rapids-echo-stderr "Exiting, reached max retries..."
else
# Give reason for retry
rapids-echo-stderr "${retryingMsg}"
if (( needToClean == 1 )); then
rapids-echo-stderr "Cleaning tarball cache before retrying..."
${condaCmd} clean --tarballs -y
fi
fi
fi
}
# Process and remove args recognized only by this script, save others for conda
# Process help separately
for arg in "$@"; do
opt=${arg%%=*}
val=${arg##*=}
if [[ ${opt} == "--help" ]] || [[ ${opt} == "-h" ]]; then
echo "${condaretry_help}"
${condaCmd} --help
exit $?
elif [[ ${opt} == "--condaretry_max_retries" ]]; then
max_retries=${val}
elif [[ ${opt} == "--condaretry_sleep_interval" ]]; then
sleep_interval=${val}
else
args="${args} ${arg}"
fi
done
# Run command
outfile=$(mktemp)
# shellcheck disable=SC2086
runConda ${args}
# Retry loop, only if needed
while (( needToRetry == 1 )) && \
(( retries < max_retries )); do
retries=$(( retries + 1 ))
rapids-echo-stderr "Waiting, retry ${retries} of ${max_retries} -> sleeping for ${sleep_interval} seconds..."
sleep "${sleep_interval}"
rapids-echo-stderr "Starting, retry ${retries} of ${max_retries} -> sleep done..."
# shellcheck disable=SC2086
runConda ${args}
done
rm -f "${outfile}"
exit ${exitcode}
| 0 |
rapidsai_public_repos/gha-tools
|
rapidsai_public_repos/gha-tools/tools/rapids-configure-sccache
|
#!/bin/bash
# A utility script that configures sccache environment variables
export CMAKE_CUDA_COMPILER_LAUNCHER=sccache
export CMAKE_CXX_COMPILER_LAUNCHER=sccache
export CMAKE_C_COMPILER_LAUNCHER=sccache
export SCCACHE_BUCKET=rapids-sccache-east
export SCCACHE_REGION=us-east-2
export SCCACHE_IDLE_TIMEOUT=32768
export SCCACHE_S3_USE_SSL=true
export SCCACHE_S3_NO_CREDENTIALS=false
if [ "${CI:-false}" = "false" ]; then
# Configure sccache for read-only mode since no credentials
# are available in local builds.
export SCCACHE_S3_NO_CREDENTIALS=true
export PARALLEL_LEVEL=${PARALLEL_LEVEL:-$(nproc)}
fi
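# Usage sketch: source (rather than execute) this script so the exports reach
# the calling shell, as rapids-env-update does.
#   source rapids-configure-sccache
#   cmake -S . -B build    # CMake now launches compilers through sccache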
| 0 |
rapidsai_public_repos/gha-tools
|
rapidsai_public_repos/gha-tools/tools/rapids-download-from-s3
|
#!/bin/bash
# A utility script that downloads an artifact archive from S3, untars it,
# and prints the location where it was untarred.
# Positional Arguments:
# 1) package name to generate s3 path for
# 2) location to untar it to
set -euo pipefail
export RAPIDS_SCRIPT_NAME="rapids-download-from-s3"
if [ -z "$1" ] || [ -z "$2" ]; then
rapids-echo-stderr "Must specify input arguments: PKG_NAME and UNTAR_DEST"
exit 1
fi
s3_dl_path="$(rapids-s3-path)$1"
untar_dest="$2"
_rapids-download-from-s3 "$s3_dl_path" "$untar_dest"
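# Usage sketch (package name format follows rapids-package-name; values illustrative):
#   rapids-download-from-s3 rmm_conda_cpp_cuda11_x86_64.tar.gz "$(mktemp -d)"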
| 0 |
rapidsai_public_repos/gha-tools
|
rapidsai_public_repos/gha-tools/tools/rapids-upload-artifacts-dir
|
#!/bin/bash
# A utility that uploads individual files from $RAPIDS_ARTIFACTS_DIR to S3
set -euo pipefail
source rapids-constants
pkg_prefix="$1"
if [ "${CI:-false}" = "false" ]; then
rapids-echo-stderr "Artifacts from local builds are not uploaded to S3."
exit 0
fi
if [ ! -d "${RAPIDS_ARTIFACTS_DIR}" ]; then
rapids-echo-stderr "Artifacts directory ${RAPIDS_ARTIFACTS_DIR} not found."
exit 0
fi
if [ "$(ls -A "$RAPIDS_ARTIFACTS_DIR")" ]; then
echo "Uploading additional artifacts"
echo ""
for art_file in "${RAPIDS_ARTIFACTS_DIR}"/* ; do
[ -e "$art_file" ] || continue
upload_name=$(basename "${art_file}")
pkg_name="${pkg_prefix}.${upload_name}"
rapids-upload-to-s3 "${pkg_name}" "${art_file}"
done
else
echo "No additional artifacts found."
fi
echo ""
ARTIFACTS_URL=$(rapids-s3-path | sed "s|s3://${RAPIDS_DOWNLOADS_BUCKET}|https://${RAPIDS_DOWNLOADS_DOMAIN}|")
echo "Browse all uploads (NVIDIA Employee VPN Required): ${ARTIFACTS_URL}"
| 0 |
rapidsai_public_repos/gha-tools
|
rapidsai_public_repos/gha-tools/tools/rapids-retry
|
#!/bin/bash
#
# rapids-retry
#
# retries a command 3 times after a non-zero exit, waiting 10 seconds
# between attempts. 3 times and 10 seconds are default values which can be
# configured with env vars described below.
#
# NOTE: source this file to update your bash environment with the settings
# below. Keep in mind that the calling environment will be modified, so do not
# set or change the environment here unless the caller expects that. Also
# remember that "exit" will exit the calling shell! Consider rewriting this
# as a callable script if the functionality below needs to make changes to its
# environment as a side-effect.
#
# Example usage:
# $ rapids-retry conda install cudatoolkit=10.0 rapids=0.12
#
# Configurable options are set using the following env vars:
#
# RAPIDS_RETRY_MAX - set to a positive integer to set the max number of retry
# attempts (attempts after the initial try).
# Default is 3 retries
#
# RAPIDS_RETRY_SLEEP - set to a positive integer to set the duration, in
# seconds, to wait between retries.
# Default is a 10 second sleep
#
function rapids-retry {
command=$1
shift
max_retries=${RAPIDS_RETRY_MAX:=3}
retries=0
sleep_interval=${RAPIDS_RETRY_SLEEP:=10}
${command} "$@"
retcode=$?
while (( retcode != 0 )) && \
(( retries < max_retries )); do
((retries++))
rapids-logger "rapids-retry: retry ${retries} of ${max_retries} | exit code: (${retcode}) -> sleeping for ${sleep_interval} seconds..."
sleep "${sleep_interval}"
rapids-logger "rapids-retry: sleep done -> retrying..."
${command} "$@"
retcode=$?
done
return ${retcode}
}
rapids-retry "$@"
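# Usage sketch: works for any command, not just conda (values illustrative).
#   RAPIDS_RETRY_MAX=5 RAPIDS_RETRY_SLEEP=2 rapids-retry curl -fsSLO https://example.com/artifact.tar.gz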
| 0 |
rapidsai_public_repos/gha-tools
|
rapidsai_public_repos/gha-tools/tools/rapids-download-docker-from-s3
|
#!/bin/bash
# A utility script that downloads a docker image from S3
# Positional Arguments:
# 1) image tag
set -eo pipefail
source rapids-constants
export RAPIDS_SCRIPT_NAME="rapids-download-docker-from-s3"
tmp_dir="$(mktemp -d)"
if [ -z "$1" ]; then
rapids-echo-stderr "Must specify input argument: IMAGE_TAG"
exit 1
fi
docker_image="$1"
# replace "/..." with "_..." to use as file name
docker_image_no_slash="${docker_image//\//_}"
docker_image_s3_name="docker_${docker_image_no_slash}.tar.gz"
tmp_fname="${tmp_dir}/${docker_image_no_slash}.tar.gz"
# download .tar.gz into tmpdir
s3_dl_path="$(rapids-s3-path)${docker_image_s3_name}"
aws s3 cp --only-show-errors "${s3_dl_path}" "${tmp_fname}"
# load into docker
docker load < "${tmp_fname}"
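# Usage sketch (hypothetical tag): slashes in the tag map to underscores in S3.
#   rapids-download-docker-from-s3 rapidsai/ci:cuda11.8.0-ubuntu22.04
#   (fetches "docker_rapidsai_ci:cuda11.8.0-ubuntu22.04.tar.gz" and docker-loads it)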
| 0 |
rapidsai_public_repos/gha-tools
|
rapidsai_public_repos/gha-tools/tools/rapids-docker-multiarch-from-s3
|
#!/bin/bash
# A utility script that downloads all docker images from S3
# and assembles them into multiarch tags
# Optional Positional Arguments:
# 1) latest tag to match
# 2) latest tag alias to apply if tag matches latest tag
set -eo pipefail
set -x
source rapids-constants
export RAPIDS_SCRIPT_NAME="rapids-docker-multiarch-from-s3"
latest_tag="${1:-}"
latest_tag_alias="${2:-}"
DOCKER_TMP_DIR="$(mktemp -d)"
DOCKER_STARTS_WITH="docker_"
DOCKER_ENDS_WITH=".tar.gz"
S3_PATH=$(rapids-s3-path)
BUCKET_PREFIX=${S3_PATH/s3:\/\/${RAPIDS_DOWNLOADS_BUCKET}\//} # removes s3://rapids-downloads/ from s3://rapids-downloads/ci/rmm/...
# shellcheck disable=SC2016
DOCKER_TARBALLS=$(
set -eo pipefail;
aws \
--output json \
s3api list-objects \
--bucket "${RAPIDS_DOWNLOADS_BUCKET}" \
--prefix "${BUCKET_PREFIX}" \
--page-size 100 \
--query "Contents[?contains(Key, '${DOCKER_STARTS_WITH}') && ends_with(Key, '${DOCKER_ENDS_WITH}')].Key" \
| jq -c
)
export DOCKER_TARBALLS
# create an associative array (i.e. dict) of multiarch image to per-arch image
declare -A MULTIARCH_IMAGES
# download and load them all
for OBJ in $(jq -nr 'env.DOCKER_TARBALLS | fromjson | .[]'); do
FILENAME=$(basename "${OBJ}")
S3_URI="${S3_PATH}${FILENAME}"
rapids-echo-stderr "Downloading ${S3_URI} into ${DOCKER_TMP_DIR}"
aws s3 cp --only-show-errors "${S3_URI}" "${DOCKER_TMP_DIR}"/ 1>&2
rapids-echo-stderr "Loading into docker"
loaded_image=$(docker load < "${DOCKER_TMP_DIR}/${FILENAME}")
loaded_image="${loaded_image/"Loaded image: "/}"
# delete the tarball to save space
rm "${DOCKER_TMP_DIR}/${FILENAME}" 1>&2
# strip -$(uname -m) or amd64 or arm64
loaded_image_no_arch="${loaded_image/"-x86_64"/}"
loaded_image_no_arch="${loaded_image_no_arch/"-aarch64"/}"
loaded_image_no_arch="${loaded_image_no_arch/"-amd64"/}"
loaded_image_no_arch="${loaded_image_no_arch/"-arm64"/}"
# no-arch tag is the final multiarch tag
multiarch_tag="${loaded_image_no_arch}"
# store per-arch image in the associative array by multiarch tag
MULTIARCH_IMAGES["${multiarch_tag}"]+=" ${loaded_image}"
done
manifests_to_copy=()
for key in "${!MULTIARCH_IMAGES[@]}"; do
values="${MULTIARCH_IMAGES[$key]}"
rapids-echo-stderr "Preparing multiarch manifest for: ${key} with per-arch images: ${values}"
manifest_args=""
for value in ${values}; do
# use the local registry set up by the `services` block in GHA
local_name="localhost:5000/${value}"
docker tag "${value}" "${local_name}" 1>&2
docker push "${local_name}" 1>&2
manifest_args+="--amend ${local_name} "
done
local_manifest="localhost:5000/${key}"
# shellcheck disable=SC2086
docker manifest create --insecure "${local_manifest}" ${manifest_args} 1>&2
docker manifest push "${local_manifest}" 1>&2
manifests_to_copy+=("${key}")
# if latest tags have been supplied this image matches the latest, give it the additional latest tag
if [ -n "${latest_tag}" ] && [ -n "${latest_tag_alias}" ] && [ "${key}" == "${latest_tag}" ]; then
local_latest="localhost:5000/${latest_tag_alias}"
# shellcheck disable=SC2086
docker manifest create --insecure "${local_latest}" ${manifest_args} 1>&2
docker manifest push "${local_latest}" 1>&2
manifests_to_copy+=("${latest_tag_alias}")
fi
done
echo -n "${manifests_to_copy[@]}"
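# Usage sketch (hypothetical tags): optionally alias the matching tag as "latest".
#   rapids-docker-multiarch-from-s3 rapidsai/ci:24.02 rapidsai/ci:latest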
| 0 |
rapidsai_public_repos/gha-tools
|
rapidsai_public_repos/gha-tools/tools/rapids-upload-to-anaconda
|
#!/bin/bash
# A utility script that uploads all the conda packages from a
# GitHub Actions workflow run to Anaconda.org
set -euo pipefail
source rapids-constants
export RAPIDS_SCRIPT_NAME="rapids-upload-to-anaconda"
case "${RAPIDS_BUILD_TYPE}" in
branch)
;&
nightly)
;;
*)
rapids-echo-stderr "Only branch builds and nightly builds are uploaded to Anaconda.org"
exit 1
;;
esac
S3_PATH=$(rapids-s3-path)
BUCKET_PREFIX=${S3_PATH/s3:\/\/${RAPIDS_DOWNLOADS_BUCKET}\//} # removes s3://rapids-downloads/ from s3://rapids-downloads/ci/rmm/...
# shellcheck disable=SC2016
CONDA_ARTIFACTS=$(
set -eo pipefail;
aws \
--output json \
s3api list-objects \
--bucket "${RAPIDS_DOWNLOADS_BUCKET}" \
--prefix "${BUCKET_PREFIX}" \
--page-size 100 \
--query 'Contents[?contains(Key, `conda`)].Key' \
| jq -c
)
export CONDA_ARTIFACTS
for OBJ in $(jq -nr 'env.CONDA_ARTIFACTS | fromjson | .[]'); do
FILENAME=$(basename "${OBJ}")
FILENAME_NO_EXT="${FILENAME%%.*}"
S3_URI="${S3_PATH}${FILENAME}"
UNTAR_DEST="${FILENAME_NO_EXT}"
rapids-echo-stderr "Untarring ${S3_URI} into ${UNTAR_DEST}"
mkdir -p "${UNTAR_DEST}"
aws s3 cp --only-show-errors "${S3_URI}" - | tar xzf - -C "${UNTAR_DEST}"
PKGS_TO_UPLOAD=$(rapids-find-anaconda-uploads.py "${UNTAR_DEST}")
if [ -z "${PKGS_TO_UPLOAD}" ]; then
rapids-echo-stderr "Couldn't find any packages to upload in: ${UNTAR_DEST}"
ls -l "${UNTAR_DEST}/"*
continue
fi
rapids-echo-stderr "Uploading packages to Anaconda.org: ${PKGS_TO_UPLOAD}"
export RAPIDS_RETRY_SLEEP=180
# shellcheck disable=SC2086
rapids-retry anaconda \
-t "${RAPIDS_CONDA_TOKEN}" \
upload \
--label "${RAPIDS_CONDA_UPLOAD_LABEL:-main}" \
--skip-existing \
--no-progress \
${PKGS_TO_UPLOAD}
echo ""
done
| 0 |
rapidsai_public_repos/gha-tools
|
rapidsai_public_repos/gha-tools/tools/rapids-size-checker
|
#!/bin/bash
set -eu
export RAPIDS_SCRIPT_NAME="rapids-size-checker"
echo "Retrieving base branch from GitHub API:"
# For PRs, the branch name is like:
# pull-request/989
pr_num="${GITHUB_REF_NAME##*/}"
curl_headers=('-H' "Authorization: token ${RAPIDS_GH_TOKEN}")
resp=$(
curl \
-H "Accept: application/vnd.github.v3+json" \
"${curl_headers[@]}" \
"https://api.github.com/repos/${GITHUB_REPOSITORY}/pulls/${pr_num}"
)
base_branch=$(echo "${resp}" | jq -r '.base.ref')
diff_files=$(mktemp)
large_files=$(mktemp)
trap 'rm -f ${diff_files} ${large_files}' EXIT
filesize_limit=5242880 # 5 MiB, in bytes
retval=0
# Get list of files changed in current PR
git fetch origin
git diff --name-only origin/"${base_branch}"..HEAD > "${diff_files}"
echo "### Comparing ${base_branch} to HEAD:"
echo '### Files modified in current PR'
while read -r file_name; do
echo "Size check ${file_name}"
if [ -f "${file_name}" ]; then
if [ "$(du -b "${file_name}" | awk '{print $1}')" -gt "${filesize_limit}" ]; then
retval=1
echo "${file_name}" >> "${large_files}"
fi
fi
done < "${diff_files}"
if [ "${retval}" == 1 ]; then
echo "### Files exceeding the ${filesize_limit} size limit. Please see documentation for"
echo "### large file handling: https://docs.rapids.ai/resources/git/#large-files-and-git"
cat "${large_files}"
echo "###"
else
echo "### All files under the ${filesize_limit} size limit"
fi
exit $retval
| 0 |
rapidsai_public_repos/gha-tools
|
rapidsai_public_repos/gha-tools/tools/rapids-upload-conda-to-s3
|
#!/bin/bash
# A utility script that tars up $RAPIDS_CONDA_BLD_OUTPUT_DIR and uploads it to S3
# Positional Arguments:
# 1) a string of "cpp" or "python" which determines which conda artifact
# should be uploaded
set -euo pipefail
pkg_type="$1"
case "${pkg_type}" in
cpp)
;&
python)
;;
*)
echo 'Pass "cpp" or "python" as an argument.'
exit 1
;;
esac
if [ "${CI:-false}" = "false" ]; then
rapids-echo-stderr "Packages from local builds cannot be uploaded to S3."
rapids-echo-stderr "Open a PR to have successful builds uploaded."
exit 0
fi
# Prepend `conda_` to PKG_TYPE
pkg_type="conda_$pkg_type"
pkg_name="$(rapids-package-name "$pkg_type")"
# Where conda build artifacts are output
path_to_tar_up="${RAPIDS_CONDA_BLD_OUTPUT_DIR}"
rapids-upload-to-s3 "${pkg_name}" "${path_to_tar_up}"
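# Usage sketch: tar up and upload the C++ conda channel after a build
# (output dir value illustrative).
#   RAPIDS_CONDA_BLD_OUTPUT_DIR=/tmp/conda-bld-output rapids-upload-conda-to-s3 cpp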
| 0 |
rapidsai_public_repos/gha-tools
|
rapidsai_public_repos/gha-tools/tools/rapids-extract-conda-files
|
#!/bin/bash
# A utility script that extracts all conda packages
# after being downloaded by rapids-download-conda-from-s3
set -eo pipefail
export RAPIDS_SCRIPT_NAME="rapids-extract-conda-files"
if [ -z "$1" ]; then
rapids-echo-stderr "Must specify input argument: TARBALL_DIR"
exit 1
fi
# directory containing the downloaded tarballs to extract
tarball_dir="$1"
{
untar_dest=$(mktemp -d)
mkdir -p "${untar_dest}"
cd "${untar_dest}"
find "${tarball_dir}" -name "*tar.bz2" -type f -print0 | xargs -0 -n 1 tar -v -xf
} >&2
echo -n "${untar_dest}"
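# Usage sketch: chain with rapids-download-conda-from-s3 (that script's exact
# CLI is assumed here; variable names illustrative).
#   tarball_dir=$(rapids-download-conda-from-s3 cpp)
#   extracted_dir=$(rapids-extract-conda-files "${tarball_dir}")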
| 0 |
rapidsai_public_repos/gha-tools
|
rapidsai_public_repos/gha-tools/tools/rapids-package-name
|
#!/bin/bash
# A utility script that generates a package name from a package type
# Positional Arguments:
# 1) package type
set -euo pipefail
export RAPIDS_SCRIPT_NAME="rapids-package-name"
repo_name="${RAPIDS_REPOSITORY##*/}"
if [ -z "$1" ]; then
rapids-echo-stderr "Must specify input arguments: PKG_TYPE"
exit 1
fi
pkg_type="$1"
append_cuda=0
append_pyver=0
append_wheelname=0
case "${pkg_type}" in
conda_cpp)
append_cuda=1
;;
conda_python)
append_cuda=1
append_pyver=1
;;
wheel_python)
append_pyver=1
append_wheelname=1
;;
*)
rapids-echo-stderr "Nonstandard package type '${pkg_type}'"
exit 1
;;
esac
pkg_name="${pkg_type}"
# for conda package types, append CUDA version
if (( append_cuda == 1 )); then
pkg_name+="_cuda${RAPIDS_CUDA_VERSION%%.*}"
fi
# for wheels, add the python wheel name if env var is set
if (( append_wheelname )) && [[ -v RAPIDS_PY_WHEEL_NAME ]] && [[ "${RAPIDS_PY_WHEEL_NAME}" != "" ]]; then
pkg_name+="_${RAPIDS_PY_WHEEL_NAME}"
fi
# for python package types, add pyver
if (( append_pyver == 1 )); then
pkg_name+="_${RAPIDS_PY_VERSION//./}"
fi
# for cpp and python package types, always append arch
if [[ -v RAPIDS_ARCH ]] && [[ "${RAPIDS_ARCH}" != "" ]]; then
# use arch override if specified
pkg_name+="_${RAPIDS_ARCH}"
else
# otherwise use architecture of the host that's running the upload command
pkg_name+="_$(arch)"
fi
# for cpp and python package types, it's a tarball: append .tar.gz and prepend the repo name
pkg_name="${repo_name}_${pkg_name}.tar.gz"
echo -n "${pkg_name}"
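# Usage sketch (example values): reproduces the artifact name format seen
# elsewhere in these tools.
#   RAPIDS_REPOSITORY=rapidsai/cudf RAPIDS_CUDA_VERSION=11.8.0 RAPIDS_PY_VERSION=3.8 \
#     rapids-package-name conda_python
#   # -> cudf_conda_python_cuda11_38_x86_64.tar.gz (on an x86_64 host)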
| 0 |
rapidsai_public_repos/gha-tools
|
rapidsai_public_repos/gha-tools/tools/rapids-wheel-ctk-name-gen
|
#!/bin/bash
# A utility script that generates CUDA suffix in the format "cu${VER}" where `$VER`
# is the CUDA major version (for example, "cu11").
# Positional Arguments:
# 1) ctk tag
set -eu -o pipefail
if [ -z "$1" ]; then
rapids-echo-stderr "Must specify input argument: CTK_TAG"
exit 1
fi
ctk_tag="$1"
ctk_major=$(echo "$ctk_tag" | cut -d'.' -f1)
cuda_suffix="cu${ctk_major}"
echo -n "${cuda_suffix}"
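# Usage sketch:
#   rapids-wheel-ctk-name-gen 11.8.0   # -> cu11
#   rapids-wheel-ctk-name-gen 12.0     # -> cu12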
| 0 |
rapidsai_public_repos/gha-tools
|
rapidsai_public_repos/gha-tools/tools/rapids-upload-docs
|
#!/bin/bash
# This script uploads RAPIDS docs to S3.
# The docs are expected to be in the following directory structure:
# $RAPIDS_DOCS_DIR
# ├── cudf
# │   ├── html
# │   │   └── <html files>
# │   └── txt
# │       └── <txt files>
# └── dask-cudf
#     ├── html
#     │   └── <html files>
#     └── txt
#         └── <txt files>
# Required Environment Variables:
# - RAPIDS_DOCS_DIR - a path to a directory containing the docs to upload
# - RAPIDS_VERSION_NUMBER - the version number of the docs being uploaded
set -euo pipefail
source rapids-constants
checks() {
if [[ ! -d "${RAPIDS_DOCS_DIR}" ]]; then
echo "ERROR: RAPIDS_DOCS_DIR must be a directory."
exit 1
fi
if [[ "${GITHUB_ACTIONS:-false}" != "true" ]]; then
echo "Uploading docs from local builds is not supported."
echo "The docs are in ${RAPIDS_DOCS_DIR}."
echo "They can be viewed in a web browser by running:"
echo "python -m http.server --directory ${RAPIDS_DOCS_DIR}"
exit 0
fi
}
get_s3_dest() {
local PROJECT=$1
local FORMAT=$2
case "${RAPIDS_BUILD_TYPE}" in
pull-request)
echo -n "$(rapids-s3-path)docs/${PROJECT}/${FORMAT}"
return
;;
branch|nightly)
echo -n "s3://rapidsai-docs/${PROJECT}/${FORMAT}/${RAPIDS_VERSION_NUMBER}"
return
;;
*)
rapids-echo-stderr "please pass a valid RAPIDS_BUILD_TYPE"
exit 1
;;
esac
}
copy_docs_to_s3() {
local PROJECT_DIR PROJECT PROJECT_FORMAT_DIR FORMAT
if [[ "${RAPIDS_BUILD_TYPE}" == "pull-request" ]]; then
{
echo '# Documentation Preview:'
echo ''
echo '**Note:** NVIDIA VPN access is required to view these URLs.'
echo ''
} > "${GITHUB_STEP_SUMMARY}"
fi
for PROJECT_DIR in "${RAPIDS_DOCS_DIR}"/*; do
PROJECT=$(basename "${PROJECT_DIR}")
for PROJECT_FORMAT_DIR in "${PROJECT_DIR}"/*; do
FORMAT=$(basename "${PROJECT_FORMAT_DIR}")
if [[ ! "${FORMAT}" =~ ^(html|txt|xml_tar)$ ]]; then
echo "ERROR: FORMAT must be either 'html' or 'txt' or 'xml_tar'."
exit 1
fi
if [[ "${FORMAT}" == "xml_tar" ]]; then
NUM_FILES=$(find "$PROJECT_FORMAT_DIR" -type f | wc -l)
if [[ ! -f "${PROJECT_FORMAT_DIR}/xml.tar.gz" || $NUM_FILES -ne 1 ]]; then
echo "Error: The xml_tar directory should contain a single file named xml.tar.gz."
exit 1
fi
fi
rapids-logger "Uploading ${RAPIDS_VERSION_NUMBER} ${PROJECT} ${FORMAT} docs to S3."
ACL_OPTION="private"
if [[ "$FORMAT" == "xml_tar" ]]; then
ACL_OPTION="public-read"
fi
aws s3 sync \
--no-progress \
--delete \
--acl "${ACL_OPTION}" \
"${PROJECT_FORMAT_DIR}" \
"$(get_s3_dest "${PROJECT}" "${FORMAT}")"
echo ""
if [[ "${RAPIDS_BUILD_TYPE}" != "pull-request" ]]; then
continue
fi
rapids-logger "${PROJECT} ${FORMAT} preview URL:"
HTTP_URL="$(
get_s3_dest "${PROJECT}" "${FORMAT}" |
sed "s|s3://${RAPIDS_DOWNLOADS_BUCKET}|https://${RAPIDS_DOWNLOADS_DOMAIN}|"
)"
echo "Note: NVIDIA VPN access is required to view this URL."
echo "- ${HTTP_URL}/" | tee --append "${GITHUB_STEP_SUMMARY}"
echo ""
done
done
}
checks
copy_docs_to_s3
| 0 |
rapidsai_public_repos/gha-tools
|
rapidsai_public_repos/gha-tools/tools/rapids-dependency-file-checker
|
#!/bin/bash
# A utility script that ensures all generated dependency files are up-to-date
# Positional Arguments:
# 1) config file path
set -eo pipefail
CONFIG_FILE=$1
# delete existing generated files by grepping for files w/ "generated by" header
SEARCH_PATTERN='^# This file is generated by `rapids.*'
grep -rlP \
--include="*.txt" \
--include="*.yaml" \
"${SEARCH_PATTERN}" . | \
xargs rm || true
rapids-dependency-file-generator --config "${CONFIG_FILE:-"dependencies.yaml"}"
git diff --exit-code
test -z "$(git status --porcelain)" # ensures there are no untracked dependency files
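# Usage sketch: run from a repository root; a non-zero exit (from git diff or
# the untracked-file test) means generated dependency files are stale.
#   rapids-dependency-file-checker dependencies.yaml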
| 0 |
rapidsai_public_repos/gha-tools
|
rapidsai_public_repos/gha-tools/tools/rapids-env-update
|
#!/bin/bash
# A utility script that examines environment variables provided
# by the CI environment to make some environment changes depending on whether
# a nightly or stable build is occurring.
set -euo pipefail
rapids-configure-conda-channels
source rapids-configure-sccache
source rapids-date-string
| 0 |
rapidsai_public_repos/gha-tools
|
rapidsai_public_repos/gha-tools/tools/rapids-find-anaconda-uploads.py
|
#!/usr/bin/env python3
# shellcheck disable=all
"""
Script that finds all Anaconda packages that should be uploaded to Anaconda.org
within a given directory.
Positional Arguments:
1: relative or absolute path to search for packages
Examples:
"/tmp/cpp_channel/", "cpp_channel", "."
Environment Variables:
SKIP_UPLOAD_PKGS: space delimited strings of package names that should
not be uploaded to Anaconda.org
Example:
export SKIP_UPLOAD_PKGS="some-private-pkg another-private-pkg"
"""
import glob
import sys
from os import environ, path
def get_pkg_name_from_filename(filename):
"""
Returns the package name associated with a given filename.
"""
return "-".join(filename.split("-")[:-2])
def is_test_pkg(pkg_name):
"""
Returns true if the package name matches the pattern we use for
gtest packages.
"""
return pkg_name.endswith("-tests")
def is_skip_pkg(pkg_name):
"""
Returns true if the package name is in the "SKIP_UPLOAD_PKGS"
environment variable.
"""
skip_pkgs_var = environ.get("SKIP_UPLOAD_PKGS", "")
pkgs_to_skip = skip_pkgs_var.split(" ")
return pkg_name in pkgs_to_skip
def file_filter_fn(file_path):
"""
Filters out packages that shouldn't be uploaded to Anaconda.org
"""
filename = path.basename(file_path)
pkg_name = get_pkg_name_from_filename(filename)
if is_test_pkg(pkg_name):
return False
if is_skip_pkg(pkg_name):
return False
return True
if __name__ == "__main__":
directory_to_search = sys.argv[1]
tar_files = glob.glob(f"{directory_to_search}/**/*.tar.bz2", recursive=True)
filtered_list = list(filter(file_filter_fn, tar_files))
print("\n".join(filtered_list))
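# Usage sketch (hypothetical directory and package names):
#   SKIP_UPLOAD_PKGS="my-private-pkg" rapids-find-anaconda-uploads.py ./cpp_channel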
| 0 |
rapidsai_public_repos/gha-tools
|
rapidsai_public_repos/gha-tools/tools/rapids-twine
|
#!/bin/bash
# A utility script that wraps twine to upload all pip wheels of a workflow run
#
# Positional Arguments:
# 1) wheel name
set -exou pipefail
source rapids-constants
export RAPIDS_SCRIPT_NAME="rapids-twine"
if [ -z "$1" ]; then
rapids-echo-stderr "Must specify input arguments: WHEEL_NAME"
exit 1
fi
WHEEL_NAME="$1"
WHEEL_SEARCH_KEY="wheel_python_${WHEEL_NAME}"
WHEEL_DIR="./dist"
mkdir -p "${WHEEL_DIR}"
S3_PATH=$(rapids-s3-path)
BUCKET_PREFIX=${S3_PATH/s3:\/\/${RAPIDS_DOWNLOADS_BUCKET}\//} # removes s3://rapids-downloads/ from s3://rapids-downloads/ci/rmm/...
# shellcheck disable=SC2016
WHEEL_TARBALLS=$(
set -eo pipefail;
aws \
--output json \
s3api list-objects \
--bucket "${RAPIDS_DOWNLOADS_BUCKET}" \
--prefix "${BUCKET_PREFIX}" \
--page-size 100 \
--query "Contents[?contains(Key, '${WHEEL_SEARCH_KEY}')].Key" \
| jq -c
)
export WHEEL_TARBALLS
# first untar them all
for OBJ in $(jq -nr 'env.WHEEL_TARBALLS | fromjson | .[]'); do
FILENAME=$(basename "${OBJ}")
S3_URI="${S3_PATH}${FILENAME}"
rapids-echo-stderr "Untarring ${S3_URI} into ${WHEEL_DIR}"
aws s3 cp --only-show-errors "${S3_URI}" - | tar xzf - -C "${WHEEL_DIR}"
done
# then run twine on all wheels
export RAPIDS_RETRY_SLEEP=180
# shellcheck disable=SC2086
rapids-retry python -m twine \
upload \
--disable-progress-bar \
--non-interactive \
--skip-existing \
"${WHEEL_DIR}"/*.whl
echo ""
| 0 |
rapidsai_public_repos/gha-tools
|
rapidsai_public_repos/gha-tools/tools/rapids-get-pr-conda-artifact
|
#!/bin/bash
# Echo path to an artifact for a specific PR. Finds and uses the latest commit on the PR.
#
# Positional Arguments:
# 1) repo name
# 2) PR number
# 3) "cpp" or "python", to get the artifact for the C++ or Python build, respectively
# 4) [optional] commit hash, to get the artifact for a specific commit
#
# Example Usage:
# rapids-get-pr-conda-artifact rmm 1095 cpp
set -euo pipefail
repo="$1"
pr="$2"
pkg_type="$3"
case "${pkg_type}" in
cpp)
artifact_name=$(RAPIDS_REPOSITORY=$repo rapids-package-name conda_cpp)
;;
python)
artifact_name=$(RAPIDS_REPOSITORY=$repo rapids-package-name conda_python)
;;
*)
echo "Error: 3rd argument must be 'cpp' or 'python'"
exit 1
;;
esac
commit="${4:-}"
if [[ -z "${commit}" ]]; then
commit=$(git ls-remote https://github.com/rapidsai/"${repo}".git refs/heads/pull-request/"${pr}" | cut -c1-7)
fi
rapids-get-artifact "ci/${repo}/pull-request/${pr}/${commit}/${artifact_name}"
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/ucx-py/.pre-commit-config.yaml
|
repos:
- repo: https://github.com/pycqa/isort
rev: 5.12.0
hooks:
- id: isort
args: ["--settings-path=pyproject.toml"]
exclude: __init__.py$
types: [text]
types_or: [python, cython, pyi]
- repo: https://github.com/ambv/black
rev: 22.3.0
hooks:
- id: black
- repo: https://github.com/pycqa/flake8
rev: 5.0.4
hooks:
- id: flake8
- repo: https://github.com/pycqa/flake8
rev: 5.0.4
hooks:
- id: flake8
alias: flake8-cython
name: flake8-cython
args: ["--config=.flake8.cython"]
types: [cython]
- repo: https://github.com/rapidsai/dependency-file-generator
rev: v1.5.2
hooks:
- id: rapids-dependency-file-generator
args: ["--clean"]
default_language_version:
python: python3
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/ucx-py/pyproject.toml
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
[build-system]
build-backend = "setuptools.build_meta"
requires = [
"cython>=3.0.0",
"setuptools>=64.0.0",
"tomli",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit dependencies.yaml and run `rapids-dependency-file-generator`.
[project]
name = "ucx-py"
dynamic = ["version"]
description = "Python Bindings for the Unified Communication X library (UCX)"
readme = { file = "README.md", content-type = "text/markdown" }
authors = [
{ name = "NVIDIA Corporation" },
]
license = { text = "BSD-3-Clause" }
requires-python = ">=3.9"
dependencies = [
"numpy>=1.21",
"pynvml>=11.4.1",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit dependencies.yaml and run `rapids-dependency-file-generator`.
classifiers = [
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: BSD License",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Hardware",
"Topic :: System :: Systems Administration",
"Programming Language :: Python :: 3",
]
[project.optional-dependencies]
test = [
"cloudpickle",
"cudf==24.2.*",
"cupy-cuda11x>=12.0.0",
"dask",
"distributed",
"numba>=0.57",
"pytest",
"pytest-asyncio",
"pytest-rerunfailures",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit dependencies.yaml and run `rapids-dependency-file-generator`.
[project.urls]
Homepage = "https://github.com/rapidsai/ucx-py"
Documentation = "https://ucx-py.readthedocs.io/en/stable/"
Source = "https://github.com/rapidsai/ucx-py"
[tool.isort]
line_length = 79
multi_line_output = 3
include_trailing_comma = true
force_grid_wrap = 0
combine_as_imports = true
order_by_type = true
known_dask = [
"dask",
"distributed",
"dask_cuda",
]
known_rapids = [
"rmm",
"cuml",
"cugraph",
"dask_cudf",
"cudf",
]
known_first_party = [
"ucp",
]
default_section = "THIRDPARTY"
sections = [
"FUTURE",
"STDLIB",
"THIRDPARTY",
"DASK",
"RAPIDS",
"FIRSTPARTY",
"LOCALFOLDER",
]
skip = [
".eggs",
".git",
".hg",
".mypy_cache",
".tox",
".venv",
"build",
"dist",
"__init__.py",
]
[tool.pytest.ini_options]
xfail_strict = true
[tool.setuptools]
license-files = ["LICENSE"]
zip-safe = false
[tool.setuptools.packages.find]
exclude=["tests*"]
[tool.setuptools.dynamic]
version = {file = "ucp/VERSION"}
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/ucx-py/.flake8
|
[flake8]
ignore = E901,E225,E226,E227,E999,E203,W503
exclude =
.eggs,
*.egg,
build,
__init__.py,
max-line-length = 88
# Ignore black/flake8-pyi conflicts
per-file-ignores =
*.pyi:E301 E302 E704
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/ucx-py/.flake8.cython
|
#
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
[flake8]
filename = *.pyx, *.pxd
exclude = *.egg, build, docs, .git
ignore = E999, E225, E226, E227, W503, W504, E211
max-line-length = 88
# Rules ignored:
# E999: invalid syntax (works for Python, not Cython)
# E211: whitespace before '(' (used in multi-line imports)
# E225: Missing whitespace around operators (breaks cython casting syntax like <int>)
# E226: Missing whitespace around arithmetic operators (breaks cython pointer syntax like int*)
# E227: Missing whitespace around bitwise or shift operator (Can also break casting syntax)
# W503: line break before binary operator (breaks lines that start with a pointer)
# W504: line break after binary operator (breaks lines that end with a pointer)
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/ucx-py/README.md
|
[Documentation](https://ucx-py.readthedocs.io/en/latest/)
# Python Bindings for UCX
## Installing
Users can either [install with Conda]( https://ucx-py.readthedocs.io/en/latest/install.html#conda ) or [build from source]( https://ucx-py.readthedocs.io/en/latest/install.html#source ).
## Testing
To run ucx-py's tests, just use ``pytest``:
```bash
pytest -v
```
### TCP Support
In order to use TCP add `tcp` to `UCX_TLS` and set `UCXPY_IFNAME` to the network interface you want to use. Some setup examples:
```bash
# TCP using "eth0" and CUDA support
export UCX_TLS=tcp,cuda_copy,cuda_ipc
export UCXPY_IFNAME="eth0"
# InfiniBand using "ib0" and CUDA support
export UCX_TLS=rc,cuda_copy,cuda_ipc
export UCXPY_IFNAME="ib0"
# TCP using "eno0" and no CUDA support
export UCX_TLS=tcp
export UCXPY_IFNAME="eno0"
```
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/ucx-py/.readthedocs.yml
|
version: 2
build:
os: "ubuntu-22.04"
tools:
python: "mambaforge-22.9"
python:
install:
- method: pip
path: .
conda:
environment: conda/environments/builddocs.yml
sphinx:
configuration: docs/source/conf.py
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/ucx-py/MANIFEST.in
|
# Python type stubs
recursive-include ucp *.pyi
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/ucx-py/dependencies.yaml
|
# Dependency list for https://github.com/rapidsai/dependency-file-generator
files:
all:
output: none
includes:
- checks
- cudatoolkit
- py_version
- run
- test_python
test_python:
output: none
includes:
- cudatoolkit
- py_version
- test_python
checks:
output: none
includes:
- checks
- py_version
py_build:
output: pyproject
pyproject_dir: .
extras:
table: build-system
includes:
- build_python
py_run:
output: pyproject
pyproject_dir: .
extras:
table: project
includes:
- run
py_optional_test:
output: pyproject
pyproject_dir: .
extras:
table: project.optional-dependencies
key: test
includes:
- test_python
channels:
- rapidsai
- rapidsai-nightly
- conda-forge
dependencies:
checks:
common:
- output_types: [conda, requirements]
packages:
- pre-commit
cudatoolkit:
specific:
- output_types: conda
matrices:
- matrix:
cuda: "11.2"
packages:
- cuda-version=11.2
- cudatoolkit
- matrix:
cuda: "11.4"
packages:
- cuda-version=11.4
- cudatoolkit
- matrix:
cuda: "11.5"
packages:
- cuda-version=11.5
- cudatoolkit
- matrix:
cuda: "11.8"
packages:
- cuda-version=11.8
- cudatoolkit
- matrix:
cuda: "12.0"
packages:
- cuda-version=12.0
- cuda-cudart
py_version:
specific:
- output_types: conda
matrices:
- matrix:
py: "3.9"
packages:
- python=3.9
- matrix:
py: "3.10"
packages:
- python=3.10
- matrix:
packages:
- python>=3.9,<3.11
build_python:
common:
- output_types: [conda, requirements, pyproject]
packages:
- setuptools>=64.0.0
- cython>=3.0.0
- tomli # Not needed for Python 3.11+
run:
common:
- output_types: [conda, requirements, pyproject]
packages:
- numpy>=1.21
- pynvml>=11.4.1
- output_types: conda
packages:
- ucx
test_python:
common:
- output_types: [conda, requirements, pyproject]
packages:
- cloudpickle
- cudf==24.2.*
- dask
- distributed
- numba>=0.57
- pytest
- pytest-asyncio
- pytest-rerunfailures
- output_types: [conda]
packages:
- cupy>=12.0.0
- output_types: [requirements, pyproject]
packages:
- cupy-cuda11x>=12.0.0
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/ucx-py/setup.py
|
# Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
# This file is a copy of what is available in a Cython demo + some additions
from __future__ import absolute_import, print_function
import os
from distutils.sysconfig import get_config_var, get_python_inc
from Cython.Distutils.build_ext import new_build_ext
from setuptools import setup
from setuptools.extension import Extension
include_dirs = [os.path.dirname(get_python_inc())]
library_dirs = [get_config_var("LIBDIR")]
libraries = ["ucp", "uct", "ucm", "ucs"]
extra_compile_args = ["-std=c99", "-Werror"]
ext_modules = [
Extension(
"ucp._libs.ucx_api",
sources=["ucp/_libs/ucx_api.pyx", "ucp/_libs/src/c_util.c"],
depends=["ucp/_libs/src/c_util.h", "ucp/_libs/ucx_api_dep.pxd"],
include_dirs=include_dirs,
library_dirs=library_dirs,
libraries=libraries,
extra_compile_args=extra_compile_args,
),
Extension(
"ucp._libs.arr",
sources=["ucp/_libs/arr.pyx"],
include_dirs=include_dirs,
library_dirs=library_dirs,
libraries=libraries,
extra_compile_args=extra_compile_args,
),
]
setup(
ext_modules=ext_modules,
cmdclass={"build_ext": new_build_ext},
package_data={"ucp": ["VERSION"]},
)
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/ucx-py/LICENSE
|
Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/ucx-py/VERSION
|
0.36.0
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/tests/test_benchmark_cluster.py
|
import asyncio
import tempfile
from itertools import chain
import numpy as np
import pytest
from ucp.benchmarks.utils import _run_cluster_server, _run_cluster_workers
async def _worker(rank, eps, args):
futures = []
# Send my rank to all others
for ep in eps.values():
futures.append(ep.send(np.array([rank], dtype="u4")))
# Recv from all others
result = np.empty(len(eps.values()), dtype="u4")
futures += list(ep.recv(result[i : i + 1]) for i, ep in enumerate(eps.values()))
# Wait for transfers to complete
await asyncio.gather(*futures)
# We expect to get the sum of all ranks excluding ours
expect = sum(range(len(eps) + 1)) - rank
assert expect == result.sum()
@pytest.mark.asyncio
async def test_benchmark_cluster(n_chunks=1, n_nodes=2, n_workers=2):
server_file = tempfile.NamedTemporaryFile()
server, server_ret = _run_cluster_server(server_file.name, n_nodes * n_workers)
# Wait for server to become available
with open(server_file.name, "r") as f:
while len(f.read()) == 0:
pass
workers = list(
chain.from_iterable(
_run_cluster_workers(server_file.name, n_chunks, n_workers, i, _worker)
for i in range(n_nodes)
)
)
for worker in workers:
worker.join()
assert not worker.exitcode
server.join()
assert not server.exitcode
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/tests/test_custom_send_recv.py
|
import asyncio
import pickle
import numpy as np
import pytest
import ucp
cudf = pytest.importorskip("cudf")
distributed = pytest.importorskip("distributed")
cuda = pytest.importorskip("numba.cuda")
@pytest.mark.asyncio
@pytest.mark.parametrize(
"g",
[
lambda cudf: cudf.Series([1, 2, 3]),
lambda cudf: cudf.Series([1, 2, 3], index=[4, 5, 6]),
lambda cudf: cudf.Series([1, None, 3]),
lambda cudf: cudf.Series(range(2**13)),
lambda cudf: cudf.DataFrame({"a": np.random.random(1200000)}),
lambda cudf: cudf.DataFrame({"a": range(2**20)}),
lambda cudf: cudf.DataFrame({"a": range(2**26)}),
lambda cudf: cudf.Series(),
lambda cudf: cudf.DataFrame(),
lambda cudf: cudf.DataFrame({"a": [], "b": []}),
lambda cudf: cudf.DataFrame({"a": [1.0], "b": [2.0]}),
lambda cudf: cudf.DataFrame(
{"a": ["a", "b", "c", "d"], "b": ["a", "b", "c", "d"]}
),
lambda cudf: cudf.datasets.timeseries(), # ts index with ints, cats, floats
],
)
async def test_send_recv_cudf(event_loop, g):
from distributed.utils import nbytes
class UCX:
def __init__(self, ep):
self.ep = ep
async def write(self, cdf):
header, _frames = cdf.serialize()
frames = [pickle.dumps(header)] + _frames
# Send meta data
await self.ep.send(np.array([len(frames)], dtype=np.uint64))
await self.ep.send(
np.array(
[hasattr(f, "__cuda_array_interface__") for f in frames],
dtype=bool,
)
)
await self.ep.send(np.array([nbytes(f) for f in frames], dtype=np.uint64))
# Send frames
for frame in frames:
if nbytes(frame) > 0:
await self.ep.send(frame)
async def read(self):
try:
# Recv meta data
nframes = np.empty(1, dtype=np.uint64)
await self.ep.recv(nframes)
is_cudas = np.empty(nframes[0], dtype=bool)
await self.ep.recv(is_cudas)
sizes = np.empty(nframes[0], dtype=np.uint64)
await self.ep.recv(sizes)
except (ucp.exceptions.UCXCanceled, ucp.exceptions.UCXCloseError) as e:
msg = "SOMETHING TERRIBLE HAS HAPPENED IN THE TEST"
raise e(msg)
else:
# Recv frames
frames = []
for is_cuda, size in zip(is_cudas.tolist(), sizes.tolist()):
if size > 0:
if is_cuda:
frame = cuda.device_array((size,), dtype=np.uint8)
else:
frame = np.empty(size, dtype=np.uint8)
await self.ep.recv(frame)
frames.append(frame)
else:
if is_cuda:
frames.append(cuda.device_array((0,), dtype=np.uint8))
else:
frames.append(b"")
return frames
class UCXListener:
def __init__(self):
self.comm = None
def start(self):
async def serve_forever(ep):
ucx = UCX(ep)
self.comm = ucx
self.ucp_server = ucp.create_listener(serve_forever)
uu = UCXListener()
uu.start()
uu.address = ucp.get_address()
uu.client = await ucp.create_endpoint(uu.address, uu.ucp_server.port)
ucx = UCX(uu.client)
await asyncio.sleep(0.2)
msg = g(cudf)
frames, _ = await asyncio.gather(uu.comm.read(), ucx.write(msg))
ucx_header = pickle.loads(frames[0])
cudf_buffer = frames[1:]
typ = type(msg)
res = typ.deserialize(ucx_header, cudf_buffer)
from cudf.testing._utils import assert_eq
assert_eq(res, msg)
await uu.comm.ep.close()
await uu.client.close()
assert uu.client.closed()
assert uu.comm.ep.closed()
del uu.ucp_server
ucp.reset()
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/tests/test_send_recv_two_workers.py
|
import asyncio
import multiprocessing
import os
import random
import numpy as np
import pytest
from utils import am_recv, am_send, get_cuda_devices, get_num_gpus, recv, send
import ucp
from ucp.utils import get_event_loop
cupy = pytest.importorskip("cupy")
rmm = pytest.importorskip("rmm")
distributed = pytest.importorskip("distributed")
cloudpickle = pytest.importorskip("cloudpickle")
ITERATIONS = 30
async def get_ep(name, port):
addr = ucp.get_address()
ep = await ucp.create_endpoint(addr, port)
return ep
def register_am_allocators():
ucp.register_am_allocator(lambda n: np.empty(n, dtype=np.uint8), "host")
ucp.register_am_allocator(lambda n: rmm.DeviceBuffer(size=n), "cuda")
def client(port, func, comm_api):
# wait for server to come up
# receive cudf object
# deserialize
# assert deserialized msg is cdf
# send receipt
from distributed.utils import nbytes
ucp.init()
if comm_api == "am":
register_am_allocators()
# must create context before importing
# cudf/cupy/etc
async def read():
await asyncio.sleep(1)
ep = await get_ep("client", port)
msg = None
import cupy
cupy.cuda.set_allocator(None)
for i in range(ITERATIONS):
if comm_api == "tag":
frames, msg = await recv(ep)
else:
frames, msg = await am_recv(ep)
close_msg = b"shutdown listener"
if comm_api == "tag":
close_msg_size = np.array([len(close_msg)], dtype=np.uint64)
await ep.send(close_msg_size)
await ep.send(close_msg)
else:
await ep.am_send(close_msg)
print("Shutting Down Client...")
return msg["data"]
rx_cuda_obj = get_event_loop().run_until_complete(read())
rx_cuda_obj + rx_cuda_obj
num_bytes = nbytes(rx_cuda_obj)
print(f"TOTAL DATA RECEIVED: {num_bytes}")
cuda_obj_generator = cloudpickle.loads(func)
pure_cuda_obj = cuda_obj_generator()
if isinstance(rx_cuda_obj, cupy.ndarray):
cupy.testing.assert_allclose(rx_cuda_obj, pure_cuda_obj)
else:
from cudf.testing._utils import assert_eq
assert_eq(rx_cuda_obj, pure_cuda_obj)
def server(port, func, comm_api):
# create listener receiver
# write cudf object
# confirm message is sent correctly
from distributed.comm.utils import to_frames
from distributed.protocol import to_serialize
ucp.init()
if comm_api == "am":
register_am_allocators()
async def f(listener_port):
# coroutine shows up when the client asks
# to connect
async def write(ep):
import cupy
cupy.cuda.set_allocator(None)
print("CREATING CUDA OBJECT IN SERVER...")
cuda_obj_generator = cloudpickle.loads(func)
cuda_obj = cuda_obj_generator()
msg = {"data": to_serialize(cuda_obj)}
frames = await to_frames(msg, serializers=("cuda", "dask", "pickle"))
for i in range(ITERATIONS):
# Send meta data
if comm_api == "tag":
await send(ep, frames)
else:
await am_send(ep, frames)
print("CONFIRM RECEIPT")
close_msg = b"shutdown listener"
if comm_api == "tag":
msg_size = np.empty(1, dtype=np.uint64)
await ep.recv(msg_size)
msg = np.empty(msg_size[0], dtype=np.uint8)
await ep.recv(msg)
else:
msg = await ep.am_recv()
recv_msg = msg.tobytes()
assert recv_msg == close_msg
print("Shutting Down Server...")
await ep.close()
lf.close()
lf = ucp.create_listener(write, port=listener_port)
try:
while not lf.closed():
await asyncio.sleep(0.1)
except ucp.UCXCloseError:
pass
loop = get_event_loop()
loop.run_until_complete(f(port))
def dataframe():
import numpy as np
import cudf
# always generate the same random numbers
np.random.seed(0)
size = 2**26
return cudf.DataFrame(
{"a": np.random.random(size), "b": np.random.random(size)},
index=np.random.randint(size, size=size),
)
def series():
import cudf
return cudf.Series(np.arange(90000))
def empty_dataframe():
import cudf
return cudf.DataFrame({"a": [1.0], "b": [1.0]}).head(0)
def cupy_obj():
import cupy
size = 10**8
return cupy.arange(size)
@pytest.mark.slow
@pytest.mark.skipif(
get_num_gpus() <= 2, reason="Machine does not have more than two GPUs"
)
@pytest.mark.parametrize(
"cuda_obj_generator", [dataframe, empty_dataframe, series, cupy_obj]
)
@pytest.mark.parametrize("comm_api", ["tag", "am"])
def test_send_recv_cu(cuda_obj_generator, comm_api):
base_env = os.environ
env_client = base_env.copy()
# grab first two devices
cvd = get_cuda_devices()[:2]
cvd = ",".join(map(str, cvd))
# reverse CVD for other worker
env_client["CUDA_VISIBLE_DEVICES"] = cvd[::-1]
port = random.randint(13000, 15500)
# serialize function and send to the client and server
# server will use the return value of the contents,
# serialize the values, then send serialized values to client.
# client will compare return values of the deserialized
# data sent from the server
func = cloudpickle.dumps(cuda_obj_generator)
ctx = multiprocessing.get_context("spawn")
server_process = ctx.Process(
name="server", target=server, args=[port, func, comm_api]
)
client_process = ctx.Process(
name="client", target=client, args=[port, func, comm_api]
)
server_process.start()
# cudf will ping the driver for validity of device
# this will influence device on which a cuda context is created.
# work around is to update env with new CVD before spawning
os.environ.update(env_client)
client_process.start()
server_process.join()
client_process.join()
assert server_process.exitcode == 0
assert client_process.exitcode == 0
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/tests/test_probe.py
|
import asyncio
import pytest
import ucp
@pytest.mark.asyncio
@pytest.mark.parametrize("transfer_api", ["am", "tag"])
async def test_message_probe(transfer_api):
msg = bytearray(b"0" * 10)
async def server_node(ep):
# Wait for remote endpoint to close before probing the endpoint for
# in-transit message and receiving it.
while not ep.closed():
await asyncio.sleep(0) # Yield task
if transfer_api == "am":
assert ep._ep.am_probe() is True
received = await ep.am_recv()
else:
assert ep._ctx.worker.tag_probe(ep._tags["msg_recv"]) is True
received = bytearray(10)
await ep.recv(received)
assert received == msg
async def client_node(port):
ep = await ucp.create_endpoint(
ucp.get_address(),
port,
)
if transfer_api == "am":
await ep.am_send(msg)
else:
await ep.send(msg)
listener = ucp.create_listener(
server_node,
)
await client_node(listener.port)
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/tests/test_rma.py
|
import pytest
import ucp
@pytest.mark.asyncio
@pytest.mark.parametrize("blocking_progress_mode", [True, False])
async def test_fence(blocking_progress_mode):
# Test needs to be async here to ensure progress tasks are cleared
# and avoid warnings.
ucp.init(blocking_progress_mode=blocking_progress_mode)
# this should always succeed
ucp.fence()
@pytest.mark.asyncio
@pytest.mark.parametrize("blocking_progress_mode", [True, False])
async def test_flush(blocking_progress_mode):
ucp.init(blocking_progress_mode=blocking_progress_mode)
await ucp.flush()
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/tests/test_config.py
|
import os
from unittest.mock import patch
import pytest
from utils import captured_logger
import ucp
def test_get_config():
with patch.dict(os.environ):
# Unset to test default value
if os.environ.get("UCX_TLS") is not None:
del os.environ["UCX_TLS"]
ucp.reset()
config = ucp.get_config()
assert isinstance(config, dict)
assert config["TLS"] == "all"
@patch.dict(os.environ, {"UCX_SEG_SIZE": "2M"})
def test_set_env():
ucp.reset()
config = ucp.get_config()
assert config["SEG_SIZE"] == os.environ["UCX_SEG_SIZE"]
@patch.dict(os.environ, {"UCX_SEG_SIZE": "2M"})
def test_init_options():
ucp.reset()
options = {"SEG_SIZE": "3M"}
# environment specification should be ignored
ucp.init(options)
config = ucp.get_config()
assert config["SEG_SIZE"] == options["SEG_SIZE"]
@patch.dict(os.environ, {"UCX_SEG_SIZE": "4M"})
def test_init_options_and_env():
ucp.reset()
options = {"SEG_SIZE": "3M"} # Should be ignored
ucp.init(options, env_takes_precedence=True)
config = ucp.get_config()
assert config["SEG_SIZE"] == os.environ["UCX_SEG_SIZE"]
# Provided options dict was not modified.
assert options == {"SEG_SIZE": "3M"}
@pytest.mark.skipif(
ucp.get_ucx_version() >= (1, 12, 0),
reason="Beginning with UCX >= 1.12, it's only possible to validate "
"UCP options but not options from other modules such as UCT. "
"See https://github.com/openucx/ucx/issues/7519.",
)
def test_init_unknown_option():
ucp.reset()
options = {"UNKNOWN_OPTION": "3M"}
with pytest.raises(ucp.exceptions.UCXConfigError):
ucp.init(options)
def test_init_invalid_option():
ucp.reset()
options = {"SEG_SIZE": "invalid-size"}
with pytest.raises(ucp.exceptions.UCXConfigError):
ucp.init(options)
@patch.dict(os.environ, {"UCX_SEG_SIZE": "2M"})
def test_logging():
"""
Test default logging configuration.
"""
import logging
root = logging.getLogger("ucx")
# ucp.init will only print INFO LINES
with captured_logger(root, level=logging.INFO) as foreign_log:
ucp.reset()
options = {"SEG_SIZE": "3M"}
ucp.init(options)
assert len(foreign_log.getvalue()) > 0
with captured_logger(root, level=logging.ERROR) as foreign_log:
ucp.reset()
options = {"SEG_SIZE": "3M"}
ucp.init(options)
assert len(foreign_log.getvalue()) == 0
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/tests/test_multiple_nodes.py
|
import asyncio
import numpy as np
import pytest
import ucp
def get_somaxconn():
with open("/proc/sys/net/core/somaxconn", "r") as f:
return int(f.readline())
async def hello(ep):
msg2send = np.arange(10)
msg2recv = np.empty_like(msg2send)
f1 = ep.send(msg2send)
f2 = ep.recv(msg2recv)
await f1
await f2
np.testing.assert_array_equal(msg2send, msg2recv)
assert isinstance(ep.ucx_info(), str)
async def server_node(ep):
await hello(ep)
assert isinstance(ep.ucx_info(), str)
await ep.close()
async def client_node(port):
ep = await ucp.create_endpoint(ucp.get_address(), port)
await hello(ep)
assert isinstance(ep.ucx_info(), str)
@pytest.mark.asyncio
@pytest.mark.parametrize("num_servers", [1, 2, 4])
@pytest.mark.parametrize("num_clients", [10, 50, 100])
async def test_many_servers_many_clients(num_servers, num_clients):
somaxconn = get_somaxconn()
listeners = []
for _ in range(num_servers):
listeners.append(ucp.create_listener(server_node))
# We ensure no more than `somaxconn` connections are submitted
# at once. Doing otherwise can block and hang indefinitely.
for i in range(0, num_clients * num_servers, somaxconn):
clients = []
for __ in range(i, min(i + somaxconn, num_clients * num_servers)):
clients.append(client_node(listeners[__ % num_servers].port))
await asyncio.gather(*clients)
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/tests/test_from_worker_address.py
|
import asyncio
import multiprocessing as mp
import os
import struct
import numpy as np
import pytest
import ucp
from ucp.utils import get_event_loop
mp = mp.get_context("spawn")
def _test_from_worker_address_server(queue):
async def run():
# Send worker address to client process via multiprocessing.Queue
address = ucp.get_worker_address()
queue.put(address)
# Receive address size
address_size = np.empty(1, dtype=np.int64)
await ucp.recv(address_size, tag=0)
# Receive address buffer on tag 0 and create UCXAddress from it
remote_address = bytearray(address_size[0])
await ucp.recv(remote_address, tag=0)
remote_address = ucp.get_ucx_address_from_buffer(remote_address)
# Create endpoint to remote worker using the received address
ep = await ucp.create_endpoint_from_worker_address(remote_address)
# Send data to client's endpoint
send_msg = np.arange(10, dtype=np.int64)
await ep.send(send_msg, tag=1, force_tag=True)
get_event_loop().run_until_complete(run())
def _test_from_worker_address_client(queue):
async def run():
# Read local worker address
address = ucp.get_worker_address()
# Receive worker address from server via multiprocessing.Queue, create
# endpoint to server
remote_address = queue.get()
ep = await ucp.create_endpoint_from_worker_address(remote_address)
# Send local address to server on tag 0
await ep.send(np.array(address.length, np.int64), tag=0, force_tag=True)
await ep.send(address, tag=0, force_tag=True)
# Receive message from server
recv_msg = np.empty(10, dtype=np.int64)
await ep.recv(recv_msg, tag=1, force_tag=True)
np.testing.assert_array_equal(recv_msg, np.arange(10, dtype=np.int64))
get_event_loop().run_until_complete(run())
def test_from_worker_address():
queue = mp.Queue()
server = mp.Process(
target=_test_from_worker_address_server,
args=(queue,),
)
server.start()
client = mp.Process(
target=_test_from_worker_address_client,
args=(queue,),
)
client.start()
client.join()
server.join()
assert not server.exitcode
assert not client.exitcode
def _get_address_info(address=None):
# Fixed frame size
frame_size = 10000
# Header format: Recv Tag (Q) + Send Tag (Q) + UCXAddress.length (Q)
header_fmt = "QQQ"
# Data length
data_length = frame_size - struct.calcsize(header_fmt)
# Padding length
padding_length = None if address is None else (data_length - address.length)
# Header + UCXAddress string + padding
fixed_size_address_buffer_fmt = header_fmt + str(data_length) + "s"
assert struct.calcsize(fixed_size_address_buffer_fmt) == frame_size
return {
"frame_size": frame_size,
"data_length": data_length,
"padding_length": padding_length,
"fixed_size_address_buffer_fmt": fixed_size_address_buffer_fmt,
}
def _pack_address_and_tag(address, recv_tag, send_tag):
address_info = _get_address_info(address)
fixed_size_address_packed = struct.pack(
address_info["fixed_size_address_buffer_fmt"],
recv_tag, # Recv Tag
send_tag, # Send Tag
address.length, # Address buffer length
(
bytearray(address) + bytearray(address_info["padding_length"])
), # Address buffer + padding
)
assert len(fixed_size_address_packed) == address_info["frame_size"]
return fixed_size_address_packed
def _unpack_address_and_tag(address_packed):
address_info = _get_address_info()
recv_tag, send_tag, address_length, address_padded = struct.unpack(
address_info["fixed_size_address_buffer_fmt"],
address_packed,
)
# Swap send and recv tags, as they are used by the remote process in the
# opposite direction.
return {
"address": address_padded[:address_length],
"recv_tag": send_tag,
"send_tag": recv_tag,
}
def _test_from_worker_address_server_fixedsize(num_nodes, queue):
async def run():
async def _handle_client(packed_remote_address):
# Unpack the fixed-size address+tag buffer
unpacked = _unpack_address_and_tag(packed_remote_address)
remote_address = ucp.get_ucx_address_from_buffer(unpacked["address"])
# Create endpoint to remote worker using the received address
ep = await ucp.create_endpoint_from_worker_address(remote_address)
# Send data to client's endpoint
send_msg = np.arange(10, dtype=np.int64)
await ep.send(send_msg, tag=unpacked["send_tag"], force_tag=True)
# Receive data from client's endpoint
recv_msg = np.empty(20, dtype=np.int64)
await ep.recv(recv_msg, tag=unpacked["recv_tag"], force_tag=True)
np.testing.assert_array_equal(recv_msg, np.arange(20, dtype=np.int64))
# Send worker address to client processes via multiprocessing.Queue,
# one entry for each client.
address = ucp.get_worker_address()
for i in range(num_nodes):
queue.put(address)
address_info = _get_address_info()
server_tasks = []
for i in range(num_nodes):
# Receive fixed-size address+tag buffer on tag 0
packed_remote_address = bytearray(address_info["frame_size"])
await ucp.recv(packed_remote_address, tag=0)
# Create an async task for client
server_tasks.append(_handle_client(packed_remote_address))
# Await handling each client request
await asyncio.gather(*server_tasks)
get_event_loop().run_until_complete(run())
def _test_from_worker_address_client_fixedsize(queue):
async def run():
# Read local worker address
address = ucp.get_worker_address()
recv_tag = ucp.utils.hash64bits(os.urandom(16))
send_tag = ucp.utils.hash64bits(os.urandom(16))
packed_address = _pack_address_and_tag(address, recv_tag, send_tag)
# Receive worker address from server via multiprocessing.Queue, create
# endpoint to server
remote_address = queue.get()
ep = await ucp.create_endpoint_from_worker_address(remote_address)
# Send local address to server on tag 0
await ep.send(packed_address, tag=0, force_tag=True)
# Receive message from server
recv_msg = np.empty(10, dtype=np.int64)
await ep.recv(recv_msg, tag=recv_tag, force_tag=True)
np.testing.assert_array_equal(recv_msg, np.arange(10, dtype=np.int64))
# Send message to server
send_msg = np.arange(20, dtype=np.int64)
await ep.send(send_msg, tag=send_tag, force_tag=True)
get_event_loop().run_until_complete(run())
@pytest.mark.parametrize("num_nodes", [1, 2, 4, 8])
def test_from_worker_address_multinode(num_nodes):
queue = mp.Queue()
server = mp.Process(
target=_test_from_worker_address_server_fixedsize,
args=(num_nodes, queue),
)
server.start()
clients = []
for i in range(num_nodes):
client = mp.Process(
target=_test_from_worker_address_client_fixedsize,
args=(queue,),
)
client.start()
clients.append(client)
for client in clients:
client.join()
server.join()
    assert not server.exitcode
    for client in clients:
        assert not client.exitcode
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/tests/test_ucx_getters.py
|
import pytest
import ucp
@pytest.mark.asyncio
async def test_get_ucp_worker():
worker = ucp.get_ucp_worker()
assert isinstance(worker, int)
async def server(ep):
assert ep.get_ucp_worker() == worker
lt = ucp.create_listener(server)
ep = await ucp.create_endpoint(ucp.get_address(), lt.port)
assert ep.get_ucp_worker() == worker
@pytest.mark.asyncio
async def test_get_endpoint():
async def server(ep):
ucp_ep = ep.get_ucp_endpoint()
assert isinstance(ucp_ep, int)
assert ucp_ep > 0
lt = ucp.create_listener(server)
ep = await ucp.create_endpoint(ucp.get_address(), lt.port)
ucp_ep = ep.get_ucp_endpoint()
assert isinstance(ucp_ep, int)
assert ucp_ep > 0
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/tests/test_info.py
|
import pytest
import ucp
@pytest.fixture(autouse=True)
def reset():
ucp.reset()
yield
ucp.reset()
def test_context_info():
info = ucp.get_ucp_context_info()
assert isinstance(info, str)
def test_worker_info():
info = ucp.get_ucp_worker_info()
assert isinstance(info, str)
@pytest.mark.parametrize(
"transports",
["posix", "tcp", "posix,tcp"],
)
def test_check_transport(transports):
transports_list = transports.split(",")
inactive_transports = list(set(["posix", "tcp"]) - set(transports_list))
ucp.reset()
options = {"TLS": transports, "NET_DEVICES": "all"}
ucp.init(options)
active_transports = ucp.get_active_transports()
for t in transports_list:
assert any([at.startswith(t) for at in active_transports])
for it in inactive_transports:
        assert all([not at.startswith(it) for at in active_transports])
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/tests/test_disconnect.py
|
import asyncio
import logging
import multiprocessing as mp
from io import StringIO
from queue import Empty
import numpy as np
import pytest
import ucp
from ucp.utils import get_event_loop
mp = mp.get_context("spawn")
async def mp_queue_get_nowait(queue):
while True:
try:
return queue.get_nowait()
except Empty:
pass
await asyncio.sleep(0.01)
def _test_shutdown_unexpected_closed_peer_server(
client_queue, server_queue, endpoint_error_handling
):
global ep_is_alive
ep_is_alive = None
async def run():
async def server_node(ep):
try:
global ep_is_alive
await ep.send(np.arange(100, dtype=np.int64))
# Waiting for signal to close the endpoint
await mp_queue_get_nowait(server_queue)
                # At this point the client should have died and, when endpoint
                # error handling is enabled, the endpoint is detected as not
                # alive anymore (is_alive() returns False).
ep_is_alive = ep._ep.is_alive()
await ep.close()
finally:
listener.close()
listener = ucp.create_listener(
server_node, endpoint_error_handling=endpoint_error_handling
)
client_queue.put(listener.port)
while not listener.closed():
await asyncio.sleep(0.1)
log_stream = StringIO()
logging.basicConfig(stream=log_stream, level=logging.DEBUG)
get_event_loop().run_until_complete(run())
log = log_stream.getvalue()
if endpoint_error_handling is True:
assert ep_is_alive is False
else:
assert ep_is_alive
assert log.find("""UCXError('<[Send shutdown]""") != -1
def _test_shutdown_unexpected_closed_peer_client(
client_queue, server_queue, endpoint_error_handling
):
async def run():
server_port = client_queue.get()
ep = await ucp.create_endpoint(
ucp.get_address(),
server_port,
endpoint_error_handling=endpoint_error_handling,
)
msg = np.empty(100, dtype=np.int64)
await ep.recv(msg)
get_event_loop().run_until_complete(run())
@pytest.mark.parametrize("endpoint_error_handling", [True, False])
def test_shutdown_unexpected_closed_peer(caplog, endpoint_error_handling):
"""
Test clean server shutdown after unexpected peer close
    This will cause some UCX warnings to be issued, but this is expected.
The main goal is to assert that the processes exit without errors
despite a somewhat messy initial state.
"""
if endpoint_error_handling is False and any(
[
t.startswith(i)
for i in ("rc", "dc", "ud")
for t in ucp.get_active_transports()
]
):
pytest.skip(
"Endpoint error handling is required when rc, dc or ud"
"transport is enabled"
)
client_queue = mp.Queue()
server_queue = mp.Queue()
p1 = mp.Process(
target=_test_shutdown_unexpected_closed_peer_server,
args=(client_queue, server_queue, endpoint_error_handling),
)
p1.start()
p2 = mp.Process(
target=_test_shutdown_unexpected_closed_peer_client,
args=(client_queue, server_queue, endpoint_error_handling),
)
p2.start()
p2.join()
server_queue.put("client is down")
p1.join()
assert not p1.exitcode
assert not p2.exitcode
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/tests/test_endpoint.py
|
import asyncio
import pytest
import ucp
@pytest.mark.asyncio
@pytest.mark.parametrize("server_close_callback", [True, False])
async def test_close_callback(server_close_callback):
closed = [False]
def _close_callback():
closed[0] = True
async def server_node(ep):
if server_close_callback is True:
ep.set_close_callback(_close_callback)
if server_close_callback is False:
await ep.close()
listener.close()
async def client_node(port):
ep = await ucp.create_endpoint(
ucp.get_address(),
port,
)
if server_close_callback is False:
ep.set_close_callback(_close_callback)
if server_close_callback is True:
await ep.close()
listener = ucp.create_listener(
server_node,
)
await client_node(listener.port)
while not listener.closed():
await asyncio.sleep(0.01)
assert closed[0] is True
@pytest.mark.asyncio
@pytest.mark.parametrize("transfer_api", ["am", "tag"])
async def test_cancel(transfer_api):
async def server_node(ep):
await ep.close()
async def client_node(port):
ep = await ucp.create_endpoint(ucp.get_address(), port)
if transfer_api == "am":
with pytest.raises(
ucp.exceptions.UCXCanceled,
match="am_recv",
):
await ep.am_recv()
else:
with pytest.raises(
ucp.exceptions.UCXCanceled,
match="Recv.*tag",
):
msg = bytearray(1)
await ep.recv(msg)
await ep.close()
listener = ucp.create_listener(server_node)
await client_node(listener.port)
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/tests/test_from_worker_address_error.py
|
import asyncio
import multiprocessing as mp
import os
import re
import time
from unittest.mock import patch
import numpy as np
import pytest
import ucp
from ucp.utils import get_event_loop
mp = mp.get_context("spawn")
def _test_from_worker_address_error_server(q1, q2, error_type):
async def run():
address = bytearray(ucp.get_worker_address())
if error_type == "unreachable":
# Shutdown worker, then send its address to client process via
# multiprocessing.Queue
ucp.reset()
q1.put(address)
else:
            # Send worker address to client process via multiprocessing.Queue,
# wait for client to connect, then shutdown worker.
q1.put(address)
ep_ready = q2.get()
assert ep_ready == "ready"
ucp.reset()
q1.put("disconnected")
get_event_loop().run_until_complete(run())
def _test_from_worker_address_error_client(q1, q2, error_type):
async def run():
# Receive worker address from server via multiprocessing.Queue
remote_address = ucp.get_ucx_address_from_buffer(q1.get())
if error_type == "unreachable":
with pytest.raises(
ucp.exceptions.UCXError,
match="Destination is unreachable|Endpoint timeout",
):
# Here, two cases may happen:
# 1. With TCP creating endpoint will immediately raise
# "Destination is unreachable"
# 2. With rc/ud creating endpoint will succeed, but raise
# "Endpoint timeout" after UCX_UD_TIMEOUT seconds have passed.
# We need to keep progressing UCP until timeout is raised.
ep = await ucp.create_endpoint_from_worker_address(remote_address)
start = time.monotonic()
while not ep._ep.raise_on_error():
ucp.progress()
# Prevent hanging
if time.monotonic() - start >= 1.0:
return
else:
# Create endpoint to remote worker, and:
#
# 1. For timeout_am_send/timeout_send:
# - inform remote worker that local endpoint is ready for remote
# shutdown;
# - wait for remote worker to shutdown and confirm;
# - attempt to send message.
#
# 2. For timeout_am_recv/timeout_recv:
# - schedule ep.recv;
# - inform remote worker that local endpoint is ready for remote
# shutdown;
# - wait for it to shutdown and confirm
# - wait for recv message.
ep = await ucp.create_endpoint_from_worker_address(remote_address)
if re.match("timeout.*send", error_type):
q2.put("ready")
remote_disconnected = q1.get()
assert remote_disconnected == "disconnected"
with pytest.raises(ucp.exceptions.UCXError, match="Endpoint timeout"):
if error_type == "timeout_am_send":
await asyncio.wait_for(ep.am_send(np.zeros(10)), timeout=1.0)
else:
await asyncio.wait_for(
ep.send(np.zeros(10), tag=0, force_tag=True), timeout=1.0
)
else:
with pytest.raises(ucp.exceptions.UCXCanceled):
if error_type == "timeout_am_recv":
task = asyncio.wait_for(ep.am_recv(), timeout=3.0)
else:
msg = np.empty(10)
task = asyncio.wait_for(
ep.recv(msg, tag=0, force_tag=True), timeout=3.0
)
q2.put("ready")
remote_disconnected = q1.get()
assert remote_disconnected == "disconnected"
await task
get_event_loop().run_until_complete(run())
@pytest.mark.parametrize(
"error_type",
[
"unreachable",
"timeout_am_send",
"timeout_am_recv",
"timeout_send",
"timeout_recv",
],
)
@patch.dict(
os.environ,
{
"UCX_WARN_UNUSED_ENV_VARS": "n",
# Set low timeouts to ensure tests quickly raise as expected
"UCX_KEEPALIVE_INTERVAL": "100ms",
"UCX_UD_TIMEOUT": "100ms",
},
)
def test_from_worker_address_error(error_type):
q1 = mp.Queue()
q2 = mp.Queue()
server = mp.Process(
target=_test_from_worker_address_error_server,
args=(q1, q2, error_type),
)
server.start()
client = mp.Process(
target=_test_from_worker_address_error_client,
args=(q1, q2, error_type),
)
client.start()
server.join()
client.join()
assert not server.exitcode
if ucp.get_ucx_version() < (1, 12, 0) and client.exitcode == 1:
if all(t in error_type for t in ["timeout", "send"]):
pytest.xfail(
"Requires https://github.com/openucx/ucx/pull/7527 with rc/ud."
)
elif all(t in error_type for t in ["timeout", "recv"]):
pytest.xfail(
"Requires https://github.com/openucx/ucx/pull/7531 with rc/ud."
)
assert not client.exitcode
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/tests/conftest.py
|
import asyncio
import os
import pytest
import ucp
# Prevent calls such as `cudf = pytest.importorskip("cudf")` from initializing
# a CUDA context. Such calls may cause tests that must initialize the CUDA
# context on the appropriate device to fail.
# For example, without `RAPIDS_NO_INITIALIZE=True`, `test_benchmark_cluster`
# will succeed if running alone, but fails when all tests are run in batch.
os.environ["RAPIDS_NO_INITIALIZE"] = "True"
def pytest_addoption(parser):
parser.addoption(
"--runslow", action="store_true", default=False, help="run slow tests"
)
def pytest_configure(config):
config.addinivalue_line("markers", "slow: mark test as slow to run")
def pytest_collection_modifyitems(config, items):
if config.getoption("--runslow"):
# --runslow given in cli: do not skip slow tests
return
skip_slow = pytest.mark.skip(reason="need --runslow option to run")
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow)
def handle_exception(loop, context):
msg = context.get("exception", context["message"])
print(msg)
# Let's make sure that UCX gets time to cancel
# progress tasks before closing the event loop.
@pytest.fixture()
def event_loop():
loop = asyncio.new_event_loop()
loop.set_exception_handler(handle_exception)
ucp.reset()
yield loop
ucp.reset()
loop.run_until_complete(asyncio.sleep(0))
loop.close()
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/tests/test_tags.py
|
import asyncio
import pytest
import ucp
@pytest.mark.asyncio
async def test_tag_match():
msg1 = bytes("msg1", "utf-8")
msg2 = bytes("msg2", "utf-8")
async def server_node(ep):
f1 = ep.send(msg1, tag="msg1")
await asyncio.sleep(1) # Let msg1 finish
f2 = ep.send(msg2, tag="msg2")
await asyncio.gather(f1, f2)
await ep.close()
lf = ucp.create_listener(server_node)
ep = await ucp.create_endpoint(ucp.get_address(), lf.port)
m1, m2 = (bytearray(len(msg1)), bytearray(len(msg2)))
f2 = asyncio.create_task(ep.recv(m2, tag="msg2"))
# At this point f2 shouldn't be able to finish because its
# tag "msg2" doesn't match the servers send tag "msg1"
done, pending = await asyncio.wait({f2}, timeout=0.01)
assert f2 in pending
# "msg1" should be ready
await ep.recv(m1, tag="msg1")
assert m1 == msg1
await f2
assert m2 == msg2
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/tests/test_send_recv.py
|
import functools
import pytest
import ucp
np = pytest.importorskip("numpy")
msg_sizes = [2**i for i in range(0, 25, 4)]
dtypes = ["|u1", "<i8", "f8"]
def make_echo_server(create_empty_data):
"""
Returns an echo server that calls the function `create_empty_data(nbytes)`
    to create the data container.
"""
async def echo_server(ep):
"""
Basic echo server for sized messages.
We expect the other endpoint to follow the pattern::
# size of the real message (in bytes)
>>> await ep.send(msg_size)
>>> await ep.send(msg) # send the real message
            >>> await ep.recv(resp)  # receive the echo
"""
msg_size = np.empty(1, dtype=np.uint64)
await ep.recv(msg_size)
msg = create_empty_data(msg_size[0])
await ep.recv(msg)
await ep.send(msg)
await ep.close()
return echo_server
@pytest.mark.asyncio
@pytest.mark.parametrize("size", msg_sizes)
@pytest.mark.parametrize("blocking_progress_mode", [True, False])
async def test_send_recv_bytes(size, blocking_progress_mode):
ucp.init(blocking_progress_mode=blocking_progress_mode)
msg = bytearray(b"m" * size)
msg_size = np.array([len(msg)], dtype=np.uint64)
listener = ucp.create_listener(make_echo_server(lambda n: bytearray(n)))
client = await ucp.create_endpoint(ucp.get_address(), listener.port)
await client.send(msg_size)
await client.send(msg)
resp = bytearray(size)
await client.recv(resp)
assert resp == msg
@pytest.mark.asyncio
@pytest.mark.parametrize("size", msg_sizes)
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("blocking_progress_mode", [True, False])
async def test_send_recv_numpy(size, dtype, blocking_progress_mode):
ucp.init(blocking_progress_mode=blocking_progress_mode)
msg = np.arange(size, dtype=dtype)
msg_size = np.array([msg.nbytes], dtype=np.uint64)
listener = ucp.create_listener(
make_echo_server(lambda n: np.empty(n, dtype=np.uint8))
)
client = await ucp.create_endpoint(ucp.get_address(), listener.port)
await client.send(msg_size)
await client.send(msg)
resp = np.empty_like(msg)
await client.recv(resp)
np.testing.assert_array_equal(resp, msg)
@pytest.mark.asyncio
@pytest.mark.parametrize("size", msg_sizes)
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("blocking_progress_mode", [True, False])
async def test_send_recv_cupy(size, dtype, blocking_progress_mode):
ucp.init(blocking_progress_mode=blocking_progress_mode)
cupy = pytest.importorskip("cupy")
msg = cupy.arange(size, dtype=dtype)
msg_size = np.array([msg.nbytes], dtype=np.uint64)
listener = ucp.create_listener(
make_echo_server(lambda n: cupy.empty((n,), dtype=np.uint8))
)
client = await ucp.create_endpoint(ucp.get_address(), listener.port)
await client.send(msg_size)
await client.send(msg)
resp = cupy.empty_like(msg)
await client.recv(resp)
np.testing.assert_array_equal(cupy.asnumpy(resp), cupy.asnumpy(msg))
@pytest.mark.asyncio
@pytest.mark.parametrize("size", msg_sizes)
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("blocking_progress_mode", [True, False])
async def test_send_recv_numba(size, dtype, blocking_progress_mode):
ucp.init(blocking_progress_mode=blocking_progress_mode)
cuda = pytest.importorskip("numba.cuda")
ary = np.arange(size, dtype=dtype)
msg = cuda.to_device(ary)
msg_size = np.array([msg.nbytes], dtype=np.uint64)
listener = ucp.create_listener(
make_echo_server(lambda n: cuda.device_array((n,), dtype=np.uint8))
)
client = await ucp.create_endpoint(ucp.get_address(), listener.port)
await client.send(msg_size)
await client.send(msg)
resp = cuda.device_array_like(msg)
await client.recv(resp)
np.testing.assert_array_equal(np.array(resp), np.array(msg))
@pytest.mark.asyncio
@pytest.mark.parametrize("blocking_progress_mode", [True, False])
async def test_send_recv_error(blocking_progress_mode):
ucp.init(blocking_progress_mode=blocking_progress_mode)
async def say_hey_server(ep):
await ep.send(bytearray(b"Hey"))
await ep.close()
listener = ucp.create_listener(say_hey_server)
client = await ucp.create_endpoint(ucp.get_address(), listener.port)
msg = bytearray(100)
with pytest.raises(
ucp.exceptions.UCXMsgTruncated,
match=r"length mismatch: 3 \(got\) != 100 \(expected\)",
):
await client.recv(msg)
@pytest.mark.asyncio
@pytest.mark.parametrize("blocking_progress_mode", [True, False])
async def test_send_recv_obj(blocking_progress_mode):
ucp.init(blocking_progress_mode=blocking_progress_mode)
async def echo_obj_server(ep):
obj = await ep.recv_obj()
await ep.send_obj(obj)
await ep.close()
listener = ucp.create_listener(echo_obj_server)
client = await ucp.create_endpoint(ucp.get_address(), listener.port)
msg = bytearray(b"hello")
await client.send_obj(msg)
got = await client.recv_obj()
assert msg == got
@pytest.mark.asyncio
@pytest.mark.parametrize("blocking_progress_mode", [True, False])
async def test_send_recv_obj_numpy(blocking_progress_mode):
ucp.init(blocking_progress_mode=blocking_progress_mode)
allocator = functools.partial(np.empty, dtype=np.uint8)
async def echo_obj_server(ep):
obj = await ep.recv_obj(allocator=allocator)
await ep.send_obj(obj)
await ep.close()
listener = ucp.create_listener(echo_obj_server)
client = await ucp.create_endpoint(ucp.get_address(), listener.port)
msg = bytearray(b"hello")
await client.send_obj(msg)
got = await client.recv_obj(allocator=allocator)
assert msg == got
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/tests/test_shutdown.py
|
import asyncio
import numpy as np
import pytest
import ucp
async def _shutdown_send(ep, message_type):
msg = np.arange(10**6)
if message_type == "tag":
await ep.send(msg)
else:
await ep.am_send(msg)
async def _shutdown_recv(ep, message_type):
if message_type == "tag":
msg = np.empty(10**6)
await ep.recv(msg)
else:
await ep.am_recv()
@pytest.mark.asyncio
@pytest.mark.parametrize("message_type", ["tag", "am"])
async def test_server_shutdown(message_type):
"""The server calls shutdown"""
async def server_node(ep):
with pytest.raises(ucp.exceptions.UCXCanceled):
await asyncio.gather(_shutdown_recv(ep, message_type), ep.close())
async def client_node(port):
ep = await ucp.create_endpoint(
ucp.get_address(),
port,
)
with pytest.raises(ucp.exceptions.UCXCanceled):
await _shutdown_recv(ep, message_type)
listener = ucp.create_listener(
server_node,
)
await client_node(listener.port)
@pytest.mark.asyncio
@pytest.mark.parametrize("message_type", ["tag", "am"])
async def test_client_shutdown(message_type):
"""The client calls shutdown"""
async def client_node(port):
ep = await ucp.create_endpoint(
ucp.get_address(),
port,
)
with pytest.raises(ucp.exceptions.UCXCanceled):
await asyncio.gather(_shutdown_recv(ep, message_type), ep.close())
async def server_node(ep):
with pytest.raises(ucp.exceptions.UCXCanceled):
await _shutdown_recv(ep, message_type)
listener = ucp.create_listener(
server_node,
)
await client_node(listener.port)
@pytest.mark.asyncio
@pytest.mark.parametrize("message_type", ["tag", "am"])
async def test_listener_close(message_type):
"""The server close the listener"""
async def client_node(listener):
ep = await ucp.create_endpoint(
ucp.get_address(),
listener.port,
)
await _shutdown_recv(ep, message_type)
await _shutdown_recv(ep, message_type)
assert listener.closed() is False
listener.close()
assert listener.closed() is True
async def server_node(ep):
await _shutdown_send(ep, message_type)
await _shutdown_send(ep, message_type)
listener = ucp.create_listener(
server_node,
)
await client_node(listener)
@pytest.mark.asyncio
@pytest.mark.parametrize("message_type", ["tag", "am"])
async def test_listener_del(message_type):
"""The client delete the listener"""
async def server_node(ep):
await _shutdown_send(ep, message_type)
await _shutdown_send(ep, message_type)
listener = ucp.create_listener(
server_node,
)
ep = await ucp.create_endpoint(
ucp.get_address(),
listener.port,
)
await _shutdown_recv(ep, message_type)
assert listener.closed() is False
del listener
await _shutdown_recv(ep, message_type)
@pytest.mark.asyncio
@pytest.mark.parametrize("message_type", ["tag", "am"])
async def test_close_after_n_recv(message_type):
"""The Endpoint.close_after_n_recv()"""
async def server_node(ep):
for _ in range(10):
await _shutdown_send(ep, message_type)
async def client_node(port):
ep = await ucp.create_endpoint(
ucp.get_address(),
port,
)
ep.close_after_n_recv(10)
for _ in range(10):
await _shutdown_recv(ep, message_type)
assert ep.closed()
ep = await ucp.create_endpoint(
ucp.get_address(),
port,
)
for _ in range(5):
await _shutdown_recv(ep, message_type)
ep.close_after_n_recv(5)
for _ in range(5):
await _shutdown_recv(ep, message_type)
assert ep.closed()
ep = await ucp.create_endpoint(
ucp.get_address(),
port,
)
for _ in range(5):
await _shutdown_recv(ep, message_type)
ep.close_after_n_recv(10, count_from_ep_creation=True)
for _ in range(5):
await _shutdown_recv(ep, message_type)
assert ep.closed()
ep = await ucp.create_endpoint(
ucp.get_address(),
port,
)
for _ in range(10):
await _shutdown_recv(ep, message_type)
with pytest.raises(
ucp.exceptions.UCXError,
match="`n` cannot be less than current recv_count",
):
ep.close_after_n_recv(5, count_from_ep_creation=True)
ep.close_after_n_recv(1)
with pytest.raises(
ucp.exceptions.UCXError,
match="close_after_n_recv has already been set to",
):
ep.close_after_n_recv(1)
listener = ucp.create_listener(
server_node,
)
await client_node(listener.port)
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/tests/test_reset.py
|
import pytest
import ucp
class ResetAfterN:
"""Calls ucp.reset() after n calls"""
def __init__(self, n):
self.n = n
self.count = 0
def __call__(self):
self.count += 1
if self.count == self.n:
ucp.reset()
@pytest.mark.asyncio
async def test_reset():
reset = ResetAfterN(2)
def server(ep):
ep.abort()
reset()
lt = ucp.create_listener(server)
ep = await ucp.create_endpoint(ucp.get_address(), lt.port)
del lt
del ep
reset()
@pytest.mark.asyncio
async def test_lt_still_in_scope_error():
reset = ResetAfterN(2)
def server(ep):
ep.abort()
reset()
lt = ucp.create_listener(server)
ep = await ucp.create_endpoint(ucp.get_address(), lt.port)
del ep
with pytest.raises(
ucp.exceptions.UCXError,
match="Trying to reset UCX but not all Endpoints and/or Listeners are closed()",
):
ucp.reset()
lt.close()
ucp.reset()
@pytest.mark.asyncio
async def test_ep_still_in_scope_error():
reset = ResetAfterN(2)
def server(ep):
ep.abort()
reset()
lt = ucp.create_listener(server)
ep = await ucp.create_endpoint(ucp.get_address(), lt.port)
del lt
with pytest.raises(
ucp.exceptions.UCXError,
match="Trying to reset UCX but not all Endpoints and/or Listeners are closed()",
):
ucp.reset()
ep.abort()
ucp.reset()
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/tests/test_version.py
|
import ucp
def test_get_ucx_version():
version = ucp.get_ucx_version()
assert isinstance(version, tuple)
assert len(version) == 3
# Check UCX isn't initialized
assert ucp.core._ctx is None
def test_version_constant():
assert isinstance(ucp.__version__, str)
def test_ucx_version_constant():
assert isinstance(ucp.__ucx_version__, str)
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/tests/utils.py
|
import io
import logging
import os
from contextlib import contextmanager
import numpy as np
import pytest
import ucp
normal_env = {
"UCX_RNDV_SCHEME": "put_zcopy",
"UCX_MEMTYPE_CACHE": "n",
"UCX_TLS": "rc,cuda_copy,cuda_ipc",
"CUDA_VISIBLE_DEVICES": "0",
}
def set_env():
os.environ.update(normal_env)
def get_num_gpus():
import pynvml
pynvml.nvmlInit()
ngpus = pynvml.nvmlDeviceGetCount()
pynvml.nvmlShutdown()
return ngpus
def get_cuda_devices():
if "CUDA_VISIBLE_DEVICES" in os.environ:
return os.environ["CUDA_VISIBLE_DEVICES"].split(",")
else:
ngpus = get_num_gpus()
return list(range(ngpus))
@contextmanager
def captured_logger(logger, level=logging.INFO, propagate=None):
"""Capture output from the given Logger."""
if isinstance(logger, str):
logger = logging.getLogger(logger)
orig_level = logger.level
orig_handlers = logger.handlers[:]
if propagate is not None:
orig_propagate = logger.propagate
logger.propagate = propagate
sio = io.StringIO()
logger.handlers[:] = [logging.StreamHandler(sio)]
logger.setLevel(level)
try:
yield sio
finally:
logger.handlers[:] = orig_handlers
logger.setLevel(orig_level)
if propagate is not None:
logger.propagate = orig_propagate
def cuda_array(size):
try:
import rmm
return rmm.DeviceBuffer(size=size)
except ImportError:
import numba.cuda
return numba.cuda.device_array((size,), dtype="u1")
async def send(ep, frames):
pytest.importorskip("distributed")
from distributed.utils import nbytes
await ep.send(np.array([len(frames)], dtype=np.uint64))
await ep.send(
np.array([hasattr(f, "__cuda_array_interface__") for f in frames], dtype=bool)
)
await ep.send(np.array([nbytes(f) for f in frames], dtype=np.uint64))
# Send frames
for frame in frames:
if nbytes(frame) > 0:
await ep.send(frame)
async def recv(ep):
pytest.importorskip("distributed")
from distributed.comm.utils import from_frames
try:
# Recv meta data
nframes = np.empty(1, dtype=np.uint64)
await ep.recv(nframes)
is_cudas = np.empty(nframes[0], dtype=bool)
await ep.recv(is_cudas)
sizes = np.empty(nframes[0], dtype=np.uint64)
await ep.recv(sizes)
except (ucp.exceptions.UCXCanceled, ucp.exceptions.UCXCloseError) as e:
msg = "SOMETHING TERRIBLE HAS HAPPENED IN THE TEST"
        raise type(e)(msg)
# Recv frames
frames = []
for is_cuda, size in zip(is_cudas.tolist(), sizes.tolist()):
if size > 0:
if is_cuda:
frame = cuda_array(size)
else:
frame = np.empty(size, dtype=np.uint8)
await ep.recv(frame)
frames.append(frame)
else:
if is_cuda:
frames.append(cuda_array(size))
else:
frames.append(b"")
msg = await from_frames(frames)
return frames, msg
async def am_send(ep, frames):
await ep.am_send(np.array([len(frames)], dtype=np.uint64))
# Send frames
for frame in frames:
await ep.am_send(frame)
async def am_recv(ep):
pytest.importorskip("distributed")
from distributed.comm.utils import from_frames
try:
# Recv meta data
nframes = (await ep.am_recv()).view(np.uint64)
except (ucp.exceptions.UCXCanceled, ucp.exceptions.UCXCloseError) as e:
msg = "SOMETHING TERRIBLE HAS HAPPENED IN THE TEST"
        raise type(e)(msg)
# Recv frames
frames = []
for _ in range(nframes[0]):
frame = await ep.am_recv()
frames.append(frame)
msg = await from_frames(frames)
return frames, msg
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/tests/test_send_recv_am.py
|
import asyncio
from functools import partial
import numpy as np
import pytest
import ucp
msg_sizes = [0] + [2**i for i in range(0, 25, 4)]
def _bytearray_assert_equal(a, b):
assert a == b
def get_data():
ret = [
{
"allocator": bytearray,
"generator": lambda n: bytearray(b"m" * n),
"validator": lambda recv, exp: _bytearray_assert_equal(recv, exp),
"memory_type": "host",
},
{
"allocator": partial(np.ones, dtype=np.uint8),
"generator": partial(np.arange, dtype=np.int64),
"validator": lambda recv, exp: np.testing.assert_equal(
recv.view(np.int64), exp
),
"memory_type": "host",
},
]
try:
import cupy as cp
ret.append(
{
"allocator": partial(cp.ones, dtype=np.uint8),
"generator": partial(cp.arange, dtype=np.int64),
"validator": lambda recv, exp: cp.testing.assert_array_equal(
recv.view(np.int64), exp
),
"memory_type": "cuda",
}
)
except ImportError:
pass
return ret
def simple_server(size, recv):
async def server(ep):
recv = await ep.am_recv()
await ep.am_send(recv)
await ep.close()
return server
@pytest.mark.asyncio
@pytest.mark.parametrize("size", msg_sizes)
@pytest.mark.parametrize("blocking_progress_mode", [True, False])
@pytest.mark.parametrize("recv_wait", [True, False])
@pytest.mark.parametrize("data", get_data())
async def test_send_recv_am(size, blocking_progress_mode, recv_wait, data):
rndv_thresh = 8192
ucp.init(
options={"RNDV_THRESH": str(rndv_thresh)},
blocking_progress_mode=blocking_progress_mode,
)
ucp.register_am_allocator(data["allocator"], data["memory_type"])
msg = data["generator"](size)
recv = []
listener = ucp.create_listener(simple_server(size, recv))
num_clients = 1
clients = [
await ucp.create_endpoint(ucp.get_address(), listener.port)
for i in range(num_clients)
]
for c in clients:
if recv_wait:
# By sleeping here we ensure that the listener's
# ep.am_recv call will have to wait, rather than return
# immediately as receive data is already available.
await asyncio.sleep(1)
await c.am_send(msg)
recv_msg = await c.am_recv()
for c in clients:
await c.close()
listener.close()
if data["memory_type"] == "cuda" and msg.nbytes < rndv_thresh:
# Eager messages are always received on the host, if no host
# allocator is registered UCX-Py defaults to `bytearray`.
assert recv_msg == bytearray(msg.get())
else:
data["validator"](recv_msg, msg)
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/ucp/core.py
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved.
# See file LICENSE for terms.
import array
import asyncio
import gc
import logging
import os
import re
import struct
import weakref
from functools import partial
from os import close as close_fd
from . import comm
from ._libs import ucx_api
from ._libs.arr import Array
from .continuous_ucx_progress import BlockingMode, NonBlockingMode
from .exceptions import UCXCanceled, UCXCloseError, UCXError
from .utils import get_event_loop, hash64bits
logger = logging.getLogger("ucx")
# The module should only instantiate one instance of the application context.
# However, the init of CUDA must happen after all process forks, thus we delay
# the instantiation of the application context to the first use of the API.
_ctx = None
def _get_ctx():
global _ctx
if _ctx is None:
_ctx = ApplicationContext()
return _ctx
async def exchange_peer_info(endpoint, msg_tag, ctrl_tag, listener, stream_timeout=5.0):
"""Help function that exchange endpoint information"""
# Pack peer information incl. a checksum
fmt = "QQQ"
my_info = struct.pack(fmt, msg_tag, ctrl_tag, hash64bits(msg_tag, ctrl_tag))
peer_info = bytearray(len(my_info))
my_info_arr = Array(my_info)
peer_info_arr = Array(peer_info)
# Send/recv peer information. Notice, we force an `await` between the two
# streaming calls (see <https://github.com/rapidsai/ucx-py/pull/509>)
if listener is True:
await asyncio.wait_for(
comm.stream_send(endpoint, my_info_arr, my_info_arr.nbytes),
timeout=stream_timeout,
)
await asyncio.wait_for(
comm.stream_recv(endpoint, peer_info_arr, peer_info_arr.nbytes),
timeout=stream_timeout,
)
else:
await asyncio.wait_for(
comm.stream_recv(endpoint, peer_info_arr, peer_info_arr.nbytes),
timeout=stream_timeout,
)
await asyncio.wait_for(
comm.stream_send(endpoint, my_info_arr, my_info_arr.nbytes),
timeout=stream_timeout,
)
# Unpacking and sanity check of the peer information
ret = {}
(ret["msg_tag"], ret["ctrl_tag"], ret["checksum"]) = struct.unpack(fmt, peer_info)
expected_checksum = hash64bits(ret["msg_tag"], ret["ctrl_tag"])
if expected_checksum != ret["checksum"]:
raise RuntimeError(
f'Checksum invalid! {hex(expected_checksum)} != {hex(ret["checksum"])}'
)
return ret
class CtrlMsg:
"""Implementation of control messages
For now we have one opcode `1` which means shutdown.
The opcode takes `close_after_n_recv`, which is the number of
messages to receive before the worker should close.
"""
fmt = "QQ"
nbytes = struct.calcsize(fmt)
@staticmethod
def serialize(opcode, close_after_n_recv):
return struct.pack(CtrlMsg.fmt, int(opcode), int(close_after_n_recv))
@staticmethod
def deserialize(serialized_bytes):
return struct.unpack(CtrlMsg.fmt, serialized_bytes)
@staticmethod
def handle_ctrl_msg(ep_weakref, log, msg, future):
"""Function that is called when receiving the control message"""
try:
future.result()
except UCXCanceled:
return # The ctrl signal was canceled
logger.debug(log)
ep = ep_weakref()
if ep is None or ep.closed():
if ep is not None:
ep.abort()
return # The endpoint is closed
opcode, close_after_n_recv = CtrlMsg.deserialize(msg)
if opcode == 1:
ep.close_after_n_recv(close_after_n_recv, count_from_ep_creation=True)
else:
raise UCXError("Received unknown control opcode: %s" % opcode)
@staticmethod
def setup_ctrl_recv(ep):
"""Help function to setup the receive of the control message"""
log = "[Recv shutdown] ep: %s, tag: %s" % (
hex(ep.uid),
hex(ep._tags["ctrl_recv"]),
)
msg = bytearray(CtrlMsg.nbytes)
msg_arr = Array(msg)
shutdown_fut = comm.tag_recv(
ep._ep, msg_arr, msg_arr.nbytes, ep._tags["ctrl_recv"], name=log
)
shutdown_fut.add_done_callback(
partial(CtrlMsg.handle_ctrl_msg, weakref.ref(ep), log, msg)
)
async def _listener_handler_coroutine(conn_request, ctx, func, endpoint_error_handling):
# We create the Endpoint in five steps:
# 1) Create endpoint from conn_request
# 2) Generate unique IDs to use as tags
# 3) Exchange endpoint info such as tags
# 4) Setup control receive callback
# 5) Execute the listener's callback function
endpoint = ucx_api.UCXEndpoint.create_from_conn_request(
ctx.worker, conn_request, endpoint_error_handling
)
seed = os.urandom(16)
msg_tag = hash64bits("msg_tag", seed, endpoint.handle)
ctrl_tag = hash64bits("ctrl_tag", seed, endpoint.handle)
peer_info = await exchange_peer_info(
endpoint=endpoint,
msg_tag=msg_tag,
ctrl_tag=ctrl_tag,
listener=True,
)
tags = {
"msg_send": peer_info["msg_tag"],
"msg_recv": msg_tag,
"ctrl_send": peer_info["ctrl_tag"],
"ctrl_recv": ctrl_tag,
}
ep = Endpoint(endpoint=endpoint, ctx=ctx, tags=tags)
logger.debug(
"_listener_handler() server: %s, error handling: %s, msg-tag-send: %s, "
"msg-tag-recv: %s, ctrl-tag-send: %s, ctrl-tag-recv: %s"
% (
hex(endpoint.handle),
endpoint_error_handling,
hex(ep._tags["msg_send"]),
hex(ep._tags["msg_recv"]),
hex(ep._tags["ctrl_send"]),
hex(ep._tags["ctrl_recv"]),
)
)
# Setup the control receive
CtrlMsg.setup_ctrl_recv(ep)
# Removing references here to avoid delayed clean up
del ctx
# Finally, we call `func`
if asyncio.iscoroutinefunction(func):
await func(ep)
else:
func(ep)
def _listener_handler(conn_request, callback_func, ctx, endpoint_error_handling):
asyncio.ensure_future(
_listener_handler_coroutine(
conn_request,
ctx,
callback_func,
endpoint_error_handling,
)
)
def _epoll_fd_finalizer(epoll_fd, progress_tasks):
assert epoll_fd >= 0
# Notice, progress_tasks must be cleared before we close
# epoll_fd
progress_tasks.clear()
close_fd(epoll_fd)
class ApplicationContext:
"""
The context of the Asyncio interface of UCX.
"""
def __init__(self, config_dict={}, blocking_progress_mode=None):
self.progress_tasks = []
        # For now, an application context only has one worker
self.context = ucx_api.UCXContext(config_dict)
self.worker = ucx_api.UCXWorker(self.context)
if blocking_progress_mode is not None:
self.blocking_progress_mode = blocking_progress_mode
elif "UCXPY_NON_BLOCKING_MODE" in os.environ:
self.blocking_progress_mode = False
else:
self.blocking_progress_mode = True
if self.blocking_progress_mode:
self.epoll_fd = self.worker.init_blocking_progress_mode()
weakref.finalize(
self, _epoll_fd_finalizer, self.epoll_fd, self.progress_tasks
)
# Ensure progress even before Endpoints get created, for example to
# receive messages directly on a worker after a remote endpoint
# connected with `create_endpoint_from_worker_address`.
self.continuous_ucx_progress()
def create_listener(
self,
callback_func,
port=0,
endpoint_error_handling=True,
):
"""Create and start a listener to accept incoming connections
callback_func is the function or coroutine that takes one
argument -- the Endpoint connected to the client.
        Notice, the listener is closed when the returned Listener goes
        out of scope, so remember to keep a reference to the object.
Parameters
----------
callback_func: function or coroutine
A callback function that gets invoked when an incoming
connection is accepted
port: int, optional
An unused port number for listening, or `0` to let UCX assign
an unused port.
endpoint_error_handling: boolean, optional
            If `True` (default) enable endpoint error handling raising
            exceptions when an error occurs; this may incur a performance
            penalty but prevents a process from terminating unexpectedly,
            which may happen when disabled. If `False`, endpoint error
            handling is disabled.
Returns
-------
Listener
The new listener. When this object is deleted, the listening stops
"""
self.continuous_ucx_progress()
if port is None:
port = 0
logger.info("create_listener() - Start listening on port %d" % port)
ret = Listener(
ucx_api.UCXListener(
worker=self.worker,
port=port,
cb_func=_listener_handler,
cb_args=(callback_func, self, endpoint_error_handling),
)
)
return ret
async def create_endpoint(self, ip_address, port, endpoint_error_handling=True):
"""Create a new endpoint to a server
Parameters
----------
ip_address: str
IP address of the server the endpoint should connect to
port: int
            Port of the server the endpoint should connect to
endpoint_error_handling: boolean, optional
            If `True` (default) enable endpoint error handling raising
            exceptions when an error occurs; this may incur a performance
            penalty but prevents a process from terminating unexpectedly,
            which may happen when disabled. If `False`, endpoint error
            handling is disabled.
Returns
-------
Endpoint
The new endpoint
"""
self.continuous_ucx_progress()
ucx_ep = ucx_api.UCXEndpoint.create(
self.worker, ip_address, port, endpoint_error_handling
)
self.worker.progress()
# We create the Endpoint in three steps:
# 1) Generate unique IDs to use as tags
# 2) Exchange endpoint info such as tags
# 3) Use the info to create an endpoint
seed = os.urandom(16)
msg_tag = hash64bits("msg_tag", seed, ucx_ep.handle)
ctrl_tag = hash64bits("ctrl_tag", seed, ucx_ep.handle)
peer_info = await exchange_peer_info(
endpoint=ucx_ep,
msg_tag=msg_tag,
ctrl_tag=ctrl_tag,
listener=False,
)
tags = {
"msg_send": peer_info["msg_tag"],
"msg_recv": msg_tag,
"ctrl_send": peer_info["ctrl_tag"],
"ctrl_recv": ctrl_tag,
}
ep = Endpoint(endpoint=ucx_ep, ctx=self, tags=tags)
logger.debug(
"create_endpoint() client: %s, error handling: %s, msg-tag-send: %s, "
"msg-tag-recv: %s, ctrl-tag-send: %s, ctrl-tag-recv: %s"
% (
hex(ep._ep.handle),
endpoint_error_handling,
hex(ep._tags["msg_send"]),
hex(ep._tags["msg_recv"]),
hex(ep._tags["ctrl_send"]),
hex(ep._tags["ctrl_recv"]),
)
)
# Setup the control receive
CtrlMsg.setup_ctrl_recv(ep)
return ep
async def create_endpoint_from_worker_address(
self,
address,
endpoint_error_handling=True,
):
"""Create a new endpoint to a server
Parameters
----------
address: UCXAddress
endpoint_error_handling: boolean, optional
            If `True` (default) enable endpoint error handling raising
            exceptions when an error occurs; this may incur a performance
            penalty but prevents a process from terminating unexpectedly,
            which may happen when disabled. If `False`, endpoint error
            handling is disabled.
Returns
-------
Endpoint
The new endpoint
"""
self.continuous_ucx_progress()
ucx_ep = ucx_api.UCXEndpoint.create_from_worker_address(
self.worker,
address,
endpoint_error_handling,
)
self.worker.progress()
ep = Endpoint(endpoint=ucx_ep, ctx=self, tags=None)
logger.debug(
"create_endpoint() client: %s, error handling: %s"
% (hex(ep._ep.handle), endpoint_error_handling)
)
return ep
def continuous_ucx_progress(self, event_loop=None):
"""Guarantees continuous UCX progress
Use this function to associate UCX progress with an event loop.
        Notice, multiple event loops can be associated with UCX progress.
This function is automatically called when calling
`create_listener()` or `create_endpoint()`.
Parameters
----------
event_loop: asyncio.event_loop, optional
            The event loop on which to progress UCX. If None,
            `ucp.utils.get_event_loop()` is used.
"""
loop = event_loop or get_event_loop()
if loop in self.progress_tasks:
return # Progress has already been guaranteed for the current event loop
if self.blocking_progress_mode:
task = BlockingMode(self.worker, loop, self.epoll_fd)
else:
task = NonBlockingMode(self.worker, loop)
self.progress_tasks.append(task)
def get_ucp_worker(self):
"""Returns the underlying UCP worker handle (ucp_worker_h)
as a Python integer.
"""
return self.worker.handle
def get_config(self):
"""Returns all UCX configuration options as a dict.
Returns
-------
dict
The current UCX configuration options
"""
return self.context.get_config()
def ucp_context_info(self):
"""Return low-level UCX info about this endpoint as a string"""
return self.context.info()
def ucp_worker_info(self):
"""Return low-level UCX info about this endpoint as a string"""
return self.worker.info()
def fence(self):
return self.worker.fence()
async def flush(self):
return await comm.flush_worker(self.worker)
def get_worker_address(self):
return self.worker.get_address()
def register_am_allocator(self, allocator, allocator_type):
"""Register an allocator for received Active Messages.
The allocator registered by this function is always called by the
active message receive callback when an incoming message is
available. The appropriate allocator is called depending on whether
the message received is a host message or CUDA message.
Note that CUDA messages can only be received via rendezvous, all
eager messages are received on a host object.
By default, the host allocator is `bytearray`. There is no default
CUDA allocator and one must always be registered if CUDA is used.
Parameters
----------
allocator: callable
An allocation function accepting exactly one argument, the
            size of the message received.
allocator_type: str
The type of allocator, currently supports "host" and "cuda".
"""
if allocator_type == "host":
allocator_type = ucx_api.AllocatorType.HOST
elif allocator_type == "cuda":
allocator_type = ucx_api.AllocatorType.CUDA
else:
allocator_type = ucx_api.AllocatorType.UNSUPPORTED
self.worker.register_am_allocator(allocator, allocator_type)
@ucx_api.nvtx_annotate("UCXPY_WORKER_RECV", color="red", domain="ucxpy")
async def recv(self, buffer, tag):
"""Receive directly on worker without a local Endpoint into `buffer`.
Parameters
----------
buffer: exposing the buffer protocol or array/cuda interface
The buffer to receive into. Raise ValueError if buffer
is smaller than nbytes or read-only.
        tag: hashable
            Set a tag that must match the received message.
"""
if not isinstance(buffer, Array):
buffer = Array(buffer)
nbytes = buffer.nbytes
log = "[Worker Recv] worker: %s, tag: %s, nbytes: %d, type: %s" % (
hex(self.worker.handle),
hex(tag),
nbytes,
type(buffer.obj),
)
logger.debug(log)
return await comm.tag_recv(self.worker, buffer, nbytes, tag, name=log)
class Listener:
"""A handle to the listening service started by `create_listener()`
    The listening continues as long as this object exists and `.close()`
    hasn't been called. Please use `create_listener()` to create a Listener.
"""
def __init__(self, backend):
assert backend.initialized
self._b = backend
def closed(self):
"""Is the listener closed?"""
return not self._b.initialized
@property
def ip(self):
"""The listening network IP address"""
return self._b.ip
@property
def port(self):
"""The listening network port"""
return self._b.port
def close(self):
"""Closing the listener"""
self._b.close()
class Endpoint:
"""An endpoint represents a connection to a peer
Please use `create_listener()` and `create_endpoint()`
to create an Endpoint.
"""
def __init__(self, endpoint, ctx, tags=None):
self._ep = endpoint
self._ctx = ctx
self._send_count = 0 # Number of calls to self.send()
self._recv_count = 0 # Number of calls to self.recv()
self._finished_recv_count = 0 # Number of returned (finished) self.recv() calls
self._shutting_down_peer = False # Told peer to shutdown
self._close_after_n_recv = None
self._tags = tags
@property
def uid(self):
"""The unique ID of the underlying UCX endpoint"""
return self._ep.handle
def closed(self):
"""Is this endpoint closed?"""
return self._ep is None or not self._ep.initialized or not self._ep.is_alive()
def abort(self):
"""Close the communication immediately and abruptly.
Useful in destructors or generators' ``finally`` blocks.
        Notice, this function doesn't signal the connected peer to close.
To do that, use `Endpoint.close()`
"""
if self._ep is not None:
logger.debug("Endpoint.abort(): %s" % hex(self.uid))
self._ep.close()
self._ep = None
self._ctx = None
async def close(self):
"""Close the endpoint cleanly.
This will attempt to flush outgoing buffers before actually
closing the underlying UCX endpoint.
"""
if self.closed():
self.abort()
return
try:
# Making sure we only tell peer to shutdown once
if self._shutting_down_peer:
return
self._shutting_down_peer = True
# Send a shutdown message to the peer
msg = CtrlMsg.serialize(opcode=1, close_after_n_recv=self._send_count)
msg_arr = Array(msg)
log = "[Send shutdown] ep: %s, tag: %s, close_after_n_recv: %d" % (
hex(self.uid),
hex(self._tags["ctrl_send"]),
self._send_count,
)
logger.debug(log)
try:
await comm.tag_send(
self._ep, msg_arr, msg_arr.nbytes, self._tags["ctrl_send"], name=log
)
# The peer might already be shutting down thus we can ignore any send errors
except UCXError as e:
logging.warning(
"UCX failed closing worker %s (probably already closed): %s"
% (hex(self.uid), repr(e))
)
finally:
if not self.closed():
# Give all current outstanding send() calls a chance to return
self._ctx.worker.progress()
await asyncio.sleep(0)
self.abort()
@ucx_api.nvtx_annotate("UCXPY_SEND", color="green", domain="ucxpy")
async def send(self, buffer, tag=None, force_tag=False):
"""Send `buffer` to connected peer.
Parameters
----------
buffer: exposing the buffer protocol or array/cuda interface
The buffer to send. Raise ValueError if buffer is smaller
than nbytes.
        tag: hashable, optional
Set a tag that the receiver must match. Currently the tag
is hashed together with the internal Endpoint tag that is
agreed with the remote end at connection time. To enforce
using the user tag, make sure to specify `force_tag=True`.
force_tag: bool
If true, force using `tag` as is, otherwise the value
specified with `tag` (if any) will be hashed with the
internal Endpoint tag.
"""
self._ep.raise_on_error()
if self.closed():
raise UCXCloseError("Endpoint closed")
if not isinstance(buffer, Array):
buffer = Array(buffer)
if tag is None:
tag = self._tags["msg_send"]
elif not force_tag:
tag = hash64bits(self._tags["msg_send"], hash(tag))
nbytes = buffer.nbytes
log = "[Send #%03d] ep: %s, tag: %s, nbytes: %d, type: %s" % (
self._send_count,
hex(self.uid),
hex(tag),
nbytes,
type(buffer.obj),
)
logger.debug(log)
self._send_count += 1
try:
return await comm.tag_send(self._ep, buffer, nbytes, tag, name=log)
except UCXCanceled as e:
# If self._ep has already been closed and destroyed, we reraise the
# UCXCanceled exception.
if self._ep is None:
raise e
@ucx_api.nvtx_annotate("UCXPY_AM_SEND", color="green", domain="ucxpy")
async def am_send(self, buffer):
"""Send `buffer` to connected peer.
Parameters
----------
buffer: exposing the buffer protocol or array/cuda interface
The buffer to send. Raise ValueError if buffer is smaller
than nbytes.
"""
if self.closed():
raise UCXCloseError("Endpoint closed")
if not isinstance(buffer, Array):
buffer = Array(buffer)
nbytes = buffer.nbytes
log = "[AM Send #%03d] ep: %s, nbytes: %d, type: %s" % (
self._send_count,
hex(self.uid),
nbytes,
type(buffer.obj),
)
logger.debug(log)
self._send_count += 1
return await comm.am_send(self._ep, buffer, nbytes, name=log)
@ucx_api.nvtx_annotate("UCXPY_RECV", color="red", domain="ucxpy")
async def recv(self, buffer, tag=None, force_tag=False):
"""Receive from connected peer into `buffer`.
Parameters
----------
buffer: exposing the buffer protocol or array/cuda interface
The buffer to receive into. Raise ValueError if buffer
is smaller than nbytes or read-only.
tag: hashable, optional
Set a tag that must match the received message. Currently
the tag is hashed together with the internal Endpoint tag
that is agreed with the remote end at connection time.
To enforce using the user tag, make sure to specify
`force_tag=True`.
force_tag: bool
If true, force using `tag` as is, otherwise the value
specified with `tag` (if any) will be hashed with the
internal Endpoint tag.
"""
if tag is None:
tag = self._tags["msg_recv"]
elif not force_tag:
tag = hash64bits(self._tags["msg_recv"], hash(tag))
if not self._ctx.worker.tag_probe(tag):
self._ep.raise_on_error()
if self.closed():
raise UCXCloseError("Endpoint closed")
if not isinstance(buffer, Array):
buffer = Array(buffer)
nbytes = buffer.nbytes
log = "[Recv #%03d] ep: %s, tag: %s, nbytes: %d, type: %s" % (
self._recv_count,
hex(self.uid),
hex(tag),
nbytes,
type(buffer.obj),
)
logger.debug(log)
self._recv_count += 1
ret = await comm.tag_recv(self._ep, buffer, nbytes, tag, name=log)
self._finished_recv_count += 1
if (
self._close_after_n_recv is not None
and self._finished_recv_count >= self._close_after_n_recv
):
self.abort()
return ret
@ucx_api.nvtx_annotate("UCXPY_AM_RECV", color="red", domain="ucxpy")
async def am_recv(self):
"""Receive from connected peer."""
if not self._ep.am_probe():
self._ep.raise_on_error()
if self.closed():
raise UCXCloseError("Endpoint closed")
log = "[AM Recv #%03d] ep: %s" % (self._recv_count, hex(self.uid))
logger.debug(log)
self._recv_count += 1
ret = await comm.am_recv(self._ep, name=log)
self._finished_recv_count += 1
if (
self._close_after_n_recv is not None
and self._finished_recv_count >= self._close_after_n_recv
):
self.abort()
return ret
def cuda_support(self):
"""Return whether UCX is configured with CUDA support or not"""
return self._ctx.context.cuda_support
def get_ucp_worker(self):
"""Returns the underlying UCP worker handle (ucp_worker_h)
as a Python integer.
"""
return self._ctx.worker.handle
def get_ucp_endpoint(self):
"""Returns the underlying UCP endpoint handle (ucp_ep_h)
as a Python integer.
"""
return self._ep.handle
def ucx_info(self):
"""Return low-level UCX info about this endpoint as a string"""
return self._ep.info()
def close_after_n_recv(self, n, count_from_ep_creation=False):
"""Close the endpoint after `n` received messages.
Parameters
----------
n: int
Number of messages to receive before closing the endpoint.
count_from_ep_creation: bool, optional
Whether to count `n` from this function call (default) or
from the creation of the endpoint.
"""
if not count_from_ep_creation:
n += self._finished_recv_count # Make `n` absolute
if self._close_after_n_recv is not None:
raise UCXError(
"close_after_n_recv has already been set to: %d (abs)"
% self._close_after_n_recv
)
if n == self._finished_recv_count:
self.abort()
elif n > self._finished_recv_count:
self._close_after_n_recv = n
else:
raise UCXError(
"`n` cannot be less than current recv_count: %d (abs) < %d (abs)"
% (n, self._finished_recv_count)
)
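# Illustrative sketch (not from the original sources): closing an endpoint
# automatically once two more messages arrive, assuming `ep` is a live
# Endpoint and `buf1`/`buf2` are writable buffers:
#
#   ep.close_after_n_recv(2)
#   await ep.recv(buf1)
#   await ep.recv(buf2)  # the endpoint is aborted after this receive completes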
async def send_obj(self, obj, tag=None):
"""Send `obj` to connected peer that calls `recv_obj()`.
The transfer includes an extra message containing the size of `obj`,
which increases the overhead slightly.
Parameters
----------
obj: exposing the buffer protocol or array/cuda interface
The object to send.
tag: hashable, optional
Set a tag that the receiver must match.
Example
-------
>>> await ep.send_obj(pickle.dumps([1,2,3]))
"""
if not isinstance(obj, Array):
obj = Array(obj)
nbytes = Array(array.array("Q", [obj.nbytes]))
await self.send(nbytes, tag=tag)
await self.send(obj, tag=tag)
async def recv_obj(self, tag=None, allocator=bytearray):
"""Receive from connected peer that calls `send_obj()`.
As opposed to `recv()`, this function returns the received object.
Data is received into a buffer allocated by `allocator`.
The transfer includes an extra message containing the size of `obj`,
which increases the overhead slightly.
Parameters
----------
tag: hashable, optional
Set a tag that must match the received message. Notice, currently
UCX-Py doesn't support an "any tag" wildcard, thus `tag=None` only matches a
send that also sets `tag=None`.
allocator: callable, optional
Function to allocate the received object. The function should
take the number of bytes to allocate as input and return a new
buffer of that size as output.
Example
-------
>>> pickle.loads(await ep.recv_obj())
"""
nbytes = array.array("Q", [0])
await self.recv(nbytes, tag=tag)
nbytes = nbytes[0]
ret = allocator(nbytes)
await self.recv(ret, tag=tag)
return ret
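# Illustrative sketch (assumed names): the paired size-prefixed protocol of
# `send_obj()`/`recv_obj()` between two connected endpoints `ep_a` and `ep_b`,
# which are assumed to already exist:
#
#   import pickle
#   await ep_a.send_obj(pickle.dumps({"hello": "world"}))
#   obj = pickle.loads(await ep_b.recv_obj())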
async def flush(self):
logger.debug("[Flush] ep: %s" % (hex(self.uid)))
return await comm.flush_ep(self._ep)
def set_close_callback(self, callback_func):
"""Register a user callback function to be called on Endpoint's closing.
Allows the user to register a callback function to be called when the
Endpoint's error callback is called, or during its finalizer if the error
callback is never called.
Once the callback is called, it's not possible to send any more messages.
However, receiving messages may still be possible, as UCP may still have
incoming messages in transit.
Parameters
----------
callback_func: callable
The callback function to be called when the Endpoint's error callback
is called, otherwise called on its finalizer.
Example
-------
>>> ep.set_close_callback(lambda: print("Executing close callback"))
"""
self._ep.set_close_callback(callback_func)
# The following functions initialize and use a single ApplicationContext instance
def init(options={}, env_takes_precedence=False, blocking_progress_mode=None):
"""Initiate UCX.
Usually this is done automatically at the first API call
but this function makes it possible to set UCX options programmatically.
Alternatively, UCX options can be specified through environment variables.
Parameters
----------
options: dict, optional
UCX options send to the underlying UCX library
env_takes_precedence: bool, optional
Whether environment variables takes precedence over the `options`
specified here.
blocking_progress_mode: bool, optional
If None, blocking UCX progress mode is used unless the environment variable
`UCXPY_NON_BLOCKING_MODE` is defined.
Otherwise, blocking mode is used if True, and non-blocking mode if False.
"""
global _ctx
if _ctx is not None:
raise RuntimeError(
"UCX is already initiated. Call reset() and init() "
"in order to re-initate UCX with new options."
)
options = options.copy()
for k, v in options.items():
env_k = f"UCX_{k}"
env_v = os.environ.get(env_k)
if env_v is not None:
if env_takes_precedence:
options[k] = env_v
logger.debug(
f"Ignoring option {k}={v}; using environment {env_k}={env_v}"
)
else:
logger.debug(
f"Ignoring environment {env_k}={env_v}; using option {k}={v}"
)
_ctx = ApplicationContext(options, blocking_progress_mode=blocking_progress_mode)
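# Illustrative sketch: programmatic initialization. `TLS` is a standard UCX
# configuration key, but the value below is only an example, not a
# recommendation:
#
#   import ucp
#   ucp.init(options={"TLS": "tcp"}, env_takes_precedence=True)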
def reset():
"""Resets the UCX library by shutting down all of UCX.
The library is initiated at next API call.
"""
global _ctx
if _ctx is not None:
weakref_ctx = weakref.ref(_ctx)
_ctx = None
gc.collect()
if weakref_ctx() is not None:
msg = (
"Trying to reset UCX but not all Endpoints and/or Listeners "
"are closed(). The following objects are still referencing "
"ApplicationContext: "
)
for o in gc.get_referrers(weakref_ctx()):
msg += "\n %s" % str(o)
raise UCXError(msg)
def get_ucx_version():
"""Return the version of the underlying UCX installation
Notice, this function doesn't initialize UCX.
Returns
-------
tuple
The version as a tuple e.g. (1, 7, 0)
"""
return ucx_api.get_ucx_version()
def progress():
"""Try to progress the communication layer
Warning, it is illegal to call this from a call-back function such as
the call-back function given to create_listener.
"""
return _get_ctx().worker.progress()
def get_config():
"""Returns all UCX configuration options as a dict.
If UCX is uninitialized, the options returned are the
options used if UCX were to be initialized now.
Notice, this function doesn't initialize UCX.
Returns
-------
dict
The current UCX configuration options
"""
if _ctx is None:
return ucx_api.get_current_options()
else:
return _get_ctx().get_config()
def register_am_allocator(allocator, allocator_type):
return _get_ctx().register_am_allocator(allocator, allocator_type)
def create_listener(callback_func, port=None, endpoint_error_handling=True):
return _get_ctx().create_listener(
callback_func,
port,
endpoint_error_handling=endpoint_error_handling,
)
async def create_endpoint(ip_address, port, endpoint_error_handling=True):
return await _get_ctx().create_endpoint(
ip_address,
port,
endpoint_error_handling=endpoint_error_handling,
)
async def create_endpoint_from_worker_address(
address,
endpoint_error_handling=True,
):
return await _get_ctx().create_endpoint_from_worker_address(
address,
endpoint_error_handling=endpoint_error_handling,
)
def continuous_ucx_progress(event_loop=None):
_get_ctx().continuous_ucx_progress(event_loop=event_loop)
def get_ucp_worker():
return _get_ctx().get_ucp_worker()
def get_worker_address():
return _get_ctx().get_worker_address()
def get_ucx_address_from_buffer(buffer):
return ucx_api.UCXAddress.from_buffer(buffer)
async def recv(buffer, tag):
return await _get_ctx().recv(buffer, tag=tag)
def get_ucp_context_info():
"""Gets information on the current UCX context, obtained from
`ucp_context_print_info`.
"""
return _get_ctx().ucp_context_info()
def get_ucp_worker_info():
"""Gets information on the current UCX worker, obtained from
`ucp_worker_print_info`.
"""
return _get_ctx().ucp_worker_info()
def get_active_transports():
"""Returns a list of all transports that are available and are currently
active in UCX, meaning UCX **may** use them depending on the type of
transfers and how it is configured but is not required to do so.
"""
info = get_ucp_context_info()
resources = re.findall("^#.*resource.*md.*dev.*flags.*$", info, re.MULTILINE)
return set([r.split()[-1].split("/")[0] for r in resources])
async def flush():
"""Flushes outstanding AMO and RMA operations. This ensures that the
operations issued on this worker have completed both locally and remotely.
This function does not guarantee ordering.
"""
if _ctx is not None:
return await _get_ctx().flush()
else:
# If ctx is not initialized we still want to do the right thing by asyncio
return await asyncio.sleep(0)
def fence():
"""Ensures ordering of non-blocking communication operations on the UCP worker.
This function returns nothing, but will raise an error if it cannot make
this guarantee. This function does not ensure any operations have completed.
"""
if _ctx is not None:
_get_ctx().fence()
# Setting the __doc__
create_listener.__doc__ = ApplicationContext.create_listener.__doc__
create_endpoint.__doc__ = ApplicationContext.create_endpoint.__doc__
continuous_ucx_progress.__doc__ = ApplicationContext.continuous_ucx_progress.__doc__
get_ucp_worker.__doc__ = ApplicationContext.get_ucp_worker.__doc__
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/ucp/exceptions.py
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
from ._libs.exceptions import * # noqa
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/ucp/_version.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.resources
__version__ = importlib.resources.files("ucp").joinpath("VERSION").read_text().strip()
__git_commit__ = ""
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/ucp/comm.py
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved.
# See file LICENSE for terms.
import asyncio
from typing import Union
from ._libs import arr, ucx_api
from .utils import get_event_loop
def _cb_func(request, exception, event_loop, future):
if event_loop.is_closed() or future.done():
return
if exception is not None:
future.set_exception(exception)
else:
future.set_result(True)
def _call_ucx_api(event_loop, func, *args, **kwargs):
"""Help function to avoid duplicated code.
Basically, all the communication functions have the
same structure, which this wrapper implements.
"""
event_loop = event_loop or get_event_loop()
ret = event_loop.create_future()
# All the comm functions take the callback function and its arguments
kwargs["cb_func"] = _cb_func
kwargs["cb_args"] = (event_loop, ret)
req = func(*args, **kwargs)
if req is None and not ret.done():
ret.set_result(True)
return ret
def _am_cb_func(recv_obj, exception, event_loop, future):
if event_loop.is_closed() or future.done():
return
if exception is not None:
future.set_exception(exception)
else:
future.set_result(recv_obj)
def tag_send(
ep: ucx_api.UCXEndpoint,
buffer: arr.Array,
nbytes: int,
tag: int,
name="tag_send",
event_loop=None,
) -> asyncio.Future:
return _call_ucx_api(
event_loop, ucx_api.tag_send_nb, ep, buffer, nbytes, tag, name=name
)
def am_send(
ep: ucx_api.UCXEndpoint,
buffer: arr.Array,
nbytes: int,
name="am_send",
event_loop=None,
) -> asyncio.Future:
return _call_ucx_api(event_loop, ucx_api.am_send_nbx, ep, buffer, nbytes, name=name)
def stream_send(
ep: ucx_api.UCXEndpoint,
buffer: arr.Array,
nbytes: int,
name="stream_send",
event_loop=None,
) -> asyncio.Future:
return _call_ucx_api(
event_loop, ucx_api.stream_send_nb, ep, buffer, nbytes, name=name
)
def tag_recv(
obj: Union[ucx_api.UCXEndpoint, ucx_api.UCXWorker],
buffer: arr.Array,
nbytes: int,
tag: int,
name="tag_recv",
event_loop=None,
) -> asyncio.Future:
worker = obj if isinstance(obj, ucx_api.UCXWorker) else obj.worker
ep = obj if isinstance(obj, ucx_api.UCXEndpoint) else None
return _call_ucx_api(
event_loop,
ucx_api.tag_recv_nb,
worker,
buffer,
nbytes,
tag,
name=name,
ep=ep,
)
def am_recv(
ep: ucx_api.UCXEndpoint,
name="am_recv",
event_loop=None,
) -> asyncio.Future:
event_loop = event_loop or get_event_loop()
ret = event_loop.create_future()
# All the comm functions take the callback function and its arguments
cb_args = (event_loop, ret)
ucx_api.am_recv_nb(ep, cb_func=_am_cb_func, cb_args=cb_args, name=name)
return ret
def stream_recv(
ep: ucx_api.UCXEndpoint,
buffer: arr.Array,
nbytes: int,
name="stream_recv",
event_loop=None,
) -> asyncio.Future:
return _call_ucx_api(
event_loop, ucx_api.stream_recv_nb, ep, buffer, nbytes, name=name
)
def flush_worker(worker: ucx_api.UCXWorker, event_loop=None) -> asyncio.Future:
return _call_ucx_api(event_loop, worker.flush)
def flush_ep(ep: ucx_api.UCXEndpoint, event_loop=None) -> asyncio.Future:
return _call_ucx_api(event_loop, ep.flush)
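# Illustrative sketch (assumed objects): every function above returns an
# `asyncio.Future` that can be awaited directly. Assuming `ep` is a connected
# `ucx_api.UCXEndpoint` and `buf` is an `arr.Array` wrapping a contiguous
# buffer:
#
#   await tag_send(ep, buf, buf.nbytes, tag=0)
#   await flush_ep(ep)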
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/ucp/endpoint_reuse.py
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import ctypes
import pickle
import uuid
from . import core
class EPHandle:
def __init__(self, ep):
self.ep = ep
self.refcount = 1
class EndpointReuse:
"""Class to seamlessly reuse endpoints.
It uses the tag feature of send/recv to separate "virtual" endpoint
pairs from each other.
Warning
-------
When closing a reused endpoint, the peer might not be notified.
Performance
-----------
The overhead of creating endpoints is increased but the performance
of the created connections is the same.
Connection Protocol
-------------------
1) Client connects to server using a new endpoint.
2) Client sends the IDs of all its existing endpoints.
3) Server receives the IDs from the client and checks if it has a
matching existing endpoint. It then sends the matching ID back to the
client, or `None` if there is no match.
4) The client and server now continue with either the existing matching
endpoints or the new endpoints (which are registered for later reuse).
"""
existing_endpoints = {}
def __init__(self, handle, tag):
self.handle = handle
self.tag = tag
@classmethod
async def create_endpoint(cls, ip, port):
tag = ctypes.c_uint32(uuid.uuid4().int).value
ep_new = await core.create_endpoint(ip, port)
existing_endpoints = list(cls.existing_endpoints.values())
my_ep_ids = []
for ep in existing_endpoints:
if not ep.ep.closed():
ep.refcount += 1
my_ep_ids.append(ep.ep._tags["msg_recv"])
await ep_new.send_obj(pickle.dumps((my_ep_ids, tag)))
reuse_ep_id = pickle.loads(await ep_new.recv_obj())
for ep in existing_endpoints:
if not ep.ep.closed():
ep.refcount -= 1
if ep.refcount == 0:
await ep.ep.close()
if reuse_ep_id:
reuse_ep = cls.existing_endpoints[reuse_ep_id]
reuse_ep.refcount += 1
await ep_new.close()
else:
reuse_ep = EPHandle(ep_new)
assert ep_new._tags["msg_send"] not in cls.existing_endpoints
cls.existing_endpoints[ep_new._tags["msg_send"]] = reuse_ep
return cls(reuse_ep, tag)
@classmethod
def create_listener(cls, cb_coroutine, port):
async def _handle(ep_new):
peers_ep_ids, tag = pickle.loads(await ep_new.recv_obj())
existing_ep = None
for peers_ep_id in peers_ep_ids:
existing_ep = cls.existing_endpoints.get(peers_ep_id)
if existing_ep is not None and not existing_ep.ep.closed():
break
if existing_ep:
existing_ep.refcount += 1
await ep_new.send_obj(pickle.dumps(existing_ep.ep._tags["msg_recv"]))
await ep_new.close()
else:
await ep_new.send_obj(pickle.dumps(None))
existing_ep = EPHandle(ep_new)
assert ep_new._tags["msg_send"] not in cls.existing_endpoints
cls.existing_endpoints[ep_new._tags["msg_send"]] = existing_ep
await cb_coroutine(cls(existing_ep, tag))
return core.create_listener(_handle, port=port)
async def send(self, buffer):
await self.handle.ep.send(buffer, tag=self.tag)
async def recv(self, buffer):
await self.handle.ep.recv(buffer, tag=self.tag)
async def close(self):
if self.closed():
return
self.handle.refcount -= 1
if self.handle.refcount == 0:
h = self.handle
self.handle = None
await h.ep.close()
def closed(self):
return self.handle is None or self.handle.ep.closed()
def abort(self):
if self.closed():
return
self.handle.refcount -= 1
if self.handle.refcount == 0:
self.handle.ep.abort()
self.handle = None
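# Illustrative usage sketch (hypothetical port and handler names): reusing
# endpoints following the connection protocol described in the class
# docstring:
#
#   async def handler(ep):          # server side
#       buf = bytearray(4)
#       await ep.recv(buf)
#
#   listener = EndpointReuse.create_listener(handler, 13337)
#   ep = await EndpointReuse.create_endpoint("127.0.0.1", 13337)  # client side
#   await ep.send(b"ping")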
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/ucp/__init__.py
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
"""UCX-Py: Python bindings for UCX <www.openucx.org>"""
import logging
import os
logger = logging.getLogger("ucx")
# Notice, if we have to update environment variables we need to do it
# before importing UCX, which must happen also before the Cython code
# import to prevent UCS unused variable warnings.
if "UCX_MEMTYPE_CACHE" not in os.environ:
# See <https://github.com/openucx/ucx/wiki/NVIDIA-GPU-Support#known-issues>
logger.debug("Setting env UCX_MEMTYPE_CACHE=n, which is required by UCX")
os.environ["UCX_MEMTYPE_CACHE"] = "n"
from .core import * # noqa
from .core import get_ucx_version # noqa
from .utils import get_ucxpy_logger # noqa
from ._libs.utils import get_address # noqa
from ._version import __git_commit__, __version__
try:
import pynvml
except ImportError:
pynvml = None
# Setup UCX-Py logger
logger = get_ucxpy_logger()
if "UCX_RNDV_THRESH" not in os.environ:
logger.info("Setting UCX_RNDV_THRESH=8192")
os.environ["UCX_RNDV_THRESH"] = "8192"
if "UCX_RNDV_FRAG_MEM_TYPE" not in os.environ:
logger.info("Setting UCX_RNDV_FRAG_MEM_TYPE=cuda")
os.environ["UCX_RNDV_FRAG_MEM_TYPE"] = "cuda"
if (
pynvml is not None
and "UCX_CUDA_COPY_MAX_REG_RATIO" not in os.environ
and get_ucx_version() >= (1, 12, 0)
):
try:
pynvml.nvmlInit()
device_count = pynvml.nvmlDeviceGetCount()
large_bar1 = [False] * device_count
def _is_mig_device(handle):
try:
pynvml.nvmlDeviceGetMigMode(handle)[0]
except pynvml.NVMLError:
return False
return True
for dev_idx in range(device_count):
handle = pynvml.nvmlDeviceGetHandleByIndex(dev_idx)
# Ignore MIG devices and rely on UCX's default for now. Increasing
# `UCX_CUDA_COPY_MAX_REG_RATIO` should be thoroughly tested, as it's
# not yet clear whether it would be safe to set `1.0` for those
# instances too.
if _is_mig_device(handle):
continue
try:
bar1_total = pynvml.nvmlDeviceGetBAR1MemoryInfo(handle).bar1Total
except pynvml.nvml.NVMLError_NotSupported:
# Bar1 access not supported on this device, set it to
# zero (always lower than device memory).
bar1_total = 0
total_memory = pynvml.nvmlDeviceGetMemoryInfo(handle).total
if total_memory <= bar1_total:
large_bar1[dev_idx] = True
if all(large_bar1):
logger.info("Setting UCX_CUDA_COPY_MAX_REG_RATIO=1.0")
os.environ["UCX_CUDA_COPY_MAX_REG_RATIO"] = "1.0"
except (
pynvml.NVMLError_LibraryNotFound,
pynvml.NVMLError_DriverNotLoaded,
pynvml.NVMLError_Unknown,
):
pass
if "UCX_MAX_RNDV_RAILS" not in os.environ and get_ucx_version() >= (1, 12, 0):
logger.info("Setting UCX_MAX_RNDV_RAILS=1")
os.environ["UCX_MAX_RNDV_RAILS"] = "1"
__ucx_version__ = "%d.%d.%d" % get_ucx_version()
if get_ucx_version() < (1, 11, 1):
raise ImportError(
f"Support for UCX {__ucx_version__} has ended. Please upgrade to "
"1.11.1 or newer."
)
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/ucp/continuous_ucx_progress.py
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import asyncio
import socket
import weakref
class ProgressTask(object):
def __init__(self, worker, event_loop):
"""Creates a task that keeps calling worker.progress()
Notice, this class and the created task are careful not to hold a
reference to `worker` so that a dangling progress task will
not prevent `worker` from being garbage collected.
Parameters
----------
worker: UCXWorker
The UCX worker context to progress
event_loop: asyncio.EventLoop
The event loop to do progress in.
"""
self.weakref_worker = weakref.ref(worker)
self.event_loop = event_loop
self.asyncio_task = None
def __del__(self):
if self.asyncio_task is not None:
self.asyncio_task.cancel()
# Hash and equality is based on the event loop
def __hash__(self):
return hash(self.event_loop)
def __eq__(self, other):
return hash(self) == hash(other)
class NonBlockingMode(ProgressTask):
def __init__(self, worker, event_loop):
super().__init__(worker, event_loop)
self.asyncio_task = event_loop.create_task(self._progress_task())
async def _progress_task(self):
"""This helper function maintains a UCX progress loop."""
while True:
worker = self.weakref_worker()
if worker is None or not worker.initialized:
return
worker.progress()
del worker
# Give other co-routines a chance to run.
await asyncio.sleep(0)
class BlockingMode(ProgressTask):
def __init__(self, worker, event_loop, epoll_fd):
super().__init__(worker, event_loop)
# Creating a job that is ready straightaway but with low priority.
# Calling `await self.event_loop.sock_recv(self.rsock, 1)` will
# return when all non-IO tasks are finished.
# See <https://stackoverflow.com/a/48491563>.
self.rsock, wsock = socket.socketpair()
self.rsock.setblocking(0)
wsock.setblocking(0)
wsock.close()
# Bind an asyncio reader to a UCX epoll file descriptor
event_loop.add_reader(epoll_fd, self._fd_reader_callback)
# Remove the reader and close socket on finalization
weakref.finalize(self, event_loop.remove_reader, epoll_fd)
weakref.finalize(self, self.rsock.close)
def _fd_reader_callback(self):
worker = self.weakref_worker()
if worker is None or not worker.initialized:
return
worker.progress()
# Notice, we can safely overwrite `self.asyncio_task`
# since the previous arm task is finished by now.
assert self.asyncio_task is None or self.asyncio_task.done()
self.asyncio_task = self.event_loop.create_task(self._arm_worker())
async def _arm_worker(self):
# When arming the worker, the following must be true:
# - No more progress in UCX (see doc of ucp_worker_arm())
# - All asyncio tasks that aren't waiting on UCX must be executed
# so that the asyncio's next state is epoll wait.
# See <https://github.com/rapidsai/ucx-py/issues/413>
while True:
worker = self.weakref_worker()
if worker is None or not worker.initialized:
return
worker.progress()
# Cancel inflight messages that couldn't be completed. This may
# happen if the user called ep.recv() but the remote worker
# errored before sending the message.
if worker.cancel_inflight_messages() > 0:
worker.progress()
del worker
# This IO task returns when all non-IO tasks are finished.
# Notice, we do NOT hold a reference to `worker` while waiting.
await self.event_loop.sock_recv(self.rsock, 1)
worker = self.weakref_worker()
if worker is None or not worker.initialized:
return
if worker.arm():
# At this point we know that asyncio's next state is
# epoll wait.
break
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/ucp/utils.py
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import asyncio
import hashlib
import logging
import multiprocessing as mp
import os
import socket
import time
import numpy as np
mp = mp.get_context("spawn")
def get_event_loop():
"""
Get running or create new event loop
In Python 3.10, the behavior of `get_event_loop()` is deprecated and in
the future it will be an alias of `get_running_loop()`. In several
situations, UCX-Py needs to create a new event loop, so this function
will remain for now as an alternative to the behavior of `get_event_loop()`
from Python < 3.10, returning the running loop via `get_running_loop()`
if one exists, or a new one via `new_event_loop()` otherwise.
"""
try:
return asyncio.get_running_loop()
except RuntimeError:
return asyncio.new_event_loop()
def get_ucxpy_logger():
"""
Get UCX-Py logger with custom formatting
Returns
-------
logger : logging.Logger
Logger object
Examples
--------
>>> logger = get_ucxpy_logger()
>>> logger.warning("Test")
[1585175070.2911468] [dgx12:1054] UCXPY WARNING Test
"""
_level_enum = logging.getLevelName(os.getenv("UCXPY_LOG_LEVEL", "WARNING"))
logger = logging.getLogger("ucx")
# Avoid duplicate logging
logger.propagate = False
class LoggingFilter(logging.Filter):
def filter(self, record):
record.hostname = socket.gethostname()
record.timestamp = str("%.6f" % time.time())
return True
formatter = logging.Formatter(
"[%(timestamp)s] [%(hostname)s:%(process)d] UCXPY %(levelname)s %(message)s"
)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
handler.addFilter(LoggingFilter())
logger.addHandler(handler)
logger.setLevel(_level_enum)
return logger
def hash64bits(*args):
"""64 bit unsigned hash of `args`"""
# 64 bits hexdigest
h = hashlib.sha1(bytes(repr(args), "utf-8")).hexdigest()[:16]
# Convert to an integer and return
return int(h, 16)
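# Illustrative example: the digest-derived hash is deterministic across
# processes and fits in 64 bits, which is what makes it usable as a UCX tag:
#
#   assert hash64bits("msg_send", 42) < 2**64
#   assert hash64bits("msg_send", 42) == hash64bits("msg_send", 42)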
def hmean(a):
"""Harmonic mean"""
if len(a):
return 1 / np.mean(1 / a)
else:
return 0
| 0 |
rapidsai_public_repos/ucx-py
|
rapidsai_public_repos/ucx-py/ucp/VERSION
|
0.36.0
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/benchmarks/cudf_merge.py
|
"""
Benchmark send receive on one machine
"""
import argparse
import asyncio
import cProfile
import gc
import io
import os
import pickle
import pstats
import sys
import tempfile
from time import monotonic as clock
import cupy
import numpy as np
import ucp
from ucp._libs.utils import (
format_bytes,
format_time,
print_multi,
print_separator,
)
from ucp.benchmarks.asyncssh import run_ssh_cluster
from ucp.benchmarks.utils import (
_run_cluster_server,
_run_cluster_workers,
run_cluster_server,
run_cluster_workers,
)
from ucp.utils import hmean
# Must be set _before_ importing RAPIDS libraries (cuDF, RMM)
os.environ["RAPIDS_NO_INITIALIZE"] = "True"
import cudf # noqa: E402
import rmm # noqa: E402
from rmm.allocators.cupy import rmm_cupy_allocator # noqa: E402
def sizeof_cudf_dataframe(df):
return int(
sum(col.memory_usage for col in df._data.columns) + df._index.memory_usage()
)
async def send_df(ep, df):
header, frames = df.serialize()
header["frame_ifaces"] = [f.__cuda_array_interface__ for f in frames]
header = pickle.dumps(header)
header_nbytes = np.array([len(header)], dtype=np.uint64)
await ep.send(header_nbytes)
await ep.send(header)
for frame in frames:
await ep.send(frame)
async def recv_df(ep):
header_nbytes = np.empty((1,), dtype=np.uint64)
await ep.recv(header_nbytes)
header = bytearray(header_nbytes[0])
await ep.recv(header)
header = pickle.loads(header)
frames = [
cupy.empty(iface["shape"], dtype=iface["typestr"])
for iface in header["frame_ifaces"]
]
for frame in frames:
await ep.recv(frame)
cudf_typ = pickle.loads(header["type-serialized"])
return cudf_typ.deserialize(header, frames)
async def barrier(rank, eps):
if rank == 0:
await asyncio.gather(*[ep.recv(np.empty(1, dtype="u1")) for ep in eps.values()])
else:
await eps[0].send(np.zeros(1, dtype="u1"))
async def send_bins(eps, bins):
futures = []
for rank, ep in eps.items():
futures.append(send_df(ep, bins[rank]))
await asyncio.gather(*futures)
async def recv_bins(eps, bins):
futures = []
for ep in eps.values():
futures.append(recv_df(ep))
bins.extend(await asyncio.gather(*futures))
async def exchange_and_concat_bins(rank, eps, bins, timings=None):
ret = [bins[rank]]
if timings is not None:
t1 = clock()
await asyncio.gather(recv_bins(eps, ret), send_bins(eps, bins))
if timings is not None:
t2 = clock()
timings.append(
(
t2 - t1,
sum(
[sizeof_cudf_dataframe(b) for i, b in enumerate(bins) if i != rank]
),
)
)
return cudf.concat(ret)
async def distributed_join(args, rank, eps, left_table, right_table, timings=None):
left_bins = left_table.partition_by_hash(["key"], args.n_chunks)
right_bins = right_table.partition_by_hash(["key"], args.n_chunks)
left_df = await exchange_and_concat_bins(rank, eps, left_bins, timings)
right_df = await exchange_and_concat_bins(rank, eps, right_bins, timings)
return left_df.merge(right_df, on="key")
def generate_chunk(i_chunk, local_size, num_chunks, chunk_type, frac_match):
cupy.random.seed(42)
if chunk_type == "build":
# Build dataframe
#
# "key" column is a unique sample within [0, local_size * num_chunks)
#
# "payload" column is a random permutation of the chunk_size
start = local_size * i_chunk
stop = start + local_size
df = cudf.DataFrame(
{
"key": cupy.arange(start, stop=stop, dtype="int64"),
"payload": cupy.arange(local_size, dtype="int64"),
}
)
else:
# Other dataframe
#
# "key" column matches values from the build dataframe
# for a fraction (`frac_match`) of the entries. The matching
# entries are perfectly balanced across each partition of the
# "base" dataframe.
#
# "payload" column is a random permutation of the chunk_size
# Step 1. Choose values that DO match
sub_local_size = local_size // num_chunks
sub_local_size_use = max(int(sub_local_size * frac_match), 1)
arrays = []
for i in range(num_chunks):
bgn = (local_size * i) + (sub_local_size * i_chunk)
end = bgn + sub_local_size
ar = cupy.arange(bgn, stop=end, dtype="int64")
arrays.append(cupy.random.permutation(ar)[:sub_local_size_use])
key_array_match = cupy.concatenate(tuple(arrays), axis=0)
# Step 2. Add values that DON'T match
missing_size = local_size - key_array_match.shape[0]
start = local_size * num_chunks + local_size * i_chunk
stop = start + missing_size
key_array_no_match = cupy.arange(start, stop=stop, dtype="int64")
# Step 3. Combine and create the final dataframe chunk
key_array_combine = cupy.concatenate(
(key_array_match, key_array_no_match), axis=0
)
df = cudf.DataFrame(
{
"key": cupy.random.permutation(key_array_combine),
"payload": cupy.arange(local_size, dtype="int64"),
}
)
return df
def _get_server_command(args, num_workers):
cmd_args = " ".join(
[
"--server",
f"--devs {args.devs}",
f"--chunks-per-dev {args.chunks_per_dev}",
f"--chunk-size {args.chunk_size}",
f"--frac-match {args.frac_match}",
f"--iter {args.iter}",
f"--warmup-iter {args.warmup_iter}",
f"--num-workers {num_workers}",
]
)
return f"{sys.executable} -m ucp.benchmarks.cudf_merge {cmd_args}"
def _get_worker_command_without_address(
args,
num_workers,
node_idx,
):
cmd_list = [
f"--devs {args.devs}",
f"--chunks-per-dev {args.chunks_per_dev}",
f"--chunk-size {args.chunk_size}",
f"--frac-match {args.frac_match}",
f"--iter {args.iter}",
f"--warmup-iter {args.warmup_iter}",
f"--num-workers {num_workers}",
f"--node-idx {node_idx}",
]
if args.rmm_init_pool_size:
cmd_list.append(f"--rmm-init-pool-size {args.rmm_init_pool_size}")
if args.profile:
cmd_list.append(f"--profile {args.profile}")
if args.cuda_profile:
cmd_list.append("--cuda-profile")
if args.collect_garbage:
cmd_list.append("--collect-garbage")
cmd_args = " ".join(cmd_list)
return f"{sys.executable} -m ucp.benchmarks.cudf_merge {cmd_args}"
def _get_worker_command(
server_info,
args,
num_workers,
node_idx,
):
server_address = f"{server_info['address']}:{server_info['port']}"
worker_cmd = _get_worker_command_without_address(args, num_workers, node_idx)
worker_cmd += f" --server-address {server_address}"
return worker_cmd
async def worker(rank, eps, args):
# Setting current device and make RMM use it
rmm.reinitialize(pool_allocator=True, initial_pool_size=args.rmm_init_pool_size)
# Make cupy use RMM
cupy.cuda.set_allocator(rmm_cupy_allocator)
df1 = generate_chunk(rank, args.chunk_size, args.n_chunks, "build", args.frac_match)
df2 = generate_chunk(rank, args.chunk_size, args.n_chunks, "other", args.frac_match)
# Let's warmup and sync before benchmarking
for i in range(args.warmup_iter):
await distributed_join(args, rank, eps, df1, df2)
await barrier(rank, eps)
if args.collect_garbage:
gc.collect()
if args.cuda_profile:
cupy.cuda.profiler.start()
if args.profile:
pr = cProfile.Profile()
pr.enable()
iter_results = {"bw": [], "wallclock": [], "throughput": [], "data_processed": []}
timings = []
t1 = clock()
for i in range(args.iter):
iter_timings = []
iter_t = clock()
ret = await distributed_join(args, rank, eps, df1, df2, iter_timings)
await barrier(rank, eps)
iter_took = clock() - iter_t
# Ensure the number of matches falls within `args.frac_match` +/- 2%.
# Small chunk sizes may not have enough matches, skip check for chunks
# smaller than 100k.
if args.chunk_size >= 100_000:
expected_len = args.chunk_size * args.frac_match
expected_len_err = expected_len * 0.02
assert abs(len(ret) - expected_len) <= expected_len_err
if args.collect_garbage:
gc.collect()
iter_bw = sum(t[1] for t in iter_timings) / sum(t[0] for t in iter_timings)
iter_data_processed = len(df1) * sum([t.itemsize for t in df1.dtypes])
iter_data_processed += len(df2) * sum([t.itemsize for t in df2.dtypes])
iter_throughput = args.n_chunks * iter_data_processed / iter_took
iter_results["bw"].append(iter_bw)
iter_results["wallclock"].append(iter_took)
iter_results["throughput"].append(iter_throughput)
iter_results["data_processed"].append(iter_data_processed)
timings += iter_timings
took = clock() - t1
if args.profile:
pr.disable()
s = io.StringIO()
ps = pstats.Stats(pr, stream=s)
ps.dump_stats("%s.%0d" % (args.profile, rank))
if args.cuda_profile:
cupy.cuda.profiler.stop()
data_processed = len(df1) * sum([t.itemsize * args.iter for t in df1.dtypes])
data_processed += len(df2) * sum([t.itemsize * args.iter for t in df2.dtypes])
return {
"bw": sum(t[1] for t in timings) / sum(t[0] for t in timings),
"wallclock": took,
"throughput": args.n_chunks * data_processed / took,
"data_processed": data_processed,
"iter_results": iter_results,
}
def parse_args():
parser = argparse.ArgumentParser(description="")
parser.add_argument(
"--chunks-per-dev",
metavar="N",
default=1,
type=int,
help="Number of chunks per device",
)
parser.add_argument(
"-d",
"--devs",
metavar="LIST",
default="0",
type=str,
help='GPU devices to use (default "0").',
)
parser.add_argument(
"-l",
"--listen-address",
metavar="ip",
default=ucp.get_address(),
type=str,
help="Server listen address (default `ucp.get_address()`).",
)
parser.add_argument("-c", "--chunk-size", type=int, default=4, metavar="N")
parser.add_argument(
"--frac-match",
metavar="FRAC",
default=0.3,
type=float,
help="Fraction of rows that matches (default 0.3)",
)
parser.add_argument(
"--profile",
metavar="FILENAME",
default=None,
type=str,
help="Write profile for each worker to `filename.RANK`",
)
parser.add_argument(
"--cuda-profile",
default=False,
action="store_true",
help="Enable CUDA profiling, use with `nvprof --profile-child-processes \
--profile-from-start off`",
)
parser.add_argument(
"--rmm-init-pool-size",
metavar="BYTES",
default=None,
type=int,
help="Initial RMM pool size (default 1/2 total GPU memory)",
)
parser.add_argument(
"--collect-garbage",
default=False,
action="store_true",
help="Trigger Python garbage collection after each iteration.",
)
parser.add_argument(
"--iter",
default=1,
type=int,
help="Number of benchmark iterations.",
)
parser.add_argument(
"--warmup-iter",
default=5,
type=int,
help="Number of warmup iterations.",
)
parser.add_argument(
"--server",
default=False,
action="store_true",
help="Run server only.",
)
parser.add_argument(
"--server-file",
type=str,
help="File to store server's address (if `--server` is specified) or to "
"read its address from otherwise.",
)
parser.add_argument(
"--server-address",
type=str,
help="Address where server is listening, in the IP:PORT or HOST:PORT "
"format. Only to be used to connect to a remote server started with "
"`--server`.",
)
parser.add_argument(
"--num-workers",
type=int,
help="Number of workers in the entire cluster, mandatory when "
"`--server` is specified. This number can be calculated as: "
"`number_of_devices_per_node * number_of_nodes * chunks_per_device`.",
)
parser.add_argument(
"--node-idx",
type=int,
help="On a multi-node setup, specify the index of the node that this "
"process is running. Must be a unique number in the "
"[0, `--n-workers` / `len(--devs)`) range.",
)
parser.add_argument(
"--hosts",
type=str,
help="The list of hosts to use for a multi-node run. All hosts need "
"to be reachable via SSH without a password (i.e., with a password-less "
"key). Usage example: --hosts 'dgx12,dgx12,10.10.10.10,dgx13'. In the "
"example, the benchmark is launched with server (manages workers "
"synchronization) on dgx12 (first in the list), and then three workers "
"on hosts 'dgx12', '10.10.10.10', 'dgx13'. "
"This option cannot be used with `--server`, `--server-file`, "
"`--num-workers `, or `--node-idx` which are all used for a "
"manual multi-node setup.",
)
parser.add_argument(
"--print-commands-only",
default=False,
action="store_true",
help="Print commands for each node in case you don't want to or can't "
"use SSH for launching a cluster. To be used together with `--hosts`, "
"specifying this argument will list the commands that should be "
"launched in each node. This is only a convenience function, and the "
"user can write the same command lines by just following the guidance "
"in this file's argument descriptions and existing documentation.",
)
args = parser.parse_args()
if args.hosts:
try:
import asyncssh # noqa
except ImportError:
raise RuntimeError(
"The use of `--hosts` for SSH multi-node benchmarking requires "
"`asyncssh` to be installed."
)
if any(
arg
for arg in [
args.server,
args.num_workers,
args.node_idx,
]
):
raise RuntimeError(
"A multi-node setup using `--hosts` for automatic SSH configuration "
"cannot be used together with `--server`, `--num-workers` or "
"`--node-idx`."
)
elif args.server_file and not args.print_commands_only:
raise RuntimeError(
"Specifying `--server-file` together with `--hosts` is not "
"allowed, except when used with `--print-commands-only`."
)
else:
args.devs = [int(d) for d in args.devs.split(",")]
args.num_node_workers = len(args.devs) * args.chunks_per_dev
if any([args.server, args.server_file, args.server_address]):
if args.server_address:
server_host, server_port = args.server_address.split(":")
args.server_address = {"address": server_host, "port": int(server_port)}
args.server_info = args.server_file or args.server_address
if args.num_workers is None:
raise RuntimeError(
"A multi-node setup requires specifying `--num-workers`."
)
elif args.num_workers < 2:
raise RuntimeError("A multi-node setup requires `--num-workers >= 2`.")
if not args.server and args.node_idx is None:
raise RuntimeError(
"Each worker on a multi-node is required to specify `--node-num`."
)
args.n_chunks = args.num_workers
else:
args.n_chunks = args.num_node_workers
if args.n_chunks < 2:
raise RuntimeError(
"Number of chunks must be greater than 1 (chunks-per-dev: "
f"{args.chunks_per_dev}, devs: {args.devs})"
)
return args
def main():
args = parse_args()
if not args.server and not args.hosts:
assert args.n_chunks > 1
assert args.n_chunks % 2 == 0
if args.hosts:
hosts = args.hosts.split(",")
server_host, worker_hosts = hosts[0], hosts[1:]
num_workers = (
len(args.devs.split(",")) * len(worker_hosts) * args.chunks_per_dev
)
if args.print_commands_only:
server_cmd = _get_server_command(args, num_workers)
print(f"[{server_host}] Server command line: {server_cmd}")
for node_idx, worker_host in enumerate(worker_hosts):
worker_cmd = _get_worker_command_without_address(
args, num_workers, node_idx
)
if args.server_file:
worker_cmd += f" --server-file '{args.server_file}'"
else:
worker_cmd += " --server-address 'REPLACE WITH SERVER ADDRESS'"
print(f"[{worker_host}] Worker command line: {worker_cmd}")
return
else:
return run_ssh_cluster(
args,
server_host,
worker_hosts,
num_workers,
_get_server_command,
_get_worker_command,
)
elif args.server:
stats = run_cluster_server(
args.server_file,
args.n_chunks,
)
elif args.server_file or args.server_address:
return run_cluster_workers(
args.server_info,
args.n_chunks,
args.num_node_workers,
args.node_idx,
worker,
worker_args=args,
ensure_cuda_device=True,
)
else:
server_file = tempfile.NamedTemporaryFile()
server_proc, server_queue = _run_cluster_server(
server_file.name,
args.n_chunks,
)
# Wait for server to become available
with open(server_file.name, "r") as f:
while len(f.read()) == 0:
pass
worker_procs = _run_cluster_workers(
server_file.name,
args.n_chunks,
args.num_node_workers,
0,
worker,
worker_args=args,
ensure_cuda_device=True,
)
stats = [server_queue.get() for i in range(args.n_chunks)]
[p.join() for p in worker_procs]
server_proc.join()
wc = stats[0]["wallclock"]
bw = hmean(np.array([s["bw"] for s in stats]))
tp = stats[0]["throughput"]
dp = sum(s["data_processed"] for s in stats)
dp_iter = sum(s["iter_results"]["data_processed"][0] for s in stats)
print("cuDF merge benchmark")
print_separator(separator="-", length=110)
print_multi(values=["Device(s)", f"{args.devs}"])
print_multi(values=["Chunks per device", f"{args.chunks_per_dev}"])
print_multi(values=["Rows per chunk", f"{args.chunk_size}"])
print_multi(values=["Total data processed", f"{format_bytes(dp)}"])
print_multi(values=["Data processed per iter", f"{format_bytes(dp_iter)}"])
print_multi(values=["Row matching fraction", f"{args.frac_match}"])
print_separator(separator="=", length=110)
print_multi(values=["Wall-clock", f"{format_time(wc)}"])
print_multi(values=["Bandwidth", f"{format_bytes(bw)}/s"])
print_multi(values=["Throughput", f"{format_bytes(tp)}/s"])
print_separator(separator="=", length=110)
print_multi(values=["Run", "Wall-clock", "Bandwidth", "Throughput"])
for i in range(args.iter):
iter_results = stats[0]["iter_results"]
iter_wc = iter_results["wallclock"][i]
iter_bw = hmean(np.array([s["iter_results"]["bw"][i] for s in stats]))
iter_tp = iter_results["throughput"][i]
print_multi(
values=[
i,
f"{format_time(iter_wc)}",
f"{format_bytes(iter_bw)}/s",
f"{format_bytes(iter_tp)}/s",
]
)
if __name__ == "__main__":
main()
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/benchmarks/README.md
|
# Running Benchmarks
## cuDF Merge
The cuDF merge benchmark can be executed in 3 different ways:
1. Local-node only;
1. Multi-node with automatic process launch via SSH;
1. Multi-node with manual process launch.
In the following subsections we will exemplify how to launch the benchmark for each case. Be sure to check `python -m ucp.benchmarks.cudf_merge --help` to see a complete description of available options and their description.
### Local-node only
This is the simplest setup and will execute the benchmark only using GPUs on the host the process is being launched from. In its simple form, it can be executed as:
```bash
python -m ucp.benchmarks.cudf_merge -d 0,1 -c 1_000_000 --iter 10
```
The process above will execute the benchmark with the first 2 GPUs in the node, with 1M rows per chunk and a single chunk per device, for 10 iterations. To extend the same to 8 GPUs we could simply write:
```bash
python -m ucp.benchmarks.cudf_merge -d 0,1,2,3,4,5,6,7 -c 1_000_000 --iter 10
```
### Multi-node with automatic process launch via SSH
In this setup, the user can run the benchmark spanning multiple nodes, but a password-less SSH setup is required. That is often achieved by having a private key that doesn't require a passphrase authorized to log in to all the nodes where the benchmark is expected to run. To test whether this setup is already correct, it should suffice to execute `ssh NODE_HOSTNAME` or `ssh NODE_IP`; if you get a shell on the remote machine without being asked for a password, you are probably good to go. If this is not yet set up, please consult the documentation of your operating system.
Once the SSH setup is complete, we can extend the local-node example by simply introducing a new argument `--hosts`. The `--hosts` argument takes a comma-separated list of node hostnames or IP addresses, where the first element is where the server will run, followed by any number of worker hosts. The server is required to synchronize workers and doesn't require a GPU, but is allowed to share the same node as one of the worker nodes in that list. For example, if we want the server to run on a node with hostname 'dgx12', with workers on a machine with hostname 'dgx12', another with IP address '10.10.10.10' and another with hostname 'dgx13', we would run the command as below:
```bash
python -m ucp.benchmarks.cudf_merge -d 0,1,2,3 -c 1_000_000 --iter 10 --hosts dgx12,dgx12,10.10.10.10,dgx13
```
Note that in the example above we repeated dgx12: the first entry identifies the server, and the remaining entries identify workers. Multi-node setups also require every node to use the same GPU indices, so if you specify `-d 0,1,2,3`, all worker nodes must have those four GPUs available.
Should anything go wrong, you may specify the `UCXPY_ASYNCSSH_LOG_LEVEL=DEBUG` environment variable. This will print additional information, including the exact command that is being executed on the server and workers, as well as the output for each process.
### Multi-node with manual process setup
This is by far the most complicated setup, and should be used only if running on a cluster where SSH isn't possible or not recommended.
Before diving into the details of this setup, the user should know there is a convenience function to generate commands for each of the nodes. To generate them, the user may refer to the SSH section, where we pass `--hosts` to specify server and worker nodes, this time with an added `--print-commands-only` argument. The output will print one line for the server and for each worker node with the exact command that should be executed there.
Now, if the user would like to continue and learn the details of this setup, they are required to specify the exact number of workers that will be executed by the cluster, and the index of each node. Unlike the SSH setup, this time we may not use `--hosts` to specify where to launch.
**WARNING: All processes must specify the same values for `--devs`, `--chunks-per-dev`, `--chunk-size`, `--frac-match`, `--iter`, `--warmup-iter`, `--num-workers`.**
#### Calculating number of workers
Calculating the number of workers is straightforward but critical to be done right. It can be calculated as follows:
```python
# num_workers: number of workers, passed as `--num-workers`
# len(devs): length of the GPU list specified via `--devs`
# chunks_per_dev: number of chunks (processes) per GPU specified via `--chunks-per-dev`
# num_worker_nodes: number of nodes that will run workers
num_workers = len(devs) * chunks_per_dev * num_worker_nodes
```
In the examples that follow, we will launch 4 GPUs per node, 2 chunks per device and 2 worker nodes.
```python
# num_workers = len(devs) * chunks_per_dev * num_worker_nodes
16 = 4 * 2 * 2
```
#### Server
First, the user must launch the server, which may be in a node that doesn't contain any GPUs, or could be on one of the nodes where workers will run. To do so, two options exist:
##### Address on a file
In this setup, a file that is reachable on all nodes of the cluster must be specified, for example in a network file system.
```bash
python -m ucp.benchmarks.cudf_merge --server --devs 0,1,2,3 --chunks-per-dev 2 --chunk-size 1000000 --frac-match 0.5 --iter 10 --warmup-iter 5 --num-workers 16 --server-file /path/to/network/fs/server.json
```
##### Address on stdout
```bash
python -m ucp.benchmarks.cudf_merge --server --devs 0,1,2,3 --chunks-per-dev 2 --chunk-size 1000000 --frac-match 0.5 --iter 10 --warmup-iter 5 --num-workers 16
```
#### Workers
Once the server is up and running, workers must be launched on multiple nodes. Each node must execute workers with all options matching other nodes, both workers and server, with the exception of `--node-idx`. The `--node-idx` argument is used as a unique identifier for each node, so that each GPU worker may compute its rank, as sketched below.
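The sketch below illustrates how the global rank of each worker is assumed to be derived from `--node-idx`; the variable names here are illustrative, not actual benchmark variables:

```python
# Workers per node, as configured by --devs and --chunks-per-dev:
num_node_workers = len(devs) * chunks_per_dev
# Global rank of the i-th worker process (0 <= i < num_node_workers) on a node:
rank = node_idx * num_node_workers + i
```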
For the examples to follow, we will assume workers will run on two hosts: `worker-0` and `worker-1`.
##### Server address on a file
```bash
# Run on: worker-0
python -m ucp.benchmarks.cudf_merge --devs 0,1,2,3 --chunks-per-dev 2 --chunk-size 1000000 --frac-match 0.5 --iter 10 --warmup-iter 5 --num-workers 16 --node-idx 0 --rmm-init-pool-size 4000000000 --server-file '/path/to/network/fs/server.json'
# Run on: worker-1
python -m ucp.benchmarks.cudf_merge --devs 0,1,2,3 --chunks-per-dev 2 --chunk-size 1000000 --frac-match 0.5 --iter 10 --warmup-iter 5 --num-workers 16 --node-idx 1 --rmm-init-pool-size 4000000000 --server-file '/path/to/network/fs/server.json'
```
##### Server address on stdout
```bash
# Run on: worker-0
python -m ucp.benchmarks.cudf_merge --devs 0,1,2,3 --chunks-per-dev 2 --chunk-size 1000000 --frac-match 0.5 --iter 10 --warmup-iter 5 --num-workers 16 --node-idx 0 --rmm-init-pool-size 4000000000 --server-address 'REPLACE WITH SERVER ADDRESS'
# Run on: worker-1
python -m ucp.benchmarks.cudf_merge --devs 0,1,2,3 --chunks-per-dev 2 --chunk-size 1000000 --frac-match 0.5 --iter 10 --warmup-iter 5 --num-workers 16 --node-idx 1 --rmm-init-pool-size 4000000000 --server-address 'REPLACE WITH SERVER ADDRESS'
```
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/benchmarks/asyncssh.py
|
import asyncio
import json
import logging
import os
import queue
import sys
from functools import partial
logger = logging.getLogger("ucx.asyncssh")
logger.setLevel(logging.getLevelName(os.getenv("UCXPY_ASYNCSSH_LOG_LEVEL", "WARNING")))
try:
import asyncssh
class SSHProc(asyncssh.SSHClientSession):
def __init__(self, out_queue):
assert isinstance(out_queue, queue.Queue)
self.out_queue = out_queue
def data_received(self, data, datatype):
logger.debug(f"SSHProc.data_received(): {data=}")
self.out_queue.put(data)
def connection_lost(self, exc):
if exc:
logger.error(f"SSH session error: {exc}", file=sys.stderr)
else:
logger.debug(
f"SSH connection terminated succesfully {self.out_queue.empty()=}"
)
class SSHServerProc(SSHProc):
address = None
port = None
def data_received(self, data, datatype):
if self.address is None and self.port is None:
logger.debug(f"SSHServerProc.data_received() address: {data=}")
server_info = json.loads(data)
self.address = server_info["address"]
self.port = server_info["port"]
self.out_queue.put(server_info)
else:
super().data_received(data, datatype)
async def _run_ssh_cluster(
args,
server_host,
worker_hosts,
num_workers,
get_server_command,
get_worker_command,
):
"""
Run benchmarks in an SSH cluster.
The results are printed to stdout.
At the moment, only `ucp.benchmarks.cudf_merge` is supported.
Parameters
----------
args: Namespace
The arguments that were passed to `ucp.benchmarks.cudf_merge`.
server_host: str
String containing hostname or IP address of node where the server
will run.
worker_hosts: list
List of strings containing hostnames or IP addresses of nodes where
workers will run.
num_workers: int
Number of workers in the entire cluster.
get_server_command: callable
Function returning the full command that the server node will run.
Must have signature `get_server_command(args, num_workers)`,
where:
- `args` is the parsed `argparse.Namespace` object as parsed by
the caller application;
- `num_workers` number of workers in the entire cluster.
get_worker_command: callable
Function returning the full command that each worker node will run.
Must have signature `get_worker_command(args, num_workers, node_idx)`,
where:
- `args` is the parsed `argparse.Namespace` object as parsed by
the caller application;
- `num_workers` number of workers in the entire cluster;
- `node_idx` index of the node that the process will launch.
"""
logger.debug(f"{server_host=}, {worker_hosts=}")
async with asyncssh.connect(server_host, known_hosts=None) as conn:
server_queue = queue.Queue()
server_cmd = get_server_command(args, num_workers)
logger.debug(f"[{server_host}] {server_cmd=}")
server_chan, _ = await conn.create_session(
partial(SSHServerProc, server_queue),
server_cmd,
)
while True:
try:
server_info = server_queue.get_nowait()
except queue.Empty:
await asyncio.sleep(0.01)
else:
break
logger.info(f"Server session created {server_info=}")
workers_conn = await asyncio.gather(
*[asyncssh.connect(host, known_hosts=None) for host in worker_hosts]
)
workers_chan, workers_queue = [], []
for node_idx, worker_conn in enumerate(workers_conn):
worker_queue = queue.Queue()
worker_cmd = get_worker_command(
server_info,
args,
num_workers,
node_idx,
)
logger.debug(f"[{worker_hosts[node_idx]}] {worker_cmd=}")
worker_chan, _ = await worker_conn.create_session(
partial(SSHProc, worker_queue),
worker_cmd,
)
workers_chan.append(worker_chan)
workers_queue.append(worker_queue)
await asyncio.gather(*[chan.wait_closed() for chan in workers_chan])
await server_chan.wait_closed()
while not server_queue.empty():
print(server_queue.get())
for i, worker_queue in enumerate(workers_queue):
if not worker_queue.empty():
logger.warning(
f"Worker {worker_hosts[i]} stdout wasn't empty. This "
"likely indicates errors may have occurred. You may "
"run with `UCXPY_ASYNCSSH_LOG_LEVEL=DEBUG` to see the "
"full output."
)
while not worker_queue.empty():
logger.debug(worker_queue.get())
def run_ssh_cluster(
args,
server_host,
worker_hosts,
num_workers,
get_server_command,
get_worker_command,
):
"""
Same as `_run_ssh_cluster()` but running on event loop until completed.
"""
try:
asyncio.get_event_loop().run_until_complete(
_run_ssh_cluster(
args,
server_host,
worker_hosts,
num_workers,
get_server_command,
get_worker_command,
)
)
except (OSError, asyncssh.Error) as exc:
sys.exit(f"SSH connection failed: {exc}")
except ImportError:
SSHProc = None
SSHServerProc = None
run_ssh_cluster = None
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/benchmarks/__init__.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/benchmarks/utils.py
|
import asyncio
import json
import logging
import multiprocessing as mp
import os
import pickle
import threading
from types import ModuleType
import numpy as np
from ucp._libs.utils import get_address
logger = logging.getLogger("ucx")
def _ensure_cuda_device(devs, rank):
import numba.cuda
dev_id = devs[rank % len(devs)]
os.environ["CUDA_VISIBLE_DEVICES"] = str(dev_id)
logger.debug(f"{dev_id=}, {rank=}")
numba.cuda.current_context()
def get_allocator(
object_type: str, rmm_init_pool_size: int, rmm_managed_memory: bool
) -> ModuleType:
"""
Initialize and return array-allocator based on arguments passed.
Parameters
----------
object_type: str
The type of object the allocator should return. Options are: "numpy", "cupy"
or "rmm".
rmm_init_pool_size: int
If the object type is "rmm" (implies usage of RMM pool), define the initial
pool size.
rmm_managed_memory: bool
If the object type is "rmm", use managed memory if `True`, or default memory
otherwise.
Returns
-------
A handle to a module, one of ``numpy`` or ``cupy`` (if device memory is requested).
If the object type is ``rmm``, then ``cupy`` is configured to use RMM as an
allocator.
"""
if object_type == "numpy":
import numpy as xp
elif object_type == "cupy":
import cupy as xp
else:
import cupy as xp
import rmm
from rmm.allocators.cupy import rmm_cupy_allocator
rmm.reinitialize(
pool_allocator=True,
managed_memory=rmm_managed_memory,
initial_pool_size=rmm_init_pool_size,
)
xp.cuda.set_allocator(rmm_cupy_allocator)
return xp
async def send_pickled_msg(ep, obj):
msg = pickle.dumps(obj)
await ep.send_obj(msg)
async def recv_pickled_msg(ep):
msg = await ep.recv_obj()
return pickle.loads(msg)
def _server_process(
q,
server_file,
n_workers,
ucx_options_list,
):
import ucp
if ucx_options_list is not None:
ucp.init(ucx_options_list)
import sys
async def run():
lock = threading.Lock()
eps = {}
results = {}
async def server_handler(ep):
worker_rank, worker_ip, worker_port = await recv_pickled_msg(ep)
with lock:
eps[worker_rank] = (worker_ip, worker_port)
while len(eps) != n_workers:
await asyncio.sleep(0.1)
await send_pickled_msg(ep, eps)
worker_results = await recv_pickled_msg(ep)
with lock:
results[worker_rank] = worker_results
lf = ucp.create_listener(server_handler)
if server_file is None:
fp = open(sys.stdout.fileno(), mode="w", closefd=False)
else:
fp = open(server_file, mode="w")
with fp:
json.dump({"address": get_address(), "port": lf.port}, fp)
while len(results) != n_workers:
await asyncio.sleep(0.1)
return results
loop = asyncio.new_event_loop()
ret = loop.run_until_complete(run())
for rank in range(n_workers):
q.put(ret[rank])
def _run_cluster_server(
server_file,
n_workers,
ucx_options_list=None,
):
"""
Create a server that synchronizes workers.
The server will wait for all `n_workers` to connect and communicate their
endpoint information, then send the aggregate information to all workers
so that they will create endpoints to each other, in a fully-connected
    network. Each worker will then communicate its result back to the server,
    which will return that result back to the caller.
Parameters
----------
server_file: str or None
A string containing the path to a file that will be populated to contain
the address and port of the server, or `None` to print that information
to stdout.
    n_workers : int
Number of workers in the entire network, required to infer when all
workers have connected and completed.
ucx_options_list: list of dict
Options to pass to UCX when initializing workers, one for each worker.
Returns
-------
return : tuple
A tuple with two elements: the process spawned and a queue where results
will eventually be stored.
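
    Examples
    --------
    A minimal sketch::

        proc, queue = _run_cluster_server("server.json", n_workers=2)
        # ... start workers with `_run_cluster_workers()` ...
        results = [queue.get() for _ in range(2)]
        proc.join()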
"""
q = mp.Queue()
p = mp.Process(
target=_server_process,
args=(
q,
server_file,
n_workers,
ucx_options_list,
),
)
p.start()
return p, q
def run_cluster_server(
server_file,
n_workers,
ucx_options_list=None,
):
"""
Blocking version of `_run_cluster_server()`.
    Provides the same behavior as `_run_cluster_server()`, except that it will
    join processes and thus cause the function to be blocking. It will also
    drain the queue into a list with results for each worker in the
    `[0..n_workers)` range.
"""
p, q = _run_cluster_server(
server_file=server_file,
n_workers=n_workers,
ucx_options_list=ucx_options_list,
)
    # Joining the process while the queue still holds many items (reproducible
    # for more than 32 workers) causes the process to hang, so drain the queue
    # into a list first and return that list instead.
ret = [q.get() for i in range(n_workers)]
p.join()
assert not p.exitcode
return ret
def _worker_process(
queue,
server_info,
num_node_workers,
rank,
ucx_options_list,
ensure_cuda_device,
func,
args,
):
if ensure_cuda_device is True:
_ensure_cuda_device(args.devs, rank % num_node_workers)
import ucp
if ucx_options_list is not None:
ucp.init(ucx_options_list[rank])
async def run():
eps = {}
async def server_handler(ep):
peer_rank = np.empty((1,), dtype=np.uint64)
await ep.recv(peer_rank)
assert peer_rank[0] not in eps
eps[peer_rank[0]] = ep
lf = ucp.create_listener(server_handler)
logger.debug(f"Sending message info to {server_info=}, {rank=}")
server_ep = await ucp.create_endpoint(
server_info["address"], server_info["port"]
)
await send_pickled_msg(server_ep, (rank, get_address(), lf.port))
logger.debug(f"Receiving network info from server {rank=}")
workers_info = await recv_pickled_msg(server_ep)
n_workers = len(workers_info)
logger.debug(f"Creating endpoints to network {rank=}")
for i in range(rank + 1, n_workers):
remote_worker_ip, remote_worker_port = workers_info[i]
eps[i] = await ucp.create_endpoint(remote_worker_ip, remote_worker_port)
await eps[i].send(np.array([rank], dtype=np.uint64))
while len(eps) != n_workers - 1:
await asyncio.sleep(0.1)
logger.debug(f"Running worker {rank=}")
if asyncio.iscoroutinefunction(func):
results = await func(rank, eps, args)
else:
results = func(rank, eps, args)
await send_pickled_msg(server_ep, results)
loop = asyncio.new_event_loop()
ret = loop.run_until_complete(run())
queue.put(ret)
def _run_cluster_workers(
server_info,
num_workers,
num_node_workers,
node_idx,
worker_func,
worker_args=None,
ucx_options_list=None,
ensure_cuda_device=False,
):
"""
Create `n_workers` UCX processes that each run `worker_func`.
Each process will first connect to a server spawned with
`run_cluster_server()` which will synchronize workers across the nodes.
    This function is non-blocking: the processes it creates are started but
    not joined. It's the user's responsibility to join all processes in the
    returned list to ensure their completion.
Parameters
----------
server_info: str or dict
A string containing the path to a file created by `run_cluster_server()`
containing the address and port of the server. Alternatively, a
        dictionary containing keys `"address"` and `"port"` may be used in the
        same way.
num_workers : int
Number of workers in the entire network. Every node must run the same
number of workers, and thus this value should be equal to
        `num_node_workers * num_cluster_nodes`.
num_node_workers: int
Number of workers that this node will run.
node_idx: int
Index of the node in the entire cluster, within the range
`[0..num_cluster_nodes)`. This value is used to calculate the rank
of each worker. Each node must have a unique index.
worker_func: callable (can be a coroutine)
Function that each worker executes.
Must have signature: `worker(rank, eps, args)` where
- rank is the worker id
- eps is a dict of ranks to ucx endpoints
- args given here as `worker_args`
worker_args: object
The argument to pass to `worker_func`.
ucx_options_list: list of dict
Options to pass to UCX when initializing workers, one for each worker.
ensure_cuda_device: bool
        If `True`, set the `CUDA_VISIBLE_DEVICES` environment variable to the
        proper CUDA device based on the worker's rank and create the CUDA
        context on that device before calling `import ucp` for the first time
        on the newly-spawned worker process; otherwise continue without
        modifying `CUDA_VISIBLE_DEVICES` or creating a CUDA context.
        Please note that setting this to `False` may cause all workers to use
        device 0 and will not ensure proper InfiniBand<->GPU mapping on UCX,
        potentially leading to low performance as GPUDirectRDMA will not be
        active.
Returns
-------
processes : list
The list of processes spawned (one for each worker).
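
    Examples
    --------
    A minimal sketch (hypothetical worker function)::

        def my_worker(rank, eps, args):
            return rank

        procs = _run_cluster_workers(
            "server.json",
            num_workers=2,
            num_node_workers=2,
            node_idx=0,
            worker_func=my_worker,
        )
        for p in procs:
            p.join()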
"""
if isinstance(server_info, str):
with open(server_info, mode="r") as fp:
server_info = json.load(fp)
elif not isinstance(server_info, dict):
raise ValueError(
"server_info must be the path to a server file, or a dictionary "
"with the unpacked values."
)
processes = []
for worker_num in range(num_node_workers):
rank = node_idx * num_node_workers + worker_num
q = mp.Queue()
p = mp.Process(
target=_worker_process,
args=(
q,
server_info,
num_node_workers,
rank,
ucx_options_list,
ensure_cuda_device,
worker_func,
worker_args,
),
)
p.start()
processes.append(p)
return processes
def run_cluster_workers(
server_info,
num_workers,
num_node_workers,
node_idx,
worker_func,
worker_args=None,
ucx_options_list=None,
ensure_cuda_device=False,
):
"""
Blocking version of `_run_cluster_workers()`.
    Provides the same behavior as `_run_cluster_workers()`, except that it will
    join processes and thus cause the function to be blocking.
"""
processes = _run_cluster_workers(
server_info=server_info,
num_workers=num_workers,
num_node_workers=num_node_workers,
node_idx=node_idx,
worker_func=worker_func,
worker_args=worker_args,
ucx_options_list=ucx_options_list,
ensure_cuda_device=ensure_cuda_device,
)
for proc in processes:
proc.join()
assert not proc.exitcode
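# A minimal end-to-end sketch on a single node (hypothetical worker function;
# `run_cluster_server()` blocks, so it must run in the background, e.g. in a
# separate thread):
#
#     def my_worker(rank, eps, args):
#         return rank
#
#     server = threading.Thread(
#         target=run_cluster_server, args=("server.json", 2)
#     )
#     server.start()
#     run_cluster_workers("server.json", 2, 2, 0, my_worker)
#     server.join()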
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/benchmarks/send_recv.py
|
"""
Benchmark send receive on one machine:
UCX_TLS=tcp,cuda_copy,cuda_ipc python send-recv.py \
--server-dev 2 --client-dev 1 --object_type rmm \
--reuse-alloc --n-bytes 1GB
Benchmark send receive on two machines (IB testing):
# server process
UCX_MAX_RNDV_RAILS=1 UCX_TLS=tcp,cuda_copy,rc python send-recv.py \
--server-dev 0 --client-dev 5 --object_type rmm --reuse-alloc \
--n-bytes 1GB --server-only --port 13337 --n-iter 100
# client process
UCX_MAX_RNDV_RAILS=1 UCX_TLS=tcp,cuda_copy,rc python send-recv.py \
--server-dev 0 --client-dev 5 --object_type rmm --reuse-alloc \
--n-bytes 1GB --client-only --server-address SERVER_IP --port 13337 \
--n-iter 100
"""
import argparse
import asyncio
import multiprocessing as mp
import os
import numpy as np
import ucp
from ucp._libs.utils import (
format_bytes,
parse_bytes,
print_key_value,
print_separator,
)
from ucp.benchmarks.backends.ucp_async import (
UCXPyAsyncClient,
UCXPyAsyncServer,
)
from ucp.benchmarks.backends.ucp_core import UCXPyCoreClient, UCXPyCoreServer
from ucp.utils import get_event_loop
mp = mp.get_context("spawn")
def _get_backend_implementation(backend):
if backend == "ucp-async":
return {"client": UCXPyAsyncClient, "server": UCXPyAsyncServer}
elif backend == "ucp-core":
return {"client": UCXPyCoreClient, "server": UCXPyCoreServer}
elif backend == "tornado":
from ucp.benchmarks.backends.tornado import (
TornadoClient,
TornadoServer,
)
return {"client": TornadoClient, "server": TornadoServer}
raise ValueError(f"Unknown backend {backend}")
def _set_cuda_device(object_type, device):
if object_type in ["cupy", "rmm"]:
import numba.cuda
os.environ["CUDA_VISIBLE_DEVICES"] = str(device)
numba.cuda.current_context()
def server(queue, args):
if args.server_cpu_affinity >= 0:
os.sched_setaffinity(0, [args.server_cpu_affinity])
_set_cuda_device(args.object_type, args.server_dev)
server = _get_backend_implementation(args.backend)["server"](args, queue)
if asyncio.iscoroutinefunction(server.run):
loop = get_event_loop()
loop.run_until_complete(server.run())
else:
server.run()
def client(queue, port, server_address, args):
if args.client_cpu_affinity >= 0:
os.sched_setaffinity(0, [args.client_cpu_affinity])
_set_cuda_device(args.object_type, args.client_dev)
client = _get_backend_implementation(args.backend)["client"](
args, queue, server_address, port
)
if asyncio.iscoroutinefunction(client.run):
loop = get_event_loop()
loop.run_until_complete(client.run())
else:
client.run()
times = queue.get()
if args.report_gil_contention:
contention_metric = queue.get()
assert len(times) == args.n_iter
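    # Each iteration is a round trip, so 2 * n_bytes are transferred per
    # measured time; the factor of 2 below accounts for that.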
bw_avg = format_bytes(2 * args.n_iter * args.n_bytes / sum(times))
bw_med = format_bytes(2 * args.n_bytes / np.median(times))
lat_avg = int(sum(times) * 1e9 / (2 * args.n_iter))
lat_med = int(np.median(times) * 1e9 / 2)
print("Roundtrip benchmark")
print_separator(separator="=")
print_key_value(key="Iterations", value=f"{args.n_iter}")
print_key_value(key="Bytes", value=f"{format_bytes(args.n_bytes)}")
print_key_value(key="Object type", value=f"{args.object_type}")
print_key_value(key="Reuse allocation", value=f"{args.reuse_alloc}")
client.print_backend_specific_config()
print_separator(separator="=")
if args.object_type == "numpy":
print_key_value(key="Device(s)", value="CPU-only")
s_aff = (
args.server_cpu_affinity
if args.server_cpu_affinity >= 0
else "affinity not set"
)
c_aff = (
args.client_cpu_affinity
if args.client_cpu_affinity >= 0
else "affinity not set"
)
print_key_value(key="Server CPU", value=f"{s_aff}")
print_key_value(key="Client CPU", value=f"{c_aff}")
else:
print_key_value(key="Device(s)", value=f"{args.server_dev}, {args.client_dev}")
print_separator(separator="=")
print_key_value("Bandwidth (average)", value=f"{bw_avg}/s")
print_key_value("Bandwidth (median)", value=f"{bw_med}/s")
print_key_value("Latency (average)", value=f"{lat_avg} ns")
print_key_value("Latency (median)", value=f"{lat_med} ns")
if args.report_gil_contention:
print_key_value("GIL contention", value=f"{contention_metric}")
if not args.no_detailed_report:
print_separator(separator="=")
print_key_value(key="Iterations", value="Bandwidth, Latency")
print_separator(separator="-")
for i, t in enumerate(times):
ts = format_bytes(2 * args.n_bytes / t)
lat = int(t * 1e9 / 2)
print_key_value(key=i, value=f"{ts}/s, {lat}ns")
def parse_args():
parser = argparse.ArgumentParser(description="Roundtrip benchmark")
if callable(parse_bytes):
parser.add_argument(
"-n",
"--n-bytes",
metavar="BYTES",
default="10 Mb",
type=parse_bytes,
help="Message size. Default '10 Mb'.",
)
else:
parser.add_argument(
"-n",
"--n-bytes",
metavar="BYTES",
default=10_000_000,
type=int,
help="Message size in bytes. Default '10_000_000'.",
)
parser.add_argument(
"--n-iter",
metavar="N",
default=10,
type=int,
help="Number of send / recv iterations (default 10).",
)
parser.add_argument(
"--n-warmup-iter",
default=10,
type=int,
help="Number of send / recv warmup iterations (default 10).",
)
parser.add_argument(
"-b",
"--server-cpu-affinity",
metavar="N",
default=-1,
type=int,
help="CPU affinity for server process (default -1: not set).",
)
parser.add_argument(
"-c",
"--client-cpu-affinity",
metavar="N",
default=-1,
type=int,
help="CPU affinity for client process (default -1: not set).",
)
parser.add_argument(
"-o",
"--object_type",
default="numpy",
choices=["numpy", "cupy", "rmm"],
help="In-memory array type.",
)
parser.add_argument(
"-v",
"--verbose",
default=False,
action="store_true",
help="Whether to print timings per iteration.",
)
parser.add_argument(
"-s",
"--server-address",
metavar="ip",
default=ucp.get_address(),
type=str,
help="Server address (default `ucp.get_address()`).",
)
parser.add_argument(
"-d",
"--server-dev",
metavar="N",
default=0,
type=int,
help="GPU device on server (default 0).",
)
parser.add_argument(
"-e",
"--client-dev",
metavar="N",
default=0,
type=int,
help="GPU device on client (default 0).",
)
parser.add_argument(
"--reuse-alloc",
default=False,
action="store_true",
help="Reuse memory allocations between communication.",
)
parser.add_argument(
"--cuda-profile",
default=False,
action="store_true",
help="Setting CUDA profiler.start()/stop() around send/recv "
"typically used with `nvprof --profile-from-start off "
"--profile-child-processes`",
)
parser.add_argument(
"--rmm-init-pool-size",
metavar="BYTES",
default=None,
type=int,
help="Initial RMM pool size (default 1/2 total GPU memory)",
)
parser.add_argument(
"--server-only",
default=False,
action="store_true",
help="Start up only a server process (to be used with --client).",
)
parser.add_argument(
"--client-only",
default=False,
action="store_true",
help="Connect to solitary server process (to be user with --server-only)",
)
parser.add_argument(
"-p",
"--port",
default=None,
help="The port the server will bind to, if not specified, UCX will bind "
"to a random port. Must be specified when --client-only is used.",
type=int,
)
parser.add_argument(
"--enable-am",
default=False,
action="store_true",
help="Use Active Message API instead of TAG for transfers",
)
parser.add_argument(
"--rmm-managed-memory",
default=False,
action="store_true",
help="Use RMM managed memory (requires `--object-type rmm`)",
)
parser.add_argument(
"--no-detailed-report",
default=False,
action="store_true",
help="Disable detailed report per iteration.",
)
parser.add_argument(
"-l",
"--backend",
default="ucp-async",
type=str,
help="Backend Library (-l) to use, options are: 'ucp-async' (default), "
"'ucp-core' and 'tornado'.",
)
parser.add_argument(
"--report-gil-contention",
default=False,
action="store_true",
help="Report GIL contention (requires the `gilknocker` package).",
)
parser.add_argument(
"--delay-progress",
default=False,
action="store_true",
help="Only applies to 'ucp-core' backend: delay ucp_worker_progress calls "
"until a minimum number of outstanding operations is reached, implies "
"non-blocking send/recv. The --max-outstanding argument may be used to "
"control number of maximum outstanding operations. (Default: disabled)",
)
parser.add_argument(
"--max-outstanding",
metavar="N",
default=32,
type=int,
help="Only applies to 'ucp-core' backend: number of maximum outstanding "
"operations, see --delay-progress. (Default: 32)",
)
parser.add_argument(
"--error-handling",
action=argparse.BooleanOptionalAction,
default=True,
help="Enable endpoint error handling.",
)
args = parser.parse_args()
if args.cuda_profile and args.object_type == "numpy":
raise RuntimeError(
"`--cuda-profile` requires `--object_type=cupy` or `--object_type=rmm`"
)
if args.rmm_managed_memory and args.object_type != "rmm":
raise RuntimeError("`--rmm-managed-memory` requires `--object_type=rmm`")
backend_impl = _get_backend_implementation(args.backend)
if not (
backend_impl["client"].has_cuda_support
and backend_impl["server"].has_cuda_support
):
if args.object_type in {"cupy", "rmm"}:
raise RuntimeError(
f"Backend '{args.backend}' does not support CUDA transfers"
)
if args.backend != "ucp-core" and args.delay_progress:
raise RuntimeError("`--delay-progress` requires `--backend=ucp-core`")
if args.report_gil_contention:
try:
import gilknocker # noqa: F401
except ImportError:
raise RuntimeError(
"Could not import `gilknocker`. Make sure it is installed or "
"remove the `--report-gil-contention` argument."
)
return args
def main():
args = parse_args()
server_address = args.server_address
# if you are the server, only start the `server process`
# if you are the client, only start the `client process`
# otherwise, start everything
if not args.client_only:
# server process
q1 = mp.Queue()
p1 = mp.Process(target=server, args=(q1, args))
p1.start()
port = q1.get()
print(f"Server Running at {server_address}:{port}")
else:
port = args.port
if not args.server_only or args.client_only:
# client process
print(f"Client connecting to server at {server_address}:{port}")
q2 = mp.Queue()
p2 = mp.Process(target=client, args=(q2, port, server_address, args))
p2.start()
p2.join()
assert not p2.exitcode
else:
p1.join()
assert not p1.exitcode
if __name__ == "__main__":
main()
| 0 |
rapidsai_public_repos/ucx-py/ucp/benchmarks
|
rapidsai_public_repos/ucx-py/ucp/benchmarks/backends/ucp_async.py
|
import asyncio
from argparse import Namespace
from queue import Queue
from time import monotonic
import ucp
from ucp._libs.arr import Array
from ucp._libs.utils import print_key_value
from ucp.benchmarks.backends.base import BaseClient, BaseServer
from ucp.benchmarks.utils import get_allocator
def register_am_allocators(args: Namespace):
"""
Register Active Message allocator in worker to correct memory type if the
benchmark is set to use the Active Message API.
Parameters
----------
args
        Parsed command-line arguments that will be used to determine whether
        the caller is using the Active Message API and what memory type to
        register allocators for.
"""
if not args.enable_am:
return
import numpy as np
ucp.register_am_allocator(lambda n: np.empty(n, dtype=np.uint8), "host")
if args.object_type == "cupy":
import cupy as cp
ucp.register_am_allocator(lambda n: cp.empty(n, dtype=cp.uint8), "cuda")
elif args.object_type == "rmm":
import rmm
ucp.register_am_allocator(lambda n: rmm.DeviceBuffer(size=n), "cuda")
class UCXPyAsyncServer(BaseServer):
has_cuda_support = True
def __init__(
self,
args: Namespace,
queue: Queue,
):
self.args = args
self.queue = queue
async def run(self):
ucp.init()
xp = get_allocator(
self.args.object_type,
self.args.rmm_init_pool_size,
self.args.rmm_managed_memory,
)
register_am_allocators(self.args)
async def server_handler(ep):
if not self.args.enable_am:
if self.args.reuse_alloc:
recv_msg = Array(xp.zeros(self.args.n_bytes, dtype="u1"))
assert recv_msg.nbytes == self.args.n_bytes
for i in range(self.args.n_iter + self.args.n_warmup_iter):
if self.args.enable_am:
recv = await ep.am_recv()
await ep.am_send(recv)
else:
if not self.args.reuse_alloc:
recv_msg = Array(xp.zeros(self.args.n_bytes, dtype="u1"))
await ep.recv(recv_msg)
await ep.send(recv_msg)
await ep.close()
lf.close()
lf = ucp.create_listener(
server_handler,
port=self.args.port,
endpoint_error_handling=self.args.error_handling,
)
self.queue.put(lf.port)
while not lf.closed():
await asyncio.sleep(0.5)
class UCXPyAsyncClient(BaseClient):
has_cuda_support = True
def __init__(
self,
args: Namespace,
queue: Queue,
server_address: str,
port: int,
):
self.args = args
self.queue = queue
self.server_address = server_address
self.port = port
async def run(self):
ucp.init()
xp = get_allocator(
self.args.object_type,
self.args.rmm_init_pool_size,
self.args.rmm_managed_memory,
)
register_am_allocators(self.args)
ep = await ucp.create_endpoint(
self.server_address,
self.port,
endpoint_error_handling=self.args.error_handling,
)
if self.args.enable_am:
msg = xp.arange(self.args.n_bytes, dtype="u1")
else:
send_msg = Array(xp.arange(self.args.n_bytes, dtype="u1"))
if self.args.reuse_alloc:
recv_msg = Array(xp.zeros(self.args.n_bytes, dtype="u1"))
assert send_msg.nbytes == self.args.n_bytes
assert recv_msg.nbytes == self.args.n_bytes
if self.args.cuda_profile:
xp.cuda.profiler.start()
if self.args.report_gil_contention:
from gilknocker import KnockKnock
        # Use the smallest polling interval possible; otherwise contention will
        # always be zero for small messages and inconsistent for large messages.
knocker = KnockKnock(polling_interval_micros=1)
knocker.start()
times = []
for i in range(self.args.n_iter + self.args.n_warmup_iter):
start = monotonic()
if self.args.enable_am:
await ep.am_send(msg)
await ep.am_recv()
else:
if not self.args.reuse_alloc:
recv_msg = Array(xp.zeros(self.args.n_bytes, dtype="u1"))
await ep.send(send_msg)
await ep.recv(recv_msg)
stop = monotonic()
if i >= self.args.n_warmup_iter:
times.append(stop - start)
if self.args.report_gil_contention:
knocker.stop()
if self.args.cuda_profile:
xp.cuda.profiler.stop()
self.queue.put(times)
if self.args.report_gil_contention:
self.queue.put(knocker.contention_metric)
def print_backend_specific_config(self):
print_key_value(
key="Transfer API", value=f"{'AM' if self.args.enable_am else 'TAG'}"
)
print_key_value(key="UCX_TLS", value=f"{ucp.get_config()['TLS']}")
print_key_value(
key="UCX_NET_DEVICES", value=f"{ucp.get_config()['NET_DEVICES']}"
)
| 0 |
rapidsai_public_repos/ucx-py/ucp/benchmarks
|
rapidsai_public_repos/ucx-py/ucp/benchmarks/backends/tornado.py
|
import asyncio
from time import monotonic
import numpy as np
from tornado.iostream import StreamClosedError
from tornado.tcpclient import TCPClient
from tornado.tcpserver import TCPServer
from ucp.benchmarks.backends.base import BaseClient, BaseServer
class TornadoServer(BaseServer):
has_cuda_support = False
def __init__(self, args, queue):
self.args = args
self.queue = queue
def _start_listener(self, server, port):
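        # Bind to the requested port; if none was given, probe the
        # [10000, 60000) range until a free port is found.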
if port is not None:
server.listen(port)
else:
for i in range(10000, 60000):
try:
server.listen(i)
except OSError:
continue
else:
port = i
break
return port
async def run(self):
args = self.args
event = asyncio.Event()
class TransferServer(TCPServer):
async def handle_stream(self, stream, address):
if args.reuse_alloc:
recv_msg = np.zeros(args.n_bytes, dtype="u1")
assert recv_msg.nbytes == args.n_bytes
for i in range(args.n_iter + args.n_warmup_iter):
if not args.reuse_alloc:
recv_msg = np.zeros(args.n_bytes, dtype="u1")
try:
await stream.read_into(recv_msg.data)
await stream.write(recv_msg.data)
except StreamClosedError as e:
print(e)
break
event.set()
# Set max_buffer_size to 1 GiB for now
server = TransferServer(max_buffer_size=1024**3)
port = self._start_listener(server, args.port)
self.queue.put(port)
await event.wait()
class TornadoClient(BaseClient):
has_cuda_support = False
def __init__(self, args, queue, server_address, port):
self.args = args
self.queue = queue
self.server_address = server_address
self.port = port
    async def run(self):
client = TCPClient()
# Set max_buffer_size to 1 GiB for now
stream = await client.connect(
self.server_address, self.port, max_buffer_size=1024**3
)
send_msg = np.arange(self.args.n_bytes, dtype="u1")
assert send_msg.nbytes == self.args.n_bytes
if self.args.reuse_alloc:
recv_msg = np.zeros(self.args.n_bytes, dtype="u1")
assert recv_msg.nbytes == self.args.n_bytes
if self.args.report_gil_contention:
from gilknocker import KnockKnock
        # Use the smallest polling interval possible; otherwise contention will
        # always be zero for small messages and inconsistent for large messages.
knocker = KnockKnock(polling_interval_micros=1)
knocker.start()
times = []
for i in range(self.args.n_iter + self.args.n_warmup_iter):
start = monotonic()
if not self.args.reuse_alloc:
recv_msg = np.zeros(self.args.n_bytes, dtype="u1")
await stream.write(send_msg.data)
await stream.read_into(recv_msg.data)
stop = monotonic()
if i >= self.args.n_warmup_iter:
times.append(stop - start)
if self.args.report_gil_contention:
knocker.stop()
self.queue.put(times)
if self.args.report_gil_contention:
self.queue.put(knocker.contention_metric)
| 0 |
rapidsai_public_repos/ucx-py/ucp/benchmarks
|
rapidsai_public_repos/ucx-py/ucp/benchmarks/backends/__init__.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
| 0 |
rapidsai_public_repos/ucx-py/ucp/benchmarks
|
rapidsai_public_repos/ucx-py/ucp/benchmarks/backends/base.py
|
from abc import ABC, abstractmethod
from argparse import Namespace
from queue import Queue
class BaseServer(ABC):
@abstractmethod
def __init__(self, args: Namespace, queue: Queue):
"""
Benchmark server.
Parameters
----------
args: argparse.Namespace
Parsed command-line arguments that will be used as parameters during
the `run` method.
queue: Queue
Queue object where server will put the port it is listening at.
"""
pass
@property
@abstractmethod
    def has_cuda_support(self) -> bool:
"""
Check whether server implementation supports CUDA memory transfers.
Returns
-------
ret: bool
`True` if CUDA is supported, `False` otherwise.
"""
return False
@abstractmethod
def run(self):
"""
Run the benchmark server.
The server is executed as follows:
1. Start the listener and put port where it is listening into the queue
registered in constructor;
2. Setup any additional context (Active Message registration, memory buffers
to reuse, etc.);
3. Transfer data back-and-forth with client;
4. Shutdown server.
"""
pass
class BaseClient(ABC):
@abstractmethod
def __init__(self, args: Namespace, queue: Queue, server_address: str, port: int):
"""
Benchmark client.
Parameters
----------
args
Parsed command-line arguments that will be used as parameters during
the `run` method.
queue
Queue object where to put timing results.
server_address
Hostname or IP address where server is listening at.
port
Port where server is listening at.
"""
pass
@property
@abstractmethod
    def has_cuda_support(self) -> bool:
"""
Check whether client implementation supports CUDA memory transfers.
Returns
-------
ret: bool
`True` if CUDA is supported, `False` otherwise.
"""
return False
@abstractmethod
def run(self):
"""
Run the benchmark client.
The client is executed as follows:
1. Connects to listener;
2. Setup any additional context (Active Message registration, memory buffers
to reuse, etc.);
3. Transfer data back-and-forth with server;
4. Shutdown client;
5. Put timing results into the queue registered in constructor.
"""
pass
def print_backend_specific_config(self):
"""
Pretty print configuration specific to backend implementation.
"""
pass
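# A minimal sketch of a custom backend pair (hypothetical, illustration only):
#
#     class EchoServer(BaseServer):
#         has_cuda_support = False
#
#         def __init__(self, args, queue):
#             self.args, self.queue = args, queue
#
#         def run(self):
#             ...  # listen, put the port into self.queue, echo messages back
#
#     class EchoClient(BaseClient):
#         has_cuda_support = False
#
#         def __init__(self, args, queue, server_address, port):
#             self.args, self.queue = args, queue
#             self.server_address, self.port = server_address, port
#
#         def run(self):
#             ...  # connect, time n_iter round trips, put timings into queue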
| 0 |
rapidsai_public_repos/ucx-py/ucp/benchmarks
|
rapidsai_public_repos/ucx-py/ucp/benchmarks/backends/ucp_core.py
|
from argparse import Namespace
from queue import Queue
from threading import Lock
from time import monotonic
import ucp
from ucp._libs import ucx_api
from ucp._libs.arr import Array
from ucp._libs.utils import print_key_value
from ucp._libs.utils_test import (
blocking_am_recv,
blocking_am_send,
blocking_recv,
blocking_send,
non_blocking_recv,
non_blocking_send,
)
from ucp.benchmarks.backends.base import BaseClient, BaseServer
from ucp.benchmarks.utils import get_allocator
WireupMessage = bytearray(b"wireup")
def register_am_allocators(args: Namespace, worker: ucx_api.UCXWorker):
"""
Register Active Message allocator in worker to correct memory type if the
benchmark is set to use the Active Message API.
Parameters
----------
args
        Parsed command-line arguments that will be used to determine whether
        the caller is using the Active Message API and what memory type to
        register allocators for.
worker
UCX-Py core Worker object where to register the allocator.
"""
if not args.enable_am:
return
import numpy as np
worker.register_am_allocator(
lambda n: np.empty(n, dtype=np.uint8), ucx_api.AllocatorType.HOST
)
if args.object_type == "cupy":
import cupy as cp
worker.register_am_allocator(
lambda n: cp.empty(n, dtype=cp.uint8), ucx_api.AllocatorType.CUDA
)
elif args.object_type == "rmm":
import rmm
worker.register_am_allocator(
lambda n: rmm.DeviceBuffer(size=n), ucx_api.AllocatorType.CUDA
)
class UCXPyCoreServer(BaseServer):
has_cuda_support = True
def __init__(
self,
args: Namespace,
queue: Queue,
):
self.args = args
self.queue = queue
def run(self):
self.ep = None
ctx = ucx_api.UCXContext(
feature_flags=(
ucx_api.Feature.AM if self.args.enable_am else ucx_api.Feature.TAG,
)
)
worker = ucx_api.UCXWorker(ctx)
xp = get_allocator(
self.args.object_type,
self.args.rmm_init_pool_size,
self.args.rmm_managed_memory,
)
register_am_allocators(self.args, worker)
op_lock = Lock()
finished = [0]
outstanding = [0]
def op_started():
with op_lock:
outstanding[0] += 1
def op_completed():
with op_lock:
outstanding[0] -= 1
finished[0] += 1
def _send_handle(request, exception, msg):
# Notice, we pass `msg` to the handler in order to make sure
# it doesn't go out of scope prematurely.
assert exception is None
op_completed()
def _tag_recv_handle(request, exception, ep, msg):
assert exception is None
req = ucx_api.tag_send_nb(
ep, msg, msg.nbytes, tag=0, cb_func=_send_handle, cb_args=(msg,)
)
if req is None:
op_completed()
def _am_recv_handle(recv_obj, exception, ep):
assert exception is None
msg = Array(recv_obj)
ucx_api.am_send_nbx(
ep, msg, msg.nbytes, cb_func=_send_handle, cb_args=(msg,)
)
def _listener_handler(conn_request, msg):
self.ep = ucx_api.UCXEndpoint.create_from_conn_request(
worker,
conn_request,
endpoint_error_handling=self.args.error_handling,
)
# Wireup before starting to transfer data
if self.args.enable_am is True:
ucx_api.am_recv_nb(self.ep, cb_func=_am_recv_handle, cb_args=(self.ep,))
else:
wireup = Array(bytearray(len(WireupMessage)))
op_started()
ucx_api.tag_recv_nb(
worker,
wireup,
wireup.nbytes,
tag=0,
cb_func=_tag_recv_handle,
cb_args=(self.ep, wireup),
)
for i in range(self.args.n_iter + self.args.n_warmup_iter):
if self.args.enable_am is True:
ucx_api.am_recv_nb(
self.ep, cb_func=_am_recv_handle, cb_args=(self.ep,)
)
else:
if not self.args.reuse_alloc:
msg = Array(xp.zeros(self.args.n_bytes, dtype="u1"))
op_started()
ucx_api.tag_recv_nb(
worker,
msg,
msg.nbytes,
tag=0,
cb_func=_tag_recv_handle,
cb_args=(self.ep, msg),
)
if not self.args.enable_am and self.args.reuse_alloc:
msg = Array(xp.zeros(self.args.n_bytes, dtype="u1"))
else:
msg = None
listener = ucx_api.UCXListener(
worker=worker,
port=self.args.port or 0,
cb_func=_listener_handler,
cb_args=(msg,),
)
self.queue.put(listener.port)
while outstanding[0] == 0:
worker.progress()
# +1 to account for wireup message
if self.args.delay_progress:
while finished[0] < self.args.n_iter + self.args.n_warmup_iter + 1 and (
outstanding[0] >= self.args.max_outstanding
or finished[0] + self.args.max_outstanding
>= self.args.n_iter + self.args.n_warmup_iter + 1
):
worker.progress()
else:
while finished[0] != self.args.n_iter + self.args.n_warmup_iter + 1:
worker.progress()
del self.ep
class UCXPyCoreClient(BaseClient):
has_cuda_support = True
def __init__(
self,
args: Namespace,
queue: Queue,
server_address: str,
port: int,
):
self.args = args
self.queue = queue
self.server_address = server_address
self.port = port
def run(self):
ctx = ucx_api.UCXContext(
feature_flags=(
ucx_api.Feature.AM
if self.args.enable_am is True
else ucx_api.Feature.TAG,
)
)
worker = ucx_api.UCXWorker(ctx)
xp = get_allocator(
self.args.object_type,
self.args.rmm_init_pool_size,
self.args.rmm_managed_memory,
)
register_am_allocators(self.args, worker)
ep = ucx_api.UCXEndpoint.create(
worker,
self.server_address,
self.port,
endpoint_error_handling=self.args.error_handling,
)
send_msg = xp.arange(self.args.n_bytes, dtype="u1")
if self.args.reuse_alloc:
recv_msg = xp.zeros(self.args.n_bytes, dtype="u1")
if self.args.enable_am:
blocking_am_send(worker, ep, send_msg)
blocking_am_recv(worker, ep)
else:
wireup_recv = bytearray(len(WireupMessage))
blocking_send(worker, ep, WireupMessage)
blocking_recv(worker, ep, wireup_recv)
op_lock = Lock()
finished = [0]
outstanding = [0]
def maybe_progress():
while outstanding[0] >= self.args.max_outstanding:
worker.progress()
def op_started():
with op_lock:
outstanding[0] += 1
def op_completed():
with op_lock:
outstanding[0] -= 1
finished[0] += 1
if self.args.cuda_profile:
xp.cuda.profiler.start()
if self.args.report_gil_contention:
from gilknocker import KnockKnock
        # Use the smallest polling interval possible; otherwise contention will
        # always be zero for small messages and inconsistent for large messages.
knocker = KnockKnock(polling_interval_micros=1)
knocker.start()
times = []
last_iter = self.args.n_iter + self.args.n_warmup_iter - 1
for i in range(self.args.n_iter + self.args.n_warmup_iter):
start = monotonic()
if self.args.enable_am:
blocking_am_send(worker, ep, send_msg)
blocking_am_recv(worker, ep)
else:
if not self.args.reuse_alloc:
recv_msg = xp.zeros(self.args.n_bytes, dtype="u1")
if self.args.delay_progress:
non_blocking_recv(worker, ep, recv_msg, op_started, op_completed)
non_blocking_send(worker, ep, send_msg, op_started, op_completed)
maybe_progress()
else:
blocking_send(worker, ep, send_msg)
blocking_recv(worker, ep, recv_msg)
if i == last_iter and self.args.delay_progress:
while finished[0] != 2 * (self.args.n_iter + self.args.n_warmup_iter):
worker.progress()
stop = monotonic()
if i >= self.args.n_warmup_iter:
times.append(stop - start)
if self.args.report_gil_contention:
knocker.stop()
if self.args.cuda_profile:
xp.cuda.profiler.stop()
self.queue.put(times)
if self.args.report_gil_contention:
self.queue.put(knocker.contention_metric)
def print_backend_specific_config(self):
delay_progress_str = (
f"True ({self.args.max_outstanding})"
if self.args.delay_progress is True
else "False"
)
print_key_value(
key="Transfer API", value=f"{'AM' if self.args.enable_am else 'TAG'}"
)
print_key_value(key="Delay progress", value=f"{delay_progress_str}")
print_key_value(key="UCX_TLS", value=f"{ucp.get_config()['TLS']}")
print_key_value(
key="UCX_NET_DEVICES", value=f"{ucp.get_config()['NET_DEVICES']}"
)
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/ucx_api.pyi
|
import enum
from typing import Callable, Dict, Iterable, Mapping, Optional, Tuple
# typedefs.pyx
class AllocatorType(enum.Enum):
HOST: int
CUDA: int
UNSUPPORTED: int
class Feature(enum.Enum):
TAG: int
RMA: int
AMO32: int
AMO64: int
WAKEUP: int
STREAM: int
AM: int
# utils.pyx
def get_current_options() -> Dict[str, str]: ...
def get_ucx_version() -> Tuple[int]: ...
# ucx_object.pyx
class UCXObject:
def close(self) -> None: ...
# ucx_context.pyx
class UCXContext(UCXObject):
def __init__(
self, config_dict: Mapping = ..., feature_flags: Iterable[Feature] = ...
): ...
# ucx_address.pyx
class UCXAddress:
@classmethod
def from_buffer(cls, buffer) -> UCXAddress: ...
@classmethod
def from_worker(cls, worker: UCXWorker) -> UCXAddress: ...
@property
def address(self) -> int: ...
@property
def length(self) -> int: ...
# ucx_worker.pyx
class UCXWorker(UCXObject):
def __init__(self, context: UCXContext): ...
def progress(self) -> None: ...
def ep_create(
self, ip_address: str, port: int, endpoint_error_handling: bool
) -> UCXEndpoint: ...
def ep_create_from_worker_address(
self, ip_address: str, port: int, endpoint_error_handling: bool
) -> UCXEndpoint: ...
def ep_create_from_conn_request(
self, conn_request: int, endpoint_error_handling: bool
) -> UCXEndpoint: ...
def register_am_allocator(
self, allocator: Callable, allocator_type: AllocatorType
) -> None: ...
# ucx_listener.pyx
class UCXListener(UCXObject):
port: int
ip: str
def __init__(
self,
worker: UCXWorker,
port: int,
cb_func: Callable,
cb_args: Optional[tuple] = ...,
cb_kwargs: dict = ...,
): ...
# ucx_endpoint.pyx
class UCXEndpoint(UCXObject):
def info(self) -> str: ...
@property
def worker(self) -> UCXWorker: ...
def unpack_rkey(self, rkey) -> UCXRkey: ...
# ucx_memory_handle.pyx
class UCXMemoryHandle(UCXObject):
@classmethod
def alloc(cls, ctx: UCXContext, size: int) -> UCXMemoryHandle: ...
@classmethod
def map(cls, ctx: UCXContext, buffer) -> UCXMemoryHandle: ...
def pack_rkey(self) -> PackedRemoteKey: ...
# transfer_am.pyx
def am_send_nbx(
ep: UCXEndpoint,
buffer,
nbytes: int,
cb_func: Callable,
cb_args: Optional[tuple] = ...,
cb_kwargs: Optional[dict] = ...,
name: Optional[str] = ...,
): ...
def am_recv_nb(
ep: UCXEndpoint,
cb_func: Callable,
cb_args: Optional[tuple] = ...,
cb_kwargs: Optional[dict] = ...,
name: Optional[str] = ...,
): ...
# transfer_stream.pyx
def stream_send_nb(
ep: UCXEndpoint,
buffer,
nbytes: int,
cb_func: Callable,
cb_args: Optional[tuple] = ...,
cb_kwargs: Optional[dict] = ...,
name: Optional[str] = ...,
): ...
def stream_recv_nb(
ep: UCXEndpoint,
buffer,
nbytes: int,
cb_func: Callable,
cb_args: Optional[tuple] = ...,
cb_kwargs: Optional[dict] = ...,
name: Optional[str] = ...,
): ...
# transfer_tag.pyx
def tag_send_nb(
ep: UCXEndpoint,
buffer,
nbytes: int,
tag: int,
cb_func: Callable,
cb_args: Optional[tuple] = ...,
cb_kwargs: Optional[dict] = ...,
name: Optional[str] = ...,
): ...
def tag_recv_nb(
worker: UCXWorker,
buffer,
nbytes: int,
tag: int,
cb_func: Callable,
cb_args: Optional[tuple] = ...,
cb_kwargs: Optional[dict] = ...,
name: Optional[str] = ...,
ep: Optional[UCXEndpoint] = ...,
): ...
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/ucx_worker.pyx
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved.
# See file LICENSE for terms.
# cython: language_level=3
import logging
import socket
from libc.stdint cimport uint16_t, uintptr_t
from libc.stdio cimport FILE
from libc.string cimport memset
from .exceptions import UCXError
from .ucx_api_dep cimport *
from .utils import nvtx_annotate
logger = logging.getLogger("ucx")
cdef _drain_worker_tag_recv(ucp_worker_h handle):
cdef ucp_tag_message_h message
cdef ucp_tag_recv_info_t info
cdef ucs_status_ptr_t status
cdef void *buf
cdef ucp_tag_recv_callback_t _tag_recv_cb = (
<ucp_tag_recv_callback_t>_tag_recv_callback
)
while True:
message = ucp_tag_probe_nb(handle, 0, 0, 1, &info)
if message == NULL:
break
logger.debug(
"Draining tag receive messages, worker: %s, tag: %s, length: %d" % (
hex(int(<uintptr_t>handle)),
hex(int(info.sender_tag)),
info.length
)
)
_finished = [False]
def _req_cb(request, exception):
_finished[0] = True
buf = malloc(info.length)
status = ucp_tag_msg_recv_nb(
handle, buf, info.length, ucp_dt_make_contig(1), message, _tag_recv_cb
)
try:
req = _handle_status(
status, info.length, _req_cb, (), {}, u"ucp_tag_msg_recv_nb", set()
)
if req is not None:
while _finished[0] is not True:
ucp_worker_progress(handle)
finally:
free(buf)
def _ucx_worker_handle_finalizer(
uintptr_t handle_as_int, UCXContext ctx, set inflight_msgs, bint tag_support
):
assert ctx.initialized
cdef ucp_worker_h handle = <ucp_worker_h>handle_as_int
# This drains all the receive messages that were not received by the user with
# `tag_recv_nb` when UCP_FEATURE_TAG is enabled. Without this, UCX raises
# warnings such as below upon exit:
# `unexpected tag-receive descriptor ... was not matched`
if tag_support:
_drain_worker_tag_recv(handle)
# Cancel all inflight messages
cdef UCXRequest req
cdef dict req_info
cdef str name
for req in list(inflight_msgs):
assert not req.closed()
req_info = <dict>req._handle.info
name = req_info["name"]
logger.debug("Future cancelling: %s" % name)
ucp_request_cancel(handle, <void*>req._handle)
ucp_worker_destroy(handle)
cdef class UCXWorker(UCXObject):
"""Python representation of `ucp_worker_h`"""
cdef:
ucp_worker_h _handle
UCXContext _context
set _inflight_msgs
dict _inflight_msgs_to_cancel
dict _am_recv_pool
dict _am_recv_wait
object _am_host_allocator
object _am_cuda_allocator
def __init__(self, UCXContext context):
cdef ucp_params_t ucp_params
cdef ucp_worker_params_t worker_params
cdef ucp_am_handler_param_t am_handler_param
cdef ucs_status_t status
cdef bint tag_enabled
assert context.initialized
self._context = context
memset(&worker_params, 0, sizeof(worker_params))
worker_params.field_mask = UCP_WORKER_PARAM_FIELD_THREAD_MODE
worker_params.thread_mode = UCS_THREAD_MODE_MULTI
status = ucp_worker_create(context._handle, &worker_params, &self._handle)
assert_ucs_status(status)
self._inflight_msgs = set()
self._inflight_msgs_to_cancel = {"am": set(), "tag": set()}
cdef int AM_MSG_ID = 0
if Feature.AM in context._feature_flags:
self._am_recv_pool = dict()
self._am_recv_wait = dict()
self._am_host_allocator = bytearray
self._am_cuda_allocator = None
am_handler_param.field_mask = (
UCP_AM_HANDLER_PARAM_FIELD_ID |
UCP_AM_HANDLER_PARAM_FIELD_CB |
UCP_AM_HANDLER_PARAM_FIELD_ARG
)
am_handler_param.id = AM_MSG_ID
am_handler_param.cb = <ucp_am_recv_callback_t>_am_recv_callback
am_handler_param.arg = <void *>self
status = ucp_worker_set_am_recv_handler(self._handle, &am_handler_param)
tag_enabled = Feature.TAG in context._feature_flags
self.add_handle_finalizer(
_ucx_worker_handle_finalizer,
int(<uintptr_t>self._handle),
self._context,
self._inflight_msgs,
tag_enabled,
)
context.add_child(self)
def register_am_allocator(self, object allocator, allocator_type):
"""Register an allocator for received Active Messages.
The allocator registered by this function is always called by the
active message receive callback when an incoming message is
available. The appropriate allocator is called depending on whether
the message received is a host message or CUDA message.
Note that CUDA messages can only be received via rendezvous, all
eager messages are received on a host object.
By default, the host allocator is `bytearray`. There is no default
CUDA allocator and one must always be registered if CUDA is used.
Parameters
----------
allocator: callable
            An allocation function accepting exactly one argument, the
            size of the message to receive.
allocator_type: AllocatorType
The type of allocator, currently supports AllocatorType.HOST
and AllocatorType.CUDA.
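
        Examples
        --------
        A minimal sketch (assumes NumPy is available)::

            import numpy as np

            worker.register_am_allocator(
                lambda nbytes: np.empty(nbytes, dtype=np.uint8),
                AllocatorType.HOST,
            )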
"""
if allocator_type is AllocatorType.HOST:
self._am_host_allocator = allocator
elif allocator_type is AllocatorType.CUDA:
self._am_cuda_allocator = allocator
else:
raise UCXError("Allocator type not supported")
def init_blocking_progress_mode(self):
assert self.initialized
# In blocking progress mode, we create an epoll file
# descriptor that we can wait on later.
cdef ucs_status_t status
cdef int ucp_epoll_fd
cdef epoll_event ev
cdef int err
status = ucp_worker_get_efd(self._handle, &ucp_epoll_fd)
assert_ucs_status(status)
self.arm()
epoll_fd = epoll_create(1)
if epoll_fd == -1:
raise IOError("epoll_create(1) returned -1")
ev.data.fd = ucp_epoll_fd
ev.data.ptr = NULL
ev.data.u32 = 0
ev.data.u64 = 0
ev.events = EPOLLIN
err = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, ucp_epoll_fd, &ev)
if err != 0:
raise IOError("epoll_ctl() returned %d" % err)
return epoll_fd
cpdef bint arm(self) except *:
assert self.initialized
cdef ucs_status_t status
status = ucp_worker_arm(self._handle)
if status == UCS_ERR_BUSY:
return False
assert_ucs_status(status)
return True
@nvtx_annotate("UCXPY_PROGRESS", color="blue", domain="ucxpy")
def progress(self):
"""Try to progress the communication layer
        Warning: it is illegal to call this from a call-back function such as
        the call-back function given to UCXListener, tag_send_nb, and tag_recv_nb.
"""
assert self.initialized
with nogil:
while ucp_worker_progress(self._handle) != 0:
pass
@property
def handle(self):
assert self.initialized
return int(<uintptr_t>self._handle)
cpdef void request_cancel(self, UCXRequest req) except *:
assert self.initialized
assert not req.closed()
# Notice, `ucp_request_cancel()` calls the send/recv callback function,
# which will handle the request cleanup.
ucp_request_cancel(self._handle, req._handle)
cpdef ucs_status_t fence(self) except *:
cdef ucs_status_t status = ucp_worker_fence(self._handle)
assert_ucs_status(status)
return status
cpdef bint tag_probe(self, tag) except *:
cdef ucp_tag_recv_info_t info
cdef ucp_tag_message_h tag_message = ucp_tag_probe_nb(
self._handle, tag, -1, 0, &info
)
return tag_message != NULL
def flush(self, cb_func, tuple cb_args=None, dict cb_kwargs=None):
if cb_args is None:
cb_args = ()
if cb_kwargs is None:
cb_kwargs = {}
cdef ucs_status_ptr_t req
cdef ucp_send_callback_t _send_cb = <ucp_send_callback_t>_send_callback
cdef ucs_status_ptr_t status = ucp_worker_flush_nb(self._handle, 0, _send_cb)
return _handle_status(
status, 0, cb_func, cb_args, cb_kwargs, u'flush', self._inflight_msgs
)
def get_address(self):
return UCXAddress.from_worker(self)
def info(self):
assert self.initialized
cdef FILE *text_fd = create_text_fd()
ucp_worker_print_info(self._handle, text_fd)
return decode_text_fd(text_fd)
def cancel_inflight_messages(self):
"""Cancel inflight messages scheduled for canceling
        If any messages are scheduled for canceling, trigger their cancelation
        and return the number of canceled messages. If any messages were
        canceled, the worker needs to progress to complete the cancelation.
Returns
-------
total: The total number of inflight messages canceled.
"""
len_tag = _cancel_inflight_msgs(self)
len_am = _cancel_am_recv(self)
return len_tag + len_am
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/ucx_rma.pyx
|
from io import RawIOBase
from .arr cimport Array
from .exceptions import UCXError
from .ucx_api_dep cimport *
class RemoteMemory:
"""This class wraps all of the rkey meta data and remote memory locations to do
simple RMA operations.
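
    Examples
    --------
    A minimal sketch (assumes `rkey`, `base` and `length` were obtained from
    the remote peer, e.g. via a packed rkey sent over a tag transfer)::

        rmem = RemoteMemory(rkey, base, length)
        rmem.put_nbi(local_buffer)  # write local_buffer into remote memory
        # flush the endpoint or worker before reusing local_buffer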
"""
def __init__(self, rkey, base, length):
self._rkey = rkey
self._base = base
self._length = length
def put_nb(self,
memory,
cb_func,
tuple cb_args=None,
dict cb_kwargs=None,
offset=0,
size=0,
):
"""RMA put operation. Takes the memory specified in the buffer object and writes
it to the specified remote address.
Parameters
----------
memory: buffer
An ``Array`` wrapping a user-provided array-like object
cb_func: callable
The call-back function, which must accept `request` and `exception` as the
first two arguments.
cb_args: tuple, optional
Extra arguments to the call-back function
cb_kwargs: dict, optional
Extra keyword arguments to the call-back function
offset: int, optional
            Optional parameter to indicate an offset into the remote buffer to
            place the input buffer into. By default it will write to the base
            provided in the constructor.
size: int, optional
Optional parameter to indicate how much remote memory to write. If 0 or not
specified it will write the entire buffer provided
Returns
-------
UCXRequest
request object that holds metadata about the driver's progress
"""
memory = Array(memory)
dest = self._base + offset
if size == 0:
size = memory.nbytes
if size + offset > self._length:
raise IndexError("Out of bounds in UCX RMA interface")
        return put_nb(memory, size, dest, self._rkey, cb_func,
                      cb_args, cb_kwargs, u"put_nb")
def get_nb(self,
memory,
cb_func,
tuple cb_args=None,
dict cb_kwargs=None,
offset=0,
size=0
):
"""
Parameters
----------
memory: buffer
An ``Array`` wrapping a user-provided array-like object
cb_func: callable
The call-back function, which must accept `request` and `exception` as the
first two arguments.
cb_args: tuple, optional
Extra arguments to the call-back function
cb_kwargs: dict, optional
Extra keyword arguments to the call-back function
offset: int, optional
            Optional parameter to indicate an offset into the remote buffer to
            read from.
size: int, optional
Optional parameter to indicate how much remote memory to read. If 0 or not
specified it will read enough bytes to fill the buffer
Returns
-------
UCXRequest
request object that holds metadata about the driver's progress
"""
memory = Array(memory)
dest = self._base + offset
if size == 0:
size = memory.nbytes
if size + offset > self._length:
raise IndexError("Out of bounds in UCX RMA interface")
return get_nb(memory, size, dest, self._rkey, cb_func,
cb_args, cb_kwargs, u"get_nb")
def put_nbi(self, memory, size=0, offset=0):
"""RMA put operation. Takes the memory specified in the buffer object and writes
        it to remote memory. In contrast with the *_nb interface, this does not
        return a request object.
Parameters
----------
memory: buffer
An ``Array`` wrapping a user-provided array-like object
offset: int, optional
            Optional parameter to indicate an offset into the remote buffer to
            place the input buffer into.
Returns
-------
True
            UCX holds no references to this buffer and it may be reused immediately
False
Buffer is in use by the underlying driver and not safe for reuse until the
endpoint or worker is flushed
"""
memory = Array(memory)
dest = self._base + offset
if size == 0:
size = memory.nbytes
if size + offset > self._length:
raise IndexError("Out of bounds in UCX RMA interface")
return put_nbi(memory, size, dest, self._rkey)
def get_nbi(self, memory, size=0, offset=0):
"""RMA get operation. Reads remote memory into a local buffer. Contrast with the
*_nb interface this does not return a request object.
Parameters
----------
memory: buffer
An ``Array`` wrapping a user-provided array-like object
offset: int, optional
            Optional parameter to indicate an offset into the remote buffer to
            read from.
Returns
-------
True
            UCX holds no references to this buffer and it may be reused immediately
False
Buffer is in use by the underlying driver and not safe for reuse until the
endpoint or worker is flushed
"""
memory = Array(memory)
dest = self._base + offset
if size == 0:
size = memory.nbytes
if size + offset > self._length:
raise IndexError("Out of bounds in UCX RMA interface")
return get_nbi(memory, size, dest, self._rkey)
def put_nbi(Array buffer, size_t nbytes, uint64_t remote_addr, UCXRkey rkey, name=None):
if name is None:
name = u"put_nbi"
cdef ucs_status_t status = ucp_put_nbi(rkey.ep._handle,
<const void *>buffer.ptr,
nbytes,
remote_addr,
rkey._handle)
return assert_ucs_status(status)
def get_nbi(Array buffer, size_t nbytes, uint64_t remote_addr, UCXRkey rkey, name=None):
if name is None:
name = u"get_nbi"
cdef ucs_status_t status = ucp_get_nbi(rkey.ep._handle,
<void *>buffer.ptr,
nbytes,
remote_addr,
rkey._handle)
return assert_ucs_status(status)
def put_nb(Array buffer,
size_t nbytes,
uint64_t remote_addr,
UCXRkey rkey,
cb_func,
tuple cb_args=None,
dict cb_kwargs=None,
name=None
):
cdef ucs_status_t ucx_status
if name is None:
name = u"put_nb"
cdef ucp_send_callback_t send_cb = <ucp_send_callback_t>_send_callback
cdef ucs_status_ptr_t status = ucp_put_nb(rkey.ep._handle,
<const void *>buffer.ptr,
nbytes,
remote_addr,
rkey._handle,
send_cb)
return _handle_status(
status, nbytes, cb_func, cb_args, cb_kwargs, name, rkey.ep._inflight_msgs
)
def get_nb(Array buffer,
size_t nbytes,
uint64_t remote_addr,
UCXRkey rkey,
cb_func,
tuple cb_args=None,
dict cb_kwargs=None,
name=None
):
cdef ucs_status_t ucx_status
cdef ucp_send_callback_t send_cb = <ucp_send_callback_t>_send_callback
cdef ucs_status_ptr_t status = ucp_get_nb(rkey.ep._handle,
<void *>buffer.ptr,
nbytes,
remote_addr,
rkey._handle,
send_cb)
return _handle_status(
status, nbytes, cb_func, cb_args, cb_kwargs, name, rkey.ep._inflight_msgs
)
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/transfer_common.pyx
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved.
# See file LICENSE for terms.
# cython: language_level=3
from libc.stdint cimport uintptr_t
from .exceptions import UCXCanceled, UCXError, log_errors
from .ucx_api_dep cimport *
# This callback function is currently needed by stream_send_nb and
# tag_send_nb transfer functions, as well as UCXEndpoint and UCXWorker
# flush methods.
cdef void _send_callback(void *request, ucs_status_t status) with gil:
cdef UCXRequest req
cdef dict req_info
cdef str name, ucx_status_msg, msg
cdef set inflight_msgs
cdef tuple cb_args
cdef dict cb_kwargs
with log_errors():
req = UCXRequest(<uintptr_t><void*> request)
assert not req.closed()
req_info = <dict>req._handle.info
req_info["status"] = "finished"
if "cb_func" not in req_info:
# This callback function was called before ucp_tag_send_nb() returned
return
exception = None
if status == UCS_ERR_CANCELED:
name = req_info["name"]
msg = "<%s>: " % name
exception = UCXCanceled(msg)
elif status != UCS_OK:
name = req_info["name"]
ucx_status_msg = ucs_status_string(status).decode("utf-8")
msg = "<%s>: %s" % (name, ucx_status_msg)
exception = UCXError(msg)
try:
inflight_msgs = req_info["inflight_msgs"]
inflight_msgs.discard(req)
cb_func = req_info["cb_func"]
if cb_func is not None:
cb_args = req_info["cb_args"]
if cb_args is None:
cb_args = ()
cb_kwargs = req_info["cb_kwargs"]
if cb_kwargs is None:
cb_kwargs = {}
cb_func(req, exception, *cb_args, **cb_kwargs)
finally:
req.close()
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/utils_test.py
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import multiprocessing as mp
from ucp._libs import ucx_api
from ucp._libs.arr import Array
mp = mp.get_context("spawn")
def blocking_handler(request, exception, finished):
assert exception is None
finished[0] = True
def blocking_flush(obj):
finished = [False]
if not hasattr(obj, "progress"):
progress = obj.worker.progress
else:
progress = obj.progress
req = obj.flush(cb_func=blocking_handler, cb_args=(finished,))
if req is not None:
while not finished[0]:
progress()
def blocking_send(worker, ep, msg, tag=0):
msg = Array(msg)
finished = [False]
req = ucx_api.tag_send_nb(
ep,
msg,
msg.nbytes,
tag=tag,
cb_func=blocking_handler,
cb_args=(finished,),
)
if req is not None:
while not finished[0]:
worker.progress()
def blocking_recv(worker, ep, msg, tag=0):
msg = Array(msg)
finished = [False]
req = ucx_api.tag_recv_nb(
worker,
msg,
msg.nbytes,
tag=tag,
cb_func=blocking_handler,
cb_args=(finished,),
ep=ep,
)
if req is not None:
while not finished[0]:
worker.progress()
def non_blocking_handler(request, exception, completed_cb):
if exception is not None:
print(exception)
assert exception is None
completed_cb()
def non_blocking_send(worker, ep, msg, started_cb, completed_cb, tag=0):
msg = Array(msg)
started_cb()
req = ucx_api.tag_send_nb(
ep,
msg,
msg.nbytes,
tag=tag,
cb_func=non_blocking_handler,
cb_args=(completed_cb,),
)
if req is None:
completed_cb()
return req
def non_blocking_recv(worker, ep, msg, started_cb, completed_cb, tag=0):
msg = Array(msg)
started_cb()
req = ucx_api.tag_recv_nb(
worker,
msg,
msg.nbytes,
tag=tag,
cb_func=non_blocking_handler,
cb_args=(completed_cb,),
ep=ep,
)
if req is None:
completed_cb()
return req
def blocking_am_send(worker, ep, msg):
msg = Array(msg)
finished = [False]
req = ucx_api.am_send_nbx(
ep,
msg,
msg.nbytes,
cb_func=blocking_handler,
cb_args=(finished,),
)
if req is not None:
while not finished[0]:
worker.progress()
def blocking_am_recv_handler(recv_obj, exception, ret):
assert exception is None
ret[0] = recv_obj
def blocking_am_recv(worker, ep):
ret = [None]
ucx_api.am_recv_nb(
ep,
cb_func=blocking_am_recv_handler,
cb_args=(ret,),
)
while ret[0] is None:
worker.progress()
return ret[0]
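# A minimal pairing sketch (hypothetical `worker` and endpoint `ep`):
#
#     import numpy as np
#
#     send_buf = np.arange(10, dtype="u1")
#     recv_buf = np.zeros(10, dtype="u1")
#     blocking_send(worker, ep, send_buf)   # progresses worker until sent
#     blocking_recv(worker, ep, recv_buf)   # progresses worker until received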
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/ucx_object.pyx
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
# cython: language_level=3
import weakref
def _handle_finalizer_wrapper(
children, handle_finalizer, handle_as_int, *extra_args, **extra_kargs
):
for weakref_to_child in children:
child = weakref_to_child()
if child is not None:
child.close()
handle_finalizer(handle_as_int, *extra_args, **extra_kargs)
cdef class UCXObject:
"""Base class for UCX classes
This base class streamlines the cleanup of UCX objects and reduces duplicate code.
"""
cdef:
object __weakref__
object _finalizer
list _children
def __cinit__(self):
# The finalizer, which can be called multiple times but only
# evoke the finalizer function once.
        # Is None when the underlying UCX handle hasn't been initialized.
self._finalizer = None
# List of weak references of UCX objects that make use of this object
self._children = []
cpdef void close(self) except *:
"""Close the object and free the underlying UCX handle.
Does nothing if the object is already closed
"""
if self.initialized:
self._finalizer()
@property
def initialized(self):
"""Is the underlying UCX handle initialized"""
return self._finalizer and self._finalizer.alive
cpdef void add_child(self, child) except *:
"""Add a UCX object to this object's children. The underlying UCX
handle will be freed when this object is freed.
"""
self._children.append(weakref.ref(child))
def add_handle_finalizer(self, handle_finalizer, handle_as_int, *extra_args):
"""Add a finalizer of `handle_as_int`"""
self._finalizer = weakref.finalize(
self,
_handle_finalizer_wrapper,
self._children,
handle_finalizer,
handle_as_int,
*extra_args
)
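# --- Usage sketch (added for illustration, not part of the original file) ---
# How the finalizer machinery above is typically used: register a release
# function for a handle, then `close()` runs it exactly once. The integer
# handle and release function below are hypothetical stand-ins for real UCX
# resources.
def _example_finalizer_usage():
    released = []
    obj = UCXObject()
    obj.add_handle_finalizer(released.append, 123)  # 123: fake handle
    assert obj.initialized
    obj.close()          # invokes the finalizer, releasing the handle
    obj.close()          # no-op: the finalizer only runs once
    assert released == [123] and not obj.initialized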
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/ucx_listener.pyx
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
# cython: language_level=3
from libc.stdint cimport uint16_t, uintptr_t
from .exceptions import log_errors
from .ucx_api_dep cimport *
cdef void _listener_callback(ucp_conn_request_h conn_request, void *args) with gil:
"""Callback function used by UCXListener"""
cdef dict cb_data = <dict> args
with log_errors():
cb_data['cb_func'](
int(<uintptr_t>conn_request),
*cb_data['cb_args'],
**cb_data['cb_kwargs']
)
def _ucx_listener_handle_finalizer(uintptr_t handle):
ucp_listener_destroy(<ucp_listener_h> handle)
cdef class UCXListener(UCXObject):
"""Python representation of `ucp_listener_h`
Create and start a listener to accept incoming connections.
Note that listening stops when the returned Listener goes out of
scope, so remember to keep a reference to the object.
Parameters
----------
worker: UCXWorker
Listening worker.
port: int
An unused port number for listening, or `0` to let UCX assign
an unused port.
callback_func: callable
A callback function that gets invoked when an incoming
connection is accepted. The arguments are `conn_request`
followed by *cb_args and **cb_kwargs (if not None).
cb_args: tuple, optional
Extra arguments to the call-back function
cb_kwargs: dict, optional
Extra keyword arguments to the call-back function
Returns
-------
Listener: UCXListener
The new listener. When this object is deleted, the listening stops
"""
cdef:
ucp_listener_h _handle
dict cb_data
cdef public:
uint16_t port
str ip
def __init__(
self,
UCXWorker worker,
uint16_t port,
cb_func,
tuple cb_args=None,
dict cb_kwargs=None
):
if cb_args is None:
cb_args = ()
if cb_kwargs is None:
cb_kwargs = {}
cdef ucp_listener_params_t params
cdef ucp_listener_conn_callback_t _listener_cb = (
<ucp_listener_conn_callback_t>_listener_callback
)
cdef ucp_listener_attr_t attr
self.cb_data = {
"cb_func": cb_func,
"cb_args": cb_args,
"cb_kwargs": cb_kwargs,
}
params.field_mask = (
UCP_LISTENER_PARAM_FIELD_SOCK_ADDR | UCP_LISTENER_PARAM_FIELD_CONN_HANDLER
)
params.conn_handler.cb = _listener_cb
params.conn_handler.arg = <void*> self.cb_data
if c_util_set_sockaddr(¶ms.sockaddr, NULL, port):
raise MemoryError("Failed allocation of sockaddr")
cdef ucs_status_t status = ucp_listener_create(
worker._handle, ¶ms, &self._handle
)
c_util_sockaddr_free(¶ms.sockaddr)
assert_ucs_status(status)
attr.field_mask = UCP_LISTENER_ATTR_FIELD_SOCKADDR
status = ucp_listener_query(self._handle, &attr)
if status != UCS_OK:
ucp_listener_destroy(self._handle)
assert_ucs_status(status)
DEF MAX_STR_LEN = 50
cdef char ip_str[MAX_STR_LEN]
cdef char port_str[MAX_STR_LEN]
c_util_sockaddr_get_ip_port_str(&attr.sockaddr,
ip_str,
port_str,
MAX_STR_LEN)
self.port = <uint16_t>int(port_str.decode(errors="ignore"))
self.ip = ip_str.decode(errors="ignore")
self.add_handle_finalizer(
_ucx_listener_handle_finalizer,
int(<uintptr_t>self._handle)
)
worker.add_child(self)
@property
def handle(self):
assert self.initialized
return int(<uintptr_t>self._handle)
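# --- Usage sketch (added for illustration, not part of the original file) ---
# Start a listener on an OS-assigned port and collect incoming connection
# requests. `worker` is assumed to be a UCXWorker created elsewhere; each
# accepted request arrives as an integer `conn_request` handle that would
# normally be passed to UCXEndpoint.create_from_conn_request().
def _example_listener(worker):
    conn_requests = []
    listener = UCXListener(worker, 0, cb_func=conn_requests.append)
    return listener, listener.ip, listener.port, conn_requests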
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/exceptions.py
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
import contextlib
import logging
logger = logging.getLogger("ucx")
@contextlib.contextmanager
def log_errors(reraise_exception=False):
try:
yield
except BaseException as e:
logger.exception(e)
if reraise_exception:
raise
class UCXBaseException(Exception):
pass
class UCXError(UCXBaseException):
pass
class UCXConfigError(UCXError):
pass
class UCXWarning(UserWarning):
pass
class UCXCloseError(UCXBaseException):
pass
class UCXCanceled(UCXBaseException):
pass
class UCXConnectionReset(UCXBaseException):
pass
class UCXMsgTruncated(UCXBaseException):
pass
class UCXNotConnected(UCXBaseException):
pass
class UCXUnreachable(UCXBaseException):
pass
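# --- Usage sketch (added for illustration, not part of the original file) ---
# `log_errors` logs any exception through the "ucx" logger; by default it
# then swallows the exception, while reraise_exception=True re-raises it
# after logging.
def _example_log_errors():
    with log_errors():  # logged and swallowed
        raise UCXCanceled("operation was canceled")
    try:
        with log_errors(reraise_exception=True):  # logged and re-raised
            raise UCXError("something went wrong")
    except UCXError:
        pass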
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/arr.pxd
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
# cython: language_level=3
from libc.stdint cimport uintptr_t
cdef class Array:
cdef readonly uintptr_t ptr
cdef readonly bint readonly
cdef readonly object obj
cdef readonly Py_ssize_t itemsize
cdef readonly Py_ssize_t ndim
cdef Py_ssize_t[::1] shape_mv
cdef Py_ssize_t[::1] strides_mv
cdef readonly bint cuda
cpdef bint _c_contiguous(self)
cpdef bint _f_contiguous(self)
cpdef bint _contiguous(self)
cpdef Py_ssize_t _nbytes(self)
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/ucx_memory_handle.pyx
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2021, UT-Battelle, LLC. All rights reserved.
# See file LICENSE for terms.
# cython: language_level=3
from libc.stdint cimport uintptr_t
from .arr cimport Array
from .ucx_api_dep cimport *
def _ucx_mem_handle_finalizer(uintptr_t handle_as_int, UCXContext ctx):
assert ctx.initialized
cdef ucp_mem_h handle = <ucp_mem_h><void *>handle_as_int
cdef ucs_status_t status
status = ucp_mem_unmap(ctx._handle, handle)
assert_ucs_status(status)
cdef class UCXMemoryHandle(UCXObject):
""" Python representation for ucp_mem_h type. Users should not instance this class
directly and instead use either the map or the alloc class methods
"""
cdef ucp_mem_h _mem_handle
cdef UCXContext _context
cdef uint64_t r_address
cdef size_t _length
def __cinit__(self, UCXContext ctx, uintptr_t par):
cdef ucs_status_t status
cdef ucp_context_h ctx_handle = <ucp_context_h><uintptr_t>ctx.handle
cdef ucp_mem_map_params_t *params = <ucp_mem_map_params_t *>par
self._context = ctx
status = ucp_mem_map(ctx_handle, params, &self._mem_handle)
assert_ucs_status(status)
self._populate_metadata()
self.add_handle_finalizer(
_ucx_mem_handle_finalizer,
int(<uintptr_t>self._mem_handle),
self._context
)
ctx.add_child(self)
@classmethod
def alloc(cls, ctx, size):
""" Allocate a new pool of registered memory. This memory can be used for
RMA and AMO operations. This memory should not be accessed from outside
these operations.
Parameters
----------
ctx: UCXContext
The UCX context that this memory should be registered to
size: int
Minimum amount of memory to allocate
"""
cdef ucp_mem_map_params_t params
cdef ucs_status_t status
params.field_mask = (
UCP_MEM_MAP_PARAM_FIELD_FLAGS |
UCP_MEM_MAP_PARAM_FIELD_LENGTH
)
params.length = <size_t>size
params.flags = UCP_MEM_MAP_NONBLOCK | UCP_MEM_MAP_ALLOCATE
return UCXMemoryHandle(ctx, <uintptr_t>¶ms)
@classmethod
def map(cls, ctx, mem):
""" Register an existing memory object to UCX for use in RMA and AMO operations
It is not safe to access this memory from outside UCX while operations are
outstanding
Parameters
----------
ctx: UCXContext
The UCX context that this memory should be registered to
mem: buffer
The memory object to be registered
"""
cdef ucp_mem_map_params_t params
cdef ucs_status_t status
buff = Array(mem)
params.field_mask = (
UCP_MEM_MAP_PARAM_FIELD_ADDRESS |
UCP_MEM_MAP_PARAM_FIELD_LENGTH
)
params.address = <void*>buff.ptr
params.length = buff.nbytes
return UCXMemoryHandle(ctx, <uintptr_t>¶ms)
def pack_rkey(self):
""" Returns an UCXRKey object that represents a packed key. This key is what
allows the UCX API to associate this memory with an EP.
"""
return PackedRemoteKey.from_mem_handle(self)
@property
def mem_handle(self):
return <uintptr_t>self._mem_handle
# Done as a separate function because some day I plan on making this loaded lazily
# I believe this reports the actual registered space, rather than what was requested
def _populate_metadata(self):
cdef ucs_status_t status
cdef ucp_mem_attr_t attr
attr.field_mask = (
UCP_MEM_ATTR_FIELD_ADDRESS |
UCP_MEM_ATTR_FIELD_LENGTH
)
status = ucp_mem_query(self._mem_handle, &attr)
assert_ucs_status(status)
self.r_address = <uintptr_t>attr.address
self._length = attr.length
@property
def address(self):
""" Get base address for the memory registration """
return self.r_address
@property
def length(self):
""" Get length of registered memory """
return self._length
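# --- Usage sketch (added for illustration, not part of the original file) ---
# Typical flow: allocate (or map) a registered region, then pack its rkey so
# a peer can target it with RMA. `ctx` is assumed to be a UCXContext created
# elsewhere with Feature.RMA enabled.
def _example_registered_region(ctx, nbytes=4096):
    mem = UCXMemoryHandle.alloc(ctx, nbytes)     # UCX-allocated, registered
    packed = mem.pack_rkey()                     # bytes-like, ship to a peer
    return mem.address, mem.length, bytes(packed)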
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/utils.pyx
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2021 UT-Battelle, LLC. All rights reserved.
# See file LICENSE for terms.
# cython: language_level=3
from cpython.buffer cimport PyBUF_FORMAT, PyBUF_ND, PyBUF_WRITABLE
from libc.stdio cimport (
FILE,
SEEK_END,
SEEK_SET,
fclose,
fread,
fseek,
ftell,
rewind,
tmpfile,
)
from libc.stdlib cimport free
from .exceptions import UCXConfigError, UCXError
from .ucx_api_dep cimport *
cdef FILE * create_text_fd() except *:
cdef FILE *text_fd = tmpfile()
if text_fd == NULL:
raise IOError("tmpfile() failed")
return text_fd
cdef unicode decode_text_fd(FILE * text_fd):
cdef unicode py_text
cdef size_t size
cdef char *text
rewind(text_fd)
fseek(text_fd, 0, SEEK_END)
size = ftell(text_fd)
rewind(text_fd)
text = <char *>malloc(sizeof(char) * (size + 1))
try:
if fread(text, sizeof(char), size, text_fd) != size:
raise IOError("fread() failed")
text[size] = 0
py_text = text.decode(errors="ignore")
finally:
free(text)
fclose(text_fd)
return py_text
# This function will be called by UCX only the very first time
# a request's memory is initialized
cdef void ucx_py_request_reset(void* request):
cdef ucx_py_request *req = <ucx_py_request*> request
req.finished = False
req.uid = 0
req.info = NULL
# Helper function for the python buffer protocol to handle UCX's opaque memory objects
cdef get_ucx_object(Py_buffer *buffer, int flags,
void *ucx_data, Py_ssize_t length, obj):
if (flags & PyBUF_WRITABLE) == PyBUF_WRITABLE:
raise BufferError("Requested writable view on readonly data")
buffer.buf = ucx_data
buffer.obj = obj
buffer.len = length
buffer.readonly = True
buffer.itemsize = 1
if (flags & PyBUF_FORMAT) == PyBUF_FORMAT:
buffer.format = b"B"
else:
buffer.format = NULL
buffer.ndim = 1
if (flags & PyBUF_ND) == PyBUF_ND:
buffer.shape = &buffer.len
else:
buffer.shape = NULL
buffer.strides = NULL
buffer.suboffsets = NULL
buffer.internal = NULL
# Helper function to process ucs return codes. Returns True if the status is UCS_OK to
# indicate the operation completed inline, and False if UCX is still holding user
# resources. Raises an error if the return code is an error.
cdef bint assert_ucs_status(ucs_status_t status, str msg_context=None) except *:
cdef str msg, ucs_status
if status == UCS_OK:
return True
if status == UCS_INPROGRESS:
return False
# If the status is not OK or INPROGRESS it is an error
ucs_status = ucs_status_string(status).decode("utf-8")
if msg_context is not None:
msg = f"[{msg_context}] {ucs_status}"
else:
msg = ucs_status
raise UCXError(msg)
cdef ucp_config_t * _read_ucx_config(dict user_options) except *:
"""
Reads the UCX config and returns a config handle,
which should freed using `ucp_config_release()`.
"""
cdef ucp_config_t *config
cdef ucs_status_t status
cdef str status_msg
status = ucp_config_read(NULL, NULL, &config)
if status != UCS_OK:
status_msg = ucs_status_string(status).decode("utf-8")
raise UCXConfigError(f"Couldn't read the UCX options: {status_msg}")
# Modify the UCX configuration options based on `config_dict`
cdef str k, v
cdef bytes kb, vb
try:
for k, v in user_options.items():
kb = k.encode()
vb = v.encode()
status = ucp_config_modify(config, <const char*>kb, <const char*>vb)
if status == UCS_ERR_NO_ELEM:
raise UCXConfigError(f"Option {k} doesn't exist")
elif status != UCS_OK:
status_msg = ucs_status_string(status).decode("utf-8")
raise UCXConfigError(
f"Couldn't set option {k} to {v}: {status_msg}"
)
except Exception:
ucp_config_release(config)
raise
return config
cdef dict ucx_config_to_dict(ucp_config_t *config):
"""Returns a dict of a UCX config"""
cdef unicode py_text, line, k, v
cdef dict ret = {}
cdef FILE *text_fd = create_text_fd()
ucp_config_print(config, text_fd, NULL, UCS_CONFIG_PRINT_CONFIG)
py_text = decode_text_fd(text_fd)
for line in py_text.splitlines():
k, v = line.split("=")
k = k[4:] # Strip "UCX_" prefix
ret[k] = v
return ret
def get_current_options():
"""
Returns the current UCX options
if UCX were to be initialized now.
"""
cdef ucp_config_t *config = _read_ucx_config({})
try:
return ucx_config_to_dict(config)
finally:
ucp_config_release(config)
def get_ucx_version():
cdef unsigned int a, b, c
ucp_get_version(&a, &b, &c)
return (a, b, c)
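# --- Usage sketch (added for illustration, not part of the original file) ---
# Inspect the UCX library version and the effective configuration without
# initializing a context.
def _example_inspect_ucx():
    version = get_ucx_version()        # e.g. (1, 11, 0)
    options = get_current_options()    # dict such as {"TLS": "all", ...}
    return version, options.get("TLS")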
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/arr.pyi
|
from typing import Tuple
class Array:
def __init__(self, obj: object): ...
@property
def c_contiguous(self) -> bool: ...
@property
def f_contiguous(self) -> bool: ...
@property
def contiguous(self) -> bool: ...
@property
def nbytes(self) -> int: ...
@property
def shape(self) -> Tuple[int, ...]: ...
@property
def strides(self) -> Tuple[int, ...]: ...
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/ucx_context.pyx
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2021 UT-Battelle, LLC. All rights reserved.
# See file LICENSE for terms.
# cython: language_level=3
import functools
import logging
from libc.stdint cimport uintptr_t
from libc.stdio cimport FILE
from libc.string cimport memset
from .ucx_api_dep cimport *
logger = logging.getLogger("ucx")
def _ucx_context_handle_finalizer(uintptr_t handle):
ucp_cleanup(<ucp_context_h> handle)
cdef class UCXContext(UCXObject):
"""Python representation of `ucp_context_h`
Parameters
----------
config_dict: Mapping[str, str]
UCX options such as "MEMTYPE_CACHE=n" and "SEG_SIZE=3M"
feature_flags: Iterable[Feature]
Tuple of UCX feature flags
"""
cdef:
ucp_context_h _handle
dict _config
tuple _feature_flags
readonly bint cuda_support
def __init__(
self,
config_dict={},
feature_flags=(
Feature.TAG,
Feature.WAKEUP,
Feature.STREAM,
Feature.AM,
Feature.RMA
)
):
cdef ucp_params_t ucp_params
cdef ucp_worker_params_t worker_params
cdef ucs_status_t status
self._feature_flags = tuple(feature_flags)
memset(&ucp_params, 0, sizeof(ucp_params))
ucp_params.field_mask = (
UCP_PARAM_FIELD_FEATURES |
UCP_PARAM_FIELD_REQUEST_SIZE |
UCP_PARAM_FIELD_REQUEST_INIT
)
ucp_params.features = functools.reduce(
lambda x, y: x | y.value, feature_flags, 0
)
ucp_params.request_size = sizeof(ucx_py_request)
ucp_params.request_init = (
<ucp_request_init_callback_t>ucx_py_request_reset
)
cdef ucp_config_t *config = _read_ucx_config(config_dict)
try:
status = ucp_init(&ucp_params, config, &self._handle)
assert_ucs_status(status)
self._config = ucx_config_to_dict(config)
finally:
ucp_config_release(config)
# UCX supports CUDA if "cuda" is part of the TLS or TLS is "all"
cdef str tls = self._config["TLS"]
cuda_transports = {"cuda", "cuda_copy"}
if tls.startswith("^"):
# UCX_TLS=^x,y,z means "all \ {x, y, z}"
disabled = set(tls[1:].split(","))
self.cuda_support = not (disabled & cuda_transports)
else:
enabled = set(tls.split(","))
self.cuda_support = bool(
enabled & ({"all", "cuda_ipc"} | cuda_transports)
)
self.add_handle_finalizer(
_ucx_context_handle_finalizer,
int(<uintptr_t>self._handle)
)
logger.info("UCP initiated using config: ")
cdef str k, v
for k, v in self._config.items():
logger.info(f" {k}: {v}")
cpdef dict get_config(self):
return self._config
@property
def handle(self):
assert self.initialized
return int(<uintptr_t>self._handle)
def info(self):
assert self.initialized
cdef FILE *text_fd = create_text_fd()
ucp_context_print_info(self._handle, text_fd)
return decode_text_fd(text_fd)
def map(self, mem):
return UCXMemoryHandle.map(self, mem)
def alloc(self, size):
return UCXMemoryHandle.alloc(self, size)
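# --- Usage sketch (added for illustration, not part of the original file) ---
# Create a context restricted to TCP with only the TAG feature; with this
# TLS setting the CUDA-support detection above reports False.
def _example_context():
    ctx = UCXContext(
        config_dict={"TLS": "tcp"},
        feature_flags=(Feature.TAG,),
    )
    assert not ctx.cuda_support
    return ctx.get_config()["TLS"]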
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/ucxio.pyx
|
# Copyright (c) 2021, UT-Battelle, LLC. All rights reserved.
# See file LICENSE for terms.
from io import SEEK_CUR, SEEK_END, SEEK_SET, RawIOBase
from .arr cimport Array
from .ucx_api_dep cimport *
def blocking_handler(request, exception, finished):
assert exception is None
finished[0] = True
class UCXIO(RawIOBase):
"""A class to simulate python streams backed by UCX RMA operations
Parameters
----------
dest: int
A 64 bit number that represents the remote address that will be written to
and read from.
length: int
Maximum length of the region that can be written to and read from.
rkey: UCXRkey
An unpacked UCXRkey that represents the remote memory that was unpacked by
UCX for use in RMA operations.
"""
def __init__(self, dest, length, rkey):
self.pos = 0
self.remote_addr = dest
self.length = length
self.rkey = rkey
self.cb_finished = [False]
def block_on_request(self, req):
if req is not None:
while not self.cb_finished[0]:
self.rkey.ep.worker.progress()
self.cb_finished[0] = False
def flush(self):
req = self.rkey.ep.flush(blocking_handler, cb_args=(self.cb_finished,))
self.block_on_request(req)
def seek(self, pos, whence=SEEK_SET):
if whence == SEEK_SET:
self.pos = min(max(pos, 0), self.length)
elif whence == SEEK_CUR:
if pos < 0:
self.pos = max(self.pos + pos, 0)
else:
self.pos = min(self.pos + pos, self.length)
elif whence == SEEK_END:
self.pos = min(max(self.pos + pos, 0), self.length)
else:
raise ValueError("Invalid argument")
return self.pos
def _do_rma(self, op, buff):
data = Array(buff)
size = data.nbytes
if self.pos + size > self.length:
size = self.length - self.pos
finished = op(data, size, self.remote_addr + self.pos, self.rkey)
self.pos += size
if not finished:
self.flush()
return size
def readinto(self, buff):
return self._do_rma(get_nbi, buff)
def write(self, buff):
return self._do_rma(put_nbi, buff)
def seekable(self):
return True
def writable(self):
return True
def readable(self):
return True
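# --- Usage sketch (added for illustration, not part of the original file) ---
# Treat a peer's registered memory as a seekable stream. `dest`, `length`
# and `rkey` are assumed to come from an rkey exchange: the peer packs a
# UCXMemoryHandle and this side unpacks it on an endpoint.
def _example_remote_stream(dest, length, rkey):
    uio = UCXIO(dest, length, rkey)
    uio.write(b"abc")       # RMA put at offset 0
    uio.seek(0)
    out = bytearray(3)
    uio.readinto(out)       # RMA get of the bytes just written
    return bytes(out)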
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/ucx_endpoint.pyx
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2020-2021, UT-Battelle, LLC. All rights reserved.
# See file LICENSE for terms.
# cython: language_level=3
import logging
import warnings
from libc.stdint cimport uintptr_t
from libc.stdio cimport FILE
from .exceptions import UCXCanceled, UCXConnectionReset, UCXError
from .ucx_api_dep cimport *
logger = logging.getLogger("ucx")
cdef bint _is_am_enabled(UCXWorker worker):
return Feature.AM in worker._context._feature_flags
cdef size_t _cancel_inflight_msgs(UCXWorker worker, set inflight_msgs=None):
cdef UCXRequest req
cdef dict req_info
cdef str name
cdef size_t len_inflight_msgs
if inflight_msgs is None:
inflight_msgs = worker._inflight_msgs_to_cancel["tag"]
len_inflight_msgs = len(inflight_msgs)
for req in list(inflight_msgs):
if not req.closed():
req_info = <dict>req._handle.info
name = req_info["name"]
logger.debug("Future cancelling: %s" % name)
# Note that `request_cancel()` invokes the send/recv callback functions
worker.request_cancel(req)
inflight_msgs.clear()
return len_inflight_msgs
cdef size_t _cancel_am_recv_single(UCXWorker worker, uintptr_t handle_as_int):
cdef dict recv_wait
cdef size_t len_wait = 0
if _is_am_enabled(worker) and handle_as_int in worker._am_recv_wait:
len_wait = len(worker._am_recv_wait[handle_as_int])
while len(worker._am_recv_wait[handle_as_int]) > 0:
recv_wait = worker._am_recv_wait[handle_as_int].pop(0)
cb_func = recv_wait["cb_func"]
cb_args = recv_wait["cb_args"]
cb_kwargs = recv_wait["cb_kwargs"]
logger.debug("Cancelling am_recv wait on ep %s" % hex(int(handle_as_int)))
cb_func(
None,
UCXCanceled("While waiting for am_recv the endpoint was closed"),
*cb_args,
**cb_kwargs
)
del worker._am_recv_wait[handle_as_int]
return len_wait
cdef size_t _cancel_am_recv(UCXWorker worker, uintptr_t handle_as_int=0):
cdef size_t len_wait = 0
if _is_am_enabled(worker):
if handle_as_int == 0:
for handle_as_int in worker._inflight_msgs_to_cancel["am"]:
len_wait += _cancel_am_recv_single(worker, handle_as_int)
# Prevent endpoint canceling AM messages multiple times. This is important
# because UCX may reuse the same endpoint handle, and if a message is
# canceled during the endpoint finalizer, a message received on the same
# (new) endpoint handle may be canceled incorrectly.
worker._inflight_msgs_to_cancel["am"].clear()
else:
len_wait = _cancel_am_recv_single(worker, handle_as_int)
worker._inflight_msgs_to_cancel["am"].discard(handle_as_int)
return len_wait
class UCXEndpointCloseCallback():
def __init__(self):
self._cb_func = None
def run(self):
if self._cb_func is not None:
# Deregister callback to prevent calling from the endpoint error
# callback and again from the finalizer.
cb_func, self._cb_func = self._cb_func, None
cb_func()
def set(self, cb_func):
self._cb_func = cb_func
cdef void _err_cb(void *arg, ucp_ep_h ep, ucs_status_t status) with gil:
cdef UCXEndpoint ucx_ep = <UCXEndpoint> arg
cdef UCXWorker ucx_worker = ucx_ep.worker
cdef set inflight_msgs = ucx_ep._inflight_msgs
assert ucx_worker.initialized
cdef ucs_status_t *ep_status = <ucs_status_t *> <uintptr_t>ucx_ep._status
ep_status[0] = status
cdef str status_str = ucs_status_string(status).decode("utf-8")
cdef str msg = (
"Error callback for endpoint %s called with status %d: %s" % (
hex(int(<uintptr_t>ep)), status, status_str
)
)
ucx_ep._endpoint_close_callback.run()
logger.debug(msg)
# Schedule inflight messages to be canceled after all UCP progress is
# complete. This may happen if the user called ep.recv() or ep.am_recv()
# but the remote worker errored before sending the message.
ucx_worker._inflight_msgs_to_cancel["tag"].update(inflight_msgs)
if _is_am_enabled(ucx_worker):
ucx_worker._inflight_msgs_to_cancel["am"].add(<uintptr_t>ep)
cdef (ucp_err_handler_cb_t, uintptr_t) _get_error_callback(
str tls, bint endpoint_error_handling
) with gil:
cdef ucp_err_handler_cb_t err_cb = <ucp_err_handler_cb_t>NULL
cdef ucs_status_t *cb_status = <ucs_status_t *>NULL
if endpoint_error_handling:
err_cb = <ucp_err_handler_cb_t>_err_cb
cb_status = <ucs_status_t *> malloc(sizeof(ucs_status_t))
cb_status[0] = UCS_OK
return (err_cb, <uintptr_t> cb_status)
def _ucx_endpoint_finalizer(
uintptr_t handle_as_int,
uintptr_t status_handle_as_int,
bint endpoint_error_handling,
UCXWorker worker,
set inflight_msgs,
object endpoint_close_callback,
):
assert worker.initialized
cdef ucp_ep_h handle = <ucp_ep_h>handle_as_int
cdef ucs_status_ptr_t status
cdef ucs_status_t ep_status
if <void *>status_handle_as_int == NULL:
ep_status = UCS_OK
else:
ep_status = (<ucs_status_t *>status_handle_as_int)[0]
free(<void *>status_handle_as_int)
# Cancel all inflight messages
_cancel_inflight_msgs(worker, inflight_msgs)
# Cancel waiting `am_recv` calls
_cancel_am_recv(worker, handle_as_int=handle_as_int)
# Close the endpoint
cdef str msg
cdef unsigned close_mode = UCP_EP_CLOSE_MODE_FLUSH
if (endpoint_error_handling and <void *>ep_status != NULL and ep_status != UCS_OK):
# We force close endpoint if endpoint error handling is enabled and
# the endpoint status is not UCS_OK
close_mode = UCP_EP_CLOSE_MODE_FORCE
status = ucp_ep_close_nb(handle, close_mode)
if UCS_PTR_IS_PTR(status):
while ucp_request_check_status(status) == UCS_INPROGRESS:
worker.progress()
ucp_request_free(status)
elif UCS_PTR_STATUS(status) != UCS_OK:
msg = ucs_status_string(UCS_PTR_STATUS(status)).decode("utf-8")
raise UCXError("Error while closing endpoint: %s" % msg)
endpoint_close_callback.run()
cdef class UCXEndpoint(UCXObject):
"""Python representation of `ucp_ep_h`"""
cdef:
ucp_ep_h _handle
uintptr_t _status
bint _endpoint_error_handling
set _inflight_msgs
object _endpoint_close_callback
cdef readonly:
UCXWorker worker
def __init__(
self,
UCXWorker worker,
uintptr_t params_as_int,
bint endpoint_error_handling
):
"""The Constructor"""
assert worker.initialized
self.worker = worker
self._inflight_msgs = set()
self._endpoint_close_callback = UCXEndpointCloseCallback()
cdef ucp_err_handler_cb_t err_cb
cdef uintptr_t ep_status
err_cb, ep_status = (
_get_error_callback(worker._context._config["TLS"], endpoint_error_handling)
)
cdef ucp_ep_params_t *params = <ucp_ep_params_t *>params_as_int
if err_cb == NULL:
params.err_mode = UCP_ERR_HANDLING_MODE_NONE
else:
params.err_mode = UCP_ERR_HANDLING_MODE_PEER
params.err_handler.cb = err_cb
params.err_handler.arg = <void *>self
cdef ucp_ep_h ucp_ep
cdef ucs_status_t status = ucp_ep_create(worker._handle, params, &ucp_ep)
assert_ucs_status(status)
self._handle = ucp_ep
self._status = <uintptr_t>ep_status
self._endpoint_error_handling = endpoint_error_handling
self.add_handle_finalizer(
_ucx_endpoint_finalizer,
int(<uintptr_t>ucp_ep),
int(<uintptr_t>ep_status),
endpoint_error_handling,
worker,
self._inflight_msgs,
self._endpoint_close_callback,
)
worker.add_child(self)
@classmethod
def create(
cls,
UCXWorker worker,
str ip_address,
uint16_t port,
bint endpoint_error_handling
):
assert worker.initialized
cdef ucp_ep_params_t *params = (
<ucp_ep_params_t *>malloc(sizeof(ucp_ep_params_t))
)
ip_address = socket.gethostbyname(ip_address)
params.field_mask = (
UCP_EP_PARAM_FIELD_FLAGS |
UCP_EP_PARAM_FIELD_SOCK_ADDR |
UCP_EP_PARAM_FIELD_ERR_HANDLING_MODE |
UCP_EP_PARAM_FIELD_ERR_HANDLER
)
params.flags = UCP_EP_PARAMS_FLAGS_CLIENT_SERVER
if c_util_set_sockaddr(¶ms.sockaddr, ip_address.encode(), port):
raise MemoryError("Failed allocation of sockaddr")
try:
return cls(worker, <uintptr_t>params, endpoint_error_handling)
finally:
c_util_sockaddr_free(¶ms.sockaddr)
free(<void *>params)
@classmethod
def create_from_worker_address(
cls, UCXWorker worker, UCXAddress address, bint endpoint_error_handling
):
assert worker.initialized
cdef ucp_ep_params_t *params = (
<ucp_ep_params_t *>malloc(sizeof(ucp_ep_params_t))
)
params.field_mask = (
UCP_EP_PARAM_FIELD_REMOTE_ADDRESS |
UCP_EP_PARAM_FIELD_ERR_HANDLING_MODE |
UCP_EP_PARAM_FIELD_ERR_HANDLER
)
params.address = address._address
try:
return cls(worker, <uintptr_t>params, endpoint_error_handling)
finally:
free(<void *>params)
@classmethod
def create_from_conn_request(
cls, UCXWorker worker, uintptr_t conn_request, bint endpoint_error_handling
):
assert worker.initialized
cdef ucp_ep_params_t *params = (
<ucp_ep_params_t *>malloc(sizeof(ucp_ep_params_t))
)
params.field_mask = (
UCP_EP_PARAM_FIELD_FLAGS |
UCP_EP_PARAM_FIELD_CONN_REQUEST |
UCP_EP_PARAM_FIELD_ERR_HANDLING_MODE |
UCP_EP_PARAM_FIELD_ERR_HANDLER
)
params.flags = UCP_EP_PARAMS_FLAGS_NO_LOOPBACK
params.conn_request = <ucp_conn_request_h> conn_request
try:
return cls(worker, <uintptr_t>params, endpoint_error_handling)
finally:
free(<void *>params)
def info(self):
assert self.initialized
cdef FILE *text_fd = create_text_fd()
ucp_ep_print_info(self._handle, text_fd)
return decode_text_fd(text_fd)
def _get_status_and_str(self):
cdef ucs_status_t *_status = <ucs_status_t *>self._status
cdef str status_str = ucs_status_string(_status[0]).decode("utf-8")
status = int(_status[0])
return (status, str(status_str))
def is_alive(self):
if not self._endpoint_error_handling:
return True
status, _ = self._get_status_and_str()
return status == UCS_OK
def raise_on_error(self):
if not self._endpoint_error_handling:
return
status, status_str = self._get_status_and_str()
if status == UCS_OK:
return
ep_str = str(hex(int(<uintptr_t>self._handle)))
error_msg = f"Endpoint {ep_str} error: {status_str}"
if status == UCS_ERR_CONNECTION_RESET:
raise UCXConnectionReset(error_msg)
else:
raise UCXError(error_msg)
@property
def handle(self):
assert self.initialized
return int(<uintptr_t>self._handle)
def flush(self, cb_func, tuple cb_args=None, dict cb_kwargs=None):
if cb_args is None:
cb_args = ()
if cb_kwargs is None:
cb_kwargs = {}
cdef ucs_status_ptr_t req
cdef ucp_send_callback_t _send_cb = <ucp_send_callback_t>_send_callback
cdef ucs_status_ptr_t status = ucp_ep_flush_nb(self._handle, 0, _send_cb)
return _handle_status(
status, 0, cb_func, cb_args, cb_kwargs, u'flush', self._inflight_msgs
)
def unpack_rkey(self, rkey):
return UCXRkey(self, rkey)
def set_close_callback(self, cb_func):
self._endpoint_close_callback.set(cb_func)
def am_probe(self):
return self.handle in self.worker._am_recv_pool
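# --- Usage sketch (added for illustration, not part of the original file) ---
# Connect to a remote listener with error handling enabled. `worker` is a
# UCXWorker created elsewhere; host/port identify a peer's UCXListener.
def _example_connect(worker, host, port):
    ep = UCXEndpoint.create(worker, host, port, endpoint_error_handling=True)
    ep.set_close_callback(lambda: None)  # hypothetical cleanup hook
    ep.raise_on_error()                  # raises if the peer already failed
    return ep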
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/transfer_tag.pyx
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved.
# See file LICENSE for terms.
# cython: language_level=3
from libc.stdint cimport uintptr_t
from .arr cimport Array
from .exceptions import UCXCanceled, UCXError, UCXMsgTruncated, log_errors
from .ucx_api_dep cimport *
def tag_send_nb(
UCXEndpoint ep,
Array buffer,
size_t nbytes,
ucp_tag_t tag,
cb_func,
tuple cb_args=None,
dict cb_kwargs=None,
str name=None
):
""" This routine sends a message to a destination endpoint
Each message is associated with a tag value that is used for message matching
on the receiver. The routine is non-blocking and therefore returns immediately,
however the actual send operation may be delayed. The send operation is
considered completed when it is safe to reuse the source buffer. If the send
operation is completed immediately the routine return None and the call-back
function **is not invoked**. If the operation is not completed immediately
and no exception raised then the UCP library will schedule to invoke the call-back
whenever the send operation will be completed. In other words, the completion
of a message can be signaled by the return code or the call-back.
Note
----
The user should not modify any part of the buffer after this operation is called,
until the operation completes.
Parameters
----------
ep: UCXEndpoint
The destination endpoint
buffer: Array
An ``Array`` wrapping a user-provided array-like object
nbytes: int
Size of the buffer to use. Must be less than or equal to the size of buffer
tag: int
The tag of the message
cb_func: callable
The call-back function, which must accept `request` and `exception` as the
first two arguments.
cb_args: tuple, optional
Extra arguments to the call-back function
cb_kwargs: dict, optional
Extra keyword arguments to the call-back function
name: str, optional
Descriptive name of the operation
"""
ep.raise_on_error()
if cb_args is None:
cb_args = ()
if cb_kwargs is None:
cb_kwargs = {}
if name is None:
name = u"tag_send_nb"
if Feature.TAG not in ep.worker._context._feature_flags:
raise ValueError("UCXContext must be created with `Feature.TAG`")
if buffer.cuda and not ep.worker._context.cuda_support:
raise ValueError(
"UCX is not configured with CUDA support, please add "
"`cuda_copy` and/or `cuda_ipc` to the `UCX_TLS` environment"
"variable if you're manually setting a different value. If you"
"are building UCX from source, please see "
"https://ucx-py.readthedocs.io/en/latest/install.html for "
"more information."
)
if not buffer._contiguous():
raise ValueError("Array must be C or F contiguous")
cdef ucp_send_callback_t _send_cb = <ucp_send_callback_t>_send_callback
cdef ucs_status_ptr_t status = ucp_tag_send_nb(
ep._handle,
<void*>buffer.ptr,
nbytes,
ucp_dt_make_contig(1),
tag,
_send_cb
)
return _handle_status(
status, nbytes, cb_func, cb_args, cb_kwargs, name, ep._inflight_msgs
)
cdef void _tag_recv_callback(
void *request, ucs_status_t status, ucp_tag_recv_info_t *info
) with gil:
cdef UCXRequest req
cdef dict req_info
cdef str name, ucx_status_msg, msg
cdef set inflight_msgs
cdef tuple cb_args
cdef dict cb_kwargs
with log_errors():
req = UCXRequest(<uintptr_t><void*> request)
assert not req.closed()
req_info = <dict>req._handle.info
req_info["status"] = "finished"
if "cb_func" not in req_info:
# This callback function was called before ucp_tag_recv_nb() returned
return
exception = None
if status == UCS_ERR_CANCELED:
name = req_info["name"]
msg = "<%s>: " % name
exception = UCXCanceled(msg)
elif status != UCS_OK:
name = req_info["name"]
ucx_status_msg = ucs_status_string(status).decode("utf-8")
msg = "<%s>: %s" % (name, ucx_status_msg)
exception = UCXError(msg)
elif info.length != <size_t>req_info["expected_receive"]:
name = req_info["name"]
msg = "<%s>: length mismatch: %d (got) != %d (expected)" % (
name, info.length, req_info["expected_receive"]
)
exception = UCXMsgTruncated(msg)
try:
inflight_msgs = req_info["inflight_msgs"]
inflight_msgs.discard(req)
cb_func = req_info["cb_func"]
if cb_func is not None:
cb_args = req_info["cb_args"]
if cb_args is None:
cb_args = ()
cb_kwargs = req_info["cb_kwargs"]
if cb_kwargs is None:
cb_kwargs = {}
cb_func(req, exception, *cb_args, **cb_kwargs)
finally:
req.close()
def tag_recv_nb(
UCXWorker worker,
Array buffer,
size_t nbytes,
ucp_tag_t tag,
cb_func,
ucp_tag_t tag_mask=-1,
tuple cb_args=None,
dict cb_kwargs=None,
str name=None,
UCXEndpoint ep=None
):
""" This routine receives a message on a worker
The tag value of the receive message has to match the tag and tag_mask values,
where the tag_mask indicates what bits of the tag have to be matched.
The routine is non-blocking and therefore returns immediately. The receive
operation is considered completed when the message is delivered to the buffer.
In order to notify the application about completion of the receive operation
the UCP library will invoke the call-back function when the received message
is in the receive buffer and ready for application access. If the receive
operation cannot be started the routine raises an exception.
Note
----
This routine cannot return None. It always returns a request handle or raises an
exception.
Parameters
----------
worker: UCXWorker
The worker that is used for the receive operation
buffer: Array
An ``Array`` wrapping a user-provided array-like object
nbytes: int
Size of the buffer to use. Must be less than or equal to the size of buffer
tag: int
Message tag to expect
cb_func: callable
The call-back function, which must accept `request` and `exception` as the
first two arguments.
tag_mask: int, optional
Bit mask that indicates the bits that are used for the matching of the
incoming tag against the expected tag.
cb_args: tuple, optional
Extra arguments to the call-back function
cb_kwargs: dict, optional
Extra keyword arguments to the call-back function
name: str, optional
Descriptive name of the operation
ep: UCXEndpoint, optional
Register the inflight message with `ep` instead of `worker`, which
guarantees that the message is canceled when `ep` closes as opposed to
when the `worker` closes.
"""
if cb_args is None:
cb_args = ()
if cb_kwargs is None:
cb_kwargs = {}
if name is None:
name = u"tag_recv_nb"
if buffer.readonly:
raise ValueError("writing to readonly buffer!")
if Feature.TAG not in worker._context._feature_flags:
raise ValueError("UCXContext must be created with `Feature.TAG`")
cdef bint cuda_support
if buffer.cuda:
if ep is None:
cuda_support = <bint>worker._context.cuda_support
else:
cuda_support = <bint>ep.worker._context.cuda_support
if not cuda_support:
raise ValueError(
"UCX is not configured with CUDA support, please add "
"`cuda_copy` and/or `cuda_ipc` to the `UCX_TLS` environment"
"variable if you're manually setting a different value. If you"
"are building UCX from source, please see "
"https://ucx-py.readthedocs.io/en/latest/install.html for "
"more information."
)
if not buffer._contiguous():
raise ValueError("Array must be C or F contiguous")
cdef ucp_tag_recv_callback_t _tag_recv_cb = (
<ucp_tag_recv_callback_t>_tag_recv_callback
)
cdef ucs_status_ptr_t status = ucp_tag_recv_nb(
worker._handle,
<void*>buffer.ptr,
nbytes,
ucp_dt_make_contig(1),
tag,
tag_mask,
_tag_recv_cb
)
cdef set inflight_msgs = (
worker._inflight_msgs if ep is None else ep._inflight_msgs
)
return _handle_status(
status, nbytes, cb_func, cb_args, cb_kwargs, name, inflight_msgs
)
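# --- Usage sketch (added for illustration, not part of the original file) ---
# Loopback transfer: post a receive on `worker`, send over `ep`, and drive
# progress until both callbacks fire. Assumes `ep` was created from
# `worker`'s own address (so the same worker serves both sides) and that
# `send_buf`/`recv_buf` are equally sized Array-wrapped host buffers.
def _example_loopback_transfer(worker, ep, send_buf, recv_buf):
    outcomes = []
    def cb(request, exception):
        outcomes.append(exception)
    tag_recv_nb(worker, recv_buf, recv_buf.nbytes, 7, cb, ep=ep)
    if tag_send_nb(ep, send_buf, send_buf.nbytes, 7, cb) is None:
        outcomes.append(None)  # send completed inline; callback is skipped
    while len(outcomes) < 2:
        worker.progress()
    assert all(exc is None for exc in outcomes)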
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/__init__.pxd
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
# cython: language_level=3
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/packed_remote_key.pyx
|
# Copyright (c) 2021 UT-Battelle, LLC. All rights reserved.
# See file LICENSE for terms.
# cython: language_level=3
from libc.stdint cimport uintptr_t
from libc.stdlib cimport free
from libc.string cimport memcpy
from .arr cimport Array
from .ucx_api_dep cimport *
cdef class PackedRemoteKey:
""" A packed remote key. This key is suitable for sending to remote nodes to setup
remote access to local memory. Users should not instance this class directly and
should use the from_buffer() and from_mem_handle() class methods or the
pack_rkey() method on the UCXMemoryHandle class
"""
cdef void *_key
cdef Py_ssize_t _length
def __cinit__(self, uintptr_t packed_key_as_int, Py_ssize_t length):
key = <void *> packed_key_as_int
self._key = malloc(length)
self._length = length
memcpy(self._key, key, length)
@classmethod
def from_buffer(cls, buffer):
""" Wrap a received buffer in a PackedRemoteKey to turn magic buffers into a
python class suitable for unpacking on an EP
Parameters
----------
buffer:
Python buffer to be wrapped
"""
buf = Array(buffer)
assert buf.c_contiguous
return PackedRemoteKey(buf.ptr, buf.nbytes)
@classmethod
def from_mem_handle(cls, UCXMemoryHandle mem):
""" Create a new packed remote key from a given UCXMemoryHandle class
Parameters
----------
mem: UCXMemoryHandle
The memory handle to be packed in an rkey for sending
"""
cdef void *key
cdef size_t len
cdef ucs_status_t status
status = ucp_rkey_pack(mem._context._handle, mem._mem_handle, &key, &len)
# Check the status before touching `key`; on failure the buffer is invalid
assert_ucs_status(status)
packed_key = PackedRemoteKey(<uintptr_t>key, len)
ucp_rkey_buffer_release(key)
return packed_key
def __dealloc__(self):
free(self._key)
@property
def key(self):
return int(<uintptr_t><void*>self._key)
@property
def length(self):
return int(self._length)
def __getbuffer__(self, Py_buffer *buffer, int flags):
get_ucx_object(buffer, flags, <void*>self._key, self._length, self)
def __releasebuffer__(self, Py_buffer *buffer):
pass
def __reduce__(self):
return (PackedRemoteKey.from_buffer, (bytes(self),))
def __hash__(self):
return hash(bytes(self))
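# --- Usage sketch (added for illustration, not part of the original file) ---
# Round trip of an rkey: the memory owner packs it, ships the bytes (e.g.
# over a tag send), and the peer rebuilds and unpacks it on its endpoint.
# `mem` is a UCXMemoryHandle on the owner; `ep` is the peer's UCXEndpoint.
def _example_rkey_exchange(mem, ep):
    wire = bytes(PackedRemoteKey.from_mem_handle(mem))  # goes over the wire
    received = PackedRemoteKey.from_buffer(wire)        # peer side
    return ep.unpack_rkey(received)                     # UCXRkey for RMA calls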
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/ucx_rkey.pyx
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2021, UT-Battelle, LLC. All rights reserved.
# See file LICENSE for terms.
# cython: language_level=3
import logging
from libc.stdint cimport uintptr_t
from .arr cimport Array
from .ucx_api_dep cimport *
logger = logging.getLogger("ucx")
def _ucx_remote_mem_finalizer_post_flush(req, exception, UCXRkey rkey):
if exception is not None:
logger.debug("Remote memory finalizer exception: %s" % str(exception))
ucp_rkey_destroy(rkey._handle)
def _ucx_rkey_finalizer(UCXRkey rkey, UCXEndpoint ep):
req = ep.flush(_ucx_remote_mem_finalizer_post_flush, (rkey,))
# Flush completed immediately and callback wasn't called
if req is None:
ucp_rkey_destroy(rkey._handle)
cdef class UCXRkey(UCXObject):
cdef ucp_rkey_h _handle
cdef UCXEndpoint ep
def __init__(self, UCXEndpoint ep, PackedRemoteKey rkey):
cdef ucs_status_t status
rkey_arr = Array(rkey)
cdef const void *key_data = <const void *><const uintptr_t>rkey_arr.ptr
status = ucp_ep_rkey_unpack(ep._handle, key_data, &self._handle)
assert_ucs_status(status)
self.ep = ep
self.add_handle_finalizer(
_ucx_rkey_finalizer,
self,
ep
)
ep.add_child(self)
@property
def ep(self):
return self.ep
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/arr.pyx
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
# cython: language_level=3
from cpython.array cimport array, newarrayobject
from cpython.buffer cimport PyBuffer_IsContiguous
from cpython.memoryview cimport (
PyMemoryView_FromObject,
PyMemoryView_GET_BUFFER,
)
from cpython.object cimport PyObject
from cpython.ref cimport Py_INCREF
from cpython.tuple cimport PyTuple_New, PyTuple_SET_ITEM
from cython cimport (
auto_pickle,
boundscheck,
initializedcheck,
nonecheck,
wraparound,
)
from libc.stdint cimport uintptr_t
from libc.string cimport memcpy
try:
from numpy import dtype as numpy_dtype
except ImportError:
numpy_dtype = None
cdef dict itemsize_mapping = {
intern("|b1"): 1,
intern("|i1"): 1,
intern("|u1"): 1,
intern("<i2"): 2,
intern(">i2"): 2,
intern("<u2"): 2,
intern(">u2"): 2,
intern("<i4"): 4,
intern(">i4"): 4,
intern("<u4"): 4,
intern(">u4"): 4,
intern("<i8"): 8,
intern(">i8"): 8,
intern("<u8"): 8,
intern(">u8"): 8,
intern("<f2"): 2,
intern(">f2"): 2,
intern("<f4"): 4,
intern(">f4"): 4,
intern("<f8"): 8,
intern(">f8"): 8,
intern("<f16"): 16,
intern(">f16"): 16,
intern("<c8"): 8,
intern(">c8"): 8,
intern("<c16"): 16,
intern(">c16"): 16,
intern("<c32"): 32,
intern(">c32"): 32,
}
cdef array array_Py_ssize_t = array("q")
cdef inline Py_ssize_t[::1] new_Py_ssize_t_array(Py_ssize_t n):
return newarrayobject(
(<PyObject*>array_Py_ssize_t).ob_type, n, array_Py_ssize_t.ob_descr
)
@auto_pickle(False)
cdef class Array:
""" An efficient wrapper for host and device array-like objects
Parameters
----------
obj: Object exposing the buffer protocol or __cuda_array_interface__
A host and device array-like object
"""
def __cinit__(self, obj):
cdef dict iface = getattr(obj, "__cuda_array_interface__", None)
self.cuda = (iface is not None)
cdef const Py_buffer* pybuf
cdef str typestr
cdef tuple data, shape, strides
cdef Py_ssize_t i
if self.cuda:
if iface.get("mask") is not None:
raise NotImplementedError("mask attribute not supported")
self.obj = obj
data = iface["data"]
self.ptr, self.readonly = data
typestr = iface["typestr"]
if typestr is None:
raise ValueError("Expected `str`, but got `None`")
elif typestr == "":
raise ValueError("Got unexpected empty `str`")
else:
try:
self.itemsize = itemsize_mapping[typestr]
except KeyError:
if numpy_dtype is not None:
self.itemsize = numpy_dtype(typestr).itemsize
else:
raise ValueError(
f"Unexpected data type, '{typestr}'."
" Please install NumPy to handle this format."
)
shape = iface["shape"]
strides = iface.get("strides")
self.ndim = len(shape)
if self.ndim > 0:
self.shape_mv = new_Py_ssize_t_array(self.ndim)
for i in range(self.ndim):
self.shape_mv[i] = shape[i]
if strides is not None:
if len(strides) != self.ndim:
raise ValueError(
"The length of shape and strides must be equal"
)
self.strides_mv = new_Py_ssize_t_array(self.ndim)
for i in range(self.ndim):
self.strides_mv[i] = strides[i]
else:
self.strides_mv = None
else:
self.shape_mv = None
self.strides_mv = None
else:
mv = PyMemoryView_FromObject(obj)
pybuf = PyMemoryView_GET_BUFFER(mv)
if pybuf.suboffsets != NULL:
raise NotImplementedError("Suboffsets are not supported")
self.ptr = <uintptr_t>pybuf.buf
self.obj = pybuf.obj
self.readonly = <bint>pybuf.readonly
self.ndim = <Py_ssize_t>pybuf.ndim
self.itemsize = <Py_ssize_t>pybuf.itemsize
if self.ndim > 0:
self.shape_mv = new_Py_ssize_t_array(self.ndim)
memcpy(
&self.shape_mv[0],
pybuf.shape,
self.ndim * sizeof(Py_ssize_t)
)
if not PyBuffer_IsContiguous(pybuf, b"C"):
self.strides_mv = new_Py_ssize_t_array(self.ndim)
memcpy(
&self.strides_mv[0],
pybuf.strides,
self.ndim * sizeof(Py_ssize_t)
)
else:
self.strides_mv = None
else:
self.shape_mv = None
self.strides_mv = None
cpdef bint _c_contiguous(self):
return _c_contiguous(
self.itemsize, self.ndim, self.shape_mv, self.strides_mv
)
@property
def c_contiguous(self):
return self._c_contiguous()
cpdef bint _f_contiguous(self):
return _f_contiguous(
self.itemsize, self.ndim, self.shape_mv, self.strides_mv
)
@property
def f_contiguous(self):
return self._f_contiguous()
cpdef bint _contiguous(self):
return _contiguous(
self.itemsize, self.ndim, self.shape_mv, self.strides_mv
)
@property
def contiguous(self):
return self._contiguous()
cpdef Py_ssize_t _nbytes(self):
return _nbytes(self.itemsize, self.ndim, self.shape_mv)
@property
def nbytes(self):
return self._nbytes()
@property
@boundscheck(False)
@initializedcheck(False)
@nonecheck(False)
@wraparound(False)
def shape(self):
cdef tuple shape = PyTuple_New(self.ndim)
cdef Py_ssize_t i
cdef object o
for i in range(self.ndim):
o = self.shape_mv[i]
Py_INCREF(o)
PyTuple_SET_ITEM(shape, i, o)
return shape
@property
@boundscheck(False)
@initializedcheck(False)
@nonecheck(False)
@wraparound(False)
def strides(self):
cdef tuple strides = PyTuple_New(self.ndim)
cdef Py_ssize_t i, s
cdef object o
if self.strides_mv is not None:
for i from self.ndim > i >= 0 by 1:
o = self.strides_mv[i]
Py_INCREF(o)
PyTuple_SET_ITEM(strides, i, o)
else:
s = self.itemsize
for i from self.ndim > i >= 0 by 1:
o = s
Py_INCREF(o)
PyTuple_SET_ITEM(strides, i, o)
s *= self.shape_mv[i]
return strides
@boundscheck(False)
@initializedcheck(False)
@nonecheck(False)
@wraparound(False)
cdef inline bint _c_contiguous(Py_ssize_t itemsize,
Py_ssize_t ndim,
Py_ssize_t[::1] shape_mv,
Py_ssize_t[::1] strides_mv) nogil:
cdef Py_ssize_t i, s
if strides_mv is not None:
s = itemsize
for i from ndim > i >= 0 by 1:
if s != strides_mv[i]:
return False
s *= shape_mv[i]
return True
@boundscheck(False)
@initializedcheck(False)
@nonecheck(False)
@wraparound(False)
cdef inline bint _f_contiguous(Py_ssize_t itemsize,
Py_ssize_t ndim,
Py_ssize_t[::1] shape_mv,
Py_ssize_t[::1] strides_mv) nogil:
cdef Py_ssize_t i, s
if strides_mv is not None:
s = itemsize
for i from 0 <= i < ndim by 1:
if s != strides_mv[i]:
return False
s *= shape_mv[i]
elif ndim > 1:
return False
return True
cdef inline bint _contiguous(Py_ssize_t itemsize,
Py_ssize_t ndim,
Py_ssize_t[::1] shape_mv,
Py_ssize_t[::1] strides_mv) nogil:
cdef bint r = _c_contiguous(itemsize, ndim, shape_mv, strides_mv)
if not r:
r = _f_contiguous(itemsize, ndim, shape_mv, strides_mv)
return r
@boundscheck(False)
@initializedcheck(False)
@nonecheck(False)
@wraparound(False)
cdef inline Py_ssize_t _nbytes(Py_ssize_t itemsize,
Py_ssize_t ndim,
Py_ssize_t[::1] shape_mv) nogil:
cdef Py_ssize_t i, nbytes = itemsize
for i in range(ndim):
nbytes *= shape_mv[i]
return nbytes
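# --- Usage sketch (added for illustration, not part of the original file) ---
# Wrap a plain host buffer; objects exposing __cuda_array_interface__ are
# wrapped the same way with `cuda` set to True.
def _example_wrap_host_buffer():
    arr = Array(bytearray(16))
    assert arr.nbytes == 16
    assert arr.c_contiguous and arr.contiguous and not arr.cuda
    return arr.shape, arr.strides   # (16,), (1,)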
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/ucx_api_dep.pxd
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved.
# See file LICENSE for terms.
# cython: language_level=3
from posix.stdio cimport open_memstream
from posix.unistd cimport close
from cpython.ref cimport Py_DECREF, Py_INCREF, PyObject
from libc.stdint cimport *
from libc.stdio cimport FILE, fclose, fflush, printf, stderr, stdin, stdout
from libc.stdlib cimport free, malloc
from libc.string cimport memset
cdef extern from "sys/socket.h":
ctypedef struct sockaddr_storage_t:
pass
cdef extern from "src/c_util.h":
ctypedef struct ucs_sock_addr_t:
pass
int c_util_set_sockaddr(ucs_sock_addr_t *sockaddr,
const char *ip_address,
uint16_t port)
void c_util_sockaddr_free(ucs_sock_addr_t *sockaddr)
void c_util_sockaddr_get_ip_port_str(const sockaddr_storage_t *sock_addr,
char *ip_str,
char *port_str,
size_t max_size)
cdef extern from "ucs/memory/memory_type.h":
cdef enum ucs_memory_type_t:
UCS_MEMORY_TYPE_HOST
UCS_MEMORY_TYPE_CUDA
UCS_MEMORY_TYPE_CUDA_MANAGED
UCS_MEMORY_TYPE_ROCM
UCS_MEMORY_TYPE_ROCM_MANAGED
UCS_MEMORY_TYPE_LAST
UCS_MEMORY_TYPE_UNKNOWN = UCS_MEMORY_TYPE_LAST
cdef extern from "ucp/api/ucp.h":
ctypedef struct ucp_context:
pass
ctypedef ucp_context* ucp_context_h
ctypedef struct ucp_worker:
pass
ctypedef ucp_worker* ucp_worker_h
ctypedef enum ucs_status_t:
pass
ctypedef struct ucp_config_t:
pass
ctypedef struct ucp_address_t:
pass
ctypedef struct ucp_listener_accept_handler_t:
pass
ctypedef ucp_conn_request* ucp_conn_request_h
ctypedef void(*ucp_listener_conn_callback_t)(ucp_conn_request_h request, void *arg)
ctypedef struct ucp_listener_conn_handler_t:
ucp_listener_conn_callback_t cb
void *arg
int UCP_LISTENER_PARAM_FIELD_SOCK_ADDR
int UCP_LISTENER_PARAM_FIELD_CONN_HANDLER
ctypedef struct ucp_listener_params_t:
uint64_t field_mask
ucs_sock_addr_t sockaddr
ucp_listener_accept_handler_t accept_handler
ucp_listener_conn_handler_t conn_handler
ctypedef struct ucp_ep:
pass
ctypedef ucp_ep* ucp_ep_h
ctypedef struct ucp_conn_request:
pass
ctypedef enum ucp_err_handling_mode_t:
UCP_ERR_HANDLING_MODE_NONE
UCP_ERR_HANDLING_MODE_PEER
ctypedef void(*ucp_err_handler_cb_t) (void *arg, ucp_ep_h ep, ucs_status_t status)
ctypedef struct ucp_err_handler_t:
ucp_err_handler_cb_t cb
void *arg
int UCP_EP_PARAM_FIELD_REMOTE_ADDRESS
int UCP_EP_PARAM_FIELD_ERR_HANDLING_MODE
int UCP_EP_PARAM_FIELD_ERR_HANDLER
int UCP_EP_PARAM_FIELD_USER_DATA
int UCP_EP_PARAM_FIELD_SOCK_ADDR
int UCP_EP_PARAM_FIELD_FLAGS
int UCP_EP_PARAM_FIELD_CONN_REQUEST
int UCP_EP_PARAMS_FLAGS_NO_LOOPBACK
int UCP_EP_PARAMS_FLAGS_CLIENT_SERVER
ctypedef struct ucp_ep_params_t:
uint64_t field_mask
const ucp_address_t *address
ucp_err_handling_mode_t err_mode
ucp_err_handler_t err_handler
void *user_data
unsigned flags
ucs_sock_addr_t sockaddr
ucp_conn_request_h conn_request
ctypedef void(* ucp_request_init_callback_t)(void *request)
ctypedef struct ucp_params_t:
uint64_t field_mask
uint64_t features
size_t request_size
ucp_request_init_callback_t request_init
ucs_status_t UCS_OK
ucs_status_t UCS_INPROGRESS
ucs_status_t UCS_ERR_NO_ELEM
ucs_status_t UCS_ERR_BUSY
ucs_status_t UCS_ERR_CANCELED
ucs_status_t UCS_ERR_NOT_CONNECTED
ucs_status_t UCS_ERR_UNREACHABLE
ucs_status_t UCS_ERR_CONNECTION_RESET
void ucp_get_version(unsigned * major_version,
unsigned *minor_version,
unsigned *release_number)
ucs_status_t ucp_config_read(const char * env_prefix,
const char * filename,
ucp_config_t **config_p)
void ucp_config_release(ucp_config_t *config)
int UCP_PARAM_FIELD_FEATURES
int UCP_PARAM_FIELD_REQUEST_SIZE
int UCP_PARAM_FIELD_REQUEST_INIT
int UCP_FEATURE_TAG
int UCP_FEATURE_WAKEUP
int UCP_FEATURE_STREAM
int UCP_FEATURE_RMA
int UCP_FEATURE_AMO32
int UCP_FEATURE_AMO64
int UCP_FEATURE_AM
ucs_status_t ucp_init(const ucp_params_t *params,
const ucp_config_t *config,
ucp_context_h *context_p)
void ucp_cleanup(ucp_context_h context_p)
void ucp_context_print_info(const ucp_context_h context, FILE *stream)
ctypedef enum ucs_thread_mode_t:
pass
# < Only the main thread can access (i.e. the thread that initialized
# the context; multiple threads may exist and never access) */
ucs_thread_mode_t UCS_THREAD_MODE_SINGLE,
# < Multiple threads can access, but only one at a time */
ucs_thread_mode_t UCS_THREAD_MODE_SERIALIZED
# < Multiple threads can access concurrently */
ucs_thread_mode_t UCS_THREAD_MODE_MULTI
ucs_thread_mode_t UCS_THREAD_MODE_LAST
ctypedef struct ucp_worker_params_t:
uint64_t field_mask
ucs_thread_mode_t thread_mode
int UCP_WORKER_PARAM_FIELD_THREAD_MODE
ucs_status_t ucp_worker_create(ucp_context_h context,
const ucp_worker_params_t *params,
ucp_worker_h *worker_p)
void ucp_worker_destroy(ucp_worker_h worker)
void ucp_worker_print_info(const ucp_worker_h context, FILE *stream)
ctypedef struct ucp_listener:
pass
ctypedef ucp_listener* ucp_listener_h
int UCP_LISTENER_ATTR_FIELD_SOCKADDR
ctypedef struct ucp_listener_attr_t:
uint64_t field_mask
sockaddr_storage_t sockaddr
ucs_status_t ucp_listener_create(ucp_worker_h worker,
const ucp_listener_params_t *params,
ucp_listener_h *listener_p)
ucs_status_t ucp_listener_query(ucp_listener_h listener,
ucp_listener_attr_t *attr)
ucs_status_t ucp_ep_create(ucp_worker_h worker,
const ucp_ep_params_t *params,
ucp_ep_h *ep_p)
ctypedef void* ucs_status_ptr_t
ctypedef uint64_t ucp_tag_t
ctypedef uint64_t ucp_datatype_t
bint UCS_PTR_IS_ERR(ucs_status_ptr_t)
bint UCS_PTR_IS_PTR(ucs_status_ptr_t)
ucs_status_t UCS_PTR_STATUS(ucs_status_ptr_t)
ctypedef void (*ucp_send_callback_t)(void *request, ucs_status_t status) # noqa
ucs_status_ptr_t ucp_tag_send_nb(ucp_ep_h ep, const void *buffer,
size_t count, ucp_datatype_t datatype,
ucp_tag_t tag, ucp_send_callback_t cb)
ucp_datatype_t ucp_dt_make_contig(size_t elem_size)
unsigned ucp_worker_progress(ucp_worker_h worker) nogil
ctypedef struct ucp_tag_recv_info_t:
ucp_tag_t sender_tag
size_t length
ctypedef void (*ucp_tag_recv_callback_t)(void *request, # noqa
ucs_status_t status,
ucp_tag_recv_info_t *info)
ucs_status_ptr_t ucp_tag_recv_nb(ucp_worker_h worker, void *buffer,
size_t count, ucp_datatype_t datatype,
ucp_tag_t tag, ucp_tag_t tag_mask,
ucp_tag_recv_callback_t cb)
ctypedef struct ucp_tag_message:
pass
ctypedef ucp_tag_message* ucp_tag_message_h
ucp_tag_message_h ucp_tag_probe_nb(ucp_worker_h worker, ucp_tag_t tag,
ucp_tag_t tag_mask, int remove,
ucp_tag_recv_info_t *info)
ucs_status_ptr_t ucp_tag_msg_recv_nb(ucp_worker_h worker, void *buffer,
size_t count, ucp_datatype_t datatype,
ucp_tag_message_h message,
ucp_tag_recv_callback_t cb)
ctypedef void (*ucp_stream_recv_callback_t)(void *request, # noqa
ucs_status_t status,
size_t length)
ucs_status_ptr_t ucp_stream_send_nb(ucp_ep_h ep, const void *buffer,
size_t count, ucp_datatype_t datatype,
ucp_send_callback_t cb, unsigned flags)
unsigned UCP_STREAM_RECV_FLAG_WAITALL
ucs_status_ptr_t ucp_stream_recv_nb(ucp_ep_h ep, void *buffer,
size_t count, ucp_datatype_t datatype,
ucp_stream_recv_callback_t cb,
size_t *length, unsigned flags)
void ucp_request_free(void *request)
void ucp_ep_print_info(ucp_ep_h ep, FILE *stream)
ucs_status_t ucp_worker_get_efd(ucp_worker_h worker, int *fd)
ucs_status_t ucp_worker_arm(ucp_worker_h worker)
void ucp_listener_destroy(ucp_listener_h listener)
const char *ucs_status_string(ucs_status_t status)
unsigned UCP_EP_CLOSE_MODE_FORCE
unsigned UCP_EP_CLOSE_MODE_FLUSH
ucs_status_ptr_t ucp_ep_close_nb(ucp_ep_h ep, unsigned mode)
void ucp_request_cancel(ucp_worker_h worker, void *request)
ucs_status_t ucp_request_check_status(void *request)
ucs_status_t ucp_config_modify(ucp_config_t *config,
const char *name,
const char *value)
ctypedef enum ucs_config_print_flags_t:
pass
ucs_config_print_flags_t UCS_CONFIG_PRINT_CONFIG
void ucp_config_print(const ucp_config_t *config,
FILE *stream,
const char *title,
ucs_config_print_flags_t print_flags)
ucs_status_t ucp_config_modify(ucp_config_t *config, const char *name,
const char *value)
ucs_status_t ucp_worker_get_address(ucp_worker_h worker,
ucp_address_t **address,
size_t *len)
void ucp_worker_release_address(ucp_worker_h worker,
ucp_address_t *address)
ucs_status_t ucp_worker_fence(ucp_worker_h worker)
ucs_status_ptr_t ucp_worker_flush_nb(ucp_worker_h worker,
unsigned flags,
ucp_send_callback_t cb)
ucs_status_ptr_t ucp_ep_flush_nb(ucp_ep_h ep,
unsigned flags,
ucp_send_callback_t cb)
unsigned UCP_AM_SEND_FLAG_REPLY
unsigned UCP_AM_SEND_FLAG_EAGER
unsigned UCP_AM_SEND_FLAG_RNDV
unsigned UCP_AM_RECV_ATTR_FIELD_REPLY_EP
unsigned UCP_AM_RECV_ATTR_FIELD_TOTAL_LENGTH
unsigned UCP_AM_RECV_ATTR_FIELD_FRAG_OFFSET
unsigned UCP_AM_RECV_ATTR_FIELD_MSG_CONTEXT
unsigned UCP_AM_RECV_ATTR_FLAG_DATA
unsigned UCP_AM_RECV_ATTR_FLAG_RNDV
unsigned UCP_AM_RECV_ATTR_FLAG_FIRST
unsigned UCP_AM_RECV_ATTR_FLAG_ONLY
unsigned UCP_AM_HANDLER_PARAM_FIELD_ID
unsigned UCP_AM_HANDLER_PARAM_FIELD_FLAGS
unsigned UCP_AM_HANDLER_PARAM_FIELD_CB
unsigned UCP_AM_HANDLER_PARAM_FIELD_ARG
ctypedef ucs_status_t(*ucp_am_recv_data_nbx_callback_t)(void *request,
ucs_status_t status,
size_t length,
void *user_data)
ctypedef void (*ucp_send_nbx_callback_t)(void *request, ucs_status_t status,
void *user_data) # noqa
ctypedef union _ucp_request_param_cb_t:
ucp_send_nbx_callback_t send
ucp_am_recv_data_nbx_callback_t recv_am
ctypedef union _ucp_request_param_recv_info_t:
size_t *length
ucs_status_ptr_t ucp_am_send_nbx(ucp_ep_h ep, unsigned id,
const void *header, size_t header_length,
const void *buffer, size_t count,
const ucp_request_param_t *param)
ucs_status_ptr_t ucp_am_recv_data_nbx(ucp_worker_h worker, void *data_desc,
void *buffer, size_t count,
const ucp_request_param_t *param)
int UCP_OP_ATTR_FIELD_REQUEST
int UCP_OP_ATTR_FIELD_CALLBACK
int UCP_OP_ATTR_FIELD_USER_DATA
int UCP_OP_ATTR_FIELD_DATATYPE
int UCP_OP_ATTR_FIELD_FLAGS
int UCP_OP_ATTR_FIELD_REPLY_BUFFER
int UCP_OP_ATTR_FIELD_MEMORY_TYPE
int UCP_OP_ATTR_FIELD_RECV_INFO
int UCP_OP_ATTR_FLAG_NO_IMM_CMPL
int UCP_OP_ATTR_FLAG_FAST_CMPL
int UCP_OP_ATTR_FLAG_FORCE_IMM_CMPL
ctypedef struct ucp_request_param_t:
uint32_t op_attr_mask
uint32_t flags
void *request
_ucp_request_param_cb_t cb
ucp_datatype_t datatype
void *user_data
void *reply_buffer
ucs_memory_type_t memory_type
_ucp_request_param_recv_info_t recv_info
ctypedef struct ucp_am_recv_param_t:
uint64_t recv_attr
ucp_ep_h reply_ep
size_t total_length
size_t frag_offset
void **msg_context
ctypedef ucs_status_t(*ucp_am_recv_callback_t)(void *arg, const void *header,
size_t header_length,
void *data, size_t length,
const ucp_am_recv_param_t *param)
ctypedef struct ucp_am_handler_param_t:
uint64_t field_mask
unsigned id
uint32_t flags
ucp_am_recv_callback_t cb
void *arg
ucs_status_t ucp_worker_set_am_recv_handler(ucp_worker_h worker,
const ucp_am_handler_param_t *param)
cdef extern from "sys/epoll.h":
cdef enum:
EPOLL_CTL_ADD = 1
EPOLL_CTL_DEL = 2
EPOLL_CTL_MOD = 3
cdef enum EPOLL_EVENTS:
EPOLLIN = 0x001
EPOLLPRI = 0x002
EPOLLOUT = 0x004
EPOLLRDNORM = 0x040
EPOLLRDBAND = 0x080
EPOLLWRNORM = 0x100
EPOLLWRBAND = 0x200
EPOLLMSG = 0x400
EPOLLERR = 0x008
EPOLLHUP = 0x010
EPOLLET = (1 << 31)
ctypedef union epoll_data_t:
void *ptr
int fd
uint32_t u32
uint64_t u64
cdef struct epoll_event:
uint32_t events
epoll_data_t data
int epoll_create(int size)
int epoll_ctl(int epfd, int op, int fd, epoll_event *event)
int epoll_wait(int epfd, epoll_event *events, int maxevents, int timeout)
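# Illustrative sketch (not part of the original source): these epoll
# declarations pair with ucp_worker_arm() above for blocking progress. The
# worker's event fd is assumed to come from ucp_worker_get_efd(), which is
# not declared in this excerpt; all names below are for illustration only.
#
#   cdef int epfd = epoll_create(1)
#   cdef epoll_event ev
#   ev.events = EPOLLIN
#   ev.data.fd = worker_fd                      # fd exposed by the UCP worker
#   epoll_ctl(epfd, EPOLL_CTL_ADD, worker_fd, &ev)
#   while ucp_worker_arm(worker) == UCS_ERR_BUSY:
#       pass                                    # progress worker, then re-arm
#   epoll_wait(epfd, &ev, 1, -1)                # block until events arrive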
ctypedef struct ucp_address_t:
pass
ctypedef struct ucp_rkey_h:
pass
int UCP_MEM_MAP_NONBLOCK
int UCP_MEM_MAP_ALLOCATE
int UCP_MEM_MAP_FIXED
ctypedef struct ucp_mem_h:
pass
ctypedef struct ucp_mem_attr_t:
uint64_t field_mask
void *address
size_t length
int UCP_MEM_MAP_PARAM_FIELD_LENGTH
int UCP_MEM_MAP_PARAM_FIELD_ADDRESS
int UCP_MEM_MAP_PARAM_FIELD_FLAGS
int UCP_MEM_ATTR_FIELD_ADDRESS
int UCP_MEM_ATTR_FIELD_LENGTH
ctypedef struct ucp_mem_map_params_t:
uint64_t field_mask
void *address
size_t length
unsigned flags
ucs_status_t ucp_mem_map(ucp_context_h context, const ucp_mem_map_params_t *params,
ucp_mem_h *memh_p)
ucs_status_t ucp_mem_unmap(ucp_context_h context, ucp_mem_h memh)
ucs_status_t ucp_mem_query(const ucp_mem_h memh, ucp_mem_attr_t *attr)
ucs_status_t ucp_rkey_pack(ucp_context_h context, ucp_mem_h memh,
void **rkey_buffer_p, size_t *size_p)
ucs_status_t ucp_ep_rkey_unpack(ucp_ep_h ep, const void *rkey_buffer,
ucp_rkey_h *rkey_p)
void ucp_rkey_buffer_release(void *rkey_buffer)
ucs_status_t ucp_rkey_ptr(ucp_rkey_h rkey, uint64_t raddr, void **addr_p)
void ucp_rkey_destroy(ucp_rkey_h rkey)
ucs_status_t ucp_put_nbi(ucp_ep_h ep, const void *buffer, size_t length,
uint64_t remote_addr, ucp_rkey_h rkey)
ucs_status_t ucp_get_nbi(ucp_ep_h ep, void *buffer, size_t length,
uint64_t remote_addr, ucp_rkey_h rkey)
ucs_status_ptr_t ucp_put_nb(ucp_ep_h ep, const void *buffer, size_t length,
uint64_t remote_addr, ucp_rkey_h rkey,
ucp_send_callback_t cb)
ucs_status_ptr_t ucp_get_nb(ucp_ep_h ep, void *buffer, size_t length,
uint64_t remote_addr, ucp_rkey_h rkey,
ucp_send_callback_t cb)
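# Typical RMA flow for the declarations above (illustrative summary, not part
# of the original source):
#   1. ucp_mem_map() registers a local buffer and yields a ucp_mem_h.
#   2. ucp_rkey_pack() serializes the remote key; the resulting bytes are sent
#      to the peer out of band (e.g. via a tag or active message).
#   3. The peer calls ucp_ep_rkey_unpack() to obtain a ucp_rkey_h and then
#      issues ucp_put_nbi()/ucp_get_nbi() against the exposed memory.
#   4. ucp_rkey_destroy() and ucp_mem_unmap() release the resources.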
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/typedefs.pyx
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
# cython: language_level=3
import enum
from cpython.ref cimport PyObject
from .ucx_api_dep cimport *
class Feature(enum.Enum):
"""Enum of the UCP_FEATURE_* constants"""
TAG = UCP_FEATURE_TAG
RMA = UCP_FEATURE_RMA
AMO32 = UCP_FEATURE_AMO32
AMO64 = UCP_FEATURE_AMO64
WAKEUP = UCP_FEATURE_WAKEUP
STREAM = UCP_FEATURE_STREAM
AM = UCP_FEATURE_AM
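# Illustrative example (not part of the original source): feature flags are
# typically combined when constructing the context wrapper defined in
# ucx_context.pyx, e.g.
#
#   ctx = UCXContext(feature_flags=(Feature.TAG, Feature.AM))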
class AllocatorType(enum.Enum):
HOST = 0
CUDA = 1
UNSUPPORTED = -1
# Struct used as requests by UCX
cdef struct ucx_py_request:
bint finished # Used by downstream projects such as cuML
unsigned int uid
PyObject *info
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/transfer_am.pyx
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved.
# See file LICENSE for terms.
# cython: language_level=3
import functools
import logging
from libc.stdint cimport uintptr_t
from libc.stdlib cimport free
from .arr cimport Array
from .exceptions import UCXCanceled, UCXError, log_errors
from .ucx_api_dep cimport *
logger = logging.getLogger("ucx")
cdef void _send_nbx_callback(
void *request, ucs_status_t status, void *user_data
) with gil:
cdef UCXRequest req
cdef dict req_info
cdef str name, ucx_status_msg, msg
cdef set inflight_msgs
cdef tuple cb_args
cdef dict cb_kwargs
with log_errors():
req = UCXRequest(<uintptr_t><void*> request)
assert not req.closed()
req_info = <dict>req._handle.info
req_info["status"] = "finished"
if "cb_func" not in req_info:
# This callback function was called before ucp_am_send_nbx() returned
return
exception = None
if status == UCS_ERR_CANCELED:
name = req_info["name"]
msg = "<%s>: " % name
exception = UCXCanceled(msg)
elif status != UCS_OK:
name = req_info["name"]
ucx_status_msg = ucs_status_string(status).decode("utf-8")
msg = "<%s>: %s" % (name, ucx_status_msg)
exception = UCXError(msg)
try:
inflight_msgs = req_info["inflight_msgs"]
inflight_msgs.discard(req)
cb_func = req_info["cb_func"]
if cb_func is not None:
cb_args = req_info["cb_args"]
if cb_args is None:
cb_args = ()
cb_kwargs = req_info["cb_kwargs"]
if cb_kwargs is None:
cb_kwargs = {}
cb_func(req, exception, *cb_args, **cb_kwargs)
finally:
req.close()
def am_send_nbx(
UCXEndpoint ep,
Array buffer,
size_t nbytes,
cb_func,
tuple cb_args=None,
dict cb_kwargs=None,
str name=None
):
""" This routine sends a message to an endpoint using the active message API
Each message is sent to an endpoint that is the message's sole recipient.
The routine is non-blocking and therefore returns immediately, however the
actual send operation may be delayed. The send operation is considered
completed when it is safe to reuse the source buffer. If the send operation
is completed immediately the routine returns None and the call-back function
**is not invoked**. If the operation is not completed immediately and no
exception raised then the UCP library will schedule to invoke the call-back
whenever the send operation will be completed. In other words, the completion
of a message can be signaled by the return code or the call-back.
Note
----
The user should not modify any part of the buffer after this operation is
called, until the operation completes.
Parameters
----------
ep: UCXEndpoint
The destination endpoint
buffer: Array
An ``Array`` wrapping a user-provided array-like object
nbytes: int
Size of the buffer to use. Must be equal to or less than the size of buffer
cb_func: callable
The call-back function, which must accept `request` and `exception` as the
first two arguments.
cb_args: tuple, optional
Extra arguments to the call-back function
cb_kwargs: dict, optional
Extra keyword arguments to the call-back function
name: str, optional
Descriptive name of the operation
"""
if cb_args is None:
cb_args = ()
if cb_kwargs is None:
cb_kwargs = {}
if name is None:
name = u"am_send_nb"
if Feature.AM not in ep.worker._context._feature_flags:
raise ValueError("UCXContext must be created with `Feature.AM`")
if buffer.cuda and not ep.worker._context.cuda_support:
raise ValueError(
"UCX is not configured with CUDA support, please add "
"`cuda_copy` and/or `cuda_ipc` to the `UCX_TLS` environment "
"variable if you're manually setting a different value. If you "
"are building UCX from source, please see "
"https://ucx-py.readthedocs.io/en/latest/install.html for "
"more information."
)
if not buffer._contiguous():
raise ValueError("Array must be C or F contiguous")
cdef ucp_request_param_t params
params.op_attr_mask = (
UCP_OP_ATTR_FIELD_CALLBACK |
UCP_OP_ATTR_FIELD_USER_DATA |
UCP_OP_ATTR_FIELD_FLAGS
)
params.cb.send = <ucp_send_nbx_callback_t>_send_nbx_callback
params.flags = UCP_AM_SEND_FLAG_REPLY
params.user_data = <void*>buffer.ptr
cdef int *header = <int *>malloc(sizeof(int))
if buffer.cuda:
header[0] = UCS_MEMORY_TYPE_CUDA
else:
header[0] = UCS_MEMORY_TYPE_HOST
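# The single-int header tells the receiver which allocator to use for the
# incoming data (host vs CUDA); see _am_recv_callback in ucx_worker_cb.pyx.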
def cb_wrapper(header_as_int, cb_func, *cb_args, **cb_kwargs):
free(<void *><uintptr_t>header_as_int)
cb_func(*cb_args, **cb_kwargs)
cb_func = functools.partial(cb_wrapper, int(<uintptr_t>header), cb_func)
cdef ucs_status_ptr_t status = ucp_am_send_nbx(
ep._handle,
0,
<void *>header,
sizeof(int),
<void*>buffer.ptr,
nbytes,
&params,
)
return _handle_status(
status, nbytes, cb_func, cb_args, cb_kwargs, name, ep._inflight_msgs
)
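# Usage sketch (illustrative, not part of the original source), assuming an
# established UCXEndpoint `ep` and an Array-wrapped buffer `arr`:
#
#   def on_sent(request, exception):
#       if exception is not None:
#           raise exception
#
#   am_send_nbx(ep, arr, arr.nbytes, on_sent, name=u"example_send")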
def am_recv_nb(
UCXEndpoint ep,
cb_func,
tuple cb_args=None,
dict cb_kwargs=None,
str name=u"am_recv_nb",
):
""" This function receives a message on a worker with the active message API.
The receive operation is considered completed when the callback function is
called, where the received object will be delivered. If a message has already
been received or an exception raised by the active message callback, that
object is ready for consumption and the `cb_func` is called by this function.
When no object has already been received, `cb_func` will be called by the
active message callback when the receive operation completes, delivering the
message or exception to the callback function.
The received object is always allocated by the allocator function registered
with `UCXWorker.register_am_allocator`, using the appropriate allocator
depending on whether it is a host or CUDA buffer.
Note
----
This routine always returns `None`.
Parameters
----------
ep: UCXEndpoint
The endpoint that is used for the receive operation. Received active
messages are always targeted at a specific endpoint, therefore it is
imperative to specify the correct endpoint here.
cb_func: callable
The call-back function, which must accept `recv_obj` and `exception` as the
first two arguments.
cb_args: tuple, optional
Extra arguments to the call-back function
cb_kwargs: dict, optional
Extra keyword arguments to the call-back function
name: str, optional
Descriptive name of the operation
"""
worker = ep.worker
if cb_args is None:
cb_args = ()
if cb_kwargs is None:
cb_kwargs = {}
if Feature.AM not in worker._context._feature_flags:
raise ValueError("UCXContext must be created with `Feature.AM`")
am_recv_pool = worker._am_recv_pool
ep_as_int = int(<uintptr_t>ep._handle)
if (
ep_as_int in am_recv_pool and
len(am_recv_pool[ep_as_int]) > 0
):
recv_obj = am_recv_pool[ep_as_int].pop(0)
exception = recv_obj if isinstance(recv_obj, Exception) else None
cb_func(recv_obj, exception, *cb_args, **cb_kwargs)
logger.debug("AM recv ready: ep %s" % (hex(ep_as_int), ))
else:
if ep_as_int not in worker._am_recv_wait:
worker._am_recv_wait[ep_as_int] = list()
worker._am_recv_wait[ep_as_int].append(
{
"cb_func": cb_func,
"cb_args": cb_args,
"cb_kwargs": cb_kwargs
}
)
logger.debug("AM recv waiting: ep %s" % (hex(ep_as_int), ))
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/ucx_worker_cb.pyx
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved.
# See file LICENSE for terms.
# cython: language_level=3
import logging
from cython cimport boundscheck, initializedcheck, nonecheck, wraparound
from libc.stdint cimport uintptr_t
from libc.string cimport memcpy
from .exceptions import UCXCanceled, UCXError, log_errors
from .ucx_api_dep cimport *
logger = logging.getLogger("ucx")
cdef void _am_recv_completed_callback(
void *request,
ucs_status_t status,
size_t length,
void *user_data
) with gil:
cdef bytearray buf
cdef UCXRequest req
cdef dict req_info
cdef str name, ucx_status_msg, msg
cdef set inflight_msgs
cdef tuple cb_args
cdef dict cb_kwargs
with log_errors():
req = UCXRequest(<uintptr_t><void*> request)
assert not req.closed()
req_info = <dict>req._handle.info
req_info["status"] = "finished"
if "cb_func" not in req_info:
logger.debug(
"_am_recv_completed_callback() called before "
"_am_recv_callback() returned"
)
return
else:
cb_args = req_info["cb_args"]
logger.debug(
"_am_recv_completed_callback status %d len %d buf %s" % (
status, length, hex(int(<uintptr_t><void *>cb_args[0]))
)
)
exception = None
if status == UCS_ERR_CANCELED:
name = req_info["name"]
msg = "<%s>: " % name
exception = UCXCanceled(msg)
elif status != UCS_OK:
name = req_info["name"]
ucx_status_msg = ucs_status_string(status).decode("utf-8")
msg = "<%s>: %s" % (name, ucx_status_msg)
exception = UCXError(msg)
try:
inflight_msgs = req_info["inflight_msgs"]
inflight_msgs.discard(req)
cb_func = req_info["cb_func"]
if cb_func is not None:
if cb_args is None:
cb_args = ()
cb_kwargs = req_info["cb_kwargs"]
if cb_kwargs is None:
cb_kwargs = {}
cb_func(cb_args[0], exception, **cb_kwargs)
finally:
req.close()
@boundscheck(False)
@initializedcheck(False)
@nonecheck(False)
@wraparound(False)
cdef ucs_status_t _am_recv_callback(
void *arg,
const void *header,
size_t header_length,
void *data,
size_t length,
const ucp_am_recv_param_t *param
) with gil:
cdef UCXWorker worker = <UCXWorker>arg
cdef dict am_recv_pool = worker._am_recv_pool
cdef dict am_recv_wait = worker._am_recv_wait
cdef set inflight_msgs = worker._inflight_msgs
assert worker.initialized
assert param.recv_attr & UCP_AM_RECV_ATTR_FIELD_REPLY_EP
assert Feature.AM in worker._context._feature_flags
cdef ucp_ep_h ep = param.reply_ep
cdef unsigned long ep_as_int = int(<uintptr_t>ep)
if ep_as_int not in am_recv_pool:
am_recv_pool[ep_as_int] = list()
is_rndv = param.recv_attr & UCP_AM_RECV_ATTR_FLAG_RNDV
cdef object buf
cdef char[:] buf_view
cdef void *buf_ptr
cdef unsigned long cai_ptr
cdef int allocator_type = (<int *>header)[0]
def _push_result(buf, exception, recv_type):
if (
ep_as_int in am_recv_wait and
len(am_recv_wait[ep_as_int]) > 0
):
recv_wait = am_recv_wait[ep_as_int].pop(0)
cb_func = recv_wait["cb_func"]
cb_args = recv_wait["cb_args"]
cb_kwargs = recv_wait["cb_kwargs"]
logger.debug("am %s awaiting in ep %s cb_func %s" % (
recv_type,
hex(ep_as_int),
cb_func
))
cb_func(buf, exception, *cb_args, **cb_kwargs)
else:
logger.debug("am %s pushing to pool in ep %s" % (
recv_type,
hex(ep_as_int)
))
if exception is not None:
am_recv_pool[ep_as_int].append(exception)
else:
am_recv_pool[ep_as_int].append(buf)
cdef ucp_request_param_t request_param
cdef ucs_status_ptr_t status
cdef str ucx_status_msg, msg
cdef UCXRequest req
cdef dict req_info
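# Rendezvous (rndv) messages require posting a receive with
# ucp_am_recv_data_nbx() into a freshly allocated buffer, while eager
# messages arrive with their payload already present and are simply
# copied out in the else-branch below.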
if is_rndv:
request_param.op_attr_mask = (
UCP_OP_ATTR_FIELD_CALLBACK |
UCP_OP_ATTR_FIELD_USER_DATA |
UCP_OP_ATTR_FLAG_NO_IMM_CMPL
)
request_param.cb.recv_am = (
<ucp_am_recv_data_nbx_callback_t>_am_recv_completed_callback
)
if allocator_type == UCS_MEMORY_TYPE_HOST:
buf = worker._am_host_allocator(length)
buf_view = buf
buf_ptr = <void *><uintptr_t>&buf_view[0]
elif allocator_type == UCS_MEMORY_TYPE_CUDA:
buf = worker._am_cuda_allocator(length)
cai_ptr = buf.__cuda_array_interface__["data"][0]
buf_ptr = <void *><uintptr_t>cai_ptr
else:
logger.debug("Unsupported memory type")
buf = worker._am_host_allocator(length)
buf_view = buf
buf_ptr = <void *><uintptr_t>&buf_view[0]
_push_result(None, UCXError("Unsupported memory type"), "rndv")
return UCS_OK
status = ucp_am_recv_data_nbx(
worker._handle, data, buf_ptr, length, &request_param
)
logger.debug("am rndv: ep %s len %s" % (hex(int(ep_as_int)), length))
if UCS_PTR_STATUS(status) == UCS_OK:
_push_result(buf, None, "rndv")
return UCS_OK
elif UCS_PTR_IS_ERR(status):
ucx_status_msg = (
ucs_status_string(UCS_PTR_STATUS(status)).decode("utf-8")
)
msg = "<_am_recv_callback>: %s" % (ucx_status_msg)
logger.info("_am_recv_callback error: %s" % msg)
_push_result(None, UCXError(msg), "rndv")
return UCS_PTR_STATUS(status)
req = UCXRequest(<uintptr_t><void*> status)
assert not req.closed()
req_info = <dict>req._handle.info
if req_info["status"] == "finished":
try:
# The callback function has already handled the request
received = req_info.get("received", None)
_push_result(buf, None, "rndv")
return UCS_OK
finally:
req.close()
else:
req_info["cb_func"] = _push_result
req_info["cb_args"] = (buf, )
req_info["cb_kwargs"] = {"recv_type": "rndv"}
req_info["expected_receive"] = 0
req_info["name"] = "am_recv"
inflight_msgs.add(req)
req_info["inflight_msgs"] = inflight_msgs
return UCS_OK
else:
logger.debug("am eager copying %d bytes with ep %s" % (
length,
hex(ep_as_int)
))
buf = worker._am_host_allocator(length)
if length > 0:
buf_view = buf
buf_ptr = <void *><uintptr_t>&buf_view[0]
memcpy(buf_ptr, data, length)
_push_result(buf, None, "eager")
return UCS_OK
| 0 |
rapidsai_public_repos/ucx-py/ucp
|
rapidsai_public_repos/ucx-py/ucp/_libs/ucx_api.pyx
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
include "packed_remote_key.pyx"
include "transfer_am.pyx"
include "transfer_common.pyx"
include "transfer_stream.pyx"
include "transfer_tag.pyx"
include "typedefs.pyx"
include "ucx_address.pyx"
include "ucx_context.pyx"
include "ucx_endpoint.pyx"
include "ucx_listener.pyx"
include "ucx_memory_handle.pyx"
include "ucx_object.pyx"
include "ucx_request.pyx"
include "ucx_rkey.pyx"
include "ucx_rma.pyx"
include "ucx_worker.pyx"
include "ucx_worker_cb.pyx"
include "ucxio.pyx"
include "utils.pyx"
| 0 |