repo_id | file_path | content | __index_level_0__
---|---|---|---|
rapidsai_public_repos
|
rapidsai_public_repos/multi-gpu-tools/default-config.sh
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DATE=$(date --utc "+%Y%m%d_%H%M%S")_UTC
TESTING_RESULTS_DIR_NAME=tests
BENCHMARK_RESULTS_DIR_NAME=benchmarks
GNN_RESULTS_DIR_NAME=gnn
METADATA_FILE_NAME=metadata.sh
# Most are defined using the bash := or :- syntax, which means they
# will be set only if they were previously unset. The project config
# is loaded first, which gives it the opportunity to override anything
# in this file that uses that syntax. If there are variables in this
# file that should not be overridden by a project, they simply do not
# use that syntax; since this file is read last, their values always
# take effect.
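# For example (illustrative value): if a project config.sh sets
# WORKER_RMM_POOL_SIZE=24G before this file is sourced, the :- expansion
# below keeps that value.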
WORKER_RMM_POOL_SIZE=${WORKER_RMM_POOL_SIZE:-12G}
DASK_CUDA_INTERFACE=${DASK_CUDA_INTERFACE:-ib0}
DASK_SCHEDULER_PORT=${DASK_SCHEDULER_PORT:-8792}
DASK_DEVICE_MEMORY_LIMIT=${DASK_DEVICE_MEMORY_LIMIT:-auto}
DASK_HOST_MEMORY_LIMIT=${DASK_HOST_MEMORY_LIMIT:-auto}
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/multi-gpu-tools/getopt.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example usage from bash script called with --foo-bar=www --baz=33 --boo:
set -e # exit immediately on error
eval_str=$(python getopt.py "$0" "foo-bar:,boo,bar,baz:int" "$@")
eval "$eval_str"
echo $foo_bar # prints www
echo $bar # prints 0
echo $boo # prints 1
echo $baz # prints 33
"""
import builtins
from argparse import ArgumentParser
class StderrArgumentParser(ArgumentParser):
"""
ArgumentParser where all messaging, including help, goes to stderr.
"""
def _print_message(self, message, file=None):
super(StderrArgumentParser, self)._print_message(message)
def getopt_to_argparse(prog_name, opt_parse_string, options_list):
"""
Parse options_list using an ArgumentParser created with opt_parse_string,
in the style of getopts.
Return an argparse.Namespace object as normally returned by
parse_args(). Any errors or help output will be printed to stderr and None
is returned instead.
"""
arg_parser = StderrArgumentParser(prog=prog_name)
for opt_desc in opt_parse_string.split(","):
if opt_desc == "":
raise RuntimeError(f"invalid option string: {opt_parse_string}")
opt_desc = opt_desc.split(":")
opt_desc_len = len(opt_desc)
# option with no arg: "name"
if opt_desc_len == 1:
name = f"--{opt_desc[0]}"
arg_parser.add_argument(name, action="store_const", const=1, default=0)
# required arg: "name:type" or "name:"
elif opt_desc_len == 2:
name = f"--{opt_desc[0]}"
opt_type = getattr(builtins, opt_desc[1] or "str")
arg_parser.add_argument(name, type=opt_type, required=True)
# optional arg: "name::type" or "name::"
elif (opt_desc_len == 3) and (opt_desc[1] == ""):
name = f"--{opt_desc[0]}"
opt_type = getattr(builtins, opt_desc[2] or "str")
arg_parser.add_argument(name, type=opt_type, required=False)
else:
raise RuntimeError(f"invalid option string: {opt_parse_string}")
try:
return arg_parser.parse_args(options_list)
except SystemExit as err:
return None
if __name__ == "__main__":
import sys
prog_name = sys.argv[1]
opt_string = sys.argv[2]
cli_input = sys.argv[3:]
exit_code = 1
argparse_obj = getopt_to_argparse(prog_name, opt_string, cli_input)
if argparse_obj is not None:
# Print parsed options to be eval'd by bash
empty = '""'
output_strs = [f"{option}={empty if val is None else val}"
for (option, val) in vars(argparse_obj).items()]
print(";".join(output_strs))
exit_code = 0
sys.exit(exit_code)
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/multi-gpu-tools/setup-latest-results-dir.sh
|
#!/bin/bash
# Copyright (c) 2023, NVIDIA CORPORATION.
# Abort script on first error, undef vars are errors, propagate failures in pipelines
set -eu -o pipefail
RAPIDS_MG_TOOLS_DIR=${RAPIDS_MG_TOOLS_DIR:-$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)}
source ${RAPIDS_MG_TOOLS_DIR}/script-env.sh
usage () {
echo "Usage: $0 --results-root-dir=<results_root_dir>"
exit 1
}
results_root_dir=""
params=$(getopt -u -o d: -l results-root-dir: --name "$0" -- "$@")
read -r -a param_array <<< "$params"
i=0
while (( i < ${#param_array[@]} )); do
case "${param_array[$i]}" in
-d|--results-root-dir)
((i++)) || true # required when using set -e
results_root_dir=${param_array[$i]}
;;
--)
break
;;
*)
usage
;;
esac
((i++)) || true
done
if [ -z "$results_root_dir" ]; then
echo "Must specify results_root_dir"
usage
fi
if [ ! -d $results_root_dir ]; then
echo "directory $results_root_dir does not exist"
exit 1
fi
################################################################################
latest_results_dir=${results_root_dir}/latest
testing_results_dir=${latest_results_dir}/${TESTING_RESULTS_DIR_NAME}
benchmark_results_dir=${latest_results_dir}/${BENCHMARK_RESULTS_DIR_NAME}
gnn_results_dir=${latest_results_dir}/${GNN_RESULTS_DIR_NAME}
metadata_file=${latest_results_dir}/${METADATA_FILE_NAME}
mkdir -p ${results_root_dir}/${DATE}
previous_results=$(readlink -f $latest_results_dir)
rm -rf $latest_results_dir
ln -s ${results_root_dir}/${DATE} $latest_results_dir
mkdir -p $testing_results_dir
mkdir -p $benchmark_results_dir
mkdir -p $gnn_results_dir
# copy over old regressions if they exist. otherwise, create a new directory to store them
previous_regressions=${previous_results}/benchmarks/results
if [ -d $previous_regressions ]; then
cp -r $previous_regressions ${benchmark_results_dir}/results
else
mkdir -p ${benchmark_results_dir}/results
fi
# Write paths.sh file for use by other scripts that use the vars set by
# multi-gpu-tools scripts, set to the current values used when setting up the
# results dir.
echo "TESTING_RESULTS_DIR=$testing_results_dir" >> ${latest_results_dir}/paths.sh
echo "BENCHMARK_RESULTS_DIR=$benchmark_results_dir" >> ${latest_results_dir}/paths.sh
echo "GNN_RESULTS_DIR=$gnn_results_dir" >> ${latest_results_dir}/paths.sh
# The container may have a /metadata.sh file that can be sourced to set env
# vars with info about the image that can be used in reports, etc.
if [ -e /metadata.sh ]; then
cp /metadata.sh $metadata_file
echo "METADATA_FILE=$metadata_file" >> ${latest_results_dir}/paths.sh
else
echo "METADATA_FILE=\"\"" >> ${latest_results_dir}/paths.sh
fi
# Echo out the latest_results_dir as the last line. This is needed since other
# scripts that call this look for the last line to see the final
# latest_results_dir that was set up (since it may be named using a timestamp
# or some other uniquifier).
echo "Finished setting up latest results dir: ${latest_results_dir}"
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/multi-gpu-tools/script-env.sh
|
# Copyright (c) 2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is meant to be source'd by other scripts to add variables
# and functions to the calling environment, hence no #!/bin/bash as
# the first line.
# Read the config for the project, if possible. The project config
# takes precedence, and anything missing from the project config is
# set here.
# Projects should always call this script to ensure a complete set of
# script vars and functions are available.
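# Typical usage from a project script (illustrative paths):
#   export PROJECT_DIR=/my/project
#   source /path/to/multi-gpu-tools/script-env.sh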
if [ -v PROJECT_DIR ] && [ -n "$PROJECT_DIR" ]; then
if [ -e ${PROJECT_DIR}/config.sh ]; then
source ${PROJECT_DIR}/config.sh
else
echo "Error: ${PROJECT_DIR}/config.sh was not found and must be present."
exit 1
fi
fi
THIS_DIR=$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)
source ${THIS_DIR}/default-config.sh
source ${THIS_DIR}/functions.sh
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/multi-gpu-tools/record-benchmarks.py
|
import glob
import argparse
import pandas as pd
from pathlib import Path
import platform
from pynvml import smi
import yaml
import os
import math
# read the pytest-results.txt file and return a df in the format we want
def pytest_results_to_df(path, run_date):
df = pd.read_csv(path, sep=" ", header=None)
# preserve failed/skipped statuses
df.loc[df[1] == 'FAILED', 3] = 'FAILED'
df.loc[df[1] == 'SKIPPED', 3] = 'SKIPPED'
df = df[[2, 3]]
# add the run date
date_row = {2: 'date', 3: run_date}
df.loc[1:] = df.loc[:]
df.loc[0] = date_row
df = df.T.reset_index(drop=True)
df.columns = df.iloc[0]
df = df.drop(df.index[0])
return df
# convert bytes to biggest denomination
def convert_size(size_bytes):
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i])
def write_metadata():
uname = platform.uname()
python_ver = 'python_ver: ' + platform.python_version()
cuda_version = os.popen(r"nvcc --version | sed -n 's/^.*release \([0-9]\+\.[0-9]\+\).*$/\1/p'").read().strip()
# get info for all gpu devices
smi.nvmlInit()
num_gpus = smi.nvmlDeviceGetCount()
gpu_info = []
for i in range(num_gpus):
gpuDeviceHandle = smi.nvmlDeviceGetHandleByIndex(i)
gpuType = smi.nvmlDeviceGetName(gpuDeviceHandle).decode()
gpuRam = smi.nvmlDeviceGetMemoryInfo(gpuDeviceHandle).total
gpu_info.append([gpuType, convert_size(gpuRam)])
meta = {
'os_name': uname[0],
'node_name': uname[1],
'os_release': uname[2],
'os_version': uname[3],
'machine_hw': uname[4],
'python_version': platform.python_version(),
'cuda_version': cuda_version,
'num_gpus': num_gpus,
'gpu_info': gpu_info,
}
with open(results_dir / 'meta.yaml', 'w+') as file:
yaml.dump(meta, file, sort_keys=False)
################################################################################
# get the path to latest nightly results directory
# eg. /gpfs/fs1/projects/sw_rapids/users/rratzel/cugraph-results/latest
parser = argparse.ArgumentParser(description="Script used to copy over old benchmark timings")
parser.add_argument('--latest-results', required=True, help='Latest results directory', dest="results_dir")
args = parser.parse_args()
latest_results_dir = Path(args.results_dir)
run_date = latest_results_dir.resolve().name
bench_dir = latest_results_dir / "benchmarks"
# get each of the cugraph benchmark run directories
# eg latest/benchmarks/2-GPU latest/benchmarks/8-GPU ... etc
results_dir = bench_dir / "results"
# get results from tonight's runs
all_benchmark_runs = glob.glob(str(bench_dir) + '/*-GPU')
for run in all_benchmark_runs:
run_type = Path(run).name
results_file = bench_dir / run_type / 'pytest-results.txt'
output_file = results_dir / (run_type + ".csv")
# if previous csv files were generated, append tonight's results to the end
if output_file.exists():
existing_df = pd.read_csv(output_file)
tonight_df = pytest_results_to_df(results_file, run_date)
pd.concat([existing_df, tonight_df]).to_csv(output_file, index=False)
# otherwise, create new result file for each successful run
else:
if results_file.exists():
print(f"creating a new results file for {run_type} on {run_date}")
df = pytest_results_to_df(results_file, run_date)
df.to_csv(output_file, index=False)
write_metadata()
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/multi-gpu-tools/README.md
|
# rapidsai/multi-gpu-tools (rapids-mg-tools)
This repo contains tools for configuring environments and automating
single-node or multi-node, multi-GPU (SNMG or MNMG) application runs.
It currently consists of a collection of shell scripts and Python
modules for use by such applications.
The tools in this repo are currently aimed at dask-based test and
benchmark applications, but need not be limited to those in the
future.
## Quick start examples (FIXME: finish this section!)
### Creating a script to run MNMG tests for your project
1) Load the rapids-mg-tools environment into your shell script. This
is boilerplate code to safely source the environment so your scripts
can use the rapids-mg-tools scripts and functions. By specifying your
`PROJECT_DIR`, you can provide your own `config.sh` and `functions.sh`
that can override the defaults in rapids-mg-tools or add custom vars
and functions without having to explicitly source additional files.
file: `/my/project/myscript.sh`:
```
# PROJECT_DIR will default to /my/project but can be overridden by the environment
export PROJECT_DIR=${PROJECT_DIR:-$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)}
if [ -n "$RAPIDS_MG_TOOLS_DIR" ]; then
source ${RAPIDS_MG_TOOLS_DIR}/script-env.sh
elif [ -n "$(which script-env.sh)" ]; then
source $(which script-env.sh)
else
echo "Error: \$RAPIDS_MG_TOOLS_DIR/script-env.sh could not be read nor was script-env.sh in PATH."
exit 1
fi
```
2) Create a project `config.sh` to customize typical settings such as log dirs, etc.
file: `/my/project/config.sh`:
```
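# Illustrative sketch only -- a project config.sh is sourced before
# default-config.sh, so any variable defined there with the :- or :=
# syntax can be overridden here. Example values are hypothetical:
WORKER_RMM_POOL_SIZE=24G
DASK_SCHEDULER_PORT=8890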
```
3)
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/multi-gpu-tools/run-cluster-dask-jobs.sh
|
#!/bin/bash
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
RAPIDS_MG_TOOLS_DIR=${RAPIDS_MG_TOOLS_DIR:-$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)}
source ${RAPIDS_MG_TOOLS_DIR}/script-env.sh
RUN_SCHEDULER=0
# FIXME: this should not be slurm-specific. Consider a wrapper that
# calls this script for slurm clusters.
# Assumption is that this script is called from a multi-node sbatch
# run via srun, with one task per node. Use SLURM_NODEID 1 for the
# scheduler instead of SLURM_NODEID 0, since the test/benchmark script
# is typically run on 0 and putting the scheduler on 1 helps
# distribute the load (I think, just based on getting OOM errors when
# everything ran on 0).
if [[ $SLURM_NODEID == 1 ]] || [[ $SLURM_JOB_NUM_NODES == 1 ]] || hasArg --scheduler-and-workers; then
RUN_SCHEDULER=1
fi
# NOTE: if the LOGS_DIR env var is exported from the calling env, it
# will be used by run-dask-process.sh as the log location.
if [[ $RUN_SCHEDULER == 1 ]]; then
echo "NODE: $SLURM_NODEID, run-cluster-dask-jobs.sh: starting scheduler and workers..."
${RAPIDS_MG_TOOLS_DIR}/run-dask-process.sh scheduler workers
else
echo "NODE: $SLURM_NODEID, run-cluster-dask-jobs.sh: starting workers..."
${RAPIDS_MG_TOOLS_DIR}/run-dask-process.sh workers
fi
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/multi-gpu-tools/dump-meta-data.sh
|
#!/bin/bash
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Abort script on first error, undef vars are errors, propagate failures in pipelines
set -eu -o pipefail
RAPIDS_MG_TOOLS_DIR=${RAPIDS_MG_TOOLS_DIR:-$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)}
source ${RAPIDS_MG_TOOLS_DIR}/script-env.sh
param_vals=$(python3 ${RAPIDS_MG_TOOLS_DIR}/getopt.py $0 "packages:str,from-conda,from-pip" "$@")
eval $param_vals
if (( ($from_conda || $from_pip) == 0 )); then
echo "ERROR: must specify one of --from-conda or --from-pip"
exit 1
elif (( ($from_conda && $from_pip) == 1 )); then
echo "ERROR: must specify only one of --from-conda or --from-pip"
exit 1
fi
package_list=$(echo $packages | sed 's/,/ /g')
if (( $from_conda == 1 )); then
for package in $package_list; do
PACKAGE=$(echo $package | sed 's/[a-z]/\U&/g')
# output format is: name version build channel
conda_output=$(conda list | grep "^${package}")
echo "${PACKAGE}_VERSION=$(echo $conda_output | awk '{print $2}')"
echo "${PACKAGE}_BUILD=$(echo $conda_output | awk '{print $3}')"
echo "${PACKAGE}_CHANNEL=$(echo $conda_output | awk '{print $4}')"
done
# elif (( $from_source == 1 )); then
# # FIXME: this assumes the sources are always in
# # ${WORKSPACE}/${REPO_DIR_NAME}. That should be the default and a
# # --source-dir option should be added to override.
# PROJECT_VERSION=$(cd ${WORKSPACE}/${REPO_DIR_NAME}; git rev-parse HEAD)
# PROJECT_REPO_URL=$(cd ${WORKSPACE}/${REPO_DIR_NAME}; git config --get remote.origin.url)
# PROJECT_REPO_BRANCH=$(cd ${WORKSPACE}/${REPO_DIR_NAME}; git rev-parse --abbrev-ref HEAD)
# PROJECT_REPO_TIME=$(cd ${WORKSPACE}/${REPO_DIR_NAME}; git log -n1 --pretty='%ct' ${PROJECT_VERSION})
else
# FIXME: write this
pip_output=$(pip list)
for package in $package_list; do
PACKAGE=$(echo $package | sed 's/[a-z]/\U&/g')
echo "${PACKAGE}_VERSION=FIXME-for-pip"
echo "${PACKAGE}_BUILD=FIXME-for-pip"
echo "${PACKAGE}_CHANNEL=FIXME-for-pip"
done
fi
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/multi-gpu-tools/build-ucx.sh
|
#!/bin/bash
# Copyright (c) 2023, NVIDIA CORPORATION.
# SPDX-License-Identifier: Apache-2.0
# Abort script on first error, undef vars are errors, propagate failures in pipelines
set -eu -o pipefail
UCX_VERSION_TAG=${1:-"v1.14.x"}
CUDA_HOME=${2:-"/usr/local/cuda"}
# Send any remaining arguments to configure
CONFIGURE_ARGS=${@:3}
PREFIX=${CONDA_PREFIX:-"/usr/local"}
# Setup src dir
rm -rf ucx
git clone https://github.com/openucx/ucx.git
cd ucx
git checkout ${UCX_VERSION_TAG}
# build and install
./autogen.sh
mkdir build-linux && cd build-linux
../contrib/configure-release --prefix=${PREFIX} --with-sysroot --enable-cma \
--enable-mt --enable-numa --with-gnu-ld --with-rdmacm --with-verbs \
--with-cuda=${CUDA_HOME} \
${CONFIGURE_ARGS}
make -j install
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/multi-gpu-tools/functions.sh
|
# Copyright (c) 2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is source'd from script-env.sh to add functions to the
# calling environment, hence no #!/bin/bash as the first line. This
# also assumes the variables used in this file have been defined
# elsewhere.
numargs=$#
args=$*
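# hasArg <arg>: succeeds if <arg> was among the args passed to the sourcing script.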
hasArg () {
(( ${numargs} != 0 )) && (echo " ${args} " | grep -q " $1 ")
}
logger_prefix=">>>> "
logger () {
if (( $# > 0 )) && [ "$1" == "-p" ]; then
shift
echo -e "${logger_prefix}$@"
else
echo -e "$(date --utc "+%D-%T.%N")_UTC${logger_prefix}$@"
fi
}
# Retry a command at most $1 times until successful, logging $2 on retry.
# This requires scripts to use set +e
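# Example usage (illustrative command):
#   set +e
#   retry 3 "command failed, retrying..." some_flaky_command --with-args
#   set -e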
retry () {
max_retries=$1
msg=$2
shift 2
cmd=$@
eval "$cmd"
success=$?
num_retries=0
while (( success != 0 )) && (( $num_retries < $max_retries )); do
logger "$msg"
eval "$cmd"
success=$?
(( num_retries++ ))
done
# Set a final exit code on non-success that can be checked.
if (( $success != 0 )); then
false
fi
}
# Calling "set_tee outfile" will cause all stdout and stderr of the
# current script to be output to "tee", which outputs to stdout and
# "outfile" simultaneously. This is useful by allowing a script to
# "tee" itself at any point without being called with tee.
origFileDescriptorsSaved=0
set_tee () {
if [[ $origFileDescriptorsSaved == 0 ]]; then
# Save off the original file descr 1 and 2 as 3 and 4
exec 3>&1 4>&2
origFileDescriptorsSaved=1
fi
teeFile=$1
# Create a named pipe.
pipeName=$(mktemp -u)
mkfifo $pipeName
# Close the current 1 and 2 and restore to original (3, 4) in the
# event this function is called repeatedly.
exec 1>&- 2>&-
exec 1>&3 2>&4
# Start a tee process reading from the named pipe. Redirect stdout
# and stderr to the named pipe which goes to the tee process. The
# named pipe "file" can be removed and the tee process stays alive
# until the fd is closed.
tee -a < $pipeName $teeFile &
exec > $pipeName 2>&1
rm $pipeName
}
# Call this to stop script output from going to "tee" after a prior
# call to set_tee.
unset_tee () {
if [[ $origFileDescriptorsSaved == 1 ]]; then
# Close the current fd 1 and 2 which should stop the tee
# process, then restore 1 and 2 to original (saved as 3, 4).
exec 1>&- 2>&-
exec 1>&3 2>&4
fi
}
# Function for running a command that gets killed after a specific timeout and
# logs a timeout message.
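# Example (illustrative command): kill the command if it runs longer than
# 300 seconds, then check $LAST_EXITCODE:
#   handle_timeout 300 python ./run_benchmarks.py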
LAST_EXITCODE=0
handle_timeout () {
_seconds=$1
eval "timeout --signal=2 --kill-after=60 $*"
LAST_EXITCODE=$?
if (( $LAST_EXITCODE == 124 )); then
logger "ERROR: command timed out after ${_seconds} seconds"
elif (( $LAST_EXITCODE == 137 )); then
logger "ERROR: command timed out after ${_seconds} seconds, and had to be killed with signal 9"
fi
}
waitForSlurmJobsToComplete () {
ids=$*
jobs=$(python -c "print(\",\".join(\"$ids\".split()))") # make a comma-separated list
jobsInQueue=$(squeue --noheader --jobs=$jobs)
while [[ $jobsInQueue != "" ]]; do
sleep 2
jobsInQueue=$(squeue --noheader --jobs=$jobs)
done
}
# Clones repo from URL specified by $1 to directory $2
# For example:
# "cloneRepo https://github.com/rapidsai/cugraph.git /my/repos/cg"
# results in cugraph being cloned to /my/repos/cg.
# NOTE: This removes any existing cloned repos that match the
# destination.
cloneRepo () {
repo_url=$1
repo_name=$(basename $2)
dest_dir=$(dirname $2)
mkdir -p $dest_dir
pushd $dest_dir > /dev/null
logger "Clone $repo_url in $dest_dir..."
if [ -d $repo_name ]; then
rm -rf $repo_name
if [ -d $repo_name ]; then
echo "ERROR: ${dest_dir}/$repo_name was not completely removed."
exit 1
fi
fi
git clone $repo_url
popd > /dev/null
}
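# keep_last_n_files <n> <glob pattern>: deletes all but the last <n> entries
# matching <glob pattern> (glob-expansion order, so timestamped names are
# pruned oldest-first).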
keep_last_n_files () {
n=$1
pattern=$2
_files=(${pattern})
if (( ${#_files[*]} > $n )); then
_diff=$((${#_files[*]} - $n))
for ((i=0; i<${_diff}; i++)); do
rm -rf ${_files[$i]}
done
fi
}
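# wait_for_file <timeout_seconds> <file>: polls once per second until <file>
# exists or the timeout elapses; returns non-zero on timeout.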
wait_for_file () {
timeout=$1
file_name=$2
logger "waiting for file: $file_name"
i=0
while (( i < $timeout )); do
if [ -e $file_name ]; then
logger "file $file_name exists"
break
fi
sleep 1
((i++))
done
if [ ! -e $file_name ]; then
logger "timed out waiting for file: $file_name"
false
fi
}
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/multi-gpu-tools/run-dask-process.sh
|
#!/bin/bash
# Copyright (c) 2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
RAPIDS_MG_TOOLS_DIR=${RAPIDS_MG_TOOLS_DIR:-$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)}
source ${RAPIDS_MG_TOOLS_DIR}/script-env.sh
# Logs can be written to a specific location by setting the LOGS_DIR
# env var. If unset, all logs are created under a dir named after the
# current PID.
LOGS_DIR=${LOGS_DIR:-dask_logs-$$}
########################################
NUMARGS=$#
ARGS=$*
VALIDARGS="-h --help scheduler workers --tcp --ucx --ucxib --ucx-ib"
HELP="$0 [<app> ...] [<flag> ...]
where <app> is:
scheduler - start dask scheduler
workers - start dask workers
and <flag> is:
--tcp - initialize a tcp cluster (default)
--ucx - initialize a ucx cluster with NVLink
--ucxib | --ucx-ib - initialize a ucx cluster with IB+NVLink
-h | --help - print this text
The cluster config order of precedence is any specification on the
command line (--tcp, --ucx, etc.) if provided, then the value of the
env var CLUSTER_CONFIG_TYPE if set, then the default value of tcp.
"
# CLUSTER_CONFIG_TYPE defaults to the env var value if set, else TCP
CLUSTER_CONFIG_TYPE=${CLUSTER_CONFIG_TYPE:-TCP}
START_SCHEDULER=0
START_WORKERS=0
if (( ${NUMARGS} == 0 )); then
echo "${HELP}"
exit 0
else
if hasArg -h || hasArg --help; then
echo "${HELP}"
exit 0
fi
for a in ${ARGS}; do
if ! (echo " ${VALIDARGS} " | grep -q " ${a} "); then
echo "Invalid option: ${a}"
exit 1
fi
done
fi
if hasArg scheduler; then
START_SCHEDULER=1
fi
if hasArg workers; then
START_WORKERS=1
fi
# Allow the command line to take precedence
if hasArg --tcp; then
CLUSTER_CONFIG_TYPE=TCP
elif hasArg --ucx; then
CLUSTER_CONFIG_TYPE=UCX
elif hasArg --ucxib || hasArg --ucx-ib; then
CLUSTER_CONFIG_TYPE=UCXIB
fi
########################################
#export DASK_LOGGING__DISTRIBUTED="DEBUG"
#ulimit -n 100000
SCHEDULER_LOG=${LOGS_DIR}/scheduler_log.txt
WORKERS_LOG=${LOGS_DIR}/worker-${HOSTNAME}_log.txt
buildTcpArgs () {
export DASK_DISTRIBUTED__COMM__TIMEOUTS__CONNECT="100s"
export DASK_DISTRIBUTED__COMM__TIMEOUTS__TCP="600s"
export DASK_DISTRIBUTED__COMM__RETRY__DELAY__MIN="1s"
export DASK_DISTRIBUTED__COMM__RETRY__DELAY__MAX="60s"
export DASK_DISTRIBUTED__WORKER__MEMORY__Terminate="False"
SCHEDULER_ARGS="--protocol=tcp
--port=$DASK_SCHEDULER_PORT
--scheduler-file $SCHEDULER_FILE
"
WORKER_ARGS="--rmm-pool-size=$WORKER_RMM_POOL_SIZE
--local-directory=/tmp/$LOGNAME
--scheduler-file=$SCHEDULER_FILE
--memory-limit=$DASK_HOST_MEMORY_LIMIT
--device-memory-limit=$DASK_DEVICE_MEMORY_LIMIT
"
}
buildUCXWithInfinibandArgs () {
export DASK_RMM__POOL_SIZE=0.5GB
export DASK_DISTRIBUTED__COMM__UCX__CREATE_CUDA_CONTEXT=True
SCHEDULER_ARGS="--protocol=ucx
--port=$DASK_SCHEDULER_PORT
--interface=$DASK_CUDA_INTERFACE
--scheduler-file $SCHEDULER_FILE
"
WORKER_ARGS="--interface=$DASK_CUDA_INTERFACE
--rmm-pool-size=$WORKER_RMM_POOL_SIZE
--rmm-async
--local-directory=/tmp/$LOGNAME
--scheduler-file=$SCHEDULER_FILE
--memory-limit=$DASK_HOST_MEMORY_LIMIT
--device-memory-limit=$DASK_DEVICE_MEMORY_LIMIT
"
}
buildUCXwithoutInfinibandArgs () {
export UCX_TCP_CM_REUSEADDR=y
export UCX_MAX_RNDV_RAILS=1
export UCX_TCP_TX_SEG_SIZE=8M
export UCX_TCP_RX_SEG_SIZE=8M
export DASK_DISTRIBUTED__COMM__UCX__CUDA_COPY=True
export DASK_DISTRIBUTED__COMM__UCX__TCP=True
export DASK_DISTRIBUTED__COMM__UCX__NVLINK=True
export DASK_DISTRIBUTED__COMM__UCX__INFINIBAND=False
export DASK_DISTRIBUTED__COMM__UCX__RDMACM=False
export DASK_RMM__POOL_SIZE=0.5GB
SCHEDULER_ARGS="--protocol=ucx
--port=$DASK_SCHEDULER_PORT
--scheduler-file $SCHEDULER_FILE
"
WORKER_ARGS="--enable-tcp-over-ucx
--enable-nvlink
--disable-infiniband
--disable-rdmacm
--rmm-pool-size=$WORKER_RMM_POOL_SIZE
--local-directory=/tmp/$LOGNAME
--scheduler-file=$SCHEDULER_FILE
--memory-limit=$DASK_HOST_MEMORY_LIMIT
--device-memory-limit=$DASK_DEVICE_MEMORY_LIMIT
"
}
if [[ "$CLUSTER_CONFIG_TYPE" == "UCX" ]]; then
logger "Using cluster configurtion for UCX"
buildUCXwithoutInfinibandArgs
elif [[ "$CLUSTER_CONFIG_TYPE" == "UCXIB" ]]; then
logger "Using cluster configurtion for UCX with Infiniband"
buildUCXWithInfinibandArgs
else
logger "Using cluster configurtion for TCP"
buildTcpArgs
fi
########################################
scheduler_pid=""
worker_pid=""
num_scheduler_tries=0
startScheduler () {
mkdir -p $(dirname $SCHEDULER_FILE)
echo "RUNNING: \"dask scheduler $SCHEDULER_ARGS\"" > $SCHEDULER_LOG
dask scheduler $SCHEDULER_ARGS >> $SCHEDULER_LOG 2>&1 &
scheduler_pid=$!
}
mkdir -p $LOGS_DIR
logger "Logs written to: $LOGS_DIR"
if [[ $START_SCHEDULER == 1 ]]; then
rm -f $SCHEDULER_FILE $SCHEDULER_LOG $WORKERS_LOG
startScheduler
sleep 6
num_scheduler_tries=$(( num_scheduler_tries+1 ))
# Wait for the scheduler to start first before proceeding, since
# it may require several retries (if prior run left ports open
# that need time to close, etc.)
while [ ! -f "$SCHEDULER_FILE" ]; do
scheduler_alive=$(ps -p $scheduler_pid > /dev/null ; echo $?)
if [[ $scheduler_alive != 0 ]]; then
if [[ $num_scheduler_tries != 30 ]]; then
echo "scheduler failed to start, retry #$num_scheduler_tries"
startScheduler
sleep 6
num_scheduler_tries=$(( num_scheduler_tries+1 ))
else
echo "could not start scheduler, exiting."
exit 1
fi
fi
done
echo "scheduler started."
fi
if [[ $START_WORKERS == 1 ]]; then
rm -f $WORKERS_LOG
while [ ! -f "$SCHEDULER_FILE" ]; do
echo "run-dask-process.sh: $SCHEDULER_FILE not present - waiting to start workers..."
sleep 2
done
echo "RUNNING: \"dask_cuda_worker $WORKER_ARGS\"" > $WORKERS_LOG
dask-cuda-worker $WORKER_ARGS >> $WORKERS_LOG 2>&1 &
worker_pid=$!
echo "worker(s) started."
fi
# This script will not return until the following background processes
# have completed or been killed.
if [[ $worker_pid != "" ]]; then
echo "waiting for worker pid $worker_pid to finish before exiting script..."
wait $worker_pid
fi
if [[ $scheduler_pid != "" ]]; then
echo "waiting for scheduler pid $scheduler_pid to finish before exiting script..."
wait $scheduler_pid
fi
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/multi-gpu-tools/wait_for_workers.py
|
# Copyright (c) 2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
import yaml
from dask.distributed import Client
def initialize_dask_cuda(communication_type):
communication_type = communication_type.lower()
if "ucx" in communication_type:
os.environ["UCX_MAX_RNDV_RAILS"] = "1"
if communication_type == "ucx-ib":
os.environ["UCX_MEMTYPE_REG_WHOLE_ALLOC_TYPES"]="cuda"
os.environ["DASK_RMM__POOL_SIZE"]="0.5GB"
os.environ["DASK_DISTRIBUTED__COMM__UCX__CREATE_CUDA_CONTEXT"]="True"
def wait_for_workers(
num_expected_workers, scheduler_file_path, communication_type, timeout_after=0
):
"""
Waits until num_expected_workers workers are available based on
the workers managed by scheduler_file_path, then returns 0. If
timeout_after is specified, will return 1 if num_expected_workers
workers are not available before the timeout.
"""
# FIXME: use scheduler file path from global environment if none
# supplied in configuration yaml
print("wait_for_workers.py - initializing client...", end="")
sys.stdout.flush()
initialize_dask_cuda(communication_type)
print("done.")
sys.stdout.flush()
ready = False
start_time = time.time()
while not ready:
if timeout_after and ((time.time() - start_time) >= timeout_after):
print(
f"wait_for_workers.py timed out after {timeout_after} seconds before finding {num_expected_workers} workers."
)
sys.stdout.flush()
break
with Client(scheduler_file=scheduler_file_path) as client:
num_workers = len(client.scheduler_info()["workers"])
if num_workers < num_expected_workers:
print(
f"wait_for_workers.py expected {num_expected_workers} but got {num_workers}, waiting..."
)
sys.stdout.flush()
time.sleep(5)
else:
print(f"wait_for_workers.py got {num_workers} workers, done.")
sys.stdout.flush()
ready = True
if ready is False:
return 1
return 0
if __name__ == "__main__":
import argparse
ap = argparse.ArgumentParser()
ap.add_argument(
"--num-expected-workers",
type=int,
required=False,
help="Number of workers to wait for. If not specified, "
"uses the NUM_WORKERS env var if set, otherwise defaults "
"to 16.",
)
ap.add_argument(
"--scheduler-file-path",
type=str,
required=True,
help="Path to shared scheduler file to read.",
)
ap.add_argument(
"--communication-type",
type=str,
default="tcp",
required=False,
help="Initiliaze dask_cuda based on the cluster communication type."
"Supported values are tcp(default), ucx, ucxib, ucx-ib.",
)
ap.add_argument(
"--timeout-after",
type=int,
default=0,
required=False,
help="Number of seconds to wait for workers. "
"Default is 0 which means wait forever.",
)
args = ap.parse_args()
if args.num_expected_workers is None:
args.num_expected_workers = int(os.environ.get("NUM_WORKERS", 16))
exitcode = wait_for_workers(
num_expected_workers=args.num_expected_workers,
scheduler_file_path=args.scheduler_file_path,
communication_type=args.communication_type,
timeout_after=args.timeout_after,
)
sys.exit(exitcode)
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/test-ci-permissions/Dockerfile
|
FROM rapidsai/mambaforge-cuda:latest
RUN useradd -rm -d /home/rapids -s /bin/bash -g conda -u 1000 rapids
USER rapids
WORKDIR /home/rapids
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/wholegraph/.pre-commit-config.yaml
|
## https://pre-commit.com/
#
# Before first use: `pre-commit install`
# To run: `pre-commit run --all-files`
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: check-added-large-files
- id: debug-statements
- id: mixed-line-ending
- id: trailing-whitespace
args: [--markdown-linebreak-ext=md]
- id: end-of-file-fixer
- repo: https://github.com/PyCQA/flake8
rev: 6.0.0
hooks:
- id: flake8
args: ["--config=.flake8"]
exclude: |
(?x)^(
scripts
)
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: v16.0.6
hooks:
- id: clang-format
exclude: |
(?x)^(
scripts/checks/__clang_cuda_additional_intrinsics.h
)
types_or: [c, c++, cuda]
args: ["-fallback-style=none", "-style=file", "-i"]
- repo: local
hooks:
- id: copyright-check
name: copyright-check
entry: python3 ./scripts/checks/copyright.py --update-current-year
language: python
pass_filenames: false
additional_dependencies: [gitpython]
- repo: https://github.com/rapidsai/dependency-file-generator
rev: v1.5.1
hooks:
- id: rapids-dependency-file-generator
args: ["--clean"]
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/wholegraph/.flake8
|
# Copyright (c) 2023, NVIDIA CORPORATION.
[flake8]
filename = *.py, *.pyx, *.pxd, *.pxi
exclude = __init__.py, *.egg, build, docs, .git
force-check = True
max-line-length = 120
ignore =
# line break before binary operator
W503,
# whitespace before :
E203
per-file-ignores =
# Rules ignored only in Cython:
# E211: whitespace before '(' (used in multi-line imports)
# E225: Missing whitespace around operators (breaks cython casting syntax like <int>)
# E226: Missing whitespace around arithmetic operators (breaks cython pointer syntax like int*)
# E227: Missing whitespace around bitwise or shift operator (Can also break casting syntax)
# E275: Missing whitespace after keyword (Doesn't work with Cython except?)
# E402: module level import not at top of file (check works for Python, not Cython)
# E999: invalid syntax (works for Python, not Cython)
# W504: line break after binary operator (breaks lines that end with a pointer)
*.pyx: E211, E225, E226, E227, E275, E402, E999, W504
*.pxd: E211, E225, E226, E227, E275, E402, E999, W504
*.pxi: E211, E225, E226, E227, E275, E402, E999, W504
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/wholegraph/fetch_rapids.cmake
|
# =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/CUGRAPH_RAPIDS.cmake)
file(DOWNLOAD https://raw.githubusercontent.com/rapidsai/rapids-cmake/branch-23.12/RAPIDS.cmake
${CMAKE_CURRENT_BINARY_DIR}/CUGRAPH_RAPIDS.cmake
)
endif()
include(${CMAKE_CURRENT_BINARY_DIR}/CUGRAPH_RAPIDS.cmake)
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/wholegraph/README.md
|
# WholeGraph
WholeGraph is developed to help train large-scale Graph Neural Networks (GNNs).
WholeGraph provides an underlying storage structure called WholeMemory.
WholeMemory is a tensor-like storage that provides multi-GPU support.
It is optimized for NVLink systems like DGX A100 servers.
By working together with cuGraph, cuGraph-Ops, cuGraph-DGL, cuGraph-PyG,
and upstream DGL and PyG, it will be easy to build GNN applications.
## Table of Contents
- Installation
- [Getting WholeGraph Packages](./docs/wholegraph/source/installation/getting_wholegraph.md)
- [Building from Source](./docs/wholegraph/source/installation/source_build.md)
- General
- [WholeGraph Introduction](./docs/wholegraph/source/basics/wholegraph_intro.md)
- Packages
- libwholegraph (C/CUDA)
- pylibwholegraph
- API Docs
- Python
- C
- Reference
- [RAPIDS](https://rapids.ai)
- [cuGraph](https://github.com/rapidsai/cugraph)
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/wholegraph/CHANGELOG.md
|
# wholegraph 23.10.00 (11 Oct 2023)
## 🐛 Bug Fixes
- Update all versions to 23.10 ([#71](https://github.com/rapidsai/wholegraph/pull/71)) [@raydouglass](https://github.com/raydouglass)
- Use `conda mambabuild` not `mamba mambabuild` ([#67](https://github.com/rapidsai/wholegraph/pull/67)) [@bdice](https://github.com/bdice)
## 🛠️ Improvements
- Update image names ([#70](https://github.com/rapidsai/wholegraph/pull/70)) [@AyodeAwe](https://github.com/AyodeAwe)
- Update to clang 16.0.6. ([#68](https://github.com/rapidsai/wholegraph/pull/68)) [@bdice](https://github.com/bdice)
- Simplify wheel build scripts and allow alphas of RAPIDS dependencies ([#66](https://github.com/rapidsai/wholegraph/pull/66)) [@divyegala](https://github.com/divyegala)
- Fix docs build and slightly optimize ([#63](https://github.com/rapidsai/wholegraph/pull/63)) [@dongxuy04](https://github.com/dongxuy04)
- Use `copy-pr-bot` ([#60](https://github.com/rapidsai/wholegraph/pull/60)) [@ajschmidt8](https://github.com/ajschmidt8)
- PR: Use top-k from RAFT ([#53](https://github.com/rapidsai/wholegraph/pull/53)) [@chuangz0](https://github.com/chuangz0)
# wholegraph 23.08.00 (9 Aug 2023)
## 🚨 Breaking Changes
- Refactoring into 23.08 ([#24](https://github.com/rapidsai/wholegraph/pull/24)) [@BradReesWork](https://github.com/BradReesWork)
## 🛠️ Improvements
- Correct syntax in GHA workflow ([#46](https://github.com/rapidsai/wholegraph/pull/46)) [@tingyu66](https://github.com/tingyu66)
- Refactoring into 23.08 ([#24](https://github.com/rapidsai/wholegraph/pull/24)) [@BradReesWork](https://github.com/BradReesWork)
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/wholegraph/build.sh
|
#!/bin/bash
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# wholegraph build script
# This script is used to build component(s) in this repo from
# source, and can be called with various options to customize the
# build as needed (see the help output for details)
# Abort script on first error
set -e
NUMARGS=$#
ARGS=$*
# NOTE: ensure all dir changes are relative to the location of this
# script, and that this script resides in the repo dir!
REPODIR=$(cd $(dirname $0); pwd)
VALIDARGS="
clean
uninstall
libwholegraph
pylibwholegraph
tests
benchmarks
-v
-g
-n
--allgpuarch
--native
--cmake-args
--compile-cmd
--clean
-h
--help
"
HELP="$0 [<target> ...] [<flag> ...]
where <target> is:
clean - remove all existing build artifacts and configuration (start over).
uninstall - uninstall libwholegraph and pylibwholegraph from a prior build/install (see also -n)
libwholegraph - build the libwholegraph C++ library.
pylibwholegraph - build the pylibwholegraph Python package.
tests - build the C++ (OPG) tests.
benchmarks - build benchmarks.
and <flag> is:
-v - verbose build mode
-g - build for debug
-n - no install step
--allgpuarch - build for all supported GPU architectures
--cmake-args=\\\"<args>\\\" - add arbitrary CMake arguments to any cmake call
--compile-cmd - only output compile commands (invoke CMake without build)
--clean - clean an individual target (note: to do a complete rebuild, use the clean target described above)
-h | --h[elp] - print this text
default action (no args) is to build and install 'libwholegraph' and then 'pylibwholegraph'
libwholegraph build dir is: ${LIBWHOLEGRAPH_BUILD_DIR}
Set env var LIBWHOLEGRAPH_BUILD_DIR to override libwholegraph build dir.
"
LIBWHOLEGRAPH_BUILD_DIR=${LIBWHOLEGRAPH_BUILD_DIR:=${REPODIR}/cpp/build}
PYLIBWHOLEGRAPH_BUILD_DIRS="${REPODIR}/python/pylibwholegraph/build"
PYLIBWHOLEGRAPH_BUILD_DIRS+=" ${REPODIR}/python/pylibwholegraph/_skbuild"
PYLIBWHOLEGRAPH_BUILD_DIRS+=" ${REPODIR}/python/pylibwholegraph/pylibwholegraph/binding/include"
PYLIBWHOLEGRAPH_BUILD_DIRS+=" ${REPODIR}/python/pylibwholegraph/pylibwholegraph/binding/lib"
# All python build dirs using _skbuild are handled by cleanPythonDir, but
# adding them here for completeness
BUILD_DIRS="${LIBWHOLEGRAPH_BUILD_DIR}
${PYLIBWHOLEGRAPH_BUILD_DIRS}
"
# Set defaults for vars modified by flags to this script
VERBOSE_FLAG=""
CMAKE_VERBOSE_OPTION=""
BUILD_TYPE=Release
BUILD_ALL_GPU_ARCH=0
INSTALL_TARGET="--target install"
PYTHON=${PYTHON:-python}
# Set defaults for vars that may not have been defined externally
# FIXME: if INSTALL_PREFIX is not set, check PREFIX, then check
# CONDA_PREFIX, but there is no fallback from there!
INSTALL_PREFIX=${INSTALL_PREFIX:=${PREFIX:=${CONDA_PREFIX}}}
PARALLEL_LEVEL=${PARALLEL_LEVEL:="`nproc`"}
BUILD_ABI=${BUILD_ABI:=ON}
export CMAKE_GENERATOR="${CMAKE_GENERATOR:=Ninja}"
function hasArg {
(( ${NUMARGS} != 0 )) && (echo " ${ARGS} " | grep -q " $1 ")
}
function buildAll {
(( ${NUMARGS} == 0 )) || !(echo " ${ARGS} " | grep -q " [^-][a-zA-Z0-9\_\-]\+ ")
}
function cleanPythonDir {
pushd $1 > /dev/null
rm -rf dist *.egg-info
find . -type d -name __pycache__ -print | xargs rm -rf
find . -type d -name _skbuild -print | xargs rm -rf
find . -type d -name _external_repositories -print | xargs rm -rf
popd > /dev/null
}
function cmakeArgs {
# Check for multiple cmake args options
if [[ $(echo $ARGS | { grep -Eo "\-\-cmake\-args" || true; } | wc -l ) -gt 1 ]]; then
echo "Multiple --cmake-args options were provided, please provide only one: ${ARGS}"
exit 1
fi
# Check for cmake args option
if [[ -n $(echo $ARGS | { grep -E "\-\-cmake\-args" || true; } ) ]]; then
# There are possible weird edge cases that may cause this regex filter to output nothing and fail silently.
# The "|| true" catches those cases so the script falls back on the invalid-option error instead.
EXTRA_CMAKE_ARGS=$(echo $ARGS | { grep -Eo "\-\-cmake\-args=\".+\"" || true; })
if [[ -n ${EXTRA_CMAKE_ARGS} ]]; then
# Remove the full EXTRA_CMAKE_ARGS argument from list of args so that it passes validArgs function
ARGS=${ARGS//$EXTRA_CMAKE_ARGS/}
# Filter the full argument down to just the extra string that will be added to cmake call
EXTRA_CMAKE_ARGS=$(echo $EXTRA_CMAKE_ARGS | grep -Eo "\".+\"" | sed -e 's/^"//' -e 's/"$//')
fi
fi
}
if hasArg -h || hasArg --h || hasArg --help; then
echo "${HELP}"
exit 0
fi
# Check for valid usage
if (( ${NUMARGS} != 0 )); then
# Check for cmake args
cmakeArgs
for a in ${ARGS}; do
if ! (echo "${VALIDARGS}" | grep -q "^[[:blank:]]*${a}$"); then
echo "Invalid option: ${a}"
exit 1
fi
done
fi
# Process flags
if hasArg -v; then
VERBOSE_FLAG=-v
CMAKE_VERBOSE_OPTION="--log-level=VERBOSE"
fi
if hasArg -g; then
BUILD_TYPE=Debug
fi
if hasArg -n; then
INSTALL_TARGET=""
fi
if hasArg --allgpuarch; then
BUILD_ALL_GPU_ARCH=1
fi
# If clean or uninstall targets given, run them prior to any other steps
if hasArg uninstall; then
if [[ "$INSTALL_PREFIX" != "" ]]; then
rm -rf ${INSTALL_PREFIX}/include/wholememory
rm -f ${INSTALL_PREFIX}/lib/libwholegraph.so
rm -rf ${INSTALL_PREFIX}/lib/cmake/wholegraph
fi
# This may be redundant given the above, but can also be used in case
# there are other installed files outside of the locations above.
if [ -e ${LIBWHOLEGRAPH_BUILD_DIR}/install_manifest.txt ]; then
xargs rm -f < ${LIBWHOLEGRAPH_BUILD_DIR}/install_manifest.txt > /dev/null 2>&1
fi
# uninstall libwholegraph and pylibwholegraph installed from a prior "setup.py install"
# FIXME: if multiple versions of these packages are installed, this only
# removes the latest one and leaves the others installed. build.sh uninstall
# can be run multiple times to remove all of them, but that is not obvious.
pip uninstall -y libwholegraph pylibwholegraph
fi
# If clean given, run it prior to any other steps
if hasArg clean; then
echo "- Cleaning"
# Ignore errors for clean since missing files, etc. are not failures
set +e
# remove artifacts generated inplace
# FIXME: ideally the "setup.py clean" command would be used for this, but
# currently running any setup.py command has side effects (eg. cloning repos).
# (cd ${REPODIR}/python && python setup.py clean)
if [[ -d ${REPODIR}/python/pylibwholegraph ]]; then
cleanPythonDir ${REPODIR}/python/pylibwholegraph
fi
# If the dirs to clean are mounted dirs in a container, the
# contents should be removed but the mounted dirs will remain.
# The find removes all contents but leaves the dirs, the rmdir
# attempts to remove the dirs but can fail safely.
for bd in ${BUILD_DIRS}; do
if [ -d ${bd} ]; then
find ${bd} -mindepth 1 -delete
rmdir ${bd} || true
fi
done
# remove any left-over cpython shared libraries
find ${REPODIR}/python/pylibwholegraph -name "*.cpython*.so" -type f -delete
fi
if hasArg tests; then
BUILD_TESTS=ON
else
BUILD_TESTS=OFF
fi
if hasArg benchmarks; then
BUILD_BENCHMARKS=ON
else
BUILD_BENCHMARKS=OFF
fi
################################################################################
# libwholegraph
if buildAll || hasArg libwholegraph; then
# set values based on flags
if (( ${BUILD_ALL_GPU_ARCH} == 0 )); then
WHOLEGRAPH_CMAKE_CUDA_ARCHITECTURES="${WHOLEGRAPH_CMAKE_CUDA_ARCHITECTURES:=NATIVE}"
echo "Building for the architecture of the GPU in the system..."
else
WHOLEGRAPH_CMAKE_CUDA_ARCHITECTURES="70-real;75-real;80-real;86-real;90"
echo "Building for *ALL* supported GPU architectures..."
fi
cmake -S ${REPODIR}/cpp -B ${LIBWHOLEGRAPH_BUILD_DIR} \
-DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \
-DCMAKE_CUDA_ARCHITECTURES=${WHOLEGRAPH_CMAKE_CUDA_ARCHITECTURES} \
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
-DCMAKE_PREFIX_PATH=${INSTALL_PREFIX} \
-DCMAKE_MESSAGE_LOG_LEVEL=VERBOSE \
-DBUILD_TESTS=${BUILD_TESTS} \
${EXTRA_CMAKE_ARGS}
cd ${LIBWHOLEGRAPH_BUILD_DIR}
if ! hasArg --compile-cmd; then
## Build and (optionally) install library + tests
cmake --build . -j${PARALLEL_LEVEL} ${INSTALL_TARGET} ${VERBOSE_FLAG}
fi
fi
################################################################################
# pylibwholegraph
if buildAll || hasArg pylibwholegraph; then
if hasArg --clean; then
cleanPythonDir ${REPODIR}/python/pylibwholegraph
fi
# setup.py and cmake reference an env var LIBWHOLEGRAPH_DIR to find the
# libwholegraph package (cmake).
# If not set by the user, set it to LIBWHOLEGRAPH_BUILD_DIR
LIBWHOLEGRAPH_DIR=${LIBWHOLEGRAPH_DIR:=${LIBWHOLEGRAPH_BUILD_DIR}}
if ! hasArg --compile-cmd; then
cd ${REPODIR}/python/pylibwholegraph
env LIBWHOLEGRAPH_DIR=${LIBWHOLEGRAPH_DIR} \
${PYTHON} setup.py build_ext --inplace \
--build-type=${BUILD_TYPE} \
${EXTRA_CMAKE_ARGS}
if ! hasArg -n; then
env LIBWHOLEGRAPH_DIR=${LIBWHOLEGRAPH_DIR} \
${PYTHON} setup.py install \
--build-type=${BUILD_TYPE} \
${EXTRA_CMAKE_ARGS}
fi
else
# just invoke cmake without going through scikit-build
env LIBWHOLEGRAPH_DIR=${LIBWHOLEGRAPH_DIR} \
cmake -S ${REPODIR}/python/pylibwholegraph -B ${REPODIR}/python/pylibwholegraph/_skbuild/build \
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
${EXTRA_CMAKE_ARGS}
fi
fi
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/wholegraph/dependencies.yaml
|
# Dependency list for https://github.com/rapidsai/dependency-file-generator
files:
all:
output: [conda]
matrix:
cuda: ["11.8", "12.0"]
arch: [x86_64]
includes:
- checks
- build
- cudatoolkit
- py_version
- run
- test_python
- docs
- clang_tools
test_cpp:
output: none
includes:
- cudatoolkit
test_python:
output: none
includes:
- cudatoolkit
- py_version
- test_python
checks:
output: none
includes:
- checks
- py_version
docs:
output: none
includes:
- cudatoolkit
- docs
- py_version
- pytorch_cpu
clang_tidy:
output: none
includes:
- build
- cudatoolkit
- py_version
- run
- pytorch_cpu
- clang_tools
py_build_pylibwholegraph:
output: pyproject
pyproject_dir: python/pylibwholegraph
extras:
table: build-system
includes:
- python_build_wheel
channels:
- rapidsai
- rapidsai-nightly
- pytorch
- conda-forge
- nvidia
dependencies:
build:
common:
- output_types: [conda, requirements]
packages:
- ninja
- output_types: conda
packages:
- c-compiler
- cmake>=3.26.4
- cudnn=8.4
- cxx-compiler
- cython
- doxygen=1.8.20
- libraft-headers==23.12.*
- librmm==23.12.*
- nanobind>=0.2.0
- nccl
- scikit-build
specific:
- output_types: conda
matrices:
- matrix:
arch: x86_64
packages:
- gcc_linux-64=11.*
- sysroot_linux-64=2.17
- matrix:
arch: aarch64
packages:
- gcc_linux-aarch64=11.*
- sysroot_linux-aarch64=2.17
- output_types: conda
matrices:
- matrix:
arch: x86_64
cuda: "11.8"
packages:
- nvcc_linux-64=11.8
- matrix:
arch: x86_64
cuda: "11.5"
packages:
- nvcc_linux-64=11.5
- matrix:
arch: aarch64
cuda: "11.8"
packages:
- nvcc_linux-aarch64=11.8
- matrix:
cuda: "12.0"
packages:
- cuda-version=12.0
- cuda-nvcc
cudatoolkit:
specific:
- output_types: conda
matrices:
- matrix:
cuda: "11.2"
packages:
- cudatoolkit=11.2
- cuda-nvtx=11.4 # oldest available
- matrix:
cuda: "11.4"
packages:
- cudatoolkit=11.4
- cuda-nvtx=11.4 # oldest available
- matrix:
cuda: "11.5"
packages:
- cudatoolkit=11.5
- cuda-nvtx=11.5
- matrix:
cuda: "11.8"
packages:
- cudatoolkit=11.8
- cuda-nvtx=11.8
- matrix:
cuda: "12.0"
packages:
- cuda-version=12.0
- cuda-cudart-dev
- cuda-nvtx
checks:
common:
- output_types: [conda, requirements]
packages:
- pre-commit
py_version:
specific:
- output_types: conda
matrices:
- matrix:
py: "3.9"
packages:
- python=3.9
- matrix:
py: "3.10"
packages:
- python=3.10
- matrix:
packages:
- python>=3.9,<3.11
run:
common:
- output_types: [conda, requirements]
packages: []
test_cpp:
common:
- output_types: [conda, requirements]
packages:
- nccl
test_python:
common:
- output_types: [conda, requirements]
packages:
- c-compiler
- cxx-compiler
- ninja
- numpy>=1.17
- pytest
- pytest-forked
- pytest-xdist
- nccl
specific:
- output_types: [conda, requirements]
matrices:
- matrix:
arch: x86_64
cuda: "11.2"
packages:
# It's impossible to create this environment with pyg because
# the pyg package has an explicit dependency on cudatoolkit=11.*
# and there simply isn't any build for cudatoolkit=11.2.
# Note that the packages for CUDA 11.2/11.4 environments are the
# ones from conda-forge (built only against CUDA 11.2) and
# *not* the pytorch channel. For CUDA 11.5/11.8 environments,
# we're using packages from the pytorch channel.
- pytorch=1.11.0=*cuda112*
- matrix:
arch: x86_64
cuda: "11.4"
packages:
# It's impossible to create this environment with pyg because
# the pyg package has an explicit dependency on cudatoolkit=11.*
# and there simply isn't any build for cudatoolkit=11.4.
# There is also no build of pytorch for CUDA 11.4 but the 11.2
# build should work in practice and doesn't require any
# cudatoolkit version explicitly.
- pytorch=1.11.0=*cuda112*
- matrix:
arch: x86_64
cuda: "11.5"
packages:
# This environment "just works" for both pytorch and pyg, but only
# with older pytorch versions since the newest ones aren't built
# against 11.5 anymore.
- pytorch=1.11.0=*cuda11.5*
- matrix:
arch: x86_64
cuda: "11.8"
packages:
# Since CUDA 11.6, pytorch switched to using the `cuda-*` packages
# as dependencies for its official conda package. These are only
# available from the nvidia channel at the moment, and this will
# probably continue once conda-forge has added these new packages
# since conda-forge will only add this from CUDA 12.0 onwards,
# at least in the near-term.
# Our own RAPIDS packages are dependent on the `cudatoolkit`
# package from conda-forge though, which means that we have to
# install both `cudatoolkit` version 11.8 and the `cuda-*` packages
# version 11.8 here.
# Starting with Pytorch 2.0, this works well though, since Pytorch
# has largely reduced its dependencies, so only part of the CUDA
# toolkit needs to be duplicated this way.
# If conda-forge supports the new cuda-* packages for CUDA 11.8
# at some point, then we can fully support/properly specify
# this environment.
- pytorch=2.0.0
- pytorch-cuda=11.8
- matrix:
arch: aarch64
cuda: "11.8"
packages:
- pytorch=2.0.0
- pytorch-cuda=11.8
- matrix:
packages:
docs:
common:
- output_types: [conda, requirements]
packages:
- breathe
- doxygen=1.8.20
- graphviz
- ipython
- ipykernel
- nbsphinx
- numpydoc
- pydata-sphinx-theme
- recommonmark
- sphinx<6
- sphinx-copybutton
- sphinx-markdown-tables
- sphinxcontrib-websupport
pytorch_cpu:
common:
- output_types: [conda, requirements]
packages:
- pytorch=2.0.0
- cpuonly
clang_tools:
common:
- output_types: [conda, requirements]
packages:
- clangxx=16.0.0
- clang-tools=16.0.0
- gitpython
python_build_wheel:
common:
- output_types: [pyproject]
packages:
- cmake>=3.26.4
- cython>=0.29,<0.30
- ninja
- setuptools
- scikit-build>=0.13.1
- wheel
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/wholegraph/Dockerfile
|
FROM nvcr.io/nvidia/pytorch:22.10-py3
RUN apt update && DEBIAN_FRONTEND=noninteractive apt install -y lsb-core software-properties-common wget libspdlog-dev
# Remove old cmake so it can be updated
RUN conda remove --force -y cmake
RUN rm -rf /usr/local/bin/cmake && rm -rf /usr/local/lib/cmake && rm -rf /usr/lib/cmake
RUN apt-key adv --fetch-keys https://apt.kitware.com/keys/kitware-archive-latest.asc && \
export LSB_CODENAME=$(lsb_release -cs) && \
apt-add-repository -y "deb https://apt.kitware.com/ubuntu/ ${LSB_CODENAME} main" && \
apt update && apt install -y cmake
# update py for pytest
RUN pip3 install -U py
RUN pip3 install Cython setuputils3 scikit-build nanobind pytest-forked pytest
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/wholegraph/LICENSE
|
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2023 NVIDIA CORPORATION
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/wholegraph/VERSION
|
23.12.00
| 0 |
rapidsai_public_repos/wholegraph/python
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pyproject.toml
|
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
[build-system]
requires = [
"cmake>=3.26.4",
"cython>=0.29,<0.30",
"ninja",
"scikit-build>=0.13.1",
"setuptools",
"wheel",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
[project]
name = "pylibwholegraph"
dynamic = ["version"]
description = "pylibwholegraph - GPU Graph Storage for GNN feature and graph structure"
authors = [
{ name = "NVIDIA Corporation" },
]
license = { text = "Apache 2.0" }
requires-python = ">=3.6"
classifiers = [
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
]
[tool.setuptools]
license-files = ["LICENSE"]
[tool.setuptools.dynamic]
version = {file = "pylibwholegraph/VERSION"}
| 0 |
rapidsai_public_repos/wholegraph/python
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/CMakeLists.txt
|
#=============================================================================
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
cmake_minimum_required(VERSION 3.26.4 FATAL_ERROR)
set(RAPIDS_VERSION "23.12")
set(WHOLEGRAPH_VERSION "${RAPIDS_VERSION}.00")
include(FetchContent)
FetchContent_Declare(
rapids-cmake
GIT_REPOSITORY https://github.com/rapidsai/rapids-cmake.git
GIT_TAG origin/branch-${RAPIDS_VERSION}
)
FetchContent_MakeAvailable(rapids-cmake)
include(rapids-cmake)
include(rapids-cuda)
include(rapids-cpm)
if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
set(CMAKE_CUDA_ARCHITECTURES 70-real 80-real 86)
endif ()
rapids_cuda_init_architectures(PYLIBWHOLEGRAPH)
project(PYLIBWHOLEGRAPH VERSION ${WHOLEGRAPH_VERSION} LANGUAGES C CXX CUDA)
##############################################################################
# - User Options ------------------------------------------------------------
option(DETECT_CONDA_ENV "Enable detection of conda environment for dependencies" ON)
option(WHOLEGRAPH_BUILD_WHEELS "Whether we're building a wheel for pypi" OFF)
##############################################################################
# - Base Rapids Options -----------------------------------------------------
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${CMAKE_COMMAND} -E time")
set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK "${CMAKE_COMMAND} -E time")
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
# pywholegraph Options
function(show_python_envs position)
message(STATUS "[PYTHON INFO] POSITION: ${position} Python_ROOT_DIR=${Python_ROOT_DIR}")
message(STATUS " ${position} Python_INCLUDE_DIR=${Python_INCLUDE_DIR} Python_LIBRARY=${Python_LIBRARY} Python_EXECUTABLE=${Python_EXECUTABLE}")
message(STATUS " ${position} PYTHON_INCLUDE_DIR=${PYTHON_INCLUDE_DIR} PYTHON_LIBRARY=${PYTHON_LIBRARY} PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}")
message(STATUS " ${position} Python_INCLUDE_DIRS=${Python_INCLUDE_DIRS} Python_LIBRARIES=${Python_LIBRARIES}")
endfunction(show_python_envs)
set(SKBUILD ON)
set(Python_USE_STATIC_LIBS TRUE)
if (NOT DEFINED BUILD_PYTHON_EXE)
if (DEFINED Python_EXECUTABLE)
set(BUILD_PYTHON_EXE ${Python_EXECUTABLE})
elseif(DEFINED PYTHON_EXECUTABLE)
set(BUILD_PYTHON_EXE ${PYTHON_EXECUTABLE})
elseif(DEFINED Python_ROOT_DIR)
set(BUILD_PYTHON_EXE "${Python_ROOT_DIR}/bin/python3")
else()
execute_process(COMMAND "which" "python3"
OUTPUT_VARIABLE BUILD_PYTHON_EXE OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_QUIET)
endif()
endif()
execute_process(COMMAND ${BUILD_PYTHON_EXE} -c "import sys; print(sys.exec_prefix)"
OUTPUT_VARIABLE Python_ROOT_DIR OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_QUIET)
execute_process(COMMAND ${BUILD_PYTHON_EXE} -c "import sys; print(sys.executable)"
OUTPUT_VARIABLE Python_EXECUTABLE OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_QUIET)
execute_process(COMMAND ${BUILD_PYTHON_EXE} -c "import sysconfig; print(sysconfig.get_path('include'))"
OUTPUT_VARIABLE Python_INCLUDE_DIR OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_QUIET)
execute_process(COMMAND ${BUILD_PYTHON_EXE} -c "import sysconfig; print(sysconfig.get_config_var('LIBDEST'))"
OUTPUT_VARIABLE Python_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_QUIET)
set(PYTHON_EXECUTABLE ${Python_EXECUTABLE})
set(PYTHON_INCLUDE_DIR ${Python_INCLUDE_DIR})
set(PYTHON_LIBRARY ${Python_LIBRARY})
find_package(Python 3 COMPONENTS Interpreter Development REQUIRED)
show_python_envs("python env")
# default build type
rapids_cmake_build_type(Release)
message("CMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX}")
if(DETECT_CONDA_ENV)
rapids_cmake_support_conda_env( conda_env MODIFY_PREFIX_PATH )
if (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT AND DEFINED ENV{CONDA_PREFIX})
message(STATUS "No CMAKE_INSTALL_PREFIX argument detected, setting to: $ENV{CONDA_PREFIX}")
set(CMAKE_INSTALL_PREFIX "$ENV{CONDA_PREFIX}")
endif()
endif()
rapids_cpm_init()
include(../../cpp/cmake/thirdparty/get_raft.cmake)
##############################################################################
# - Dependencies ------------------------------------------------------------
#find_package(PythonInterp 3 REQUIRED)
#include(${CMAKE_CURRENT_LIST_DIR}/../cmake/thirdparty/nanobind.cmake)
# use <package>_ROOT here to take precedence over any other package
set(wholegraph_ROOT "$ENV{LIBWHOLEGRAPH_DIR}")
find_package(wholegraph "${RAPIDS_VERSION}.0" EXACT)
message("WholeGraph")
if (WHOLEGRAPH_FOUND)
message(STATUS "PYLIBWHOLEGRAPH: using pre-built wholegraph C++ package")
elseif(WHOLEGRAPH_BUILD_WHEELS)
# statically link dependencies if building wheels
message(STATUS "PYLIBWHOLEGRAPH: build wheels")
add_subdirectory(../../cpp/ libwholegraph EXCLUDE_FROM_ALL)
else()
message(FATAL_ERROR "PYLIBWHOLEGRAPH: could not find wholegraph package in "
"cmake prefix ${CMAKE_PREFIX_PATH} or user dir $ENV{LIBWHOLEGRAPH_DIR}")
endif()
execute_process(
COMMAND "${Python_EXECUTABLE}" -c "import os; import skbuild; print(os.path.join(os.path.dirname(skbuild.__file__), 'resources/cmake'))"
OUTPUT_VARIABLE SKBUILD_CMAKE_MODULE_PATH OUTPUT_STRIP_TRAILING_WHITESPACE
)
list(APPEND CMAKE_MODULE_PATH "${SKBUILD_CMAKE_MODULE_PATH}")
include(rapids-cython)
rapids_cython_init()
##############################################################################
# - Display options ----------------------------------------------------------
message(VERBOSE "PYLIBWHOLEGRAPH: Enable detection of conda environment for dependencies: ${DETECT_CONDA_ENV}")
##############################################################################
# - Compiler options ---------------------------------------------------------
# this is needed for clang-tidy runs
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
list(APPEND CXX_DEFINITIONS WHOLEGRAPH_VERSION=${WHOLEGRAPH_VERSION})
message(STATUS "PYLIBWHOLEGRAPH: DEFAULT_CXX_FLAGS='${DEFAULT_CXX_FLAGS}'")
message(STATUS "PYLIBWHOLEGRAPH: CXX_FLAGS='${CXX_FLAGS}'")
message(STATUS "PYLIBWHOLEGRAPH: CXX_DEFINITIONS='${CXX_DEFINITIONS}'")
##############################################################################
# - Variables ----------------------------------------------------------------
set(WHOLEGRAPH_CPP_TARGET "wholegraph::wholegraph" CACHE STRING "libwholegraph target name")
add_subdirectory(pylibwholegraph/binding)
# when used without setup.py, command is like:
# export LIBWHOLEGRAPH_DIR=`pwd`/../../cpp/build/install
# cmake ../ -DSKBUILD=ON
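# A slightly fuller sketch of the same standalone flow (hedged: the build
# directory name "_standalone_build" is an illustrative assumption):
#   export LIBWHOLEGRAPH_DIR=`pwd`/../../cpp/build/install
#   mkdir -p _standalone_build && cd _standalone_build
#   cmake ../ -DSKBUILD=ON
#   cmake --build . --parallel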
| 0 |
rapidsai_public_repos/wholegraph/python
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/setup.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import find_packages, Command
from skbuild import setup
class CleanCommand(Command):
"""Custom clean command to tidy up the project root."""
user_options = [
("all", None, None),
]
def initialize_options(self):
self.all = None
def finalize_options(self):
pass
def run(self):
setupFileDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(setupFileDir)
os.system("rm -rf build")
os.system("rm -rf _skbuild")
os.system("rm -rf dist")
os.system('find . -name "__pycache__" -type d -exec rm -rf {} +')
os.system("rm -rf *.egg-info")
os.system('find . -name "*.cpython*.so" -type f -delete')
cmdclass = dict()
cmdclass["clean"] = CleanCommand
setup(
packages=find_packages(
include=[
"pylibwholegraph",
"pylibwholegraph.*",
]
),
package_data={
"pylibwholegraph": ["VERSION", "torch_cpp_ext/*.cpp",
"torch_cpp_ext/*.h"],
},
include_package_data=True,
cmdclass=cmdclass,
zip_safe=False,
)
| 0 |
rapidsai_public_repos/wholegraph/python
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/LICENSE
|
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2023 NVIDIA CORPORATION
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/_version.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib.resources
__version__ = (
importlib.resources.files("pylibwholegraph").joinpath("VERSION").read_text().strip()
)
__git_commit__ = ""
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/__init__.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pylibwholegraph._version import __git_commit__, __version__
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/VERSION
|
23.12.00
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch_cpp_ext/torch_env_func_ptrs.h
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda_runtime_api.h>
#include <wholememory/env_func_ptrs.h>
namespace wholegraph_torch {
/**
* @brief : PyTorch environment functions for memory allocation.
*
 * @return : pointers to the environment functions for the current CUDA device
*/
wholememory_env_func_t* get_pytorch_env_func();
cudaStream_t get_current_stream();
void* create_output_context();
void destroy_output_context(void* output_context);
} // namespace wholegraph_torch
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch_cpp_ext/torch_utils.h
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <torch/script.h>
#include <wholememory/env_func_ptrs.h>
#include <wholememory/wholememory_tensor.h>
namespace wholegraph_torch {
c10::ScalarType get_c10_scalar_type(wholememory_dtype_t wm_dtype);
wholememory_dtype_t get_wholememory_dtype(torch::ScalarType ts_dtype);
struct pytorch_memory_context {
torch::Tensor tensor;
torch::TensorOptions options;
wholememory_tensor_description_t desc;
};
void set_need_grad(pytorch_memory_context* memory_context, bool require_grad);
void create_torch_memory_context_func(void** memory_context, void* /*global_context*/);
void destroy_torch_memory_context_func(void* memory_context, void* /*global_context*/);
void* torch_common_malloc_func(wholememory_tensor_description_t* tensor_description,
void* memory_context,
bool gpu_memory = true,
bool pinned = false);
void torch_common_free_func(void* memory_context, void* /*global_context*/);
void get_tensor_desc_from_torch_tensor(wholememory_tensor_description_t* tensor_desc,
const torch::Tensor& t);
void get_array_desc_from_torch_tensor(wholememory_array_description_t* array_desc,
const torch::Tensor& t);
void get_matrix_desc_from_torch_tensor(wholememory_matrix_description_t* matrix_desc,
const torch::Tensor& t);
class wrapped_torch_tensor {
public:
explicit wrapped_torch_tensor(const torch::Tensor& torch_tensor);
~wrapped_torch_tensor();
wholememory_tensor_t get_wholememory_tensor() const;
void unsqueeze(int dim = -1);
void squeeze(int dim = -1);
private:
wholememory_tensor_t wholememory_tensor_ = nullptr;
};
void torch_tensor_check_dim_in_range(const torch::Tensor& t,
int min_dim,
int max_dim,
const char* info);
inline void torch_tensor_check_dim(const torch::Tensor& t, int dim, const char* info)
{
return torch_tensor_check_dim_in_range(t, dim, dim, info);
}
void torch_tensor_check_dtype(const torch::Tensor& t, torch::Dtype dtype, const char* info);
void torch_tensor_check_dtype_is_int(const torch::Tensor& t, const char* info);
// int32 or int64
void torch_tensor_check_dtype_is_index(const torch::Tensor& t, const char* info);
void torch_tensor_check_dtype_is_float(const torch::Tensor& t, const char* info);
} // namespace wholegraph_torch
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch_cpp_ext/wholegraph_torch_ext.cpp
|
#include <torch/extension.h>
#include <torch/script.h>
#include "torch_env_func_ptrs.h"
#include "torch_utils.h"
int64_t wrapped_get_wholegraph_env_fns()
{
return reinterpret_cast<int64_t>(static_cast<void*>(wholegraph_torch::get_pytorch_env_func()));
}
int64_t wrapped_get_stream()
{
return reinterpret_cast<int64_t>(static_cast<void*>(wholegraph_torch::get_current_stream()));
}
int64_t wrapped_create_output_context()
{
return reinterpret_cast<int64_t>(wholegraph_torch::create_output_context());
}
void wrapped_destroy_output_context(int64_t output_context)
{
wholegraph_torch::destroy_output_context(reinterpret_cast<void*>(output_context));
}
torch::Tensor get_torch_tensor_from_output_context(int64_t output_context)
{
auto* torch_output_context =
static_cast<wholegraph_torch::pytorch_memory_context*>(reinterpret_cast<void*>(output_context));
return torch_output_context->tensor;
}
PYBIND11_MODULE(pylibwholegraph_torch_ext, m)
{
m.def("get_wholegraph_env_fns",
&wrapped_get_wholegraph_env_fns,
"Get WholeGraph Environment functions.");
m.def("get_stream", &wrapped_get_stream, "Get current CUDA stream.");
m.def("create_output_context", &wrapped_create_output_context, "Create output memory context.");
m.def("destroy_output_context", &wrapped_destroy_output_context, "Destroy output memory context.");
m.def("get_tensor_from_context",
&get_torch_tensor_from_output_context,
"Get PyTorch Tensor from output memory context");
}
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch_cpp_ext/torch_utils.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "torch_utils.h"
#include <c10/cuda/CUDAFunctions.h>
namespace wholegraph_torch {
c10::ScalarType get_c10_scalar_type(wholememory_dtype_t wm_dtype)
{
switch (wm_dtype) {
case WHOLEMEMORY_DT_FLOAT: return c10::ScalarType::Float;
case WHOLEMEMORY_DT_HALF: return c10::ScalarType::Half;
case WHOLEMEMORY_DT_DOUBLE: return c10::ScalarType::Double;
case WHOLEMEMORY_DT_BF16: return c10::ScalarType::BFloat16;
case WHOLEMEMORY_DT_INT: return c10::ScalarType::Int;
case WHOLEMEMORY_DT_INT64: return c10::ScalarType::Long;
case WHOLEMEMORY_DT_INT16: return c10::ScalarType::Short;
case WHOLEMEMORY_DT_INT8: return c10::ScalarType::Char;
default: return c10::ScalarType::Undefined;
}
}
wholememory_dtype_t get_wholememory_dtype(torch::ScalarType ts_dtype)
{
switch (ts_dtype) {
case c10::ScalarType::Float: return WHOLEMEMORY_DT_FLOAT;
case c10::ScalarType::Half: return WHOLEMEMORY_DT_HALF;
case c10::ScalarType::Double: return WHOLEMEMORY_DT_DOUBLE;
case c10::ScalarType::BFloat16: return WHOLEMEMORY_DT_BF16;
case c10::ScalarType::Int: return WHOLEMEMORY_DT_INT;
case c10::ScalarType::Long: return WHOLEMEMORY_DT_INT64;
case c10::ScalarType::Short: return WHOLEMEMORY_DT_INT16;
case c10::ScalarType::Char: return WHOLEMEMORY_DT_INT8;
default: return WHOLEMEMORY_DT_UNKNOWN;
}
}
void set_need_grad(pytorch_memory_context* memory_context, bool require_grad)
{
memory_context->options = memory_context->options.requires_grad(require_grad);
}
void create_torch_memory_context_func(void** memory_context, void* /*global_context*/)
{
*memory_context = new pytorch_memory_context();
}
void destroy_torch_memory_context_func(void* memory_context, void* /*global_context*/)
{
if (memory_context != nullptr) { delete static_cast<pytorch_memory_context*>(memory_context); }
}
void* torch_common_malloc_func(wholememory_tensor_description_t* tensor_description,
void* memory_context,
bool gpu_memory,
bool pinned)
{
auto* pytorch_context = static_cast<pytorch_memory_context*>(memory_context);
pytorch_context->desc = *tensor_description;
std::vector<int64_t> shape(tensor_description->dim);
for (int i = 0; i < tensor_description->dim; i++) {
shape[i] = tensor_description->sizes[i];
}
pytorch_context->options =
pytorch_context->options.dtype(get_c10_scalar_type(tensor_description->dtype));
if (gpu_memory) {
pytorch_context->options =
pytorch_context->options.device(c10::Device(c10::kCUDA, c10::cuda::current_device()));
} else {
pytorch_context->options = pytorch_context->options.device(c10::Device(c10::kCPU));
pytorch_context->options = pytorch_context->options.pinned_memory(pinned);
}
try {
pytorch_context->tensor = torch::empty(shape, pytorch_context->options);
} catch (c10::Error& err) {
    fprintf(stderr, "torch_common_malloc_func allocation failed. Reason=%s\n", err.what());
throw err;
}
return pytorch_context->tensor.data_ptr();
}
void torch_common_free_func(void* memory_context, void* /*global_context*/)
{
static_cast<pytorch_memory_context*>(memory_context)->tensor = torch::Tensor();
static_cast<pytorch_memory_context*>(memory_context)->options = torch::TensorOptions();
wholememory_initialize_tensor_desc(&static_cast<pytorch_memory_context*>(memory_context)->desc);
}
void get_tensor_desc_from_torch_tensor(wholememory_tensor_description_t* tensor_desc,
const torch::Tensor& t)
{
tensor_desc->dim = t.dim();
tensor_desc->dtype = get_wholememory_dtype(t.dtype().toScalarType());
TORCH_CHECK(tensor_desc->dtype != WHOLEMEMORY_DT_UNKNOWN);
tensor_desc->storage_offset = t.storage_offset();
for (int i = 0; i < tensor_desc->dim; i++) {
tensor_desc->sizes[i] = t.size(i);
tensor_desc->strides[i] = t.stride(i);
}
}
void get_array_desc_from_torch_tensor(wholememory_array_description_t* array_desc,
const torch::Tensor& t)
{
TORCH_CHECK(t.dim() == 1, "get_array_desc_from_torch_tensor: should be 1-dim tensor");
array_desc->dtype = get_wholememory_dtype(t.dtype().toScalarType());
TORCH_CHECK(array_desc->dtype != WHOLEMEMORY_DT_UNKNOWN);
array_desc->size = t.size(0);
array_desc->storage_offset = t.storage_offset();
}
void get_matrix_desc_from_torch_tensor(wholememory_matrix_description_t* matrix_desc,
const torch::Tensor& t)
{
TORCH_CHECK(t.dim() == 2, "get_matrix_desc_from_torch_tensor: should be 2-dim tensor");
matrix_desc->dtype = get_wholememory_dtype(t.dtype().toScalarType());
TORCH_CHECK(matrix_desc->dtype != WHOLEMEMORY_DT_UNKNOWN);
matrix_desc->sizes[0] = t.size(0);
matrix_desc->sizes[1] = t.size(1);
matrix_desc->stride = t.stride(0);
matrix_desc->storage_offset = t.storage_offset();
}
wrapped_torch_tensor::wrapped_torch_tensor(const torch::Tensor& torch_tensor)
{
wholememory_tensor_description_t tensor_description;
get_tensor_desc_from_torch_tensor(&tensor_description, torch_tensor);
wholememory_make_tensor_from_pointer(
&wholememory_tensor_, torch_tensor.storage().data(), &tensor_description);
}
wrapped_torch_tensor::~wrapped_torch_tensor()
{
wholememory_destroy_tensor(wholememory_tensor_);
wholememory_tensor_ = nullptr;
}
wholememory_tensor_t wrapped_torch_tensor::get_wholememory_tensor() const
{
return wholememory_tensor_;
}
void wrapped_torch_tensor::unsqueeze(int dim)
{
auto* tensor_desc = wholememory_tensor_get_tensor_description(wholememory_tensor_);
TORCH_CHECK(dim >= -tensor_desc->dim - 1 && dim <= tensor_desc->dim,
"dim = ",
dim,
" but t.dim()=",
tensor_desc->dim,
", should in range [",
-tensor_desc->dim - 1,
", ",
tensor_desc->dim,
"]")
if (dim < 0) { dim += tensor_desc->dim + 1; }
TORCH_CHECK(wholememory_unsqueeze_tensor(tensor_desc, dim), "unsqueeze failed.")
}
void wrapped_torch_tensor::squeeze(int dim)
{
auto* tensor_desc = wholememory_tensor_get_tensor_description(wholememory_tensor_);
TORCH_CHECK(dim >= -tensor_desc->dim && dim < tensor_desc->dim,
"dim = ",
dim,
" but t.dim()=",
tensor_desc->dim,
", should in range [",
-tensor_desc->dim,
", ",
tensor_desc->dim,
")")
if (dim < 0) { dim += tensor_desc->dim; }
TORCH_CHECK(tensor_desc->sizes[dim] == 1, "dim size should be 1")
TORCH_CHECK(
dim == tensor_desc->dim - 1 || tensor_desc->strides[dim] == tensor_desc->strides[dim + 1],
"stride should be same as next dim")
TORCH_CHECK(wholememory_squeeze_tensor(tensor_desc, dim))
}
void torch_tensor_check_dim_in_range(const torch::Tensor& t,
int min_dim,
int max_dim,
const char* info)
{
TORCH_CHECK(t.dim() >= min_dim && t.dim() <= max_dim,
std::string(info),
" dim=",
t.dim(),
", should in range [",
min_dim,
", ",
max_dim,
"]")
}
void torch_tensor_check_dtype(const torch::Tensor& t, torch::Dtype dtype, const char* info)
{
TORCH_CHECK(t.dtype() == dtype, std::string(info), " should be ", dtype, " but got ", t.dtype());
}
void torch_tensor_check_dtype_is_int(const torch::Tensor& t, const char* info)
{
TORCH_CHECK(t.dtype() == torch::kInt8 || t.dtype() == torch::kInt16 ||
t.dtype() == torch::kInt32 || t.dtype() == torch::kInt64,
std::string(info),
" should be integer.")
}
// int32 or int64
void torch_tensor_check_dtype_is_index(const torch::Tensor& t, const char* info)
{
TORCH_CHECK(t.dtype() == torch::kInt32 || t.dtype() == torch::kInt64,
std::string(info),
" should be int32 or int64.")
}
void torch_tensor_check_dtype_is_float(const torch::Tensor& t, const char* info)
{
TORCH_CHECK(t.dtype() == torch::kFloat16 || t.dtype() == torch::kBFloat16 ||
t.dtype() == torch::kFloat32 || t.dtype() == torch::kFloat64,
std::string(info),
" should be float tensor.")
}
} // namespace wholegraph_torch
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch_cpp_ext/torch_env_func_ptrs.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "torch_env_func_ptrs.h"
#include <c10/cuda/CUDAStream.h>
#include "torch_utils.h"
namespace wholegraph_torch {
void* torch_malloc_func(wholememory_tensor_description_t* tensor_description,
wholememory_memory_allocation_type_t memory_allocation_type,
void* memory_context,
void* /*global_context*/)
{
bool gpu_memory = memory_allocation_type == WHOLEMEMORY_MA_DEVICE;
bool pinned_memory = memory_allocation_type == WHOLEMEMORY_MA_PINNED;
return torch_common_malloc_func(tensor_description, memory_context, gpu_memory, pinned_memory);
}
static wholememory_env_func_t pytorch_env_func = {
.temporary_fns =
{
.create_memory_context_fn = create_torch_memory_context_func,
.destroy_memory_context_fn = destroy_torch_memory_context_func,
.malloc_fn = torch_malloc_func,
.free_fn = torch_common_free_func,
.global_context = nullptr,
},
.output_fns = {
.malloc_fn = torch_malloc_func,
.free_fn = torch_common_free_func,
.global_context = nullptr,
}};
wholememory_env_func_t* get_pytorch_env_func() { return &pytorch_env_func; }
cudaStream_t get_current_stream() { return at::cuda::getCurrentCUDAStream(); }
void* create_output_context() {
void* output_context = nullptr;
create_torch_memory_context_func(&output_context, nullptr);
return output_context;
}
void destroy_output_context(void* output_context) {
destroy_torch_memory_context_func(output_context, nullptr);
}
} // namespace wholegraph_torch
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/test_utils/test_comm.py
|
import torch
import pylibwholegraph.binding.wholememory_binding as wmb
from pylibwholegraph.torch.dlpack_utils import torch_import_from_dlpack
from packaging import version
def gen_csr_format_from_dense_matrix(
matrix_tensor,
graph_node_count,
graph_edge_count,
neighbor_node_count,
csr_row_dtype,
csr_col_dtype,
weight_dtype,
):
row_num = matrix_tensor.shape[0]
col_num = matrix_tensor.shape[1]
assert row_num == graph_node_count
assert col_num == neighbor_node_count
csr_row_ptr = torch.zeros((graph_node_count + 1,), dtype=csr_row_dtype)
for i in range(row_num):
csr_row_ptr[i + 1] = torch.count_nonzero(matrix_tensor[i]).item()
csr_row_ptr = torch.cumsum(csr_row_ptr, dim=0, dtype=csr_row_dtype)
assert csr_row_ptr[graph_node_count] == graph_edge_count
csr_col_ptr = torch.nonzero(matrix_tensor, as_tuple=True)[1]
csr_weight_ptr = torch.empty((graph_edge_count,), dtype=weight_dtype)
for row_id in range(row_num):
start = csr_row_ptr[row_id]
end = csr_row_ptr[row_id + 1]
for j in range(start, end):
col_id = csr_col_ptr[j]
csr_weight_ptr[j] = matrix_tensor[row_id][col_id]
if csr_col_dtype == torch.int32:
csr_col_ptr = csr_col_ptr.int()
return csr_row_ptr, csr_col_ptr, csr_weight_ptr
def gen_csr_graph(
graph_node_count,
graph_edge_count,
neighbor_node_count=None,
csr_row_dtype=torch.int64,
csr_col_dtype=torch.int32,
weight_dtype=torch.float32,
):
if neighbor_node_count is None:
neighbor_node_count = graph_node_count
all_count = graph_node_count * neighbor_node_count
assert all_count >= graph_edge_count
matrix_tensor = (
torch.rand(all_count, dtype=weight_dtype, device=torch.device("cpu")) + 1
)
choice_zero_idxs = torch.randperm(all_count, device=torch.device("cpu"))[
: all_count - graph_edge_count
]
matrix_tensor[choice_zero_idxs] = 0
matrix_tensor.resize_(graph_node_count, neighbor_node_count)
target_torch_version = "1.13.0a"
if version.parse(torch.__version__) >= version.parse(target_torch_version):
sp_format = matrix_tensor.to_sparse_csr()
csr_row_ptr = sp_format.crow_indices()
csr_col_ptr = sp_format.col_indices()
csr_weight_ptr = sp_format.values()
assert csr_row_ptr.dtype == torch.int64
assert csr_col_ptr.dtype == torch.int64
if csr_col_dtype == torch.int32:
csr_col_ptr = csr_col_ptr.int()
if csr_row_dtype == torch.int32:
csr_row_ptr = csr_row_ptr.int()
return csr_row_ptr, csr_col_ptr, csr_weight_ptr
else:
return gen_csr_format_from_dense_matrix(
matrix_tensor,
graph_node_count,
graph_edge_count,
neighbor_node_count,
csr_row_dtype,
csr_col_dtype,
weight_dtype,
)
def host_sample_all_neighbors(
host_csr_row_ptr,
host_csr_col_ptr,
center_nodes,
output_sample_offset_tensor,
col_id_dtype,
total_sample_count,
):
output_dest_tensor = torch.empty((total_sample_count,), dtype=col_id_dtype)
output_center_localid_tensor = torch.empty((total_sample_count,), dtype=torch.int32)
output_edge_gid_tensor = torch.empty((total_sample_count,), dtype=torch.int64)
center_nodes_count = center_nodes.size(0)
for i in range(center_nodes_count):
node_id = center_nodes[i]
start = host_csr_row_ptr[node_id]
end = host_csr_row_ptr[node_id + 1]
output_id = output_sample_offset_tensor[i]
for j in range(end - start):
output_dest_tensor[output_id + j] = host_csr_col_ptr[start + j]
output_center_localid_tensor[output_id + j] = node_id
output_edge_gid_tensor[output_id + j] = start + j
return output_dest_tensor, output_center_localid_tensor, output_edge_gid_tensor
def copy_host_1D_tensor_to_wholememory(
wm_array, host_tensor, world_rank, world_size, wm_comm
):
local_tensor_cuda, local_start = wm_array.get_local_tensor(
torch_import_from_dlpack, wmb.WholeMemoryMemoryLocation.MlDevice, world_rank
)
assert local_tensor_cuda.dim() == 1
wm_array_size = wm_array.shape[0]
local_start_ref = min(
wmb.determine_partition_plan(wm_array_size, world_size) * world_rank,
wm_array_size,
)
local_end = min(
wmb.determine_partition_plan(wm_array_size, world_size) * (world_rank + 1),
wm_array_size,
)
local_count = local_end - local_start
assert local_start == local_start_ref
assert local_tensor_cuda.shape[0] == local_count
local_tensor_cuda.copy_(host_tensor[local_start:local_end])
wm_comm.barrier()
def host_get_sample_offset_tensor(host_csr_row_ptr, center_nodes, max_sample_count):
center_nodes_count = center_nodes.size(0)
output_sample_offset_tensor = torch.empty(
(center_nodes_count + 1,), dtype=torch.int32
)
output_sample_offset_tensor[0] = 0
for i in range(center_nodes_count):
node_id = center_nodes[i]
neighbor_count = host_csr_row_ptr[node_id + 1] - host_csr_row_ptr[node_id]
output_sample_offset_tensor[i + 1] = neighbor_count
if max_sample_count > 0:
output_sample_offset_tensor[i + 1] = min(max_sample_count, neighbor_count)
output_sample_offset_tensor = torch.cumsum(
output_sample_offset_tensor, dim=0, dtype=torch.int32
)
return output_sample_offset_tensor
def int_to_wholememory_datatype(value: int):
if value == 0:
return wmb.WholeMemoryDataType.DtInt
if value == 1:
return wmb.WholeMemoryDataType.DtInt64
if value == 2:
return wmb.WholeMemoryDataType.DtFloat
if value == 3:
return wmb.WholeMemoryDataType.DtDouble
else:
raise ValueError("invalid int_to_wholememory_datatype value")
def int_to_wholememory_location(value: int):
if value == 0:
return wmb.WholeMemoryMemoryLocation.MlHost
if value == 1:
return wmb.WholeMemoryMemoryLocation.MlDevice
else:
raise ValueError("invalid int_to_wholememory_localtion value")
def int_to_wholememory_type(value: int):
if value == 0:
return wmb.WholeMemoryMemoryType.MtContinuous
if value == 1:
return wmb.WholeMemoryMemoryType.MtChunked
else:
raise ValueError("invalid int_to_wholememory_type value")
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/utils/multiprocess.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing as mp
def multiprocess_run(world_size: int, func, inline_single_process=False):
"""
    Run func in multiple processes.
:param world_size: process count
:param func: function to run
    :param inline_single_process: when world_size is 1, whether to run func in the current process instead of spawning one.
:return: None
"""
assert world_size > 0
if world_size == 1 and inline_single_process:
func(0, 1)
return
spawn_context = mp.get_context("spawn")
process_array = [None] * world_size
for i in range(world_size):
process_array[i] = spawn_context.Process(target=func, args=(i, world_size))
process_array[i].start()
for i in range(world_size):
process_array[i].join()
for i in range(world_size):
assert process_array[i].exitcode == 0
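# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the library API): multiprocess_run calls
# func(rank, world_size) in each spawned process, so any picklable top-level
# callable with that signature works. The worker name below is hypothetical.
# ---------------------------------------------------------------------------
def _example_worker(world_rank: int, world_size: int):
    # Each spawned process reports its own rank.
    print(f"hello from rank {world_rank} of {world_size}")


if __name__ == "__main__":
    # Spawn two processes; each runs _example_worker(rank, 2).
    multiprocess_run(2, _example_worker)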
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/tests
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/tests/pylibwholegraph/test_wholememory_binding.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import pylibwholegraph.binding.wholememory_binding as wmb
from pylibwholegraph.utils.multiprocess import multiprocess_run
from pylibwholegraph.torch.initialize import init_torch_env_and_create_wm_comm
from pylibwholegraph.torch.dlpack_utils import torch_import_from_dlpack
import torch
# Run with:
# python3 -m pytest ../tests/pylibwholegraph/test_wholememory_binding.py -s
def single_test_case(wm_comm, mt, ml, malloc_size, granularity):
world_rank = wm_comm.get_rank()
print("Rank=%d testing mt=%s, ml=%s" % (world_rank, mt, ml))
h = wmb.malloc(malloc_size, wm_comm, mt, ml, granularity)
global_tensor = None
chunked_tensors = None
view_device = wmb.WholeMemoryMemoryLocation.MlDevice
view_device_id = world_rank
tensor_data_type = wmb.WholeMemoryDataType.DtInt64
elt_size = 8
local_tensor, local_offset = h.get_local_flatten_tensor(
torch_import_from_dlpack, tensor_data_type, view_device, view_device_id
)
local_data_torch = torch.arange(
local_offset, local_offset + local_tensor.shape[0], dtype=torch.int64
)
local_tensor.copy_(local_data_torch)
local_view_tensor, _ = h.get_local_flatten_tensor(
torch_import_from_dlpack, tensor_data_type, view_device, view_device_id
)
assert torch.equal(local_view_tensor.cpu(), local_data_torch)
del local_data_torch, local_view_tensor
wm_comm.barrier()
if mt == wmb.WholeMemoryMemoryType.MtDistributed or (
mt == wmb.WholeMemoryMemoryType.MtChunked
and ml == wmb.WholeMemoryMemoryLocation.MlDevice
):
with pytest.raises(ValueError):
global_tensor, _ = h.get_global_flatten_tensor(
torch_import_from_dlpack, tensor_data_type, view_device, view_device_id
)
else:
global_tensor, _ = h.get_global_flatten_tensor(
torch_import_from_dlpack, tensor_data_type, view_device, view_device_id
)
global_data_torch = torch.arange(0, malloc_size // elt_size, dtype=torch.int64)
assert torch.equal(global_tensor.cpu(), global_data_torch)
del global_data_torch
if mt == wmb.WholeMemoryMemoryType.MtDistributed:
with pytest.raises(ValueError):
chunked_tensors, _ = h.get_all_chunked_flatten_tensor(
torch_import_from_dlpack, tensor_data_type, view_device, view_device_id
)
else:
chunked_tensors, _ = h.get_all_chunked_flatten_tensor(
torch_import_from_dlpack, tensor_data_type, view_device, view_device_id
)
remote_offset = 0
for i in range(len(chunked_tensors)):
remote_data_torch = torch.arange(
remote_offset,
remote_offset + chunked_tensors[i].shape[0],
dtype=torch.int64,
)
assert torch.equal(chunked_tensors[i].cpu(), remote_data_torch)
remote_offset += chunked_tensors[i].shape[0]
del remote_data_torch
wmb.free(h)
def routine_func(world_rank: int, world_size: int):
wm_comm, _ = init_torch_env_and_create_wm_comm(
world_rank, world_size, world_rank, world_size
)
wm_comm = wm_comm.wmb_comm
single_rank_size = 1024 * 1024 * 1024
malloc_size = single_rank_size * world_size
granularity = 256
print("")
for mt in [
wmb.WholeMemoryMemoryType.MtContinuous,
wmb.WholeMemoryMemoryType.MtChunked,
wmb.WholeMemoryMemoryType.MtDistributed,
]:
for ml in [
wmb.WholeMemoryMemoryLocation.MlHost,
wmb.WholeMemoryMemoryLocation.MlDevice,
]:
if wm_comm.support_type_location(mt, ml):
single_test_case(wm_comm, mt, ml, malloc_size, granularity)
wmb.finalize()
def test_dlpack():
gpu_count = wmb.fork_get_gpu_count()
assert gpu_count > 0
multiprocess_run(gpu_count, routine_func)
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/tests
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/tests/pylibwholegraph/test_wholememory_io.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import pylibwholegraph.binding.wholememory_binding as wmb
from pylibwholegraph.utils.multiprocess import multiprocess_run
from pylibwholegraph.torch.initialize import init_torch_env_and_create_wm_comm
from pylibwholegraph.torch.dlpack_utils import torch_import_from_dlpack
import torch
import numpy as np
import os
import random
from functools import partial
gpu_count = None
@pytest.fixture(scope="module", autouse=True)
def module_level_setup_teardown():
skip_env_flag = os.getenv("TEST_WM_LOAD_STORE")
skip_test = True
if skip_env_flag is not None:
skip_env_flag = skip_env_flag.lower()
if skip_env_flag == "1" or skip_env_flag == "true" or skip_env_flag == "on":
skip_test = False
if skip_test:
pytest.skip("Skipping load store test due to TEST_WM_LOAD_STORE not set...")
global gpu_count
assert gpu_count is None
gpu_count = wmb.fork_get_gpu_count()
assert gpu_count > 0
yield
gpu_count = None
def load_routine_func(
world_rank: int,
world_size: int,
cpu_embedding_tensor_base,
file_name_prefix,
file_part_count,
embedding_entry_count,
embedding_dim,
embedding_stride,
storage_offset,
):
wm_comm, _ = init_torch_env_and_create_wm_comm(
world_rank, world_size, world_rank, world_size
)
wm_comm = wm_comm.wmb_comm
data_type = wmb.WholeMemoryDataType.DtInt
file_list = [None] * file_part_count
per_rank_entry = wmb.determine_partition_plan(embedding_entry_count, world_size)
rank_start_entry = min(per_rank_entry * world_rank, embedding_entry_count)
rank_end_entry = min(per_rank_entry * (world_rank + 1), embedding_entry_count)
rank_entry_count = rank_end_entry - rank_start_entry
reference_local_tensor = cpu_embedding_tensor_base[
rank_start_entry:rank_end_entry, :
].cuda()
for i in range(file_part_count):
file_list[i] = "%s_part_%d_of_%d" % (file_name_prefix, i, file_part_count)
for mt in [
wmb.WholeMemoryMemoryType.MtContinuous,
wmb.WholeMemoryMemoryType.MtChunked,
wmb.WholeMemoryMemoryType.MtDistributed,
]:
for ml in [
wmb.WholeMemoryMemoryLocation.MlHost,
wmb.WholeMemoryMemoryLocation.MlDevice,
]:
if not wm_comm.support_type_location(mt, ml):
continue
wholememory_root_tensor = wmb.create_wholememory_matrix(
data_type,
embedding_entry_count,
embedding_dim + storage_offset,
embedding_stride,
wm_comm,
mt,
ml,
)
wholememory_tensor = wholememory_root_tensor.get_sub_tensor(
[-1, storage_offset], [-1, -1]
)
wholememory_tensor.from_filelist(file_list)
local_tensor, local_offset = wholememory_tensor.get_local_tensor(
torch_import_from_dlpack,
wmb.WholeMemoryMemoryLocation.MlDevice,
world_rank,
)
assert local_tensor.dim() == 2
assert local_tensor.shape[0] == rank_entry_count
assert local_tensor.shape[1] == embedding_dim
assert torch.equal(local_tensor, reference_local_tensor)
del wholememory_tensor
wmb.destroy_wholememory_tensor(wholememory_root_tensor)
wmb.finalize()
@pytest.mark.parametrize("file_part_count", [3, 5])
@pytest.mark.parametrize(
"embedding_entry_count", [1024 * 1024 * 4 + 131, 1024 * 1024 * 6 - 127]
)
@pytest.mark.parametrize("embedding_dim", [16, 31, 33])
@pytest.mark.parametrize("embedding_stride", [16, 32, 64])
@pytest.mark.parametrize("storage_offset", [0, 3])
def test_wholememory_load(
file_part_count,
embedding_entry_count,
embedding_dim,
embedding_stride,
storage_offset,
):
if embedding_stride < storage_offset + embedding_dim:
pytest.skip(
"Skipping due to embedding_stride, embedding_dim and storage_offset configuration not valid."
)
cpu_embedding_tensor_base = torch.randint(
-1000000000,
1000000000,
(embedding_entry_count, embedding_dim),
dtype=torch.int,
device="cpu",
)
indices = sorted(
random.sample(range(1, embedding_entry_count), file_part_count - 1)
)
indices.append(embedding_entry_count)
counts = [0] * file_part_count
for i in range(file_part_count):
counts[i] = indices[i] if i == 0 else indices[i] - indices[i - 1]
    split_tensors = torch.split(cpu_embedding_tensor_base, counts, dim=0)
file_name_prefix = "pytest_load_temp_file"
for i in range(file_part_count):
        split_tensors[i].numpy().tofile(
"%s_part_%d_of_%d" % (file_name_prefix, i, file_part_count)
)
cpu_embedding_tensor_base = cpu_embedding_tensor_base.share_memory_()
load_routine_func_partial = partial(
load_routine_func,
cpu_embedding_tensor_base=cpu_embedding_tensor_base,
file_name_prefix=file_name_prefix,
file_part_count=file_part_count,
embedding_entry_count=embedding_entry_count,
embedding_dim=embedding_dim,
embedding_stride=embedding_stride,
storage_offset=storage_offset,
)
global gpu_count
multiprocess_run(gpu_count, load_routine_func_partial)
for i in range(file_part_count):
filename = "%s_part_%d_of_%d" % (file_name_prefix, i, file_part_count)
assert os.path.isfile(filename)
os.remove(filename)
def store_routine_func(
world_rank: int,
world_size: int,
file_name_prefix,
embedding_entry_count,
embedding_dim,
embedding_stride,
storage_offset,
):
(wm_comm, _) = init_torch_env_and_create_wm_comm(
world_rank, world_size, world_rank, world_size
)
wm_comm = wm_comm.wmb_comm
data_type = wmb.WholeMemoryDataType.DtInt
mt = wmb.WholeMemoryMemoryType.MtContinuous
ml = wmb.WholeMemoryMemoryLocation.MlHost
filename = "%s_part_%d_of_%d" % (file_name_prefix, world_rank, world_size)
wholememory_root_tensor = wmb.create_wholememory_matrix(
data_type,
embedding_entry_count,
embedding_stride,
embedding_stride,
wm_comm,
mt,
ml,
)
local_root_tensor, local_root_offset = wholememory_root_tensor.get_local_tensor(
torch_import_from_dlpack, wmb.WholeMemoryMemoryLocation.MlHost, world_rank
)
root_data_tensor = torch.IntTensor(
range(
embedding_stride * local_root_offset,
embedding_stride * (local_root_offset + local_root_tensor.shape[0]),
)
).reshape((-1, embedding_stride))
local_root_tensor.copy_(root_data_tensor)
wholememory_tensor = wholememory_root_tensor.get_sub_tensor(
[-1, storage_offset], [-1, storage_offset + embedding_dim]
)
wholememory_tensor.to_file(filename)
wmb.finalize()
@pytest.mark.parametrize(
"embedding_entry_count", [1024 * 1024 * 4 + 131, 1024 * 1024 * 6 - 127]
)
@pytest.mark.parametrize("embedding_dim", [16, 31, 33])
@pytest.mark.parametrize("embedding_stride", [16, 32, 64])
@pytest.mark.parametrize("storage_offset", [0, 3])
def test_wholememory_store(
embedding_entry_count, embedding_dim, embedding_stride, storage_offset
):
if embedding_stride < storage_offset + embedding_dim:
pytest.skip(
"Skipping due to embedding_stride, embedding_dim and storage_offset configuration not valid."
)
file_name_prefix = "pytest_store_temp_file"
store_routine_func_partial = partial(
store_routine_func,
file_name_prefix=file_name_prefix,
embedding_entry_count=embedding_entry_count,
embedding_dim=embedding_dim,
embedding_stride=embedding_stride,
storage_offset=storage_offset,
)
global gpu_count
multiprocess_run(gpu_count, store_routine_func_partial)
embedding_entry_offset = 0
file_part_count = gpu_count
for i in range(file_part_count):
filename = "%s_part_%d_of_%d" % (file_name_prefix, i, file_part_count)
assert os.path.isfile(filename)
filesize = os.path.getsize(filename)
assert filesize % (embedding_dim * 4) == 0
file_entry_count = filesize // (embedding_dim * 4)
loaded_np_array = np.fromfile(filename, dtype=np.int32)
loaded_torch_tensor = torch.from_numpy(loaded_np_array).reshape(
(-1, embedding_dim)
)
reference_tensor = torch.IntTensor(
range(
embedding_stride * embedding_entry_offset,
embedding_stride * (embedding_entry_offset + file_entry_count),
)
).reshape((-1, embedding_stride))
reference_tensor = reference_tensor[
:, storage_offset : storage_offset + embedding_dim
]
assert torch.equal(loaded_torch_tensor, reference_tensor)
embedding_entry_offset += file_entry_count
os.remove(filename)
assert embedding_entry_offset == embedding_entry_count
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/tests
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/tests/pylibwholegraph/test_wholememory_tensor.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylibwholegraph.binding.wholememory_binding as wmb
from pylibwholegraph.utils.multiprocess import multiprocess_run
from pylibwholegraph.torch.initialize import init_torch_env_and_create_wm_comm
# Run with:
# python3 -m pytest ../tests/pylibwholegraph/test_wholememory_tensor.py -s
def array_test_case(wm_comm, dt, mt, ml, size):
world_rank = wm_comm.get_rank()
print(
"Rank=%d testing array size=%d dt=%s, mt=%s, ml=%s"
% (world_rank, size, dt, mt, ml)
)
wm_array = wmb.create_wholememory_array(dt, size, wm_comm, mt, ml)
assert wm_array.dtype == dt
assert wm_array.dim() == 1
assert len(wm_array.shape) == 1
assert wm_array.shape[0] == size
assert len(wm_array.stride()) == 1
assert wm_array.stride()[0] == 1
assert wm_array.storage_offset() == 0
wm_sub_array = wm_array.get_sub_tensor([size // 4], [-1])
assert wm_sub_array.dtype == dt
assert wm_sub_array.dim() == 1
assert wm_sub_array.shape[0] == size - size // 4
assert wm_sub_array.stride()[0] == 1
assert wm_sub_array.storage_offset() == size // 4
wmb.destroy_wholememory_tensor(wm_sub_array)
wmb.destroy_wholememory_tensor(wm_array)
def matrix_test_case(wm_comm, dt, mt, ml, mat_size):
world_rank = wm_comm.get_rank()
print(
"Rank=%d testing matrix size=%s dt=%s, mt=%s, ml=%s"
% (world_rank, mat_size, dt, mt, ml)
)
wm_matrix = wmb.create_wholememory_matrix(
dt, mat_size[0], mat_size[1], -1, wm_comm, mt, ml
)
assert wm_matrix.dtype == dt
assert wm_matrix.dim() == 2
assert len(wm_matrix.shape) == 2
assert wm_matrix.shape[0] == mat_size[0]
assert wm_matrix.shape[1] == mat_size[1]
assert len(wm_matrix.stride()) == 2
assert wm_matrix.stride()[0] == mat_size[1]
assert wm_matrix.stride()[1] == 1
wm_sub_matrix = wm_matrix.get_sub_tensor(
[mat_size[0] // 3, mat_size[1] // 5], [-1, mat_size[1] // 5 * 3]
)
assert wm_sub_matrix.dtype == dt
assert wm_sub_matrix.dim() == 2
assert wm_sub_matrix.shape[0] == mat_size[0] - mat_size[0] // 3
assert wm_sub_matrix.shape[1] == mat_size[1] // 5 * 3 - mat_size[1] // 5
assert wm_sub_matrix.stride()[0] == mat_size[1]
assert wm_sub_matrix.stride()[1] == 1
assert (
wm_sub_matrix.storage_offset()
== mat_size[1] // 5 + mat_size[0] // 3 * mat_size[1]
)
wmb.destroy_wholememory_tensor(wm_sub_matrix)
wmb.destroy_wholememory_tensor(wm_matrix)
def routine_func(world_rank: int, world_size: int):
wm_comm, _ = init_torch_env_and_create_wm_comm(
world_rank, world_size, world_rank, world_size
)
wm_comm = wm_comm.wmb_comm
single_array_size = 128 * 1024 * 1024 * world_size
single_matrix_size = (1024 * 1024 * world_size, 128)
dt = wmb.WholeMemoryDataType.DtFloat
print("")
for mt in [
wmb.WholeMemoryMemoryType.MtContinuous,
wmb.WholeMemoryMemoryType.MtChunked,
wmb.WholeMemoryMemoryType.MtDistributed,
]:
for ml in [
wmb.WholeMemoryMemoryLocation.MlHost,
wmb.WholeMemoryMemoryLocation.MlDevice,
]:
if wm_comm.support_type_location(mt, ml):
array_test_case(wm_comm, dt, mt, ml, single_array_size)
matrix_test_case(wm_comm, dt, mt, ml, single_matrix_size)
wmb.finalize()
def test_wholememory_tensor():
gpu_count = wmb.fork_get_gpu_count()
assert gpu_count > 0
multiprocess_run(gpu_count, routine_func)
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/tests/wholegraph_torch
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/tests/wholegraph_torch/ops/test_wholememory_cython_binding.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import pylibwholegraph.binding.wholememory_binding as wmb
import torch
from pylibwholegraph.torch.wholegraph_env import (
get_stream,
get_wholegraph_env_fns,
wrap_torch_tensor,
TorchMemoryContext,
)
import time
def test_smoke():
torch.cuda.set_device(0)
output_len = 128
embed_dim = 10
input_tensor = torch.ones((embed_dim,), device="cuda")
indice_tensor = torch.arange(output_len, device="cuda")
ref_tensor = input_tensor.expand((output_len, embed_dim)) + indice_tensor.reshape(
(output_len, 1)
).expand((output_len, embed_dim))
output_tensor = torch.empty((output_len, embed_dim), device="cuda")
assert wmb.py_get_wholememory_tensor_count() == 0
output_device_context = TorchMemoryContext()
output_pinned_context = TorchMemoryContext()
output_host_context = TorchMemoryContext()
wrapped_input = wrap_torch_tensor(input_tensor)
wrapped_output = wrap_torch_tensor(output_tensor)
assert wmb.py_get_wholememory_tensor_count() > 0
env_func_int_ptr = get_wholegraph_env_fns()
stream_int_ptr = get_stream()
wmb.wholememory_env_test_cython_op(
wrapped_input,
wrapped_output,
output_device_context.get_c_context(),
output_pinned_context.get_c_context(),
output_host_context.get_c_context(),
output_len,
env_func_int_ptr,
stream_int_ptr,
)
torch.cuda.synchronize()
assert torch.allclose(ref_tensor, output_device_context.get_tensor().cuda())
assert torch.allclose(ref_tensor, output_pinned_context.get_tensor().cuda())
assert torch.allclose(ref_tensor, output_host_context.get_tensor().cuda())
del wrapped_input, wrapped_output
assert wmb.py_get_wholememory_tensor_count() == 0
def test_loop_memory():
torch.cuda.set_device(0)
embedding_dim = 1
output_len = 1
input_tensor = torch.ones((embedding_dim,), device="cuda")
output_tensor = torch.empty((output_len, embedding_dim), device="cuda")
env_func_int_ptr = get_wholegraph_env_fns()
stream_int_ptr = get_stream()
output_device_context = TorchMemoryContext()
output_pinned_context = TorchMemoryContext()
output_host_context = TorchMemoryContext()
wrapped_input = wrap_torch_tensor(input_tensor)
wrapped_output = wrap_torch_tensor(output_tensor)
wmb.wholememory_env_test_cython_op(
wrapped_input,
wrapped_output,
output_device_context.get_c_context(),
output_pinned_context.get_c_context(),
output_host_context.get_c_context(),
output_len,
env_func_int_ptr,
stream_int_ptr,
)
del wrapped_input, wrapped_output
torch.cuda.synchronize()
start_time = time.time()
for i in range(100000):
output_device_context = TorchMemoryContext()
output_pinned_context = TorchMemoryContext()
output_host_context = TorchMemoryContext()
wrapped_input = wrap_torch_tensor(input_tensor)
wrapped_output = wrap_torch_tensor(output_tensor)
wmb.wholememory_env_test_cython_op(
wrapped_input,
wrapped_output,
output_device_context.get_c_context(),
0,
0,
output_len,
env_func_int_ptr,
stream_int_ptr,
)
del wrapped_input, wrapped_output
torch.cuda.synchronize()
end_time = time.time()
assert wmb.py_get_wholememory_tensor_count() == 0
print("total_time=%f" % (end_time - start_time,))
@pytest.mark.parametrize("output_len", list(range(1, 100, 17)))
@pytest.mark.parametrize("embed_dim", list(range(1, 128, 23)))
def test_random_alloc(output_len, embed_dim):
torch.cuda.set_device(0)
input_tensor = torch.rand((embed_dim,), device="cuda")
indice_tensor = torch.arange(output_len, device="cuda")
ref_tensor = input_tensor.expand((output_len, embed_dim)) + indice_tensor.reshape(
(output_len, 1)
).expand((output_len, embed_dim))
output_tensor = torch.empty((output_len, embed_dim), device="cuda")
output_device_context = TorchMemoryContext()
output_pinned_context = TorchMemoryContext()
output_host_context = TorchMemoryContext()
wrapped_input = wrap_torch_tensor(input_tensor)
wrapped_output = wrap_torch_tensor(output_tensor)
env_func_int_ptr = get_wholegraph_env_fns()
stream_int_ptr = get_stream()
wmb.wholememory_env_test_cython_op(
wrapped_input,
wrapped_output,
output_device_context.get_c_context(),
output_pinned_context.get_c_context(),
output_host_context.get_c_context(),
output_len,
env_func_int_ptr,
stream_int_ptr,
)
torch.cuda.synchronize()
assert torch.allclose(ref_tensor, output_device_context.get_tensor().cuda())
assert torch.allclose(ref_tensor, output_pinned_context.get_tensor().cuda())
assert torch.allclose(ref_tensor, output_host_context.get_tensor().cuda())
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/tests/wholegraph_torch
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/tests/wholegraph_torch/ops/test_graph_add_csr_self_loop.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from pylibwholegraph.test_utils.test_comm import gen_csr_graph
import pylibwholegraph.torch.graph_ops as wg_ops
def host_add_csr_self_loop(csr_row_ptr_tensor, csr_col_ptr_tensor):
row_num = csr_row_ptr_tensor.shape[0] - 1
edge_num = csr_col_ptr_tensor.shape[0]
output_csr_row_ptr_tensor = torch.empty(
(csr_row_ptr_tensor.shape[0],), dtype=csr_row_ptr_tensor.dtype
)
output_csr_col_ptr_tensor = torch.empty(
(edge_num + row_num,), dtype=csr_col_ptr_tensor.dtype
)
for row_id in range(row_num):
start = csr_row_ptr_tensor[row_id]
end = csr_row_ptr_tensor[row_id + 1]
output_csr_row_ptr_tensor[row_id] = start + row_id
output_csr_col_ptr_tensor[start + row_id] = row_id
for j in range(start, end):
output_csr_col_ptr_tensor[j + row_id + 1] = csr_col_ptr_tensor[j]
output_csr_row_ptr_tensor[row_num] = csr_row_ptr_tensor[row_num] + row_num
return output_csr_row_ptr_tensor, output_csr_col_ptr_tensor
def routine_func(**kwargs):
target_node_count = kwargs["target_node_count"]
neighbor_node_count = kwargs["neighbor_node_count"]
edge_num = kwargs["edge_num"]
assert neighbor_node_count >= target_node_count
csr_row_ptr_tensor, csr_col_ptr_tensor, _ = gen_csr_graph(
target_node_count,
edge_num,
neighbor_node_count,
csr_row_dtype=torch.int32,
csr_col_dtype=torch.int32,
)
csr_row_ptr_tensor_cuda = csr_row_ptr_tensor.cuda()
csr_col_ptr_tensor_cuda = csr_col_ptr_tensor.cuda()
(
output_csr_row_ptr_tensor_cuda,
output_csr_col_ptr_tensor_cuda,
) = wg_ops.add_csr_self_loop(csr_row_ptr_tensor_cuda, csr_col_ptr_tensor_cuda)
output_csr_row_ptr_tensor = output_csr_row_ptr_tensor_cuda.cpu()
output_csr_col_ptr_tensor = output_csr_col_ptr_tensor_cuda.cpu()
(
output_csr_row_ptr_tensor_ref,
output_csr_col_ptr_tensor_ref,
) = host_add_csr_self_loop(csr_row_ptr_tensor, csr_col_ptr_tensor)
assert torch.equal(output_csr_row_ptr_tensor, output_csr_row_ptr_tensor_ref)
assert torch.equal(output_csr_col_ptr_tensor, output_csr_col_ptr_tensor_ref)
@pytest.mark.parametrize("target_node_count", [101, 113])
@pytest.mark.parametrize("neighbor_node_count", [157, 1987])
@pytest.mark.parametrize("edge_num", [1001, 2305])
def test_add_csr_self_loop(target_node_count, neighbor_node_count, edge_num):
gpu_count = torch.cuda.device_count()
assert gpu_count > 0
routine_func(
target_node_count=target_node_count,
neighbor_node_count=neighbor_node_count,
edge_num=edge_num,
)
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/tests/wholegraph_torch
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/tests/wholegraph_torch/ops/test_wholegraph_unweighted_sample_without_replacement.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import pylibwholegraph.binding.wholememory_binding as wmb
from pylibwholegraph.utils.multiprocess import multiprocess_run
from pylibwholegraph.torch.initialize import init_torch_env_and_create_wm_comm
import torch
from functools import partial
from pylibwholegraph.test_utils.test_comm import (
gen_csr_graph,
copy_host_1D_tensor_to_wholememory,
host_get_sample_offset_tensor,
host_sample_all_neighbors,
int_to_wholememory_datatype,
int_to_wholememory_location,
int_to_wholememory_type,
)
import pylibwholegraph.torch.wholegraph_ops as wg_ops
import random
def unweighted_sample_without_replacement_base(random_values, M, N):
    # Reference selection of M out of N items without replacement: pick the
    # element at index random_values[i], then move the current tail element
    # into that slot (a partial Fisher-Yates draw).
a = torch.empty((M,), dtype=torch.int32)
Q = torch.arange(N, dtype=torch.int32)
for i in range(M):
a[i] = Q[random_values[i]]
Q[random_values[i]] = Q[N - i - 1]
return a
def host_unweighted_sample_without_replacement_func(
host_csr_row_ptr,
host_csr_col_ptr,
center_nodes,
output_sample_offset_tensor,
col_id_dtype,
total_sample_count,
max_sample_count,
random_seed,
):
output_dest_tensor = torch.empty((total_sample_count,), dtype=col_id_dtype)
output_center_localid_tensor = torch.empty((total_sample_count,), dtype=torch.int32)
output_edge_gid_tensor = torch.empty((total_sample_count,), dtype=torch.int64)
center_nodes_count = center_nodes.size(0)
M = max_sample_count
    # Per-bucket thread configuration tables; func_idx = (max_sample_count - 1) // 32
    # selects the warp count and items-per-thread entries used below.
    warp_count = [
        1, 1, 1, 2, 2, 2, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8,
        8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    ]
    total_items_per_thread = [
        1, 2, 3, 2, 3, 3, 2, 2, 3, 3, 3, 3, 2, 2, 2, 2,
        3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
    ]
    func_idx = (max_sample_count - 1) // 32
block_threads = warp_count[func_idx] * 32
items_per_thread = total_items_per_thread[func_idx]
for i in range(center_nodes_count):
node_id = center_nodes[i]
start = host_csr_row_ptr[node_id]
end = host_csr_row_ptr[node_id + 1]
neighbor_count = end - start
N = neighbor_count
output_id = output_sample_offset_tensor[i]
gidx = i * block_threads
if neighbor_count <= max_sample_count:
for j in range(end - start):
output_dest_tensor[output_id + j] = host_csr_col_ptr[start + j]
output_center_localid_tensor[output_id + j] = i
output_edge_gid_tensor[output_id + j] = start + j
else:
random_values = torch.empty((N,), dtype=torch.int32)
for j in range(block_threads):
local_gidx = gidx + j
random_nums = wg_ops.generate_random_positive_int_cpu(
random_seed, local_gidx, items_per_thread
)
for k in range(items_per_thread):
id = k * block_threads + j
if id < neighbor_count:
if id < M:
random_values[id] = random_nums[k] % (N - id)
else:
random_values[id] = N
            random_sample_ids = unweighted_sample_without_replacement_base(
random_values, M, N
)
for sample_id in range(M):
output_dest_tensor[output_id + sample_id] = host_csr_col_ptr[
start + random_sample_ids[sample_id]
]
output_center_localid_tensor[output_id + sample_id] = i
output_edge_gid_tensor[output_id + sample_id] = (
start + random_sample_ids[sample_id]
)
return output_dest_tensor, output_center_localid_tensor, output_edge_gid_tensor
def host_unweighted_sample_without_replacement(
host_csr_row_ptr,
host_csr_col_ptr,
center_nodes,
max_sample_count,
col_id_dtype,
random_seed,
):
center_nodes_count = center_nodes.size(0)
output_sample_offset_tensor = host_get_sample_offset_tensor(
host_csr_row_ptr, center_nodes, max_sample_count
)
total_sample_count = output_sample_offset_tensor[center_nodes_count]
if max_sample_count <= 0:
return host_sample_all_neighbors(
host_csr_row_ptr,
host_csr_col_ptr,
center_nodes,
output_sample_offset_tensor,
col_id_dtype,
total_sample_count,
)
if max_sample_count > 1024:
raise ValueError(
"invalid host_unweighted_sample_without_replacement test max_sample_count"
)
(
output_dest_tensor,
output_center_localid_tensor,
output_edge_gid_tensor,
) = host_unweighted_sample_without_replacement_func(
host_csr_row_ptr,
host_csr_col_ptr,
center_nodes,
output_sample_offset_tensor,
col_id_dtype,
total_sample_count,
max_sample_count,
random_seed,
)
return (
output_sample_offset_tensor,
output_dest_tensor,
output_center_localid_tensor,
output_edge_gid_tensor,
)
def routine_func(world_rank: int, world_size: int, **kwargs):
wm_comm, _ = init_torch_env_and_create_wm_comm(
world_rank, world_size, world_rank, world_size
)
wm_comm = wm_comm.wmb_comm
host_csr_row_ptr = kwargs["host_csr_row_ptr"]
host_csr_col_ptr = kwargs["host_csr_col_ptr"]
graph_node_count = kwargs["graph_node_count"]
graph_edge_count = kwargs["graph_edge_count"]
max_sample_count = kwargs["max_sample_count"]
center_node_count = kwargs["center_node_count"]
center_node_dtype = kwargs["center_node_dtype"]
int_col_id_dtype = kwargs["col_id_dtype"]
int_wholememory_location = kwargs["wholememory_location"]
int_wholememory_type = kwargs["wholememory_type"]
need_center_local_output = kwargs["need_center_local_output"]
need_edge_output = kwargs["need_edge_output"]
world_rank = wm_comm.get_rank()
world_size = wm_comm.get_size()
col_id_dtype = int_to_wholememory_datatype(int_col_id_dtype)
wholememory_location = int_to_wholememory_location(int_wholememory_location)
wholememory_type = int_to_wholememory_type(int_wholememory_type)
if not wm_comm.support_type_location(wholememory_type, wholememory_location):
wmb.finalize()
return
wm_csr_row_ptr = wmb.create_wholememory_array(
wmb.WholeMemoryDataType.DtInt64,
graph_node_count + 1,
wm_comm,
wholememory_type,
wholememory_location,
)
wm_csr_col_ptr = wmb.create_wholememory_array(
col_id_dtype, graph_edge_count, wm_comm, wholememory_type, wholememory_location
)
copy_host_1D_tensor_to_wholememory(
wm_csr_row_ptr, host_csr_row_ptr, world_rank, world_size, wm_comm
)
copy_host_1D_tensor_to_wholememory(
wm_csr_col_ptr, host_csr_col_ptr, world_rank, world_size, wm_comm
)
wm_comm.barrier()
center_node_tensor = torch.randint(
0, graph_node_count, (center_node_count,), dtype=center_node_dtype
)
center_node_tensor_cuda = center_node_tensor.cuda()
random_seed = random.randint(1, 10000)
# output_sample_offset_tensor_cuda,
# output_dest_tensor_cuda,
# output_center_localid_tensor_cuda,
# output_edge_gid_tensor_cuda =
# torch.ops.wholegraph.unweighted_sample_without_replacement(wm_csr_row_ptr.get_c_handle(),
# wm_csr_col_ptr.get_c_handle(),
# center_node_tensor_cuda,
# max_sample_count,
# random_seed)
output_sample_offset_tensor = None
output_dest_tensor = None
output_center_localid_tensor = None
output_edge_gid_tensor = None
output_tensors = wg_ops.unweighted_sample_without_replacement(
wm_csr_row_ptr,
wm_csr_col_ptr,
center_node_tensor_cuda,
max_sample_count,
random_seed,
need_center_local_output=need_center_local_output,
need_edge_output=need_edge_output,
)
output_cpu_tensors = tuple(tensor.cpu() for tensor in output_tensors)
torch_col_id_dtype = torch.int32
if col_id_dtype == wmb.WholeMemoryDataType.DtInt64:
torch_col_id_dtype = torch.int64
(
output_sample_offset_tensor_ref,
output_dest_tensor_ref,
output_center_localid_tensor_ref,
output_edge_gid_tensor_ref,
) = host_unweighted_sample_without_replacement(
host_csr_row_ptr,
host_csr_col_ptr,
center_node_tensor,
max_sample_count,
torch_col_id_dtype,
random_seed,
)
if need_edge_output and need_center_local_output:
(
output_sample_offset_tensor,
output_dest_tensor,
output_center_localid_tensor,
output_edge_gid_tensor,
) = output_cpu_tensors
assert torch.equal(output_sample_offset_tensor, output_sample_offset_tensor_ref)
assert torch.equal(output_dest_tensor, output_dest_tensor_ref)
assert torch.equal(
output_center_localid_tensor, output_center_localid_tensor_ref
)
assert torch.equal(output_edge_gid_tensor, output_edge_gid_tensor_ref)
elif need_center_local_output:
(
output_sample_offset_tensor,
output_dest_tensor,
output_center_localid_tensor,
) = output_cpu_tensors
assert torch.equal(output_sample_offset_tensor, output_sample_offset_tensor_ref)
assert torch.equal(output_dest_tensor, output_dest_tensor_ref)
assert torch.equal(
output_center_localid_tensor, output_center_localid_tensor_ref
)
elif need_edge_output:
(
output_sample_offset_tensor,
output_dest_tensor,
output_edge_gid_tensor,
) = output_cpu_tensors
assert torch.equal(output_sample_offset_tensor, output_sample_offset_tensor_ref)
assert torch.equal(output_dest_tensor, output_dest_tensor_ref)
assert torch.equal(output_edge_gid_tensor, output_edge_gid_tensor_ref)
else:
output_sample_offset_tensor, output_dest_tensor = output_cpu_tensors
assert torch.equal(output_sample_offset_tensor, output_sample_offset_tensor_ref)
assert torch.equal(output_dest_tensor, output_dest_tensor_ref)
wmb.destroy_wholememory_tensor(wm_csr_row_ptr)
wmb.destroy_wholememory_tensor(wm_csr_col_ptr)
wmb.finalize()
@pytest.mark.parametrize("graph_node_count", [103])
@pytest.mark.parametrize("graph_edge_count", [1043])
@pytest.mark.parametrize("max_sample_count", [11])
@pytest.mark.parametrize("center_node_count", [13])
@pytest.mark.parametrize("center_node_dtype", [torch.int32, torch.int64])
@pytest.mark.parametrize("col_id_dtype", [0, 1])
@pytest.mark.parametrize("wholememory_location", ([0, 1]))
@pytest.mark.parametrize("wholememory_type", ([0, 1]))
@pytest.mark.parametrize("need_center_local_output", [True, False])
@pytest.mark.parametrize("need_edge_output", [True, False])
def test_wholegraph_unweighted_sample(
graph_node_count,
graph_edge_count,
max_sample_count,
center_node_count,
center_node_dtype,
col_id_dtype,
wholememory_location,
wholememory_type,
need_center_local_output,
need_edge_output,
):
gpu_count = wmb.fork_get_gpu_count()
assert gpu_count > 0
csr_col_dtype = torch.int32
    if col_id_dtype == 1:
csr_col_dtype = torch.int64
host_csr_row_ptr, host_csr_col_ptr, _ = gen_csr_graph(
graph_node_count, graph_edge_count, csr_col_dtype=csr_col_dtype
)
routine_func_partial = partial(
routine_func,
host_csr_row_ptr=host_csr_row_ptr,
host_csr_col_ptr=host_csr_col_ptr,
graph_node_count=graph_node_count,
graph_edge_count=graph_edge_count,
max_sample_count=max_sample_count,
center_node_count=center_node_count,
center_node_dtype=center_node_dtype,
col_id_dtype=col_id_dtype,
wholememory_location=wholememory_location,
wholememory_type=wholememory_type,
need_center_local_output=need_center_local_output,
need_edge_output=need_edge_output,
)
multiprocess_run(gpu_count, routine_func_partial, True)
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/tests/wholegraph_torch
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/tests/wholegraph_torch/ops/test_graph_append_unique.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
import pylibwholegraph.torch.graph_ops as wg_ops
def host_neighbor_raw_to_unique(unique_node_tensor, neighbor_node_tensor):
output_neighbor_raw_to_unique = torch.empty(
(neighbor_node_tensor.size(0)), dtype=torch.int32
)
for i in range(neighbor_node_tensor.size(0)):
neighbor_id = neighbor_node_tensor[i]
output_neighbor_raw_to_unique[i] = torch.nonzero(
unique_node_tensor == neighbor_id
).item()
return output_neighbor_raw_to_unique
def routine_func(**kwargs):
target_node_count = kwargs["target_node_count"]
neighbor_node_count = kwargs["neighbor_node_count"]
target_node_dtype = kwargs["target_node_dtype"]
need_neighbor_raw_to_unique = kwargs["need_neighbor_raw_to_unique"]
target_node_tensor = torch.randperm(neighbor_node_count, dtype=target_node_dtype)[
:target_node_count
]
neighbor_node_tensor = torch.randint(
0, neighbor_node_count, (neighbor_node_count,), dtype=target_node_dtype
)
target_node_tensor_cuda = target_node_tensor.cuda()
neighbor_node_tensor_cuda = neighbor_node_tensor.cuda()
output_unique_node_tensor_cuda = None
output_neighbor_raw_to_unique_mapping_tensor_cuda = None
if need_neighbor_raw_to_unique:
(
output_unique_node_tensor_cuda,
output_neighbor_raw_to_unique_mapping_tensor_cuda,
) = wg_ops.append_unique(
target_node_tensor_cuda,
neighbor_node_tensor_cuda,
need_neighbor_raw_to_unique=need_neighbor_raw_to_unique,
)
else:
output_unique_node_tensor_cuda = wg_ops.append_unique(
target_node_tensor_cuda,
neighbor_node_tensor_cuda,
need_neighbor_raw_to_unique=need_neighbor_raw_to_unique,
)
output_unique_node_tensor = output_unique_node_tensor_cuda.cpu()
output_unique_node_tensor_ref = torch.unique(
torch.cat((target_node_tensor, neighbor_node_tensor), 0), sorted=True
)
output_unique_node_tensor_sorted, _ = torch.sort(output_unique_node_tensor)
assert torch.equal(output_unique_node_tensor_sorted, output_unique_node_tensor_ref)
if need_neighbor_raw_to_unique:
output_neighbor_raw_to_unique_mapping_tensor = (
output_neighbor_raw_to_unique_mapping_tensor_cuda.cpu()
)
output_neighbor_raw_to_unique_mapping_tensor_ref = host_neighbor_raw_to_unique(
output_unique_node_tensor, neighbor_node_tensor
)
assert torch.equal(
output_neighbor_raw_to_unique_mapping_tensor,
output_neighbor_raw_to_unique_mapping_tensor_ref,
)
@pytest.mark.parametrize("target_node_count", [10, 113])
@pytest.mark.parametrize("neighbor_node_count", [104, 1987])
@pytest.mark.parametrize("target_node_dtype", [torch.int32, torch.int64])
@pytest.mark.parametrize("need_neighbor_raw_to_unique", [True, False])
def test_append_unique(
target_node_count,
neighbor_node_count,
target_node_dtype,
need_neighbor_raw_to_unique,
):
gpu_count = torch.cuda.device_count()
assert gpu_count > 0
routine_func(
target_node_count=target_node_count,
neighbor_node_count=neighbor_node_count,
target_node_dtype=target_node_dtype,
need_neighbor_raw_to_unique=need_neighbor_raw_to_unique,
)
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/tests/wholegraph_torch
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/tests/wholegraph_torch/ops/test_wholegraph_gather_scatter.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylibwholegraph.binding.wholememory_binding as wmb
from pylibwholegraph.utils.multiprocess import multiprocess_run
from pylibwholegraph.torch.initialize import init_torch_env_and_create_wm_comm
from pylibwholegraph.torch.dlpack_utils import torch_import_from_dlpack
import torch
import pylibwholegraph.torch.wholememory_ops as wm_ops
# PYTHONPATH=../:$PYTHONPATH python3 -m pytest ../tests/wholegraph_torch/ops/test_wholegraph_gather_scatter.py -s
def gen_int_embedding(indice_tensor, embedding_dim, output_type):
indice_count = indice_tensor.shape[0]
indice_part = (
indice_tensor.type(torch.int).reshape(indice_count, 1).repeat(1, embedding_dim)
)
embedding_part = (
torch.arange(0, embedding_dim, 1, dtype=torch.int)
.reshape(1, embedding_dim)
.repeat(indice_count, 1)
)
output = indice_part + embedding_part
return output.type(output_type)
def scatter_gather_test_case(
wm_comm,
dt,
mt,
ml,
embedding_count,
embedding_dim,
indice_count,
use_python_binding=True,
):
world_rank = wm_comm.get_rank()
world_size = wm_comm.get_size()
print(
"Rank=%d testing scatter gather with embedding_count=%d, embedding_dim=%d, indice_count=%d, dt=%s, mt=%s, ml=%s"
% (world_rank, embedding_count, embedding_dim, indice_count, dt, mt, ml)
)
wm_embedding = wmb.create_wholememory_matrix(
dt, embedding_count, embedding_dim, -1, wm_comm, mt, ml
)
scatter_indice = torch.arange(
world_rank, embedding_count, world_size, dtype=torch.int64
)
embedding_to_scatter = gen_int_embedding(scatter_indice, embedding_dim, torch.float)
# print('\nscatter_indice=%s\nembedding_to_scatter=%s' % (scatter_indice, embedding_to_scatter))
scatter_indice_cuda = scatter_indice.cuda()
embedding_to_scatter_cuda = embedding_to_scatter.cuda()
if use_python_binding:
wm_ops.wholememory_scatter_functor(
embedding_to_scatter_cuda, scatter_indice_cuda, wm_embedding
)
else:
torch.ops.wholegraph.scatter(
embedding_to_scatter_cuda, scatter_indice_cuda, wm_embedding.get_c_handle()
)
wm_comm.barrier()
del scatter_indice
del scatter_indice_cuda
del embedding_to_scatter
del embedding_to_scatter_cuda
local_tensor_cuda, local_start = wm_embedding.get_local_tensor(
torch_import_from_dlpack, wmb.WholeMemoryMemoryLocation.MlDevice, world_rank
)
local_ref_start = min(
wmb.determine_partition_plan(embedding_count, world_size) * world_rank,
embedding_count,
)
local_ref_end = min(
wmb.determine_partition_plan(embedding_count, world_size) * (world_rank + 1),
embedding_count,
)
local_ref_count = local_ref_end - local_ref_start
assert local_start == local_ref_start
assert local_tensor_cuda.dim() == 2
assert local_tensor_cuda.shape[0] == local_ref_count
assert local_tensor_cuda.shape[1] == embedding_dim
local_tensor = local_tensor_cuda.cpu()
local_indices = torch.arange(local_ref_start, local_ref_end, dtype=torch.int64)
local_tensor_ref = gen_int_embedding(local_indices, embedding_dim, torch.float)
# print('\nlocal_tensor %s =%s\nlocal_tensor_ref %s =%s' % (
# local_tensor.shape, local_tensor, local_tensor_ref.shape, local_tensor_ref))
assert torch.allclose(local_tensor, local_tensor_ref)
gather_indice = torch.randint(0, embedding_count, (indice_count,), dtype=torch.int)
gather_indice_cuda = gather_indice.cuda()
if use_python_binding:
embedding_after_gather_cuda = wm_ops.wholememory_gather_forward_functor(
wm_embedding, gather_indice_cuda
)
else:
embedding_after_gather_cuda = torch.ops.wholegraph.gather(
wm_embedding.get_c_handle(), gather_indice_cuda, None, None
)
embedding_after_gather = embedding_after_gather_cuda.cpu()
ref_embedding_gather = gen_int_embedding(gather_indice, embedding_dim, torch.float)
# print('\ngather_indice=%s\nembedding_after_gather=%s\nref_embedding_gather=%s' % (
# gather_indice, embedding_after_gather, ref_embedding_gather))
assert torch.allclose(embedding_after_gather, ref_embedding_gather)
del gather_indice
del gather_indice_cuda
del embedding_after_gather
del embedding_after_gather_cuda
del ref_embedding_gather
wmb.destroy_wholememory_tensor(wm_embedding)
def routine_func(world_rank: int, world_size: int):
wm_comm, _ = init_torch_env_and_create_wm_comm(
world_rank, world_size, world_rank, world_size
)
wm_comm = wm_comm.wmb_comm
embedding_count = 1024 * 256 * world_size + 3
embedding_dim = 256
indice_count = 100001
dt = wmb.WholeMemoryDataType.DtFloat
print("")
for mt in [
wmb.WholeMemoryMemoryType.MtContinuous,
wmb.WholeMemoryMemoryType.MtChunked,
wmb.WholeMemoryMemoryType.MtDistributed,
]:
for ml in [
wmb.WholeMemoryMemoryLocation.MlHost,
wmb.WholeMemoryMemoryLocation.MlDevice,
]:
if wm_comm.support_type_location(mt, ml):
                scatter_gather_test_case(
wm_comm, dt, mt, ml, embedding_count, embedding_dim, indice_count, True
)
                # scatter_gather_test_case(wm_comm, dt, mt, ml, embedding_count, embedding_dim, indice_count, False)
wmb.finalize()
def test_wholegraph_gather_scatter():
gpu_count = wmb.fork_get_gpu_count()
assert gpu_count > 0
multiprocess_run(gpu_count, routine_func)
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/tests/wholegraph_torch
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/tests/wholegraph_torch/ops/test_wholegraph_weighted_sample_without_replacement.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from pylibwholegraph.utils.multiprocess import multiprocess_run
from pylibwholegraph.torch.initialize import init_torch_env_and_create_wm_comm
import pylibwholegraph.binding.wholememory_binding as wmb
import torch
import random
from functools import partial
from pylibwholegraph.test_utils.test_comm import (
gen_csr_graph,
copy_host_1D_tensor_to_wholememory,
host_get_sample_offset_tensor,
host_sample_all_neighbors,
int_to_wholememory_datatype,
int_to_wholememory_location,
int_to_wholememory_type,
)
import pylibwholegraph.torch.wholegraph_ops as wg_ops
def host_weighted_sample_without_replacement_func(
host_csr_row_ptr,
host_csr_col_ptr,
host_csr_weight_ptr,
center_nodes,
output_sample_offset_tensor,
col_id_dtype,
csr_weight_dtype,
total_sample_count,
max_sample_count,
random_seed,
):
output_dest_tensor = torch.empty((total_sample_count,), dtype=col_id_dtype)
output_center_localid_tensor = torch.empty((total_sample_count,), dtype=torch.int32)
output_edge_gid_tensor = torch.empty((total_sample_count,), dtype=torch.int64)
center_nodes_count = center_nodes.size(0)
block_size = 128 if max_sample_count <= 256 else 256
for i in range(center_nodes_count):
node_id = center_nodes[i]
start = host_csr_row_ptr[node_id]
end = host_csr_row_ptr[node_id + 1]
neighbor_count = end - start
output_id = output_sample_offset_tensor[i]
gidx = i * block_size
if neighbor_count <= max_sample_count:
for j in range(end - start):
output_dest_tensor[output_id + j] = host_csr_col_ptr[start + j]
output_center_localid_tensor[output_id + j] = i
output_edge_gid_tensor[output_id + j] = start + j
else:
total_neighbor_generated_weights = torch.tensor([], dtype=csr_weight_dtype)
edge_weight_corresponding_ids = torch.tensor([], dtype=col_id_dtype)
for j in range(block_size):
local_gidx = gidx + j
local_edge_weights = torch.tensor([], dtype=csr_weight_dtype)
generated_edge_weight_count = 0
for id in range(j, neighbor_count, block_size):
local_edge_weights = torch.cat(
(
local_edge_weights,
torch.tensor(
[host_csr_weight_ptr[start + id]],
dtype=csr_weight_dtype,
),
)
)
generated_edge_weight_count += 1
edge_weight_corresponding_ids = torch.cat(
(
edge_weight_corresponding_ids,
torch.tensor([id], dtype=col_id_dtype),
)
)
random_values = (
wg_ops.generate_exponential_distribution_negative_float_cpu(
random_seed, local_gidx, generated_edge_weight_count
)
)
generated_random_weight = torch.tensor(
[
(1.0 / local_edge_weights[i]) * random_values[i]
for i in range(generated_edge_weight_count)
]
)
total_neighbor_generated_weights = torch.cat(
(total_neighbor_generated_weights, generated_random_weight)
)
assert total_neighbor_generated_weights.size(0) == neighbor_count
_, sorted_weight_ids = torch.sort(
total_neighbor_generated_weights, descending=True
)
sorted_top_m_weight_ids = edge_weight_corresponding_ids[
sorted_weight_ids[0:max_sample_count]
]
for sample_id in range(max_sample_count):
output_dest_tensor[output_id + sample_id] = host_csr_col_ptr[
start + sorted_top_m_weight_ids[sample_id]
]
output_center_localid_tensor[output_id + sample_id] = i
output_edge_gid_tensor[output_id + sample_id] = (
start + sorted_top_m_weight_ids[sample_id]
)
return output_dest_tensor, output_center_localid_tensor, output_edge_gid_tensor
def host_weighted_sample_without_replacement(
host_csr_row_ptr,
host_csr_col_ptr,
host_csr_weight_ptr,
center_nodes,
max_sample_count,
col_id_dtype,
random_seed,
):
center_nodes_count = center_nodes.size(0)
output_sample_offset_tensor = host_get_sample_offset_tensor(
host_csr_row_ptr, center_nodes, max_sample_count
)
total_sample_count = output_sample_offset_tensor[center_nodes_count]
if max_sample_count <= 0:
return host_sample_all_neighbors(
host_csr_row_ptr,
host_csr_col_ptr,
center_nodes,
output_sample_offset_tensor,
col_id_dtype,
total_sample_count,
)
if max_sample_count > 1024:
raise ValueError(
"invalid host_unweighted_sample_without_replacement test max_sample_count"
)
torch_col_id_dtype = torch.int32
if col_id_dtype == wmb.WholeMemoryDataType.DtInt64:
torch_col_id_dtype = torch.int64
(
output_dest_tensor,
output_center_localid_tensor,
output_edge_gid_tensor,
) = host_weighted_sample_without_replacement_func(
host_csr_row_ptr,
host_csr_col_ptr,
host_csr_weight_ptr,
center_nodes,
output_sample_offset_tensor,
torch_col_id_dtype,
host_csr_weight_ptr.dtype,
total_sample_count,
max_sample_count,
random_seed,
)
return (
output_sample_offset_tensor,
output_dest_tensor,
output_center_localid_tensor,
output_edge_gid_tensor,
)
def routine_func(world_rank: int, world_size: int, **kwargs):
wm_comm, _ = init_torch_env_and_create_wm_comm(
world_rank, world_size, world_rank, world_size
)
wm_comm = wm_comm.wmb_comm
host_csr_row_ptr = kwargs["host_csr_row_ptr"]
host_csr_col_ptr = kwargs["host_csr_col_ptr"]
host_csr_weight_ptr = kwargs["host_csr_weight_ptr"]
graph_node_count = kwargs["graph_node_count"]
graph_edge_count = kwargs["graph_edge_count"]
max_sample_count = kwargs["max_sample_count"]
center_node_count = kwargs["center_node_count"]
center_node_dtype = kwargs["center_node_dtype"]
int_col_id_dtype = kwargs["col_id_dtype"]
int_csr_weight_dtype = kwargs["csr_weight_dtype"]
int_wholememory_location = kwargs["wholememory_location"]
int_wholememory_type = kwargs["wholememory_type"]
need_center_local_output = kwargs["need_center_local_output"]
need_edge_output = kwargs["need_edge_output"]
world_rank = wm_comm.get_rank()
world_size = wm_comm.get_size()
col_id_dtype = int_to_wholememory_datatype(int_col_id_dtype)
csr_weight_dtype = int_to_wholememory_datatype(int_csr_weight_dtype)
wholememory_location = int_to_wholememory_location(int_wholememory_location)
wholememory_type = int_to_wholememory_type(int_wholememory_type)
if not wm_comm.support_type_location(wholememory_type, wholememory_location):
wmb.finalize()
return
wm_csr_row_ptr = wmb.create_wholememory_array(
wmb.WholeMemoryDataType.DtInt64,
graph_node_count + 1,
wm_comm,
wholememory_type,
wholememory_location,
)
wm_csr_col_ptr = wmb.create_wholememory_array(
col_id_dtype, graph_edge_count, wm_comm, wholememory_type, wholememory_location
)
wm_csr_weight_ptr = wmb.create_wholememory_array(
csr_weight_dtype,
graph_edge_count,
wm_comm,
wholememory_type,
wholememory_location,
)
copy_host_1D_tensor_to_wholememory(
wm_csr_row_ptr, host_csr_row_ptr, world_rank, world_size, wm_comm
)
copy_host_1D_tensor_to_wholememory(
wm_csr_col_ptr, host_csr_col_ptr, world_rank, world_size, wm_comm
)
copy_host_1D_tensor_to_wholememory(
wm_csr_weight_ptr, host_csr_weight_ptr, world_rank, world_size, wm_comm
)
wm_comm.barrier()
center_node_tensor = torch.randint(
0, graph_node_count, (center_node_count,), dtype=center_node_dtype
)
center_node_tensor_cuda = center_node_tensor.cuda()
random_seed = random.randint(1, 10000)
# output_sample_offset_tensor_cuda,
# output_dest_tensor_cuda,
# output_center_localid_tensor_cuda,
# output_edge_gid_tensor_cuda =
# torch.ops.wholegraph.weighted_sample_without_replacement(wm_csr_row_ptr.get_c_handle(),
# wm_csr_col_ptr.get_c_handle(),
# wm_csr_weight_ptr.get_c_handle(),
# center_node_tensor_cuda,
# max_sample_count,
# random_seed)
output_sample_offset_tensor = None
output_dest_tensor = None
output_center_localid_tensor = None
output_edge_gid_tensor = None
output_tensors = wg_ops.weighted_sample_without_replacement(
wm_csr_row_ptr,
wm_csr_col_ptr,
wm_csr_weight_ptr,
center_node_tensor_cuda,
max_sample_count,
random_seed,
need_center_local_output=need_center_local_output,
need_edge_output=need_edge_output,
)
output_cpu_tensors = tuple(tensor.cpu() for tensor in output_tensors)
if need_edge_output and need_center_local_output:
(
output_sample_offset_tensor,
output_dest_tensor,
output_center_localid_tensor,
output_edge_gid_tensor,
) = output_cpu_tensors
elif need_center_local_output:
(
output_sample_offset_tensor,
output_dest_tensor,
output_center_localid_tensor,
) = output_cpu_tensors
elif need_edge_output:
(
output_sample_offset_tensor,
output_dest_tensor,
output_edge_gid_tensor,
) = output_cpu_tensors
else:
output_sample_offset_tensor, output_dest_tensor = output_cpu_tensors
(
output_sample_offset_tensor_ref,
output_dest_tensor_ref,
output_center_localid_tensor_ref,
output_edge_gid_tensor_ref,
) = host_weighted_sample_without_replacement(
host_csr_row_ptr,
host_csr_col_ptr,
host_csr_weight_ptr,
center_node_tensor,
max_sample_count,
col_id_dtype,
random_seed,
)
assert torch.equal(output_sample_offset_tensor, output_sample_offset_tensor_ref)
for i in range(center_node_count):
start = output_sample_offset_tensor[i]
end = output_sample_offset_tensor[i + 1]
output_dest_tensor[start:end], sorted_ids = torch.sort(
output_dest_tensor[start:end]
)
output_dest_tensor_ref[start:end], ref_sorted_ids = torch.sort(
output_dest_tensor_ref[start:end]
)
output_center_localid_tensor_ref[start:end] = output_center_localid_tensor_ref[
start:end
][ref_sorted_ids]
output_edge_gid_tensor_ref[start:end] = output_edge_gid_tensor_ref[start:end][
ref_sorted_ids
]
if need_edge_output and need_center_local_output:
output_center_localid_tensor[start:end] = output_center_localid_tensor[
start:end
][sorted_ids]
output_edge_gid_tensor[start:end] = output_edge_gid_tensor[start:end][
sorted_ids
]
elif need_center_local_output:
output_center_localid_tensor[start:end] = output_center_localid_tensor[
start:end
][sorted_ids]
elif need_edge_output:
output_edge_gid_tensor[start:end] = output_edge_gid_tensor[start:end][
sorted_ids
]
assert torch.equal(output_dest_tensor, output_dest_tensor_ref)
if need_edge_output and need_center_local_output:
assert torch.equal(
output_center_localid_tensor, output_center_localid_tensor_ref
)
assert torch.equal(output_edge_gid_tensor, output_edge_gid_tensor_ref)
elif need_center_local_output:
assert torch.equal(
output_center_localid_tensor, output_center_localid_tensor_ref
)
elif need_edge_output:
assert torch.equal(output_edge_gid_tensor, output_edge_gid_tensor_ref)
wmb.destroy_wholememory_tensor(wm_csr_row_ptr)
wmb.destroy_wholememory_tensor(wm_csr_col_ptr)
wmb.destroy_wholememory_tensor(wm_csr_weight_ptr)
wmb.finalize()
@pytest.mark.parametrize("graph_node_count", [113])
@pytest.mark.parametrize("graph_edge_count", [1043])
@pytest.mark.parametrize("max_sample_count", [11])
@pytest.mark.parametrize("center_node_count", [13])
@pytest.mark.parametrize("center_node_dtype", [torch.int32, torch.int64])
@pytest.mark.parametrize("col_id_dtype", [0, 1])
@pytest.mark.parametrize("csr_weight_dtype", [2, 3])
@pytest.mark.parametrize("wholememory_location", ([0, 1]))
@pytest.mark.parametrize("wholememory_type", ([0, 1]))
@pytest.mark.parametrize("need_center_local_output", [True, False])
@pytest.mark.parametrize("need_edge_output", [True, False])
def test_wholegraph_weighted_sample(
graph_node_count,
graph_edge_count,
max_sample_count,
center_node_count,
center_node_dtype,
col_id_dtype,
csr_weight_dtype,
wholememory_location,
wholememory_type,
need_center_local_output,
need_edge_output,
):
gpu_count = wmb.fork_get_gpu_count()
assert gpu_count > 0
csr_col_dtype = torch.int32
if col_id_dtype == 1:
csr_col_dtype = torch.int64
host_csr_row_ptr, host_csr_col_ptr, host_csr_weight_ptr = gen_csr_graph(
graph_node_count, graph_edge_count, csr_col_dtype=csr_col_dtype
)
routine_func_partial = partial(
routine_func,
host_csr_row_ptr=host_csr_row_ptr,
host_csr_col_ptr=host_csr_col_ptr,
host_csr_weight_ptr=host_csr_weight_ptr,
graph_node_count=graph_node_count,
graph_edge_count=graph_edge_count,
max_sample_count=max_sample_count,
center_node_count=center_node_count,
center_node_dtype=center_node_dtype,
col_id_dtype=col_id_dtype,
csr_weight_dtype=csr_weight_dtype,
wholememory_location=wholememory_location,
wholememory_type=wholememory_type,
need_center_local_output=need_center_local_output,
need_edge_output=need_edge_output,
)
multiprocess_run(gpu_count, routine_func_partial, True)
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/binding/CMakeLists.txt
|
# Set the list of Cython files to build
set(cython_sources wholememory_binding.pyx)
set(linked_libraries wholegraph::wholegraph)
# Build all of the Cython targets
rapids_cython_create_modules(
CXX
SOURCE_FILES "${cython_sources}"
LINKED_LIBRARIES "${linked_libraries}"
ASSOCIATED_TARGETS wholegraph
MODULE_PREFIX wholegraphcif)
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/binding/wholememory_binding.pyx
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
cimport cpython
from libc cimport stdlib
from libc.stdio cimport printf, fprintf, stdout, stderr, fflush
import functools
import cython
from libc.stdint cimport *
from libcpp.cast cimport *
from libcpp cimport bool
from cpython cimport Py_buffer
from cpython cimport array
import array
import numpy as np
from cpython.ref cimport PyObject, Py_INCREF, Py_DECREF
from cpython.object cimport Py_TYPE, PyObject_CallObject
from cpython.tuple cimport *
from cpython.long cimport PyLong_AsLongLong
cdef extern from "Python.h":
void Py_INCREF(PyObject *o)
void Py_DECREF(PyObject *o)
const char * PyUnicode_AsUTF8(object unicode)
PyObject * PyUnicode_FromString(const char * u)
cdef extern from "wholememory/wholememory.h":
ctypedef enum wholememory_error_code_t:
WHOLEMEMORY_SUCCESS "WHOLEMEMORY_SUCCESS" # success
WHOLEMEMORY_UNKNOW_ERROR "WHOLEMEMORY_UNKNOW_ERROR" # unknown error
WHOLEMEMORY_NOT_IMPLEMENTED "WHOLEMEMORY_NOT_IMPLEMENTED" # method is not implemented
WHOLEMEMORY_LOGIC_ERROR "WHOLEMEMORY_LOGIC_ERROR" # logic error
WHOLEMEMORY_CUDA_ERROR "WHOLEMEMORY_CUDA_ERROR" # CUDA error
WHOLEMEMORY_COMMUNICATION_ERROR "WHOLEMEMORY_COMMUNICATION_ERROR" # communication error
WHOLEMEMORY_INVALID_INPUT "WHOLEMEMORY_INVALID_INPUT" # invalid input, e.g. nullptr
WHOLEMEMORY_INVALID_VALUE "WHOLEMEMORY_INVALID_VALUE" # input value is invalid
WHOLEMEMORY_OUT_OF_MEMORY "WHOLEMEMORY_OUT_OF_MEMORY" # out of memory
WHOLEMEMORY_NOT_SUPPORTED "WHOLEMEMORY_NOT_SUPPORTED" # not supported
ctypedef enum wholememory_memory_type_t:
WHOLEMEMORY_MT_NONE "WHOLEMEMORY_MT_NONE"
WHOLEMEMORY_MT_CONTINUOUS "WHOLEMEMORY_MT_CONTINUOUS"
WHOLEMEMORY_MT_CHUNKED "WHOLEMEMORY_MT_CHUNKED"
WHOLEMEMORY_MT_DISTRIBUTED "WHOLEMEMORY_MT_DISTRIBUTED"
ctypedef enum wholememory_memory_location_t:
WHOLEMEMORY_ML_NONE "WHOLEMEMORY_ML_NONE"
WHOLEMEMORY_ML_DEVICE "WHOLEMEMORY_ML_DEVICE"
WHOLEMEMORY_ML_HOST "WHOLEMEMORY_ML_HOST"
cdef wholememory_error_code_t wholememory_init(unsigned int flags)
cdef wholememory_error_code_t wholememory_finalize()
cdef struct wholememory_unique_id_t:
char internal[128]
cdef struct wholememory_comm_:
pass
ctypedef wholememory_comm_ * wholememory_comm_t
cdef wholememory_error_code_t wholememory_create_unique_id(wholememory_unique_id_t * unique_id)
cdef wholememory_error_code_t wholememory_create_communicator(wholememory_comm_t * comm,
wholememory_unique_id_t unique_id,
int rank,
int size)
cdef wholememory_error_code_t wholememory_destroy_communicator(wholememory_comm_t comm)
cdef wholememory_error_code_t wholememory_communicator_support_type_location(
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location)
cdef wholememory_error_code_t wholememory_communicator_get_rank(int * rank, wholememory_comm_t comm)
cdef wholememory_error_code_t wholememory_communicator_get_size(int * size, wholememory_comm_t comm)
cdef wholememory_error_code_t wholememory_communicator_barrier(wholememory_comm_t comm)
cdef struct wholememory_handle_:
pass
ctypedef wholememory_handle_ * wholememory_handle_t
cdef wholememory_error_code_t wholememory_malloc(wholememory_handle_t * wholememory_handle_ptr,
size_t total_size,
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location,
size_t data_granularity)
cdef wholememory_error_code_t wholememory_free(wholememory_handle_t wholememory_handle)
cdef wholememory_error_code_t wholememory_get_communicator(wholememory_comm_t * comm,
wholememory_handle_t wholememory_handle)
cdef wholememory_memory_type_t wholememory_get_memory_type(wholememory_handle_t wholememory_handle)
cdef wholememory_memory_location_t wholememory_get_memory_location(wholememory_handle_t wholememory_handle)
cdef size_t wholememory_get_total_size(wholememory_handle_t wholememory_handle)
cdef wholememory_error_code_t wholememory_get_local_memory(void** local_ptr,
size_t * local_size,
size_t * local_offset,
wholememory_handle_t wholememory_handle)
cdef wholememory_error_code_t wholememory_get_rank_memory(void** rank_memory_ptr,
size_t * rank_memory_size,
size_t * rank_memory_offset,
int rank,
wholememory_handle_t wholememory_handle)
cdef wholememory_error_code_t wholememory_get_global_pointer(void** global_ptr,
wholememory_handle_t wholememory_handle)
cdef wholememory_error_code_t wholememory_determine_partition_plan(size_t * size_per_rank,
size_t total_size,
size_t data_granularity,
int world_size)
cdef wholememory_error_code_t wholememory_determine_entry_partition_plan(size_t * entry_per_rank,
size_t total_entry_count,
int world_size)
cdef wholememory_error_code_t wholememory_get_partition_plan(size_t * size_per_rank,
wholememory_handle_t wholememory_handle)
cdef int fork_get_device_count()
cdef wholememory_error_code_t wholememory_load_from_file(wholememory_handle_t wholememory_handle,
size_t memory_offset,
size_t memory_entry_size,
size_t file_entry_size,
const char** file_names,
int file_count)
cdef wholememory_error_code_t wholememory_store_to_file(wholememory_handle_t wholememory_handle,
size_t memory_offset,
size_t memory_entry_stride,
size_t file_entry_size,
const char *local_file_name)
cpdef enum WholeMemoryErrorCode:
Success = WHOLEMEMORY_SUCCESS
UnknowError = WHOLEMEMORY_UNKNOW_ERROR
NotImplemented = WHOLEMEMORY_NOT_IMPLEMENTED
LogicError = WHOLEMEMORY_LOGIC_ERROR
CUDAError = WHOLEMEMORY_CUDA_ERROR
CommunicationError = WHOLEMEMORY_COMMUNICATION_ERROR
InvalidInput = WHOLEMEMORY_INVALID_INPUT
InvalidValue = WHOLEMEMORY_INVALID_VALUE
OutOfMemory = WHOLEMEMORY_OUT_OF_MEMORY
NotSupported = WHOLEMEMORY_NOT_SUPPORTED
cpdef enum WholeMemoryMemoryType:
MtNone = WHOLEMEMORY_MT_NONE
MtContinuous = WHOLEMEMORY_MT_CONTINUOUS
MtChunked = WHOLEMEMORY_MT_CHUNKED
MtDistributed = WHOLEMEMORY_MT_DISTRIBUTED
cpdef enum WholeMemoryMemoryLocation:
MlNone = WHOLEMEMORY_ML_NONE
MlDevice = WHOLEMEMORY_ML_DEVICE
MlHost = WHOLEMEMORY_ML_HOST
cdef check_wholememory_error_code(wholememory_error_code_t err):
cdef WholeMemoryErrorCode err_code = int(err)
if err_code == Success:
return
elif err_code == UnknowError:
raise Exception('Unknown error')
elif err_code == NotImplemented:
raise NotImplementedError('Not implemented')
elif err_code == LogicError:
raise RuntimeError('Logic error')
elif err_code == CUDAError:
raise RuntimeError('CUDA error')
elif err_code == CommunicationError:
raise RuntimeError('Communication error')
elif err_code == InvalidInput:
raise ValueError('Invalid input')
elif err_code == InvalidValue:
raise ValueError('Invalid value')
elif err_code == OutOfMemory:
raise MemoryError('Out of memory')
else:
raise NotImplementedError('Error code %d not recognized' % (int(err),))
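# Added commentary (not in the original source): check_wholememory_error_code maps the
# C error codes above onto Python exceptions, so wrapper code can rely on ordinary
# exception handling instead of checking return values. A minimal sketch:
#
#   try:
#       check_wholememory_error_code(wholememory_init(0))
#   except MemoryError:
#       ...  # WHOLEMEMORY_OUT_OF_MEMORY
#   except ValueError:
#       ...  # WHOLEMEMORY_INVALID_INPUT or WHOLEMEMORY_INVALID_VALUE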
cdef extern from "wholememory/tensor_description.h":
ctypedef enum wholememory_dtype_t:
WHOLEMEMORY_DT_UNKNOWN "WHOLEMEMORY_DT_UNKNOWN"
WHOLEMEMORY_DT_FLOAT "WHOLEMEMORY_DT_FLOAT"
WHOLEMEMORY_DT_HALF "WHOLEMEMORY_DT_HALF"
WHOLEMEMORY_DT_DOUBLE "WHOLEMEMORY_DT_DOUBLE"
WHOLEMEMORY_DT_BF16 "WHOLEMEMORY_DT_BF16"
WHOLEMEMORY_DT_INT "WHOLEMEMORY_DT_INT"
WHOLEMEMORY_DT_INT64 "WHOLEMEMORY_DT_INT64"
WHOLEMEMORY_DT_INT16 "WHOLEMEMORY_DT_INT16"
WHOLEMEMORY_DT_INT8 "WHOLEMEMORY_DT_INT8"
WHOLEMEMORY_DT_COUNT "WHOLEMEMORY_DT_COUNT"
cdef struct wholememory_tensor_description_t:
int64_t sizes[8]
int64_t strides[8]
int64_t storage_offset
int dim
wholememory_dtype_t dtype
cdef size_t wholememory_dtype_get_element_size(wholememory_dtype_t dtype)
cdef int64_t wholememory_get_memory_element_count_from_tensor(
wholememory_tensor_description_t * p_tensor_description)
cdef extern from "wholememory/env_func_ptrs.h":
ctypedef enum wholememory_memory_allocation_type_t:
WHOLEMEMORY_MA_NONE "WHOLEMEMORY_MA_NONE"
WHOLEMEMORY_MA_DEVICE "WHOLEMEMORY_MA_DEVICE"
WHOLEMEMORY_MA_HOST "WHOLEMEMORY_MA_HOST"
WHOLEMEMORY_MA_PINNED "WHOLEMEMORY_MA_PINNED"
ctypedef void (*wholememory_create_memory_context_func_t)(void ** memory_context,
void * global_context)
ctypedef void (*wholememory_destroy_memory_context_func_t)(void * memory_context,
void * global_context)
ctypedef void * (*wholememory_malloc_func_t)(wholememory_tensor_description_t * desc,
wholememory_memory_allocation_type_t memory_allocation_type,
void * memory_context,
void * global_context)
ctypedef void (*wholememory_free_func_t)(void * memory_context, void * global_context)
cdef struct wholememory_temp_memory_func_t:
wholememory_create_memory_context_func_t create_memory_context_fn
wholememory_destroy_memory_context_func_t destroy_memory_context_fn
wholememory_malloc_func_t malloc_fn
wholememory_free_func_t free_fn
void * global_context
cdef struct wholememory_output_memory_func_t:
wholememory_malloc_func_t malloc_fn
wholememory_free_func_t free_fn
void * global_context
cdef struct wholememory_env_func_t:
wholememory_temp_memory_func_t temporary_fns
wholememory_output_memory_func_t output_fns
cpdef enum WholeMemoryMemoryAllocType:
MatNone = WHOLEMEMORY_MA_NONE
MatDevice = WHOLEMEMORY_MA_DEVICE
MatHost = WHOLEMEMORY_MA_HOST
MatPinned = WHOLEMEMORY_MA_PINNED
cdef class PyMemoryAllocType:
cdef wholememory_memory_allocation_type_t alloc_type
def __cinit__(self):
self.alloc_type = WHOLEMEMORY_MA_NONE
def set_type(self, WholeMemoryMemoryAllocType new_type):
self.alloc_type = <wholememory_memory_allocation_type_t> <int64_t> new_type
def get_type(self):
return <int64_t> self.alloc_type
def set_ctype(self, wholememory_memory_allocation_type_t alloc_type):
self.alloc_type = alloc_type
def get_ctype(self):
return self.alloc_type
cdef class GlobalContextWrapper:
cdef PyObject * temp_create_context_fn
cdef PyObject * temp_destroy_context_fn
cdef PyObject * temp_malloc_fn
cdef PyObject * temp_free_fn
cdef PyObject * temp_global_context
cdef PyObject * output_malloc_fn
cdef PyObject * output_free_fn
cdef PyObject * output_global_context
cdef wholememory_env_func_t env_func
def __cinit__(self):
self.temp_create_context_fn = NULL
self.temp_destroy_context_fn = NULL
self.temp_malloc_fn = NULL
self.temp_free_fn = NULL
self.temp_global_context = NULL
self.output_malloc_fn = NULL
self.output_free_fn = NULL
self.output_global_context = NULL
def __dealloc__(self):
Py_DECREF(self.temp_create_context_fn)
Py_DECREF(self.temp_destroy_context_fn)
Py_DECREF(self.temp_malloc_fn)
Py_DECREF(self.temp_free_fn)
if self.temp_global_context:
Py_DECREF(self.temp_global_context)
Py_DECREF(self.output_malloc_fn)
Py_DECREF(self.output_free_fn)
if self.output_global_context:
Py_DECREF(self.output_global_context)
cpdef create_context(self,
temp_create_context_fn,
temp_destroy_context_fn,
temp_malloc_fn,
temp_free_fn,
temp_global_context,
output_malloc_fn,
output_free_fn,
output_global_context):
self.temp_create_context_fn = <PyObject *> temp_create_context_fn
Py_INCREF(self.temp_create_context_fn)
self.temp_destroy_context_fn = <PyObject *> temp_destroy_context_fn
Py_INCREF(self.temp_destroy_context_fn)
self.temp_malloc_fn = <PyObject *> temp_malloc_fn
Py_INCREF(self.temp_malloc_fn)
self.temp_free_fn = <PyObject *> temp_free_fn
Py_INCREF(self.temp_free_fn)
if temp_global_context:
self.temp_global_context = <PyObject *> temp_global_context
Py_INCREF(self.temp_global_context)
self.output_malloc_fn = <PyObject *> output_malloc_fn
Py_INCREF(self.output_malloc_fn)
self.output_free_fn = <PyObject *> output_free_fn
Py_INCREF(self.output_free_fn)
if output_global_context:
self.output_global_context = <PyObject *> output_global_context
Py_INCREF(self.output_global_context)
self.env_func.temporary_fns.create_memory_context_fn = <wholememory_create_memory_context_func_t> &python_cb_wrapper_temp_create_context
self.env_func.temporary_fns.destroy_memory_context_fn = <wholememory_destroy_memory_context_func_t> &python_cb_wrapper_temp_destroy_context
self.env_func.temporary_fns.malloc_fn = <wholememory_malloc_func_t> &python_cb_wrapper_temp_malloc
self.env_func.temporary_fns.free_fn = <wholememory_free_func_t> &python_cb_wrapper_temp_free
self.env_func.temporary_fns.global_context = <PyObject *> self
self.env_func.output_fns.malloc_fn = <wholememory_malloc_func_t> &python_cb_wrapper_output_malloc
self.env_func.output_fns.free_fn = <wholememory_free_func_t> &python_cb_wrapper_output_free
self.env_func.output_fns.global_context = <PyObject *> self
cpdef int64_t get_env_fns(self):
return <int64_t> (&self.env_func)
cdef void python_cb_wrapper_temp_create_context(void** memory_context,
void * global_context) nogil:
cdef PyObject * ret_memory_context = NULL
with gil:
wrapped_global_context = <GlobalContextWrapper> <PyObject *> global_context
python_fn = wrapped_global_context.temp_create_context_fn
python_global_context = wrapped_global_context.temp_global_context
args = PyTuple_New(1)
Py_INCREF(<object> python_global_context)
PyTuple_SetItem(args, 0, <object> python_global_context)
py_memory_context = PyObject_CallObject(<object> python_fn, <object> args)
ret_memory_context = <PyObject *> py_memory_context
Py_DECREF(args)
Py_INCREF(ret_memory_context)
(<PyObject **> memory_context)[0] = ret_memory_context
return
cdef void python_cb_wrapper_temp_destroy_context(void * memory_context,
void * global_context) nogil:
with gil:
wrapped_global_context = <GlobalContextWrapper> <PyObject *> global_context
python_fn = wrapped_global_context.temp_destroy_context_fn
python_global_context = wrapped_global_context.temp_global_context
args = PyTuple_New(2)
Py_INCREF(<object> <PyObject *> memory_context)
PyTuple_SetItem(args, 0, <object> <PyObject *> memory_context)
Py_INCREF(<object> python_global_context)
PyTuple_SetItem(args, 1, <object> python_global_context)
PyObject_CallObject(<object> python_fn, <object> args)
Py_DECREF(args)
Py_DECREF(<PyObject *> memory_context)
return
cdef void * python_cb_wrapper_temp_malloc(wholememory_tensor_description_t * tensor_desc,
wholememory_memory_allocation_type_t malloc_type,
void * memory_context,
void * global_context) nogil:
cdef int64_t res_ptr = 0
with gil:
wrapped_global_context = <GlobalContextWrapper> <PyObject *> global_context
py_tensor_desc = PyWholeMemoryTensorDescription()
py_tensor_desc.set_by_tensor_desc(tensor_desc)
py_malloc_type = PyMemoryAllocType()
py_malloc_type.set_type(malloc_type)
python_fn = wrapped_global_context.temp_malloc_fn
python_global_context = wrapped_global_context.temp_global_context
args = PyTuple_New(4)
Py_INCREF(py_tensor_desc)
PyTuple_SetItem(args, 0, <object> py_tensor_desc)
Py_INCREF(py_malloc_type)
PyTuple_SetItem(args, 1, <object> py_malloc_type)
Py_INCREF(<object> <PyObject *> memory_context)
PyTuple_SetItem(args, 2, <object> <PyObject *> memory_context)
Py_INCREF(<object> <PyObject *> python_global_context)
PyTuple_SetItem(args, 3, <object> <PyObject *> python_global_context)
res_ptr = PyLong_AsLongLong(PyObject_CallObject(<object> python_fn, <object> args))
Py_DECREF(args)
return <void *> res_ptr
cdef void python_cb_wrapper_temp_free(void * memory_context,
void * global_context) nogil:
with gil:
wrapped_global_context = <GlobalContextWrapper> <PyObject *> global_context
python_fn = wrapped_global_context.temp_free_fn
python_global_context = wrapped_global_context.temp_global_context
args = PyTuple_New(2)
Py_INCREF(<object> <PyObject *> memory_context)
PyTuple_SetItem(args, 0, <object> <PyObject *> memory_context)
Py_INCREF(<object> python_global_context)
PyTuple_SetItem(args, 1, <object> python_global_context)
PyObject_CallObject(<object> python_fn, <object> args)
Py_DECREF(args)
return
cdef void * python_cb_wrapper_output_malloc(wholememory_tensor_description_t * tensor_desc,
wholememory_memory_allocation_type_t malloc_type,
void * memory_context,
void * global_context) nogil:
cdef int64_t res_ptr = 0
with gil:
wrapped_global_context = <GlobalContextWrapper> <PyObject *> global_context
py_tensor_desc = PyWholeMemoryTensorDescription()
py_tensor_desc.set_by_tensor_desc(tensor_desc)
py_malloc_type = PyMemoryAllocType()
py_malloc_type.set_type(malloc_type)
python_fn = wrapped_global_context.output_malloc_fn
python_global_context = wrapped_global_context.output_global_context
args = PyTuple_New(4)
Py_INCREF(py_tensor_desc)
PyTuple_SetItem(args, 0, <object> <PyObject *> py_tensor_desc)
Py_INCREF(py_malloc_type)
PyTuple_SetItem(args, 1, <object> <PyObject *> py_malloc_type)
Py_INCREF(<object> <PyObject *> memory_context)
PyTuple_SetItem(args, 2, <object> <PyObject *> memory_context)
Py_INCREF(<object> <PyObject *> python_global_context)
PyTuple_SetItem(args, 3, <object> <PyObject *> python_global_context)
res_ptr = PyLong_AsLongLong(PyObject_CallObject(<object> python_fn, <object> args))
Py_DECREF(args)
return <void *> res_ptr
cdef void python_cb_wrapper_output_free(void * memory_context,
void * global_context) nogil:
with gil:
wrapped_global_context = <GlobalContextWrapper> <PyObject *> global_context
python_fn = wrapped_global_context.output_free_fn
python_global_context = wrapped_global_context.output_global_context
args = PyTuple_New(2)
Py_INCREF(<object> <PyObject *> memory_context)
PyTuple_SetItem(args, 0, <object> <PyObject *> memory_context)
Py_INCREF(<object> python_global_context)
PyTuple_SetItem(args, 1, <object> python_global_context)
PyObject_CallObject(<object> python_fn, <object> args)
Py_DECREF(args)
return
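# Added commentary (not in the original source): the python_cb_wrapper_* functions above
# form the bridge between the C callback table (wholememory_env_func_t) and the Python
# allocator callbacks stored in GlobalContextWrapper. Each wrapper re-acquires the GIL,
# recovers the wrapper object from global_context, builds an argument tuple, calls the
# stored Python function, and converts the result back into the raw pointer the C side
# expects.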
cdef extern from "wholememory/wholememory_tensor.h":
cdef struct wholememory_tensor_:
pass
ctypedef wholememory_tensor_ * wholememory_tensor_t
cdef wholememory_error_code_t wholememory_create_tensor(wholememory_tensor_t *wholememory_tensor,
wholememory_tensor_description_t *tensor_description,
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location)
cdef wholememory_error_code_t wholememory_destroy_tensor(wholememory_tensor_t wholememory_tensor)
cdef wholememory_error_code_t wholememory_make_tensor_from_pointer(wholememory_tensor_t *wholememory_tensor,
void *data_ptr,
wholememory_tensor_description_t *tensor_description)
cdef wholememory_error_code_t wholememory_make_tensor_from_handle(wholememory_tensor_t *wholememory_tensor,
wholememory_handle_t wholememory_handle,
wholememory_tensor_description_t *tensor_description)
cdef bool wholememory_tensor_has_handle(wholememory_tensor_t wholememory_tensor)
cdef wholememory_handle_t wholememory_tensor_get_memory_handle(wholememory_tensor_t wholememory_tensor)
cdef wholememory_tensor_description_t * wholememory_tensor_get_tensor_description(
wholememory_tensor_t wholememory_tensor)
cdef wholememory_error_code_t wholememory_tensor_get_subtensor(wholememory_tensor_t wholememory_tensor,
int64_t *starts,
int64_t *ends,
wholememory_tensor_t *sub_wholememory_tensor)
int64_t get_wholememory_tensor_count()
def py_get_wholememory_tensor_count():
return get_wholememory_tensor_count()
cpdef enum WholeMemoryDataType:
DtUnknown = WHOLEMEMORY_DT_UNKNOWN
DtFloat = WHOLEMEMORY_DT_FLOAT
DtHalf = WHOLEMEMORY_DT_HALF
DtDouble = WHOLEMEMORY_DT_DOUBLE
DtBF16 = WHOLEMEMORY_DT_BF16
DtInt = WHOLEMEMORY_DT_INT
DtInt64 = WHOLEMEMORY_DT_INT64
DtInt16 = WHOLEMEMORY_DT_INT16
DtInt8 = WHOLEMEMORY_DT_INT8
DtCount = WHOLEMEMORY_DT_COUNT
cdef extern from "wholememory/embedding.h":
cdef struct wholememory_embedding_cache_policy_:
pass
cdef struct wholememory_embedding_optimizer_:
pass
cdef struct wholememory_embedding_:
pass
ctypedef wholememory_embedding_cache_policy_ * wholememory_embedding_cache_policy_t
ctypedef wholememory_embedding_optimizer_ * wholememory_embedding_optimizer_t
ctypedef wholememory_embedding_ * wholememory_embedding_t
ctypedef enum wholememory_access_type_t:
WHOLEMEMORY_AT_NONE "WHOLEMEMORY_AT_NONE"
WHOLEMEMORY_AT_READONLY "WHOLEMEMORY_AT_READONLY"
WHOLEMEMORY_AT_READWRITE "WHOLEMEMORY_AT_READWRITE"
ctypedef enum wholememory_optimizer_type_t:
WHOLEMEMORY_OPT_NONE "WHOLEMEMORY_OPT_NONE"
WHOLEMEMORY_OPT_SGD "WHOLEMEMORY_OPT_SGD"
WHOLEMEMORY_OPT_LAZY_ADAM "WHOLEMEMORY_OPT_LAZY_ADAM"
WHOLEMEMORY_OPT_RMSPROP "WHOLEMEMORY_OPT_RMSPROP"
WHOLEMEMORY_OPT_ADAGRAD "WHOLEMEMORY_OPT_ADAGRAD"
cdef wholememory_error_code_t wholememory_create_embedding_optimizer(
wholememory_embedding_optimizer_t * optimizer, wholememory_optimizer_type_t optimizer_type)
cdef wholememory_error_code_t wholememory_optimizer_set_parameter(
wholememory_embedding_optimizer_t optimizer, const char * parameter_name, void * value)
cdef void wholememory_destroy_embedding_optimizer(wholememory_embedding_optimizer_t optimizer)
cdef wholememory_error_code_t wholememory_create_embedding_cache_policy(
wholememory_embedding_cache_policy_t * cache_policy,
wholememory_comm_t cache_level_comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location,
wholememory_access_type_t access_type,
float cache_ratio)
cdef wholememory_error_code_t wholememory_destroy_embedding_cache_policy(
wholememory_embedding_cache_policy_t cache_policy)
cdef wholememory_error_code_t wholememory_create_embedding(
wholememory_embedding_t * wholememory_embedding,
wholememory_tensor_description_t * embedding_tensor_description,
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location,
wholememory_embedding_optimizer_t optimizer,
wholememory_embedding_cache_policy_t cache_policy)
cdef wholememory_error_code_t wholememory_destroy_embedding(
wholememory_embedding_t wholememory_embedding)
cdef wholememory_error_code_t wholememory_embedding_gather(wholememory_embedding_t wholememory_embedding,
wholememory_tensor_t indices,
wholememory_tensor_t output,
bool adjust_cache,
wholememory_env_func_t * p_env_fns,
int64_t stream_int)
cdef wholememory_error_code_t wholememory_embedding_gather_gradient_apply(
wholememory_embedding_t wholememory_embedding,
wholememory_tensor_t indices,
wholememory_tensor_t grads,
bool adjust_cache,
float lr,
wholememory_env_func_t * p_env_fns,
int64_t stream_int)
cdef wholememory_tensor_t wholememory_embedding_get_embedding_tensor(
wholememory_embedding_t wholememory_embedding)
cdef const char * const * wholememory_embedding_get_optimizer_state_names(
wholememory_embedding_t wholememory_embedding)
cdef wholememory_tensor_t wholememory_embedding_get_optimizer_state(
wholememory_embedding_t wholememory_embedding, const char * name)
cdef wholememory_error_code_t wholememory_embedding_writeback_cache(
wholememory_embedding_t wholememory_embedding, int64_t stream_int)
cdef wholememory_error_code_t wholememory_embedding_drop_all_cache(
wholememory_embedding_t wholememory_embedding, int64_t stream_int)
cpdef enum WholeMemoryAccessType:
AtNone = WHOLEMEMORY_AT_NONE
AtReadOnly = WHOLEMEMORY_AT_READONLY
AtReadWrite = WHOLEMEMORY_AT_READWRITE
cpdef enum WholeMemoryOptimizerType:
OptNone = WHOLEMEMORY_OPT_NONE
OptSgd = WHOLEMEMORY_OPT_SGD
OptLazyAdam = WHOLEMEMORY_OPT_LAZY_ADAM
OptAdaGrad = WHOLEMEMORY_OPT_ADAGRAD
OptRmsProp = WHOLEMEMORY_OPT_RMSPROP
cdef class WholeMemoryOptimizer:
cdef wholememory_embedding_optimizer_t wm_optimizer
cdef wholememory_optimizer_type_t optimizer_type
cdef public dict param_dict
def __cinit__(self):
self.wm_optimizer = NULL
self.optimizer_type = WHOLEMEMORY_OPT_NONE
def __init__(self):
self.param_dict = {}
def create_optimizer(self,
WholeMemoryOptimizerType optimizer_type,
dict param_dict):
cdef str param_key
cdef float param_value
self.optimizer_type = <wholememory_optimizer_type_t> <int> optimizer_type
self.param_dict = param_dict
check_wholememory_error_code(wholememory_create_embedding_optimizer(&self.wm_optimizer, self.optimizer_type))
for param_key, param_value in self.param_dict.items():
key_bytes = param_key.encode('utf-8')
check_wholememory_error_code(
wholememory_optimizer_set_parameter(self.wm_optimizer, key_bytes, &param_value))
def destroy_optimizer(self):
if self.wm_optimizer == NULL:
return
wholememory_destroy_embedding_optimizer(self.wm_optimizer)
self.wm_optimizer = NULL
self.optimizer_type = WHOLEMEMORY_OPT_NONE
self.param_dict = None
def create_optimizer(WholeMemoryOptimizerType optimizer_type,
dict param_dict):
wm_optimizer = WholeMemoryOptimizer()
wm_optimizer.create_optimizer(optimizer_type, param_dict)
return wm_optimizer
def create_non_optimizer():
return WholeMemoryOptimizer()
cdef class WholeMemoryCachePolicy:
cdef wholememory_embedding_cache_policy_t cache_policy
cdef wholememory_memory_type_t memory_type
cdef wholememory_memory_location_t memory_location
cdef wholememory_access_type_t access_type
cdef float ratio
cdef PyWholeMemoryComm comm
def __cinit__(self):
self.cache_policy = NULL
self.memory_type = WHOLEMEMORY_MT_NONE
self.memory_location = WHOLEMEMORY_ML_NONE
self.access_type = WHOLEMEMORY_AT_NONE
self.ratio = 0.5
self.comm = None
def create_policy(self,
PyWholeMemoryComm comm,
WholeMemoryMemoryType memory_type,
WholeMemoryMemoryLocation memory_location,
WholeMemoryAccessType access_type,
float ratio):
self.memory_type = <wholememory_memory_type_t> <int> memory_type
self.memory_location = <wholememory_memory_location_t> <int> memory_location
self.access_type = <wholememory_access_type_t> <int> access_type
self.ratio = ratio
check_wholememory_error_code(wholememory_create_embedding_cache_policy(&self.cache_policy,
comm.comm_id,
self.memory_type,
self.memory_location,
self.access_type,
self.ratio))
def destroy_policy(self):
if self.cache_policy == NULL:
return
check_wholememory_error_code(wholememory_destroy_embedding_cache_policy(self.cache_policy))
self.cache_policy = NULL
self.memory_type = WHOLEMEMORY_MT_NONE
self.memory_location = WHOLEMEMORY_ML_NONE
self.access_type = WHOLEMEMORY_AT_NONE
self.ratio = 0.5
self.comm = None
def create_cache_policy(PyWholeMemoryComm comm,
WholeMemoryMemoryType memory_type,
WholeMemoryMemoryLocation memory_location,
WholeMemoryAccessType access_type,
float ratio):
cache_policy = WholeMemoryCachePolicy()
cache_policy.create_policy(comm, memory_type, memory_location, access_type, ratio)
return cache_policy
def create_non_cache_policy():
return WholeMemoryCachePolicy()
cdef class PyWholeMemoryEmbedding:
cdef wholememory_embedding_t wm_embedding
cdef wholememory_memory_type_t memory_type
cdef wholememory_memory_location_t memory_location
def __cinit__(self):
self.wm_embedding = NULL
self.memory_type = WHOLEMEMORY_MT_NONE
self.memory_location = WHOLEMEMORY_ML_NONE
def create_embedding(self,
PyWholeMemoryTensorDescription tensor_desc,
PyWholeMemoryComm comm,
WholeMemoryMemoryType memory_type,
WholeMemoryMemoryLocation memory_location,
WholeMemoryOptimizer optimizer,
WholeMemoryCachePolicy cache_policy):
self.memory_type = <wholememory_memory_type_t> <int> memory_type
self.memory_location = <wholememory_memory_location_t> <int> memory_location
check_wholememory_error_code(wholememory_create_embedding(&self.wm_embedding,
&tensor_desc.tensor_description,
comm.comm_id,
self.memory_type,
self.memory_location,
optimizer.wm_optimizer,
cache_policy.cache_policy))
def destroy_embedding(self):
check_wholememory_error_code(wholememory_destroy_embedding(self.wm_embedding))
def writeback_all_cache(self,
int64_t stream):
check_wholememory_error_code(wholememory_embedding_writeback_cache(self.wm_embedding, stream))
def drop_all_cache(self,
int64_t stream):
check_wholememory_error_code(wholememory_embedding_drop_all_cache(self.wm_embedding, stream))
def get_embedding_tensor(self):
cdef wholememory_tensor_t wm_tensor
wm_tensor = wholememory_embedding_get_embedding_tensor(self.wm_embedding)
py_wm_tensor = PyWholeMemoryTensor()
py_wm_tensor.from_c_handle(wm_tensor)
return py_wm_tensor
def get_optimizer_state_names(self):
cdef int i = 0
result = []
cdef const char * const * state_names
state_names = wholememory_embedding_get_optimizer_state_names(self.wm_embedding)
while state_names[i] != NULL:
result.append(<object> PyUnicode_FromString(state_names[i]))
i += 1
return result
def get_optimizer_state(self,
state_name):
cdef wholememory_tensor_t state_tensor
state_tensor = wholememory_embedding_get_optimizer_state(
self.wm_embedding,
PyUnicode_AsUTF8(state_name))
py_state_tensor = PyWholeMemoryTensor()
py_state_tensor.from_c_handle(state_tensor)
return py_state_tensor
def create_embedding(PyWholeMemoryTensorDescription tensor_desc,
PyWholeMemoryComm comm,
WholeMemoryMemoryType memory_type,
WholeMemoryMemoryLocation memory_location,
WholeMemoryOptimizer optimizer,
WholeMemoryCachePolicy cache_policy):
wm_embedding = PyWholeMemoryEmbedding()
wm_embedding.create_embedding(tensor_desc,
comm,
memory_type,
memory_location,
optimizer,
cache_policy)
return wm_embedding
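# Illustrative sketch (not in the original source) of wiring the embedding helpers
# together; the tensor description, communicator, and optimizer parameter names below
# are placeholders:
#
#   optimizer = create_optimizer(WholeMemoryOptimizerType.OptSgd, {"lr": 0.1})
#   cache_policy = create_non_cache_policy()
#   embedding = create_embedding(tensor_desc, comm,
#                                WholeMemoryMemoryType.MtDistributed,
#                                WholeMemoryMemoryLocation.MlDevice,
#                                optimizer, cache_policy)
#   # EmbeddingGatherForward(embedding, indices, output, ...) then performs lookups.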
cpdef void EmbeddingGatherForward(PyWholeMemoryEmbedding wm_embedding,
WrappedLocalTensor indice,
WrappedLocalTensor output,
bool adjust_cache,
int64_t p_env_fns_int,
int64_t stream_int):
check_wholememory_error_code(wholememory_embedding_gather(wm_embedding.wm_embedding,
<wholememory_tensor_t> <int64_t> indice.get_c_handle(),
<wholememory_tensor_t> <int64_t> output.get_c_handle(),
adjust_cache,
<wholememory_env_func_t *> <void *> p_env_fns_int,
stream_int))
cpdef void EmbeddingGatherGradientApply(PyWholeMemoryEmbedding wm_embedding,
WrappedLocalTensor indice,
WrappedLocalTensor grads,
bool adjust_cache,
float lr,
int64_t p_env_fns_int,
int64_t stream_int):
check_wholememory_error_code(wholememory_embedding_gather_gradient_apply(
wm_embedding.wm_embedding,
<wholememory_tensor_t> <int64_t> indice.get_c_handle(),
<wholememory_tensor_t> <int64_t> grads.get_c_handle(),
adjust_cache,
lr,
<wholememory_env_func_t *> <void *> p_env_fns_int,
stream_int))
######################################################################
# dlpack
# https://github.com/dmlc/dlpack/blob/main/include/dlpack/dlpack.h
# https://github.com/cupy/cupy/blob/master/cupy/_core/dlpack.pyx
cpdef enum DLDeviceType:
kDLCPU = 1
kDLCUDA = 2
kDLCUDAHost = 3
ctypedef struct DLDevice:
DLDeviceType device_type
int device_id
cdef enum DLDataTypeCode:
kDLInt = 0
kDLUInt = 1
kDLFloat = 2
kDLBfloat = 4
ctypedef struct DLDataType:
uint8_t code
uint8_t bits
uint16_t lanes
ctypedef struct DLTensor:
void * data
DLDevice device
int ndim
DLDataType dtype
int64_t * shape
int64_t * strides
uint64_t byte_offset
ctypedef struct DLManagedTensor:
DLTensor dl_tensor
void * manager_ctx
void (*deleter)(DLManagedTensor *)
cdef void pycapsule_deleter(object dltensor):
cdef DLManagedTensor * dlm_tensor
# Do not invoke the deleter on a used capsule
if cpython.PyCapsule_IsValid(dltensor, 'dltensor'):
dlm_tensor = <DLManagedTensor *> cpython.PyCapsule_GetPointer(
dltensor, 'dltensor')
dlm_tensor.deleter(dlm_tensor)
cdef void deleter(DLManagedTensor * tensor) with gil:
if tensor.manager_ctx is NULL:
return
cpython.Py_DECREF(<PyWholeMemoryFlattenDlpack> tensor.manager_ctx)
tensor.manager_ctx = NULL
stdlib.free(tensor)
# end dlpack
######################################################################
cdef class PyWholeMemoryUniqueID:
cdef wholememory_unique_id_t wholememory_unique_id
cdef Py_ssize_t shape[1]
cdef Py_ssize_t strides[1]
cdef int64_t shape_int64_t[1]
cdef int64_t strides_int64_t[1]
def __cinit__(self):
self.shape[0] = sizeof(self.wholememory_unique_id.internal)
self.strides[0] = 1
self.shape_int64_t[0] = self.shape[0]
self.strides_int64_t[0] = self.strides[0]
def __len__(self):
return self.shape[0]
def __getbuffer__(self, Py_buffer *buffer, int flags):
buffer.buf = &self.wholememory_unique_id.internal[0]
buffer.format = 'c'
buffer.internal = NULL
buffer.itemsize = 1
buffer.len = self.shape[0]
buffer.ndim = 1
buffer.obj = self
buffer.readonly = 0
buffer.shape = self.shape
buffer.strides = self.strides
buffer.suboffsets = NULL
def __releasebuffer__(self, Py_buffer *buffer):
buffer.buf = NULL
buffer.format = 'c'
buffer.len = 0
buffer.ndim = 0
buffer.obj = None
buffer.shape = NULL
buffer.strides = NULL
def __dlpack__(self, stream=None):
cdef DLManagedTensor * dlm_tensor = \
<DLManagedTensor *> stdlib.malloc(sizeof(DLManagedTensor))
cdef DLTensor * dl_tensor = &dlm_tensor.dl_tensor
dl_tensor.data = &self.wholememory_unique_id.internal[0]
dl_tensor.ndim = 1
dl_tensor.shape = &self.shape_int64_t[0]
dl_tensor.strides = &self.strides_int64_t[0]
dl_tensor.byte_offset = 0
dl_tensor.device.device_type, dl_tensor.device.device_id = self.__dlpack_device__()
cdef DLDataType * dtype = &dl_tensor.dtype
dtype.code = <uint8_t> kDLInt
dtype.lanes = <uint16_t> 1
dtype.bits = <uint8_t> 8
dlm_tensor.manager_ctx = <void *> self
cpython.Py_INCREF(self)
dlm_tensor.deleter = deleter
return cpython.PyCapsule_New(dlm_tensor, 'dltensor', <cpython.PyCapsule_Destructor> &pycapsule_deleter)
def __dlpack_device__(self):
return (kDLCPU, 0)
def init(unsigned int flags):
check_wholememory_error_code(wholememory_init(flags))
def finalize():
check_wholememory_error_code(wholememory_finalize())
def create_unique_id():
py_uid = PyWholeMemoryUniqueID()
check_wholememory_error_code(wholememory_create_unique_id(&py_uid.wholememory_unique_id))
return py_uid
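# Illustrative sketch (not in the original source): a minimal single-process bootstrap
# using the module-level helpers in this file. In a multi-process setup the unique id is
# typically created on rank 0 and broadcast to the other ranks before every rank calls
# create_communicator (defined further below):
#
#   init(0)
#   uid = create_unique_id()
#   comm = create_communicator(uid, 0, 1)   # world_rank=0, world_size=1
#   ...
#   destroy_communicator(comm)
#   finalize()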
cpdef enum WholeMemoryViewType:
VtNone = 0
VtLocal = 1
VtGlobal = 2
VtRemote = 3
def get_type_string(WholeMemoryDataType data_type):
# https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.interface.html#__array_interface__
if data_type == DtFloat:
return '<f4'
elif data_type == DtHalf:
return '<f2'
elif data_type == DtDouble:
return '<f8'
elif data_type == DtBF16:
return '<f2'
elif data_type == DtInt:
return '<i4'
elif data_type == DtInt64:
return '<i8'
elif data_type == DtInt16:
return '<i2'
elif data_type == DtInt8:
return '|i1'
else:
raise ValueError('data type %d not valid' % (int(data_type),))
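# For example, get_type_string(DtFloat) yields '<f4' and get_type_string(DtInt64)
# yields '<i8'; these typestr values are what __cuda_array_interface__ consumers
# (e.g. cupy or numba) use to interpret the raw view.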
cdef class PyWholeMemoryFlattenDlpack:
cdef void * c_ptr
cdef WholeMemoryDataType data_type
cdef Py_ssize_t itemsize
cdef public object typestr
cdef Py_ssize_t shape[1]
cdef Py_ssize_t strides[1]
cdef int64_t shape_int64_t[1]
cdef int64_t strides_int64_t[1]
cdef WholeMemoryMemoryLocation device_type
cdef int device_id
def __cinit__(self):
self.c_ptr = NULL
self.shape[0] = 0
self.strides[0] = 1
self.shape_int64_t[0] = 0
self.strides_int64_t[0] = 1
self.itemsize = 0
self.typestr = ''
self.data_type = DtUnknown
self.device_type = MlHost
self.device_id = 0
def set_view_device(self, WholeMemoryMemoryLocation device_type, int device_id):
self.device_type = device_type
self.device_id = device_id
def get_view(self,
PyWholeMemoryHandle handle,
WholeMemoryDataType data_type,
WholeMemoryViewType view_type,
int target_rank):
"""Get view of a WholeMemoryHandle
Parameters
----------
handle : PyWholeMemoryHandle
handler to the WholeMemory
data_type: WholeMemoryDataType
data type of the WholeMemory
view_type : WholeMemoryViewType
view type
target_rank: int
if view_type is VtRemote, target_rank selects which remote rank's memory to view; otherwise target_rank is ignored
Returns
-------
(element_count, start_offset) of the mapped view, both measured in elements of data_type
"""
self.data_type = data_type
elt_size = wholememory_dtype_get_element_size(int(data_type))
self.itemsize = elt_size
if elt_size <= 0 or elt_size > 8:
raise ValueError('data_type not supported')
self.typestr = get_type_string(data_type)
cdef WholeMemoryMemoryType mem_type
cdef WholeMemoryMemoryLocation mem_location
mem_type = int(wholememory_get_memory_type(handle.wholememory_handle))
mem_location = int(wholememory_get_memory_location(handle.wholememory_handle))
if self.device_type == MlHost and mem_location == MlDevice:
raise ValueError('Device-located WholeMemory cannot be viewed from the host.')
if mem_type == MtDistributed and (view_type == VtGlobal or view_type == VtRemote):
raise ValueError('Distributed WholeMemory has no Global or Remote view')
cdef size_t map_size
cdef size_t map_offset
cdef size_t global_size
cdef wholememory_comm_t comm
cdef int world_rank
cdef int world_size
global_size = wholememory_get_total_size(handle.wholememory_handle)
if global_size % elt_size != 0:
raise ValueError('global_size=%d not multiple of elt_size=%d' % (global_size, elt_size))
global_elt_count = global_size // elt_size
if view_type == VtLocal:
check_wholememory_error_code(
wholememory_get_local_memory(&self.c_ptr, &map_size, &map_offset, handle.wholememory_handle))
if map_size % elt_size != 0 or map_offset % elt_size != 0:
raise ValueError('map_size=%d, map_offset=%d not multiple of elt_size=%d'
% (map_size, map_offset, elt_size))
local_elt_count = map_size // elt_size
local_start = map_offset // elt_size
self.shape[0] = map_size // elt_size
self.shape_int64_t[0] = map_size // elt_size
return local_elt_count, local_start
elif view_type == VtGlobal:
check_wholememory_error_code(wholememory_get_global_pointer(&self.c_ptr, handle.wholememory_handle))
self.shape[0] = global_size // elt_size
self.shape_int64_t[0] = global_size // elt_size
return global_elt_count, 0
elif view_type == VtRemote:
check_wholememory_error_code(wholememory_get_communicator(&comm, handle.wholememory_handle))
check_wholememory_error_code(wholememory_communicator_get_rank(&world_rank, comm))
check_wholememory_error_code(wholememory_communicator_get_size(&world_size, comm))
if target_rank < 0 or target_rank >= world_size:
raise IndexError('target_rank=%d but world_size=%d' % (target_rank, int(world_size)))
check_wholememory_error_code(wholememory_get_rank_memory(
&self.c_ptr, &map_size, &map_offset, target_rank, handle.wholememory_handle))
if map_size % elt_size != 0 or map_offset % elt_size != 0:
raise ValueError('target_rank=%d map_size=%d, map_offset=%d not multiple of elt_size=%d'
% (target_rank, map_size, map_offset, elt_size))
target_elt_count = map_size // elt_size
target_start = map_offset // elt_size
self.shape[0] = map_size // elt_size
self.shape_int64_t[0] = map_size // elt_size
return target_elt_count, target_start
else:
raise ValueError('view type should be VtLocal or VtGlobal or VtRemote')
def __len__(self):
return self.shape[0]
def __getbuffer__(self, Py_buffer *buffer, int flags):
buffer.buf = self.c_ptr
buffer.format = 'c'
buffer.internal = NULL
buffer.itemsize = self.itemsize
buffer.len = self.shape[0]
buffer.ndim = 1
buffer.obj = self
buffer.readonly = 0
buffer.shape = self.shape
buffer.strides = self.strides
buffer.suboffsets = NULL
def __releasebuffer__(self, Py_buffer *buffer):
buffer.buf = NULL
buffer.format = 'c'
buffer.len = 0
buffer.ndim = 0
buffer.obj = None
buffer.shape = NULL
buffer.strides = NULL
@property
def ptr(self):
return int(<uintptr_t> self.c_ptr)
@property
def __cuda_array_interface__(self):
"""See
https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.interface.html#__array_interface__
and
https://numba.pydata.org/numba-doc/dev/cuda/cuda_array_interface.html
"""
cdef dict intf = {
"data": (self.ptr, False),
"shape": (self.shape[0],),
"strides": None,
"typestr": self.typestr,
"version": 2
}
return intf
def __dlpack__(self, stream=None):
cdef DLManagedTensor * dlm_tensor = \
<DLManagedTensor *> stdlib.malloc(sizeof(DLManagedTensor))
cdef DLTensor * dl_tensor = &dlm_tensor.dl_tensor
dl_tensor.data = self.c_ptr
dl_tensor.ndim = 1
dl_tensor.shape = &self.shape_int64_t[0]
dl_tensor.strides = &self.strides_int64_t[0]
dl_tensor.byte_offset = 0
dl_tensor.device.device_type, dl_tensor.device.device_id = self.__dlpack_device__()
cdef DLDataType * dtype = &dl_tensor.dtype
if self.data_type == DtInt or self.data_type == DtInt64 \
or self.data_type == DtInt16 or self.data_type == DtInt8:
dtype.code = <uint8_t> kDLInt
elif self.data_type == DtFloat or self.data_type == DtDouble \
or self.data_type == DtHalf:
dtype.code = <uint8_t> kDLFloat
elif self.data_type == DtBF16:
dtype.code = <uint8_t> kDLBfloat
else:
raise ValueError('Invalid data_type')
dtype.lanes = <uint16_t> 1
dtype.bits = <uint8_t> (self.itemsize * 8)
dlm_tensor.manager_ctx = <void *> self
cpython.Py_INCREF(self)
dlm_tensor.deleter = deleter
return cpython.PyCapsule_New(dlm_tensor, 'dltensor', <cpython.PyCapsule_Destructor> &pycapsule_deleter)
def __dlpack_device__(self):
if self.device_type == MlHost:
return (kDLCPU, 0)
elif self.device_type == MlDevice:
return (kDLCUDA, self.device_id)
else:
raise ValueError('self.device_type=%d' % (int(self.device_type),))
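# Added commentary (not in the original source): PyWholeMemoryFlattenDlpack exposes the
# buffer protocol, __cuda_array_interface__ and __dlpack__/__dlpack_device__, so a view
# returned by get_view() can be imported zero-copy by frameworks such as
# torch.utils.dlpack.from_dlpack or cupy.from_dlpack, depending on whether the view is
# host- or device-resident.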
cdef class PyWholeMemoryComm:
cdef wholememory_comm_t comm_id
def __cinit__(self):
self.comm_id = NULL
def get_c_handle(self):
return <int64_t> self.comm_id
def support_type_location(self,
WholeMemoryMemoryType memory_type,
WholeMemoryMemoryLocation memory_location):
cdef WholeMemoryErrorCode err_code = int(
wholememory_communicator_support_type_location(self.comm_id, int(memory_type), int(memory_location)))
return err_code == Success
def get_rank(self):
cdef int world_rank = -1
check_wholememory_error_code(wholememory_communicator_get_rank(&world_rank, self.comm_id))
return world_rank
def get_size(self):
cdef int world_size = -1
check_wholememory_error_code(wholememory_communicator_get_size(&world_size, self.comm_id))
return world_size
def barrier(self):
check_wholememory_error_code(wholememory_communicator_barrier(self.comm_id))
cdef class PyWholeMemoryHandle:
cdef wholememory_handle_t wholememory_handle
def __cinit__(self):
self.wholememory_handle = NULL
def get_c_handle(self):
return <int64_t> self.wholememory_handle
def get_communicator(self):
py_comm = PyWholeMemoryComm()
check_wholememory_error_code(wholememory_get_communicator(&py_comm.comm_id, self.wholememory_handle))
return py_comm
def get_memory_type(self):
return WholeMemoryMemoryType(wholememory_get_memory_type(self.wholememory_handle))
def get_memory_location(self):
return WholeMemoryMemoryLocation(wholememory_get_memory_location(self.wholememory_handle))
def get_partition_plan(self):
cdef size_t size_per_rank
check_wholememory_error_code(wholememory_get_partition_plan(&size_per_rank, self.wholememory_handle))
return size_per_rank
def get_global_flatten_tensor(self,
object import_dlpack_fn,
WholeMemoryDataType data_type,
WholeMemoryMemoryLocation view_from_device,
int view_from_device_id):
tb = PyWholeMemoryFlattenDlpack()
tb.set_view_device(view_from_device, view_from_device_id)
tsize, toffset = tb.get_view(self, data_type, VtGlobal, 0)
assert toffset == 0
return import_dlpack_fn(tb), toffset
def get_local_flatten_tensor(self,
object import_dlpack_fn,
WholeMemoryDataType data_type,
WholeMemoryMemoryLocation view_from_device,
int view_from_device_id):
tb = PyWholeMemoryFlattenDlpack()
tb.set_view_device(view_from_device, view_from_device_id)
tsize, toffset = tb.get_view(self, data_type, VtLocal, 0)
return import_dlpack_fn(tb), toffset
def get_all_chunked_flatten_tensor(self,
object import_dlpack_fn,
WholeMemoryDataType data_type,
WholeMemoryMemoryLocation view_from_device,
int view_from_device_id):
cdef int world_rank
cdef int world_size
cdef wholememory_comm_t comm
check_wholememory_error_code(wholememory_get_communicator(&comm, self.wholememory_handle))
check_wholememory_error_code(wholememory_communicator_get_rank(&world_rank, comm))
check_wholememory_error_code(wholememory_communicator_get_size(&world_size, comm))
chunked_tensors = []
toffsets = []
for r in range(world_size):
tb = PyWholeMemoryFlattenDlpack()
tb.set_view_device(view_from_device, view_from_device_id)
tsize, toffset = tb.get_view(self, data_type, VtRemote, r)
chunked_tensors.append(import_dlpack_fn(tb))
toffsets.append(toffset)
return chunked_tensors, toffsets
def from_filelist(self,
int64_t memory_offset,
int64_t memory_entry_size,
int64_t file_entry_size,
file_list):
load_wholememory_handle_from_filelist(<int64_t> self.wholememory_handle,
memory_offset,
memory_entry_size,
file_entry_size,
file_list)
def to_file(self,
int64_t memory_offset,
int64_t memory_entry_size,
int64_t file_entry_size,
file_name):
store_wholememory_handle_to_file(<int64_t> self.wholememory_handle,
memory_offset,
memory_entry_size,
file_entry_size,
file_name)
cdef class PyWholeMemoryTensorDescription:
cdef wholememory_tensor_description_t tensor_description
def __cinit__(self):
self.tensor_description.dim = 0
self.tensor_description.dtype = int(0)
self.tensor_description.storage_offset = 0
cdef set_by_tensor_desc(self, wholememory_tensor_description_t * td):
self.tensor_description = td[0]
def set_dtype(self, WholeMemoryDataType dtype):
self.tensor_description.dtype = int(dtype)
def set_shape(self, shape):
assert 0 < len(shape) < 8
dim = len(shape)
self.tensor_description.dim = dim
for i in range(dim):
self.tensor_description.sizes[i] = shape[i]
def set_stride(self, strides):
assert len(strides) == self.tensor_description.dim
for i in range(self.tensor_description.dim):
self.tensor_description.strides[i] = strides[i]
def set_storage_offset(self, storage_offset):
self.tensor_description.storage_offset = storage_offset
@property
def dtype(self):
return WholeMemoryDataType(self.tensor_description.dtype)
def dim(self):
return self.tensor_description.dim
@property
def shape(self):
ret_shape = tuple([self.tensor_description.sizes[i] for i in range(self.tensor_description.dim)])
return ret_shape
def stride(self):
return tuple([self.tensor_description.strides[i] for i in range(self.dim())])
def storage_offset(self):
return self.tensor_description.storage_offset
cdef class WrappedLocalTensor:
cdef wholememory_tensor_t wm_tensor
def __cinit__(self):
self.wm_tensor = NULL
def __dealloc__(self):
if self.wm_tensor:
check_wholememory_error_code(wholememory_destroy_tensor(self.wm_tensor))
self.wm_tensor = NULL
def wrap_tensor(self,
PyWholeMemoryTensorDescription py_desc,
int64_t data_ptr):
check_wholememory_error_code(wholememory_make_tensor_from_pointer(&self.wm_tensor,
<void *> data_ptr,
&py_desc.tensor_description))
return self
def get_c_handle(self) -> int:
if self.wm_tensor:
return <int64_t> self.wm_tensor
else:
return 0
cdef class PyWholeMemoryTensor:
cdef wholememory_tensor_t wholememory_tensor
cdef wholememory_tensor_description_t tensor_description
def __cinit__(self):
self.wholememory_tensor = NULL
cdef from_c_handle(self,
wholememory_tensor_t wm_tensor):
self.wholememory_tensor = wm_tensor
self.tensor_description = wholememory_tensor_get_tensor_description(wm_tensor)[0]
def get_c_handle(self):
return <int64_t> self.wholememory_tensor
def get_wholememory_handle(self):
handle = PyWholeMemoryHandle()
handle.wholememory_handle = wholememory_tensor_get_memory_handle(self.wholememory_tensor)
return handle
@property
def dtype(self):
return WholeMemoryDataType(self.tensor_description.dtype)
def dim(self):
return self.tensor_description.dim
@property
def shape(self):
if self.dim() == 1:
return (self.tensor_description.sizes[0],)
elif self.dim() == 2:
return (self.tensor_description.sizes[0], self.tensor_description.sizes[1])
else:
raise ValueError('self.dim()=%d' % (self.dim(),))
def stride(self):
if self.dim() == 1:
return (self.tensor_description.strides[0],)
elif self.dim() == 2:
return (self.tensor_description.strides[0], self.tensor_description.strides[1])
else:
raise ValueError('self.dim()=%d' % (self.dim(),))
def storage_offset(self):
return self.tensor_description.storage_offset
def get_partition_plan(self):
mem_size_per_rank = self.get_wholememory_handle().get_partition_plan()
element_size = wholememory_dtype_get_element_size(self.tensor_description.dtype)
vector_size = element_size * self.stride()[0]
assert mem_size_per_rank % vector_size == 0
return mem_size_per_rank // vector_size
def get_sub_tensor(self, starts, ends):
cdef int64_t start_array[2]
cdef int64_t end_array[2]
start_array[0] = starts[0]
end_array[0] = ends[0]
if self.dim() == 1:
pass
elif self.dim() == 2:
start_array[1] = starts[1]
end_array[1] = ends[1]
else:
raise ValueError('self.dim()=%d' % (self.dim(),))
sub_tensor = PyWholeMemoryTensor()
check_wholememory_error_code(
wholememory_tensor_get_subtensor(self.wholememory_tensor, start_array, end_array,
&sub_tensor.wholememory_tensor))
sub_tensor.from_c_handle(sub_tensor.wholememory_tensor)
return sub_tensor
def get_tensor_in_window(self,
flatten_tensor,
int64_t storage_window_offset):
if self.tensor_description.dim == 1:
start_indice = max(0, self.tensor_description.storage_offset - storage_window_offset)
end_indice = min(flatten_tensor.shape[0],
self.tensor_description.storage_offset + self.tensor_description.sizes[
0] - storage_window_offset)
return flatten_tensor[start_indice: end_indice], max(0,
storage_window_offset - self.tensor_description.storage_offset)
elif self.tensor_description.dim == 2:
embedding_stride = self.tensor_description.strides[0]
storage_offset0 = self.tensor_description.storage_offset // embedding_stride
storage_offset1 = self.tensor_description.storage_offset % embedding_stride
mat_tensor = flatten_tensor.reshape(-1, embedding_stride)
assert storage_window_offset % self.tensor_description.strides[0] == 0
vector_start_offset = storage_window_offset // self.tensor_description.strides[0]
start_indice0 = max(0, storage_offset0 - vector_start_offset)
end_indice0 = min(mat_tensor.shape[0],
storage_offset0 + self.tensor_description.sizes[0] - vector_start_offset)
start_indice_1 = storage_offset1
assert mat_tensor.shape[1] >= storage_offset1 + self.tensor_description.sizes[1]
end_indice_1 = storage_offset1 + self.tensor_description.sizes[1]
return mat_tensor[start_indice0:end_indice0, start_indice_1:end_indice_1], max(0,
vector_start_offset - storage_offset0)
else:
raise ValueError('tensor dim should be 1 or 2')
def get_local_tensor(self,
object import_dlpack_fn,
WholeMemoryMemoryLocation view_from_device,
int view_from_device_id):
flatten_tensor, element_offset = self.get_wholememory_handle().get_local_flatten_tensor(import_dlpack_fn,
self.tensor_description.dtype,
view_from_device,
view_from_device_id)
return self.get_tensor_in_window(flatten_tensor, element_offset)
def get_global_tensor(self,
object import_dlpack_fn,
WholeMemoryMemoryLocation view_from_device,
int view_from_device_id):
global_flatten_tensor, _ = self.get_wholememory_handle().get_global_flatten_tensor(import_dlpack_fn,
self.tensor_description.dtype,
view_from_device,
view_from_device_id)
return self.get_tensor_in_window(global_flatten_tensor, 0)[0]
def get_all_chunked_tensor(self,
object import_dlpack_fn,
WholeMemoryMemoryLocation view_from_device,
int view_from_device_id):
chunked_flatten_tensors, element_offsets = self.get_wholememory_handle().get_all_chunked_flatten_tensor(
import_dlpack_fn,
self.tensor_description.dtype,
view_from_device,
view_from_device_id)
chunked_tensors = []
for i in range(len(chunked_flatten_tensors)):
chunked_tensors.append(self.get_tensor_in_window(chunked_flatten_tensors[i], element_offsets[i])[0])
return chunked_tensors
def from_filelist(self, filelist):
handle = self.get_wholememory_handle()
strides = self.stride()
shape = self.shape
cdef size_t elt_size = wholememory_dtype_get_element_size(self.tensor_description.dtype)
cdef size_t memory_offset
cdef size_t memory_entry_size
cdef size_t file_entry_size
memory_offset = self.storage_offset() * elt_size
memory_entry_size = elt_size * strides[0]
if self.dim() == 1:
file_entry_size = elt_size
elif self.dim() == 2:
file_entry_size = elt_size * shape[1]
else:
raise ValueError('tensor dim should be 1 or 2')
handle.from_filelist(memory_offset, memory_entry_size, file_entry_size, filelist)
def to_file(self, filename):
handle = self.get_wholememory_handle()
strides = self.stride()
shape = self.shape
cdef size_t elt_size = wholememory_dtype_get_element_size(self.tensor_description.dtype)
cdef size_t memory_offset
cdef size_t memory_entry_size
cdef size_t file_entry_size
memory_offset = self.storage_offset() * elt_size
memory_entry_size = elt_size * strides[0]
if self.dim() == 1:
file_entry_size = elt_size
elif self.dim() == 2:
file_entry_size = elt_size * shape[1]
else:
raise ValueError('tensor dim should be 1 or 2')
handle.to_file(memory_offset, memory_entry_size, file_entry_size, filename)
###############################################################################
def create_communicator(PyWholeMemoryUniqueID py_uid, int world_rank, int world_size):
py_comm = PyWholeMemoryComm()
check_wholememory_error_code(wholememory_create_communicator(&py_comm.comm_id,
py_uid.wholememory_unique_id,
world_rank,
world_size))
return py_comm
def destroy_communicator(PyWholeMemoryComm py_comm):
check_wholememory_error_code(wholememory_destroy_communicator(py_comm.comm_id))
def determine_partition_plan(int64_t entry_count,
int world_size):
cdef size_t per_rank_count
check_wholememory_error_code(wholememory_determine_entry_partition_plan(&per_rank_count,
entry_count,
world_size))
return per_rank_count
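# Illustrative note (not in the original source): determine_partition_plan asks the C
# library how many entries each rank owns. For example, partitioning 1000 entries
# across 4 ranks is expected to yield 250 entries per rank; any remainder handling is
# decided by the underlying C implementation.
#
#   per_rank = determine_partition_plan(1000, 4)   # expected: 250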
def malloc(cython.size_t total_size,
PyWholeMemoryComm py_comm,
WholeMemoryMemoryType memory_type,
WholeMemoryMemoryLocation memory_location,
cython.size_t data_granularity):
handle = PyWholeMemoryHandle()
check_wholememory_error_code(wholememory_malloc(&handle.wholememory_handle, total_size, py_comm.comm_id,
int(memory_type), int(memory_location),
data_granularity))
return handle
def free(PyWholeMemoryHandle handle):
check_wholememory_error_code(wholememory_free(handle.wholememory_handle))
def create_wholememory_array(WholeMemoryDataType dtype,
int64_t size,
PyWholeMemoryComm comm,
WholeMemoryMemoryType mem_type,
WholeMemoryMemoryLocation mem_location):
wholememory_tensor = PyWholeMemoryTensor()
wholememory_tensor.tensor_description.dtype = int(dtype)
wholememory_tensor.tensor_description.storage_offset = 0
wholememory_tensor.tensor_description.dim = 1
wholememory_tensor.tensor_description.strides[0] = 1
wholememory_tensor.tensor_description.sizes[0] = size
check_wholememory_error_code(wholememory_create_tensor(&wholememory_tensor.wholememory_tensor,
&wholememory_tensor.tensor_description,
comm.comm_id,
int(mem_type),
int(mem_location)))
return wholememory_tensor
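# Illustrative sketch (not in the original source): creating a 1-D WholeMemory array and
# initializing it from a file list; the file name is a placeholder:
#
#   arr = create_wholememory_array(WholeMemoryDataType.DtInt64, 1000, comm,
#                                  WholeMemoryMemoryType.MtContinuous,
#                                  WholeMemoryMemoryLocation.MlDevice)
#   arr.from_filelist(["node_ids_part_0.bin"])
#   ...
#   destroy_wholememory_tensor(arr)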
def create_wholememory_matrix(WholeMemoryDataType dtype,
int64_t row,
int64_t column,
int64_t stride,
PyWholeMemoryComm comm,
WholeMemoryMemoryType mem_type,
WholeMemoryMemoryLocation mem_location):
wholememory_tensor = PyWholeMemoryTensor()
wholememory_tensor.tensor_description.dtype = int(dtype)
wholememory_tensor.tensor_description.storage_offset = 0
wholememory_tensor.tensor_description.dim = 2
if stride == -1:
stride = column
wholememory_tensor.tensor_description.strides[0] = stride
wholememory_tensor.tensor_description.strides[1] = 1
wholememory_tensor.tensor_description.sizes[0] = row
wholememory_tensor.tensor_description.sizes[1] = column
check_wholememory_error_code(wholememory_create_tensor(&wholememory_tensor.wholememory_tensor,
&wholememory_tensor.tensor_description,
comm.comm_id,
int(mem_type),
int(mem_location)))
return wholememory_tensor
def create_wholememory_tensor(PyWholeMemoryTensorDescription tensor_description,
PyWholeMemoryComm comm,
WholeMemoryMemoryType mem_type,
WholeMemoryMemoryLocation mem_location):
if tensor_description.dim() != 1 and tensor_description.dim() != 2:
raise NotImplementedError('WholeMemory currently only supports 1D or 2D tensors')
if tensor_description.stride()[tensor_description.dim() - 1] != 1:
raise ValueError('last stride should be 1')
if tensor_description.storage_offset() != 0:
raise ValueError('storage_offset should be 0 when created')
wholememory_tensor = PyWholeMemoryTensor()
wholememory_tensor.tensor_description = tensor_description.tensor_description
check_wholememory_error_code(wholememory_create_tensor(&wholememory_tensor.wholememory_tensor,
&wholememory_tensor.tensor_description,
comm.comm_id,
int(mem_type),
int(mem_location)))
return wholememory_tensor
def make_tensor_as_wholememory(PyWholeMemoryTensorDescription tensor_description,
int64_t data_ptr):
if tensor_description.stride()[tensor_description.dim() - 1] != 1:
raise ValueError('last stride should be 1')
wholememory_tensor = PyWholeMemoryTensor()
check_wholememory_error_code(wholememory_make_tensor_from_pointer(&wholememory_tensor.wholememory_tensor,
<void *> data_ptr,
&tensor_description.tensor_description))
wholememory_tensor.from_c_handle(wholememory_tensor.wholememory_tensor)
return wholememory_tensor
def make_handle_as_wholememory(PyWholeMemoryTensorDescription tensor_description,
PyWholeMemoryHandle handle):
if tensor_description.stride()[tensor_description.dim() - 1] != 1:
raise ValueError('last stride should be 1')
wholememory_tensor = PyWholeMemoryTensor()
check_wholememory_error_code(wholememory_make_tensor_from_handle(&wholememory_tensor.wholememory_tensor,
handle.wholememory_handle,
&tensor_description.tensor_description))
wholememory_tensor.from_c_handle(wholememory_tensor.wholememory_tensor)
return wholememory_tensor
def destroy_wholememory_tensor(PyWholeMemoryTensor wholememory_tensor):
check_wholememory_error_code(wholememory_destroy_tensor(wholememory_tensor.wholememory_tensor))
def fork_get_gpu_count():
return fork_get_device_count()
cpdef load_wholememory_handle_from_filelist(int64_t wholememory_handle_int_ptr,
int64_t memory_offset,
int64_t memory_entry_size,
int64_t file_entry_size,
file_list):
cdef const char ** filenames
cdef int num_files = len(file_list)
cdef int i
filenames = <const char**> stdlib.malloc(num_files * sizeof(char *))
try:
for i in range(num_files):
filenames[i] = PyUnicode_AsUTF8(file_list[i])
check_wholememory_error_code(wholememory_load_from_file(
<wholememory_handle_t> <int64_t> wholememory_handle_int_ptr,
memory_offset,
memory_entry_size,
file_entry_size,
filenames,
num_files))
finally:
stdlib.free(filenames)
cpdef store_wholememory_handle_to_file(int64_t wholememory_handle_int_ptr,
int64_t memory_offset,
int64_t memory_entry_size,
int64_t file_entry_size,
file_name):
check_wholememory_error_code(wholememory_store_to_file(
<wholememory_handle_t> <int64_t> wholememory_handle_int_ptr,
memory_offset,
memory_entry_size,
file_entry_size,
PyUnicode_AsUTF8(file_name)))
cdef extern from "wholememory/wholememory_op.h":
cdef wholememory_error_code_t wholememory_gather(wholememory_tensor_t wholememory_tensor,
wholememory_tensor_t indices_tensor,
wholememory_tensor_t output_tensor,
wholememory_env_func_t * p_env_fns,
void * stream)
cdef wholememory_error_code_t wholememory_scatter(wholememory_tensor_t input_tensor,
wholememory_tensor_t indices_tensor,
wholememory_tensor_t wholememory_tensor,
wholememory_env_func_t * p_env_fns,
void * stream)
cdef wholememory_error_code_t wholememory_env_test_op(wholememory_tensor_t input_tensor,
wholememory_tensor_t output_fixed_tensor,
void *output_variable_device_tensor_handle,
void *output_variable_pinned_tensor_handle,
void *output_variable_host_tensor_handle,
int64_t output_variable_entry_count,
wholememory_env_func_t *p_env_fns,
void *stream)
cpdef void wholememory_gather_op(PyWholeMemoryTensor wholememory_tensor,
WrappedLocalTensor indices_tensor,
WrappedLocalTensor output_tensor,
int64_t p_env_fns_int,
int64_t stream_int):
check_wholememory_error_code(wholememory_gather(<wholememory_tensor_t> <int64_t> wholememory_tensor.get_c_handle(),
<wholememory_tensor_t> <int64_t> indices_tensor.get_c_handle(),
<wholememory_tensor_t> <int64_t> output_tensor.get_c_handle(),
<wholememory_env_func_t *> p_env_fns_int,
<void *> stream_int))
cpdef void wholememory_scatter_op(WrappedLocalTensor input_tensor,
WrappedLocalTensor indices_tensor,
PyWholeMemoryTensor wholememory_tensor,
int64_t p_env_fns_int,
int64_t stream_int):
check_wholememory_error_code(wholememory_scatter(<wholememory_tensor_t> <int64_t> input_tensor.get_c_handle(),
<wholememory_tensor_t> <int64_t> indices_tensor.get_c_handle(),
<wholememory_tensor_t> <int64_t> wholememory_tensor.get_c_handle(),
<wholememory_env_func_t *> p_env_fns_int,
<void *> stream_int))
cpdef void wholememory_env_test_cython_op(WrappedLocalTensor input,
WrappedLocalTensor output,
int64_t output_variable_device_tensor_handle,
int64_t output_variable_pinned_tensor_handle,
int64_t output_variable_host_tensor_handle,
int64_t output_variable_entry_count,
int64_t p_env_fns_int,
int64_t stream_int):
check_wholememory_error_code(wholememory_env_test_op(<wholememory_tensor_t> <int64_t> input.get_c_handle(),
<wholememory_tensor_t> <int64_t> output.get_c_handle(),
<void *> output_variable_device_tensor_handle,
<void *> output_variable_pinned_tensor_handle,
<void *> output_variable_host_tensor_handle,
output_variable_entry_count,
<wholememory_env_func_t *> p_env_fns_int,
<void *> stream_int))
return
cdef extern from "wholememory/wholegraph_op.h":
cdef wholememory_error_code_t wholegraph_csr_unweighted_sample_without_replacement(
wholememory_tensor_t wm_csr_row_ptr_tensor,
wholememory_tensor_t wm_csr_col_ptr_tensor,
wholememory_tensor_t center_nodes_tensor,
int max_sample_count,
wholememory_tensor_t output_sample_offset_tensor,
void * output_dest_memory_context,
void * output_center_localid_memory_context,
void * output_edge_gid_memory_context,
unsigned long long random_seed,
wholememory_env_func_t * p_env_fns,
void * stream)
cdef wholememory_error_code_t wholegraph_csr_weighted_sample_without_replacement(
wholememory_tensor_t wm_csr_row_ptr_tensor,
wholememory_tensor_t wm_csr_col_ptr_tensor,
wholememory_tensor_t wm_csr_weight_ptr_tensor,
wholememory_tensor_t center_nodes_tensor,
int max_sample_count,
wholememory_tensor_t output_sample_offset_tensor,
void * output_dest_memory_context,
void * output_center_localid_memory_context,
void * output_edge_gid_memory_context,
unsigned long long random_seed,
wholememory_env_func_t * p_env_fns,
void * stream)
cdef wholememory_error_code_t generate_random_positive_int_cpu(
int64_t random_seed,
int64_t subsequence,
wholememory_tensor_t output)
cdef wholememory_error_code_t generate_exponential_distribution_negative_float_cpu(
int64_t random_seed,
int64_t subsequence,
wholememory_tensor_t output)
cpdef void csr_unweighted_sample_without_replacement(
PyWholeMemoryTensor wm_csr_row_ptr_tensor,
PyWholeMemoryTensor wm_csr_col_ptr_tensor,
WrappedLocalTensor center_nodes_tensor,
int max_sample_count,
WrappedLocalTensor output_sample_offset_tensor,
int64_t output_dest_memory_handle,
int64_t output_center_localid_memory_handle,
int64_t output_edge_gid_memory_handle,
unsigned long long random_seed,
int64_t p_env_fns_int,
int64_t stream_int
):
check_wholememory_error_code(wholegraph_csr_unweighted_sample_without_replacement(
<wholememory_tensor_t> <int64_t> wm_csr_row_ptr_tensor.get_c_handle(),
<wholememory_tensor_t> <int64_t> wm_csr_col_ptr_tensor.get_c_handle(),
<wholememory_tensor_t> <int64_t> center_nodes_tensor.get_c_handle(),
max_sample_count,
<wholememory_tensor_t> <int64_t> output_sample_offset_tensor.get_c_handle(),
<void *> output_dest_memory_handle,
<void *> output_center_localid_memory_handle,
<void *> output_edge_gid_memory_handle,
random_seed,
<wholememory_env_func_t *> p_env_fns_int,
<void *> stream_int))
cpdef void csr_weighted_sample_without_replacement(
PyWholeMemoryTensor wm_csr_row_ptr_tensor,
PyWholeMemoryTensor wm_csr_col_ptr_tensor,
PyWholeMemoryTensor wm_csr_weight_ptr_tensor,
WrappedLocalTensor center_nodes_tensor,
int max_sample_count,
WrappedLocalTensor output_sample_offset_tensor,
int64_t output_dest_memory_handle,
int64_t output_center_localid_memory_handle,
int64_t output_edge_gid_memory_handle,
unsigned long long random_seed,
int64_t p_env_fns_int,
int64_t stream_int
):
check_wholememory_error_code(wholegraph_csr_weighted_sample_without_replacement(
<wholememory_tensor_t> <int64_t> wm_csr_row_ptr_tensor.get_c_handle(),
<wholememory_tensor_t> <int64_t> wm_csr_col_ptr_tensor.get_c_handle(),
<wholememory_tensor_t> <int64_t> wm_csr_weight_ptr_tensor.get_c_handle(),
<wholememory_tensor_t> <int64_t> center_nodes_tensor.get_c_handle(),
max_sample_count,
<wholememory_tensor_t> <int64_t> output_sample_offset_tensor.get_c_handle(),
<void *> output_dest_memory_handle,
<void *> output_center_localid_memory_handle,
<void *> output_edge_gid_memory_handle,
random_seed,
<wholememory_env_func_t *> p_env_fns_int,
<void *> stream_int))
cpdef void host_generate_random_positive_int(
int64_t random_seed,
int64_t subsequence,
WrappedLocalTensor output
):
check_wholememory_error_code(generate_random_positive_int_cpu(
random_seed,
subsequence,
<wholememory_tensor_t> <int64_t> output.get_c_handle()
))
cpdef void host_generate_exponential_distribution_negative_float(
int64_t random_seed,
int64_t subsequence,
WrappedLocalTensor output
):
check_wholememory_error_code(generate_exponential_distribution_negative_float_cpu(
random_seed,
subsequence,
<wholememory_tensor_t> <int64_t> output.get_c_handle()
))
cdef extern from "wholememory/graph_op.h":
cdef wholememory_error_code_t graph_append_unique(wholememory_tensor_t target_nodes_tensor,
wholememory_tensor_t neighbor_nodes_tensor,
void * output_unique_node_memory_context,
wholememory_tensor_t output_neighbor_raw_to_unique_mapping_tensor,
wholememory_env_func_t * p_env_fns,
void * stream)
cdef wholememory_error_code_t csr_add_self_loop(wholememory_tensor_t csr_row_ptr_tensor,
wholememory_tensor_t csr_col_ptr_tensor,
wholememory_tensor_t output_csr_row_ptr_tensor,
wholememory_tensor_t output_csr_col_ptr_tensor,
void * stream)
cpdef void append_unique(
WrappedLocalTensor target_node_tensor,
WrappedLocalTensor neighbor_node_tensor,
int64_t output_unique_node_memory_handle,
WrappedLocalTensor output_neighbor_raw_to_unique_mapping_tensor,
int64_t p_env_fns_int,
int64_t stream_int):
check_wholememory_error_code(graph_append_unique(
<wholememory_tensor_t> <int64_t> target_node_tensor.get_c_handle(),
<wholememory_tensor_t> <int64_t> neighbor_node_tensor.get_c_handle(),
<void *> output_unique_node_memory_handle,
<wholememory_tensor_t> <int64_t> output_neighbor_raw_to_unique_mapping_tensor.get_c_handle(),
<wholememory_env_func_t *> p_env_fns_int,
<void *> stream_int
))
cpdef void add_csr_self_loop(
WrappedLocalTensor csr_row_ptr_tensor,
WrappedLocalTensor csr_col_ptr_tensor,
WrappedLocalTensor csr_row_ptr_self_tensor,
WrappedLocalTensor csr_col_ptr_self_tensor,
int64_t stream_int):
check_wholememory_error_code(csr_add_self_loop(
<wholememory_tensor_t> <int64_t> csr_row_ptr_tensor.get_c_handle(),
<wholememory_tensor_t> <int64_t> csr_col_ptr_tensor.get_c_handle(),
<wholememory_tensor_t> <int64_t> csr_row_ptr_self_tensor.get_c_handle(),
<wholememory_tensor_t> <int64_t> csr_col_ptr_self_tensor.get_c_handle(),
<void *> stream_int))
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch/embedding.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylibwholegraph.binding.wholememory_binding as wmb
import torch
from .utils import torch_dtype_to_wholememory_dtype, get_file_size
from .utils import str_to_wmb_wholememory_location, str_to_wmb_wholememory_memory_type
from .utils import (
str_to_wmb_wholememory_optimizer_type,
str_to_wmb_wholememory_access_type,
)
from typing import Union, List
from .comm import WholeMemoryCommunicator
from .comm import (
get_global_communicator,
get_local_node_communicator,
get_local_device_communicator,
)
from .tensor import WholeMemoryTensor
from .wholegraph_env import wrap_torch_tensor, get_wholegraph_env_fns, get_stream
class WholeMemoryOptimizer(object):
"""
Sparse optimizer for WholeMemoryEmbedding.
Many WholeMemoryEmbeddings can share the same WholeMemoryOptimizer.
You should not create a WholeMemoryOptimizer object directly; use :func:`create_wholememory_optimizer` instead.
"""
def __init__(self, global_comm: WholeMemoryCommunicator):
super().__init__()
self.wmb_opt = wmb.WholeMemoryOptimizer()
self.embeddings = []
self.global_comm = global_comm
def add_embedding(self, wm_embedding):
"""Add WholeMemory Embedding to this optimizer
NOTE: you don't need to call this method, it is automatic called when WholeMemory Embedding is created.
:param wm_embedding: WholeMemory Embedding that use this optimizer
:return: None
"""
self.embeddings.append(wm_embedding)
def step(self, lr: float):
r"""Apply gradients to all WholeMemory Embedding that use this optimizer.
:param lr: learing rate.
"""
for wm_embedding in self.embeddings:
if wm_embedding.need_apply:
wm_embedding.apply_gradients(lr)
self.global_comm.barrier()
def create_wholememory_optimizer(optimizer_type: str, param_dict: dict):
"""
Create WholeMemoryOptimizer.
:param optimizer_type: Type of the Optimizer
:param param_dict: parameters of the optimizer
:return: WholeMemoryOptimizer
"""
wm_optimizer = WholeMemoryOptimizer(get_global_communicator())
wm_optimizer.wmb_opt.create_optimizer(
str_to_wmb_wholememory_optimizer_type(optimizer_type), param_dict
)
return wm_optimizer
def destroy_wholememory_optimizer(optimizer: WholeMemoryOptimizer):
"""
Destroy WholeMemoryOptimizer
:param optimizer: WholeMemoryOptimizer to destroy
:return: None
"""
optimizer.wmb_opt.destroy_optimizer()
optimizer.wmb_opt = None
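# Example (editor's sketch, defined as a helper so nothing runs at import time): one
# sparse optimizer is typically created up front and shared by the embeddings built with
# create_embedding(..., optimizer=...). The "adam" type string and the empty parameter
# dict below are illustrative assumptions, not a verified list of supported types.
def _example_optimizer_lifecycle():
    optimizer = create_wholememory_optimizer("adam", {})
    # ... create WholeMemory embeddings with optimizer=optimizer, run forward/backward ...
    optimizer.step(lr=1e-3)  # apply the accumulated sparse gradients of all embeddings
    destroy_wholememory_optimizer(optimizer)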
class WholeMemoryCachePolicy(object):
"""
Cache policy used to create a WholeMemoryEmbedding.
NOTE: You should not create a WholeMemoryCachePolicy object directly;
use :func:`create_wholememory_cache_policy` instead.
"""
def __init__(self, wmb_cache_policy: wmb.WholeMemoryCachePolicy):
super().__init__()
self.wmb_cache_policy = wmb_cache_policy
def create_wholememory_cache_policy(
cache_comm: WholeMemoryCommunicator,
*,
memory_type: str = "chunked",
memory_location: str = "cuda",
access_type: str = "readonly",
ratio: float = 0.5,
):
"""
Create WholeMemoryCachePolicy.
NOTE: in most cases :func:`create_builtin_cache_policy` is sufficient; this function is a more flexible interface.
:param cache_comm: WholeMemory communicator of the cache
:param memory_type: WholeMemory type of cache
:param memory_location: WholeMemory location of cache
:param access_type: Access type needed
:param ratio: Ratio of cache
:return: WholeMemoryCachePolicy
"""
wmb_cache_policy = wmb.WholeMemoryCachePolicy()
wmb_cache_policy.create_policy(
cache_comm.wmb_comm,
str_to_wmb_wholememory_memory_type(memory_type),
str_to_wmb_wholememory_location(memory_location),
str_to_wmb_wholememory_access_type(access_type),
ratio,
)
return WholeMemoryCachePolicy(wmb_cache_policy)
def destroy_wholememory_cache_policy(cache_policy: WholeMemoryCachePolicy):
"""
Destroy WholeMemoryCachePolicy
:param cache_policy: WholeMemoryCachePolicy to destroy
:return: None
"""
wmb_cache_policy = cache_policy.wmb_cache_policy
wmb_cache_policy.destroy_policy()
cache_policy.wmb_cache_policy = None
def create_builtin_cache_policy(
builtin_cache_type: str,
embedding_memory_type: str,
embedding_memory_location: str,
access_type: str,
cache_ratio: float,
*,
cache_memory_type: str = "",
cache_memory_location: str = "",
):
r"""Create builtin cache policy
:param builtin_cache_type: supported types are none, local_device, local_node and all_devices
:param embedding_memory_type: WholeMemory type of raw embedding
:param embedding_memory_location: WholeMemory location of raw embedding
:param access_type: Access type needed
:param cache_ratio: Ratio of cache
:param cache_memory_type: WholeMemory type of cache
:param cache_memory_location: WholeMemory location of cache
:return: WholeMemoryCachePolicy or None
"""
if (
embedding_memory_type != "continuous"
and embedding_memory_type != "chunked"
and embedding_memory_type != "distributed"
):
raise ValueError(f"embedding_memory_type={embedding_memory_type} is not valid")
if embedding_memory_location != "cpu" and embedding_memory_location != "cuda":
raise ValueError(
f"embedding_memory_location={embedding_memory_location} is not valid"
)
if builtin_cache_type == "none":
return None
if (
cache_memory_location != ""
and cache_memory_location != "cpu"
and cache_memory_location != "cuda"
):
raise ValueError(
f"cache_memory_location is {cache_memory_location}, should be empty or cpu, cuda"
)
cache_memory_location = (
"cuda" if cache_memory_location == "" else cache_memory_location
)
if builtin_cache_type == "all_devices":
if embedding_memory_location == "cuda":
print(
"[WARNING] Seems you are using device cache for device memory, "
"this may consume more memory and have low performance than use none cache"
)
cache_memory_type = (
embedding_memory_type if cache_memory_type == "" else cache_memory_type
)
return create_wholememory_cache_policy(
get_global_communicator(),
memory_type=cache_memory_type,
memory_location=cache_memory_location,
access_type=access_type,
ratio=cache_ratio,
)
if builtin_cache_type == "local_node":
cache_memory_type = "chunked" if cache_memory_type == "" else cache_memory_type
return create_wholememory_cache_policy(
get_local_node_communicator(),
memory_type=cache_memory_type,
memory_location=cache_memory_location,
access_type=access_type,
ratio=cache_ratio,
)
if builtin_cache_type == "local_device":
cache_memory_type = "continuous"
return create_wholememory_cache_policy(
get_local_device_communicator(),
memory_type=cache_memory_type,
memory_location=cache_memory_location,
access_type=access_type,
ratio=cache_ratio,
)
raise ValueError(
f"builtin_cache_type={builtin_cache_type} not supported, "
f"should be none, local_device, local_node or all_devices"
)
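# Example (editor's sketch): a cache policy for a host-resident, distributed embedding
# that caches hot rows in device memory shared across the local node. The 0.2 cache
# ratio is an arbitrary illustrative value.
def _example_builtin_cache_policy():
    return create_builtin_cache_policy(
        "local_node",                            # cache shared by ranks on one node
        embedding_memory_type="distributed",
        embedding_memory_location="cpu",
        access_type="readonly",
        cache_ratio=0.2,
    )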
class EmbeddingLookupFn(torch.autograd.Function):
@staticmethod
def forward(
ctx,
indice: torch.Tensor,
dummy_input: torch.Tensor,
wm_embedding,
is_training: bool = False,
force_dtype: Union[torch.dtype, None] = None,
):
output_tensor = wm_embedding.gather(
indice, is_training=is_training, force_dtype=force_dtype
)
if is_training and wm_embedding.need_grad():
ctx.save_for_backward(indice, output_tensor, dummy_input)
ctx.wm_embedding = wm_embedding
return output_tensor
@staticmethod
def backward(ctx, grad_outputs: torch.Tensor):
indice, output_tensor, dummy_input = ctx.saved_tensors
wm_embedding = ctx.wm_embedding
wm_embedding.add_gradients(indice, grad_outputs)
ctx.wm_embedding = None
return None, torch.zeros_like(dummy_input), None, None, None
class WholeMemoryEmbedding(object):
r"""WholeMemory Embedding"""
def __init__(
self,
wmb_embedding: wmb.PyWholeMemoryEmbedding,
wmb_optimizer: Union[WholeMemoryOptimizer, None],
wmb_cache_policy: Union[WholeMemoryCachePolicy, None],
):
super().__init__()
self.wmb_embedding = wmb_embedding
self.embedding_tensor = None
self.optimizer_states = None
self.wmb_optimizer = wmb_optimizer
self.wmb_cache_policy = wmb_cache_policy
self.adjust_cache = self.wmb_cache_policy is not None
dummy_input_need_grad = self.wmb_optimizer is not None
self.dummy_input = torch.nn.Parameter(
torch.zeros(1), requires_grad=dummy_input_need_grad
)
self.need_apply = False
self.sparse_indices = []
self.sparse_grads = []
def dim(self):
return self.get_embedding_tensor().dim()
@property
def shape(self):
return self.get_embedding_tensor().shape
def set_adjust_cache(self, adjust_cache: bool):
self.adjust_cache = adjust_cache if self.wmb_cache_policy is not None else False
def need_grad(self):
return self.wmb_embedding is not None
def gather(
self,
indice: torch.Tensor,
*,
is_training: bool = False,
force_dtype: Union[torch.dtype, None] = None,
):
assert indice.dim() == 1
embedding_dim = self.get_embedding_tensor().shape[1]
embedding_count = indice.shape[0]
current_cuda_device = "cuda:%d" % (torch.cuda.current_device(),)
output_dtype = (
force_dtype if force_dtype is not None else self.embedding_tensor.dtype
)
need_grad = self.need_grad() and is_training
output_tensor = torch.empty(
[embedding_count, embedding_dim],
device=current_cuda_device,
dtype=output_dtype,
requires_grad=need_grad,
)
if need_grad:
self.need_apply = True
wmb.EmbeddingGatherForward(
self.wmb_embedding,
wrap_torch_tensor(indice),
wrap_torch_tensor(output_tensor),
self.adjust_cache,
get_wholegraph_env_fns(),
get_stream(),
)
return output_tensor
def add_gradients(self, indice: torch.Tensor, grad_outputs: torch.Tensor):
# print(f'adding gradients sparse_indices={indice}, sparse_grads={grad_outputs}')
self.sparse_indices.append(indice)
self.sparse_grads.append(grad_outputs)
def apply_gradients(self, lr: float):
sparse_indices = torch.cat(self.sparse_indices)
sparse_grads = torch.cat(self.sparse_grads)
# print(f'applying gradients sparse_indices={sparse_indices}, sparse_grads={sparse_grads}')
wmb.EmbeddingGatherGradientApply(
self.wmb_embedding,
wrap_torch_tensor(sparse_indices),
wrap_torch_tensor(sparse_grads),
self.adjust_cache,
lr,
get_wholegraph_env_fns(),
get_stream(),
)
self.sparse_indices = []
self.sparse_grads = []
self.need_apply = False
def writeback_all_cache(self):
self.wmb_embedding.writeback_all_cache(get_stream(False))
def drop_all_cache(self):
self.wmb_embedding.drop_all_cache(get_stream(False))
def get_embedding_tensor(self):
if self.embedding_tensor is None:
self.embedding_tensor = WholeMemoryTensor(
self.wmb_embedding.get_embedding_tensor()
)
return self.embedding_tensor
def get_optimizer_state_names(self):
return self.wmb_embedding.get_optimizer_state_names()
def get_optimizer_state(self, state_name):
if state_name not in self.optimizer_states:
self.optimizer_states[state_name] = WholeMemoryTensor(
self.wmb_embedding.get_optimizer_state(state_name)
)
return self.optimizer_states[state_name]
def save(self, file_prefix: str):
self.get_embedding_tensor().to_file_prefix(file_prefix + "_embedding_tensor")
for state_name in self.get_optimizer_state_names():
state = self.get_optimizer_state(state_name)
state.to_file_prefix(file_prefix + "_" + state_name)
def load(
self,
file_prefix: str,
*,
ignore_embedding: bool = False,
part_count: Union[int, None] = None,
):
if ignore_embedding is False:
self.get_embedding_tensor().from_file_prefix(
file_prefix + "_embedding_tensor", part_count
)
for state_name in self.get_optimizer_state_names():
state = self.get_optimizer_state(state_name)
state.from_file_prefix(file_prefix + "_" + state_name, part_count)
def create_embedding(
comm: WholeMemoryCommunicator,
memory_type: str,
memory_location: str,
dtype: torch.dtype,
sizes: List[int],
*,
optimizer: Union[WholeMemoryOptimizer, None] = None,
cache_policy: Union[WholeMemoryCachePolicy, None] = None,
random_init: bool = False,
):
r"""
Create embedding
:param comm: WholeMemoryCommunicator
:param memory_type: WholeMemory type, should be continuous, chunked or distributed
:param memory_location: WholeMemory location, should be cpu or cuda
:param dtype: data type
:param sizes: size of the embedding, must be 2D
:param optimizer: optimizer
:param cache_policy: cache policy
:param random_init: if True, randomly initialize the embedding (Xavier uniform)
:return: WholeMemoryEmbedding
"""
if optimizer is None:
wmb_optimizer = wmb.create_non_optimizer()
else:
wmb_optimizer = optimizer.wmb_opt
if cache_policy is None:
wmb_cache_policy = wmb.create_non_cache_policy()
else:
wmb_cache_policy = cache_policy.wmb_cache_policy
assert len(sizes) == 2
tensor_desc = wmb.PyWholeMemoryTensorDescription()
tensor_desc.set_dtype(torch_dtype_to_wholememory_dtype(dtype))
tensor_desc.set_shape(sizes)
tensor_desc.set_stride([sizes[1], 1])
wm_embedding = WholeMemoryEmbedding(
wmb.create_embedding(
tensor_desc,
comm.wmb_comm,
str_to_wmb_wholememory_memory_type(memory_type),
str_to_wmb_wholememory_location(memory_location),
wmb_optimizer,
wmb_cache_policy,
),
optimizer,
cache_policy,
)
if optimizer is not None:
optimizer.add_embedding(wm_embedding)
if random_init is True:
(
local_tensor,
local_offset,
) = wm_embedding.get_embedding_tensor().get_local_tensor()
torch.nn.init.xavier_uniform_(local_tensor)
comm.barrier()
return wm_embedding
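# Example (editor's sketch): allocating a randomly initialized float32 embedding table
# on the global communicator; the table shape and memory settings are illustrative only.
def _example_create_embedding():
    comm = get_global_communicator()
    return create_embedding(
        comm,
        memory_type="chunked",
        memory_location="cuda",
        dtype=torch.float32,
        sizes=[1000000, 128],
        random_init=True,
    )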
def create_embedding_from_filelist(
comm: WholeMemoryCommunicator,
memory_type: str,
memory_location: str,
filelist: Union[List[str], str],
dtype: torch.dtype,
last_dim_size: int,
*,
optimizer: Union[WholeMemoryOptimizer, None] = None,
cache_policy: Union[WholeMemoryCachePolicy, None] = None,
):
r"""
Create embedding from file list
:param comm: WholeMemoryCommunicator
:param memory_type: WholeMemory type, should be continuous, chunked or distributed
:param memory_location: WholeMemory location, should be cpu or cuda
:param filelist: list of files
:param dtype: data type
:param last_dim_size: size of last dim
:param optimizer: optimizer
:param cache_policy: cache policy
:return: WholeMemoryEmbedding
"""
if isinstance(filelist, str):
filelist = [filelist]
assert last_dim_size > 0
element_size = torch.tensor([], dtype=dtype).element_size()
file_entry_size = element_size * last_dim_size
total_file_size = 0
for filename in filelist:
file_size = get_file_size(filename)
if file_size % file_entry_size != 0:
raise ValueError(
"File %s size is %d not mutlple of %d"
% (filename, file_size, file_entry_size)
)
total_file_size += file_size
total_entry_count = total_file_size // file_entry_size
wm_embedding = create_embedding(
comm,
memory_type,
memory_location,
dtype,
[total_entry_count, last_dim_size],
optimizer=optimizer,
cache_policy=cache_policy,
)
wm_embedding.get_embedding_tensor().from_filelist(filelist)
return wm_embedding
def destroy_embedding(wm_embedding: WholeMemoryEmbedding):
"""
Destroy WholeMemoryEmbedding
:param wm_embedding: WholeMemoryEmbedding to destroy
:return: None
"""
wm_embedding.wmb_embedding.destroy_embedding()
wm_embedding.wmb_embedding = None
class WholeMemoryEmbeddingModule(torch.nn.Module):
"""
torch.nn.Module wrapper of WholeMemoryEmbedding
"""
def __init__(self, wm_embedding: WholeMemoryEmbedding):
super().__init__()
self.wm_embedding = wm_embedding
self.embedding_gather_fn = EmbeddingLookupFn.apply
def forward(
self, indice: torch.Tensor, force_dtype: Union[torch.dtype, None] = None
):
return self.embedding_gather_fn(
indice,
self.wm_embedding.dummy_input,
self.wm_embedding,
self.training,
force_dtype,
)
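# Example (editor's sketch): one training step with the module wrapper above. The model,
# loss function, labels and index tensor are placeholders; only the lookup/backward/step
# pattern is the point.
def _example_training_step(wm_embedding, optimizer, indices, model, loss_fn, labels):
    gather_fn = WholeMemoryEmbeddingModule(wm_embedding)
    feats = gather_fn(indices, force_dtype=torch.float32)  # differentiable gather
    loss = loss_fn(model(feats), labels)
    loss.backward()              # sparse gradients are buffered inside wm_embedding
    optimizer.step(lr=1e-3)      # WholeMemoryOptimizer applies them collectively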
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch/graph_ops.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pylibwholegraph.binding.wholememory_binding as wmb
from .wholegraph_env import (
get_stream,
TorchMemoryContext,
get_wholegraph_env_fns,
wrap_torch_tensor,
)
def append_unique(
target_node_tensor: torch.Tensor,
neighbor_node_tensor: torch.Tensor,
need_neighbor_raw_to_unique: bool = False,
):
"""
Append neighbor_node_tensor to target_node_tensor, keeping target_node_tensor unchanged, and deduplicate the result.
e.g. if target_node_tensor is [3, 11, 2, 10] and neighbor_node_tensor is [4, 5, 2, 11, 6, 9, 10, 5],
output_unique_node may be [3, 11, 2, 10, 6, 4, 9, 5]; the order of 6, 4, 9, 5 may change.
neighbor_raw_to_unique_mapping would then be [5, 7, 2, 1, 4, 6, 3, 7]
:param target_node_tensor: target node tensor
:param neighbor_node_tensor: neighbor node tensor
:param need_neighbor_raw_to_unique: whether to output neighbor_raw_to_unique_mapping
:return: output_unique_node, plus neighbor_raw_to_unique_mapping if requested
"""
assert target_node_tensor.dim() == 1
assert neighbor_node_tensor.dim() == 1
assert target_node_tensor.is_cuda
assert neighbor_node_tensor.is_cuda
output_unique_node_context = TorchMemoryContext()
output_unique_node_c_context = output_unique_node_context.get_c_context()
output_neighbor_raw_to_unique_mapping_tensor = None
if need_neighbor_raw_to_unique:
output_neighbor_raw_to_unique_mapping_tensor = torch.empty(
neighbor_node_tensor.shape[0], device="cuda", dtype=torch.int
)
wmb.append_unique(
wrap_torch_tensor(target_node_tensor),
wrap_torch_tensor(neighbor_node_tensor),
output_unique_node_c_context,
wrap_torch_tensor(output_neighbor_raw_to_unique_mapping_tensor),
get_wholegraph_env_fns(),
get_stream(),
)
if need_neighbor_raw_to_unique:
return (
output_unique_node_context.get_tensor(),
output_neighbor_raw_to_unique_mapping_tensor,
)
else:
return output_unique_node_context.get_tensor()
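# Example (editor's sketch) mirroring the docstring above: the target ids are kept at the
# front of the unique output, new neighbor ids are appended in an unspecified order, and
# the optional mapping gives, for each raw neighbor, its index in the unique output.
def _example_append_unique():
    target = torch.tensor([3, 11, 2, 10], device="cuda")
    neighbor = torch.tensor([4, 5, 2, 11, 6, 9, 10, 5], device="cuda")
    unique_nodes, raw_to_unique = append_unique(
        target, neighbor, need_neighbor_raw_to_unique=True
    )
    return unique_nodes, raw_to_unique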
def add_csr_self_loop(
csr_row_ptr_tensor: torch.Tensor, csr_col_ptr_tensor: torch.Tensor
):
"""
Add self loops to a sampled CSR graph
NOTE: this function does not check whether self loops already exist in the input CSR graph.
:param csr_row_ptr_tensor: CSR row pointer tensor
:param csr_col_ptr_tensor: CSR column index tensor
:return: CSR graph with self loops added
"""
assert csr_row_ptr_tensor.dim() == 1
assert csr_col_ptr_tensor.dim() == 1
assert csr_row_ptr_tensor.is_cuda
assert csr_col_ptr_tensor.is_cuda
output_csr_row_ptr_tensor = torch.empty(
(csr_row_ptr_tensor.shape[0],), device="cuda", dtype=csr_row_ptr_tensor.dtype
)
output_csr_col_ptr_tensor = torch.empty(
(csr_col_ptr_tensor.shape[0] + csr_row_ptr_tensor.shape[0] - 1,),
device="cuda",
dtype=csr_col_ptr_tensor.dtype,
)
wmb.add_csr_self_loop(
wrap_torch_tensor(csr_row_ptr_tensor),
wrap_torch_tensor(csr_col_ptr_tensor),
wrap_torch_tensor(output_csr_row_ptr_tensor),
wrap_torch_tensor(output_csr_col_ptr_tensor),
get_stream(),
)
return output_csr_row_ptr_tensor, output_csr_col_ptr_tensor
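# Example (editor's sketch): on a tiny sampled graph with 2 rows and 3 edges, adding self
# loops produces 3 + 2 = 5 edges, one extra per row. The int32 dtype is an assumption for
# illustration; in practice the tensors usually come straight from the graph samplers.
def _example_add_csr_self_loop():
    csr_row_ptr = torch.tensor([0, 2, 3], dtype=torch.int32, device="cuda")
    csr_col_ind = torch.tensor([1, 0, 0], dtype=torch.int32, device="cuda")
    return add_csr_self_loop(csr_row_ptr, csr_col_ind)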
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch/wholegraph_env.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import importlib
import torch
import pylibwholegraph
import pylibwholegraph.binding.wholememory_binding as wmb
from typing import Union
from .utils import wholememory_dtype_to_torch_dtype, torch_dtype_to_wholememory_dtype
default_wholegraph_env_context = None
torch_cpp_ext_loaded = False
torch_cpp_ext_lib = None
def get_stream():
cuda_stream_int_ptr = None
cuda_stream = torch.cuda.current_stream()._as_parameter_
if cuda_stream.value is not None:
cuda_stream_int_ptr = cuda_stream.value
else:
cuda_stream_int_ptr = int(0)
return cuda_stream_int_ptr
class TorchEmptyGlobalContext(object):
def __init__(self):
pass
class TorchMemoryContext(object):
def __init__(self):
self.tensor = None
if torch_cpp_ext_loaded:
self.handle = torch_cpp_ext_lib.create_output_context()
else:
self.handle = 0
def __del__(self):
self.free()
def get_c_context(self):
if torch_cpp_ext_loaded:
return self.handle
else:
return id(self)
def set_tensor(self, t: torch.Tensor):
self.tensor = t
def get_handle(self):
return self.handle
def get_tensor(self):
if torch_cpp_ext_loaded:
self.tensor = torch_cpp_ext_lib.get_tensor_from_context(self.handle)
return self.tensor
else:
return self.tensor
def free(self):
self.tensor = None
if torch_cpp_ext_loaded and self.get_handle() != 0:
torch_cpp_ext_lib.destroy_output_context(self.get_handle())
self.handle = 0
def torch_create_memory_context_env_fn(
global_context: TorchEmptyGlobalContext,
) -> TorchMemoryContext:
t = TorchMemoryContext()
return t
def torch_destroy_memory_context_env_fn(
memory_context: TorchMemoryContext, global_context: TorchEmptyGlobalContext
):
memory_context.free()
def torch_malloc_env_fn(
tensor_desc: wmb.PyWholeMemoryTensorDescription,
malloc_type: wmb.PyMemoryAllocType,
memory_context: TorchMemoryContext,
global_context: TorchEmptyGlobalContext,
) -> int:
# print('already in torch_malloc_env_fn', file=sys.stderr)
pinned = False
device = None
# print('torch_malloc_env_fn before config, type=%d' % (malloc_type.get_type(), ), file=sys.stderr)
if malloc_type.get_type() == wmb.WholeMemoryMemoryAllocType.MatDevice:
device = torch.device("cuda")
elif malloc_type.get_type() == wmb.WholeMemoryMemoryAllocType.MatHost:
device = torch.device("cpu")
else:
assert malloc_type.get_type() == wmb.WholeMemoryMemoryAllocType.MatPinned
device = torch.device("cpu")
pinned = True
# print('torch_malloc_env_fn after config', file=sys.stderr)
shape = tensor_desc.shape
# print('torch_malloc_env_fn after shape', file=sys.stderr)
dtype = wholememory_dtype_to_torch_dtype(tensor_desc.dtype)
# print('torch_malloc_env_fn after dtype', file=sys.stderr)
t = torch.empty(shape, dtype=dtype, device=device, pin_memory=pinned)
memory_context.set_tensor(t)
# print('torch_malloc_env_fn done return=%ld' % (t.data_ptr(), ), file=sys.stderr)
return t.data_ptr()
def torch_free_env_fn(
memory_context: TorchMemoryContext, global_context: TorchEmptyGlobalContext
):
memory_context.free()
class ExtContextWrapper(object):
def __init__(self, env_func: int):
self.env_func = env_func
def get_env_fns(self) -> int:
return self.env_func
def create_current_env_context():
# print('in wholegraph_env.py create_current_env_context')
global torch_cpp_ext_loaded
global torch_cpp_ext_lib
if torch_cpp_ext_loaded:
return ExtContextWrapper(torch_cpp_ext_lib.get_wholegraph_env_fns())
context = wmb.GlobalContextWrapper()
global_context = TorchEmptyGlobalContext()
context.create_context(
torch_create_memory_context_env_fn,
torch_destroy_memory_context_env_fn,
torch_malloc_env_fn,
torch_free_env_fn,
global_context,
torch_malloc_env_fn,
torch_free_env_fn,
global_context,
)
return context
def get_wholegraph_env_fns(use_default=True) -> int:
global default_wholegraph_env_context
wholegraph_env_context = None
if default_wholegraph_env_context is None or not use_default:
wholegraph_env_context = create_current_env_context()
if use_default:
default_wholegraph_env_context = wholegraph_env_context
else:
wholegraph_env_context = default_wholegraph_env_context
return wholegraph_env_context.get_env_fns()
def wrap_torch_tensor(t: Union[torch.Tensor, None]) -> wmb.WrappedLocalTensor:
py_desc = wmb.PyWholeMemoryTensorDescription()
wm_t = wmb.WrappedLocalTensor()
if t is None:
return wm_t.wrap_tensor(py_desc, 0)
py_desc.set_dtype(torch_dtype_to_wholememory_dtype(t.dtype))
py_desc.set_storage_offset(0)
py_desc.set_shape(tuple(t.shape))
py_desc.set_stride(tuple(t.stride()))
return wm_t.wrap_tensor(py_desc, t.data_ptr())
def get_cpp_extension_src_path():
return os.path.dirname(pylibwholegraph.__file__)
def compile_cpp_extension():
import torch.utils.cpp_extension
global torch_cpp_ext_loaded
global torch_cpp_ext_lib
cpp_extension_path = os.path.join(get_cpp_extension_src_path(), "torch_cpp_ext")
extra_cflags = []
extra_ldflags = ["-lwholegraph"]
if "CONDA_PREFIX" in os.environ:
extra_cflags.append(
"".join(["-I", os.path.join(os.environ["CONDA_PREFIX"], "include")])
)
extra_ldflags.append(
"".join(["-L", os.path.join(os.environ["CONDA_PREFIX"], "lib")])
)
if "LIBWHOLEGRAPH_DIR" in os.environ:
extra_cflags.append(
"".join(["-I", os.path.join(os.environ["LIBWHOLEGRAPH_DIR"], "include")])
)
extra_ldflags.append(
"".join(["-L", os.path.join(os.environ["LIBWHOLEGRAPH_DIR"], "lib")])
)
torch.utils.cpp_extension.load(
name="pylibwholegraph.pylibwholegraph_torch_ext",
sources=[
os.path.join(cpp_extension_path, "wholegraph_torch_ext.cpp"),
os.path.join(cpp_extension_path, "torch_env_func_ptrs.cpp"),
os.path.join(cpp_extension_path, "torch_utils.cpp"),
],
extra_cflags=extra_cflags,
extra_ldflags=extra_ldflags,
with_cuda=True,
verbose=True,
)
torch_cpp_ext_lib = importlib.import_module(
"pylibwholegraph.pylibwholegraph_torch_ext"
)
torch_cpp_ext_loaded = True
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch/graph_structure.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from typing import Union, List
from .tensor import WholeMemoryTensor
from . import graph_ops
from . import wholegraph_ops
class GraphStructure(object):
r"""Graph structure storage
Actually, it is the graph structure of one relation, represented in CSR format.
It contains CSR representation of Graph structure, and also attributes associated with nodes and edges.
"""
def __init__(self):
super().__init__()
self.node_count = 0
self.edge_count = 0
self.csr_row_ptr = None
self.csr_col_ind = None
self.node_attributes = {}
self.edge_attributes = {}
def set_csr_graph(
self, csr_row_ptr: WholeMemoryTensor, csr_col_ind: WholeMemoryTensor
):
"""
Set the CSR graph structure
:param csr_row_ptr: CSR graph row pointer
:param csr_col_ind: CSR graph column index
:return: None
"""
assert csr_row_ptr.dim() == 1
assert csr_row_ptr.dtype == torch.int64
assert csr_row_ptr.shape[0] > 1
self.node_count = csr_row_ptr.shape[0] - 1
self.edge_count = csr_col_ind.shape[0]
assert csr_col_ind.dim() == 1
assert csr_col_ind.dtype == torch.int32 or csr_col_ind.dtype == torch.int64
self.csr_row_ptr = csr_row_ptr
self.csr_col_ind = csr_col_ind
def set_node_attribute(self, attr_name: str, attr_tensor: WholeMemoryTensor):
"""
Set attribute for node
:param attr_name: attribute name for node
:param attr_tensor: attribute tensor
:return: None
"""
assert attr_name not in self.node_attributes
assert attr_tensor.shape[0] == self.node_count
self.node_attributes[attr_name] = attr_tensor
def set_edge_attribute(self, attr_name: str, attr_tensor: WholeMemoryTensor):
"""
Set attribute for edge
:param attr_name: attribute name for edge
:param attr_tensor: attribute tensor
:return: None
"""
assert attr_name not in self.edge_attributes
assert attr_tensor.shape[0] == self.edge_count
self.edge_attributes[attr_name] = attr_tensor
def unweighted_sample_without_replacement_one_hop(
self,
center_nodes_tensor: torch.Tensor,
max_sample_count: int,
*,
random_seed: Union[int, None] = None,
need_center_local_output: bool = False,
need_edge_output: bool = False
):
"""
Unweighted Sample without replacement on CSR graph structure
:param center_nodes_tensor: center node ids
:param max_sample_count: max sample count for each center node
:param random_seed: random seed for the sampler
:param need_center_local_output: If True, also output a tensor with the same length as the sampled nodes,
where each element is the index of the corresponding center node in center_nodes_tensor.
:param need_edge_output: If True, output the edge index of each sampled node
:return: csr_row_ptr, sampled_nodes[, center_node_local_id, edge_index]
"""
return wholegraph_ops.unweighted_sample_without_replacement(
self.csr_row_ptr.wmb_tensor,
self.csr_col_ind.wmb_tensor,
center_nodes_tensor,
max_sample_count,
random_seed,
need_center_local_output,
need_edge_output,
)
def weighted_sample_without_replacement_one_hop(
self,
weight_name: str,
center_nodes_tensor: torch.Tensor,
max_sample_count: int,
*,
random_seed: Union[int, None] = None,
need_center_local_output: bool = False,
need_edge_output: bool = False
):
"""
Weighted Sample without replacement on CSR graph structure with edge weights attribute
:param weight_name: edge attribute name for weight
:param center_nodes_tensor: center node ids
:param max_sample_count: max sample count for each center node
:param random_seed: random seed for the sampler
:param need_center_local_output: If True, also output a tensor with the same length as the sampled nodes,
where each element is the index of the corresponding center node in center_nodes_tensor.
:param need_edge_output: If True, output the edge index of each sampled node
:return: csr_row_ptr, sampled_nodes[, center_node_local_id, edge_index]
"""
assert weight_name in self.edge_attributes
weight_tensor = self.edge_attributes[weight_name]
return wholegraph_ops.weighted_sample_without_replacement(
self.csr_row_ptr.wmb_tensor,
self.csr_col_ind.wmb_tensor,
weight_tensor.wmb_tensor,
center_nodes_tensor,
max_sample_count,
random_seed,
need_center_local_output,
need_edge_output,
)
def multilayer_sample_without_replacement(
self,
node_ids: torch.Tensor,
max_neighbors: List[int],
weight_name: Union[str, None] = None,
):
"""
Multilayer sample without replacement
:param node_ids: initial node ids
:param max_neighbors: maximum neighbor for each layer
:param weight_name: edge attribute name for weight, if None, use unweighted sample
:return: target_gids, edge_indice, csr_row_ptr, csr_col_ind
"""
hops = len(max_neighbors)
edge_indice = [None] * hops
csr_row_ptr = [None] * hops
csr_col_ind = [None] * hops
target_gids = [None] * (hops + 1)
target_gids[hops] = node_ids
for i in range(hops - 1, -1, -1):
if weight_name is None:
(
neighbor_gids_offset,
neighbor_gids_vdata,
neighbor_src_lids,
) = self.unweighted_sample_without_replacement_one_hop(
target_gids[i + 1],
max_neighbors[hops - i - 1],
need_center_local_output=True,
)
else:
(
neighbor_gids_offset,
neighbor_gids_vdata,
neighbor_src_lids,
) = self.weighted_sample_without_replacement_one_hop(
weight_name,
target_gids[i + 1],
max_neighbors[hops - i - 1],
need_center_local_output=True,
)
(unique_gids, neighbor_raw_to_unique_mapping,) = graph_ops.append_unique(
target_gids[i + 1],
neighbor_gids_vdata,
need_neighbor_raw_to_unique=True,
)
csr_row_ptr[i] = neighbor_gids_offset
csr_col_ind[i] = neighbor_raw_to_unique_mapping
neighbor_count = neighbor_gids_vdata.size()[0]
edge_indice[i] = torch.cat(
[
torch.reshape(neighbor_raw_to_unique_mapping, (1, neighbor_count)),
torch.reshape(neighbor_src_lids, (1, neighbor_count)),
]
)
target_gids[i] = unique_gids
return target_gids, edge_indice, csr_row_ptr, csr_col_ind
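# Example (editor's sketch): building a GraphStructure from two existing WholeMemoryTensors
# and running a 2-hop unweighted sample; csr_row_ptr_wm, csr_col_ind_wm and seed_ids are
# assumed to be created elsewhere.
def _example_two_hop_sample(csr_row_ptr_wm, csr_col_ind_wm, seed_ids):
    graph = GraphStructure()
    graph.set_csr_graph(csr_row_ptr_wm, csr_col_ind_wm)
    return graph.multilayer_sample_without_replacement(seed_ids, [10, 25])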
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch/distributed_launch.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import ArgumentParser
class DistributedConfig(object):
def __init__(self):
super(DistributedConfig, self).__init__()
self.rank = -1
self.world_size = -1
self.local_rank = -1
self.local_size = -1
self.master_addr = ""
self.master_port = -1
def get_rank(self):
return self.rank
def get_world_size(self):
return self.world_size
def get_local_rank(self):
return self.local_rank
def get_local_size(self):
return self.local_size
def get_master_addr(self):
return self.master_addr
def get_master_port(self):
return self.master_port
distributed_config = DistributedConfig()
def get_rank():
global distributed_config
return distributed_config.get_rank()
def get_world_size():
global distributed_config
return distributed_config.get_world_size()
def get_local_rank():
global distributed_config
return distributed_config.get_local_rank()
def get_master_addr():
global distributed_config
return distributed_config.get_master_addr()
def get_master_port():
global distributed_config
return distributed_config.get_master_port()
def get_local_size():
global distributed_config
return distributed_config.get_local_size()
def is_main_process():
return get_rank() == 0
def add_distributed_launch_options(parser: ArgumentParser):
parser.add_argument(
"--launch-agent",
dest="launch_agent",
default="mpi",
help="launch agent used, mpi, pytorch or spawn",
)
# command line flags
parser.add_argument(
"--rank", dest="rank", type=int, default=-1, help="command line flag for rank"
)
parser.add_argument(
"--world-size",
dest="world_size",
type=int,
default=-1,
help="command line flag for world_size",
)
parser.add_argument(
"--local-rank",
dest="local_rank",
type=int,
default=-1,
help="command line flag for local_rank",
)
parser.add_argument(
"--local-size",
dest="local_size",
type=int,
default=-1,
help="command line flag for local_size",
)
parser.add_argument(
"--master-addr",
dest="master_addr",
default="",
help="command line flag for master_addr",
)
parser.add_argument(
"--master-port",
dest="master_port",
type=int,
default=-1,
help="command line flag for master_port",
)
# environment variable names
parser.add_argument(
"--launch-env-name-world-rank",
dest="launch_env_name_world_rank",
default="RANK",
help="environment variable name for world rank",
)
parser.add_argument(
"--launch-env-name-world-size",
dest="launch_env_name_world_size",
default="WORLD_SIZE",
help="environment variable name for world size",
)
parser.add_argument(
"--launch-env-name-local-rank",
dest="launch_env_name_local_rank",
default="LOCAL_RANK",
help="environment variable name for local rank",
)
parser.add_argument(
"--launch-env-name-local-size",
dest="launch_env_name_local_size",
default="LOCAL_WORLD_SIZE",
help="environment variable name for local size",
)
parser.add_argument(
"--launch-env-name-master-addr",
dest="launch_env_name_master_addr",
default="MASTER_ADDR",
help="environment variable name for master_addr",
)
parser.add_argument(
"--launch-env-name-master-port",
dest="launch_env_name_master_port",
default="MASTER_PORT",
help="environment variable name for master_port",
)
return
def get_value_from_env(env_name, fill_default=None):
if env_name not in os.environ:
if fill_default is not None:
return fill_default
else:
raise ValueError(
"both command line flag and environment %s not exist." % (env_name,)
)
else:
return os.environ[env_name]
def get_value_from_option_and_env(
option_value, env_name, empty_value, fill_default=None
):
if option_value == empty_value:
return get_value_from_env(env_name, fill_default)
else:
return option_value
def distributed_launch_mpi(args, main_func):
from mpi4py import MPI
mpi_communicator = MPI.COMM_WORLD
shared_mpi_communicator = mpi_communicator.Split_type(MPI.COMM_TYPE_SHARED)
global distributed_config
distributed_config.rank = mpi_communicator.Get_rank()
distributed_config.world_size = mpi_communicator.Get_size()
distributed_config.local_rank = shared_mpi_communicator.Get_rank()
distributed_config.local_size = shared_mpi_communicator.Get_size()
distributed_config.master_addr = get_value_from_option_and_env(
args.master_addr, args.launch_env_name_master_addr, "", "localhost"
)
distributed_config.master_port = int(
get_value_from_option_and_env(
args.master_port, args.launch_env_name_master_port, -1, 12335
)
)
os.environ["RANK"] = str(distributed_config.rank)
os.environ["WORLD_SIZE"] = str(distributed_config.world_size)
os.environ["MASTER_ADDR"] = distributed_config.master_addr
os.environ["MASTER_PORT"] = str(distributed_config.master_port)
main_func()
def distributed_launch_pytorch(
args,
main_func,
):
global distributed_config
distributed_config.rank = int(
get_value_from_env(args.launch_env_name_world_rank)
)
distributed_config.world_size = int(
get_value_from_env(args.launch_env_name_world_size)
)
distributed_config.local_rank = int(
get_value_from_option_and_env(
args.local_rank, args.launch_env_name_local_rank, -1
)
)
assert distributed_config.local_rank >= 0
distributed_config.local_size = int(
get_value_from_option_and_env(
args.local_size, args.launch_env_name_local_size, -1
)
)
assert distributed_config.local_size > 0
distributed_config.master_addr = get_value_from_env(
args.launch_env_name_master_addr
)
distributed_config.master_port = int(
get_value_from_env(args.launch_env_name_master_port)
)
main_func()
def main_spawn_routine(local_rank, main_func, distributed_config_input):
global distributed_config
distributed_config = distributed_config_input
node_rank = distributed_config.rank
node_size = distributed_config.world_size
distributed_config.rank = (
node_rank * distributed_config.get_local_size() + local_rank
)
distributed_config.world_size = node_size * distributed_config.get_local_size()
distributed_config.local_rank = local_rank
os.environ["RANK"] = str(distributed_config.rank)
os.environ["WORLD_SIZE"] = str(distributed_config.world_size)
os.environ["MASTER_ADDR"] = distributed_config.master_addr
os.environ["MASTER_PORT"] = str(distributed_config.master_port)
main_func()
def distributed_launch_spawn(args, main_func):
global distributed_config
distributed_config.rank = int(
get_value_from_option_and_env(
args.rank, args.launch_env_name_world_rank, -1, 0
)
)
distributed_config.world_size = int(
get_value_from_option_and_env(
args.world_size, args.launch_env_name_world_size, -1, 1
)
)
distributed_config.local_rank = 0
distributed_config.local_size = int(
get_value_from_option_and_env(
args.local_size, args.launch_env_name_local_size, -1, 1
)
)
distributed_config.master_addr = get_value_from_option_and_env(
args.master_addr, args.launch_env_name_master_addr, "", "localhost"
)
distributed_config.master_port = int(
get_value_from_option_and_env(
args.master_port, args.launch_env_name_master_port, -1, 12335
)
)
import torch.multiprocessing as mp
if distributed_config.local_size > 1:
mp.spawn(
main_spawn_routine,
nprocs=distributed_config.local_size,
args=(main_func, distributed_config),
)
else:
main_spawn_routine(0, main_func, distributed_config)
def distributed_launch(args, main_func):
assert (
args.launch_agent == "mpi"
or args.launch_agent == "pytorch"
or args.launch_agent == "spawn"
)
if args.launch_agent == "mpi":
# use MPI to launch multiprocess
# when using MPI, command is like:
# mpirun python [train_script.py]
distributed_launch_mpi(args, main_func)
elif args.launch_agent == "pytorch":
# use pytorch DDP to launch multiprocess
# when using pytorch DDP, assume two nodes with 8 GPU each, command is like:
# on node1: python -m torch.distributed.run --nproc_per_node=8 --nnodes=2 --node_rank=0 --master_addr=node1
# --master_port=12335 [train_script.py] --launch_agent=pytorch
# on node2: python -m torch.distributed.run --nproc_per_node=8 --nnodes=2 --node_rank=1 --master_addr=node1
# --master_port=12335 [train_script.py] --launch_agent=pytorch
distributed_launch_pytorch(args, main_func)
else:
# cluster scheduler
# when using spawn to create multiprocess for each node, assume two nodes with 8 GPU each, command is like:
# on node1: python [train_script.py] --launch_agent=spawn --master_addr=node1 --master_port=12335
# --local_size=8 --rank=0 --world_size=2
# on node2: python [train_script.py] --launch_agent=spawn --master_addr=node1 --master_port=12335
# --local_size=8 --rank=1 --world_size=2
distributed_launch_spawn(args, main_func)
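# Example (editor's sketch): a minimal entry point wiring the helpers above together. The
# body of main_func is a placeholder.
def _example_entry_point():
    parser = ArgumentParser()
    add_distributed_launch_options(parser)
    args = parser.parse_args()

    def main_func():
        # the launch agent has filled in the distributed configuration by now
        print("rank %d of %d" % (get_rank(), get_world_size()))

    distributed_launch(args, main_func)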
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch/initialize.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import torch.utils.dlpack
import pylibwholegraph.binding.wholememory_binding as wmb
from .comm import set_world_info, get_global_communicator, get_local_node_communicator
def init(world_rank: int, world_size: int, local_rank: int, local_size: int):
wmb.init(0)
set_world_info(world_rank, world_size, local_rank, local_size)
def init_torch_env(world_rank: int, world_size: int, local_rank: int, local_size: int):
r"""Init WholeGraph environment for PyTorch.
:param world_rank: world rank of current process
:param world_size: world size of all processes
:param local_rank: local rank of current process
:param local_size: local size
:return: None
"""
os.environ["RANK"] = str(world_rank)
os.environ["WORLD_SIZE"] = str(world_size)
if "MASTER_ADDR" not in os.environ:
if world_rank == 0:
print("[WARNING] MASTER_ADDR not set, resetting to localhost")
os.environ["MASTER_ADDR"] = "localhost"
if "MASTER_PORT" not in os.environ:
if world_rank == 0:
print("[WARNING] MASTER_PORT not set, resetting to 12335")
os.environ["MASTER_PORT"] = "12335"
wmb.init(0)
torch.set_num_threads(1)
torch.cuda.set_device(local_rank)
torch.distributed.init_process_group(backend="nccl", init_method="env://")
set_world_info(world_rank, world_size, local_rank, local_size)
def init_torch_env_and_create_wm_comm(
world_rank: int, world_size: int, local_rank: int, local_size: int
):
r"""Init WholeGraph environment for PyTorch and create single communicator for all ranks.
:param world_rank: world rank of current process
:param world_size: world size of all processes
:param local_rank: local rank of current process
:param local_size: local size
:return: global and local node Communicator
"""
init_torch_env(world_rank, world_size, local_rank, local_size)
global_comm = get_global_communicator()
local_comm = get_local_node_communicator()
return global_comm, local_comm
def finalize():
r"""Finalize WholeGraph.
:return: None
"""
wmb.finalize()
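# Example (editor's sketch): typical per-process setup and teardown when launched with
# torchrun; RANK / WORLD_SIZE / LOCAL_RANK / LOCAL_WORLD_SIZE are the environment
# variables torch.distributed.run exports.
def _example_process_lifecycle():
    world_rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    local_rank = int(os.environ["LOCAL_RANK"])
    local_size = int(os.environ["LOCAL_WORLD_SIZE"])
    global_comm, local_comm = init_torch_env_and_create_wm_comm(
        world_rank, world_size, local_rank, local_size
    )
    # ... create embeddings / graph structures on global_comm or local_comm and train ...
    finalize()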
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch/gnn_model.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from .graph_structure import GraphStructure
from .embedding import WholeMemoryEmbedding, WholeMemoryEmbeddingModule
from .common_options import parse_max_neighbors
import torch.nn.functional as F
from .graph_ops import add_csr_self_loop
framework_name = None
def set_framework(framework: str):
global framework_name
assert framework_name is None
framework_name = framework
global SAGEConv, GATConv
if framework_name == "dgl":
global dgl
import dgl
from dgl.nn.pytorch.conv import SAGEConv, GATConv
elif framework_name == "pyg":
global SparseTensor
from torch_sparse import SparseTensor
from torch_geometric.nn import SAGEConv, GATConv
elif framework_name == "wg":
from wg_torch.gnn.SAGEConv import SAGEConv
from wg_torch.gnn.GATConv import GATConv
elif framework_name == "cugraph":
from .cugraphops.sage_conv import CuGraphSAGEConv as SAGEConv
from .cugraphops.gat_conv import CuGraphGATConv as GATConv
def create_gnn_layers(
in_feat_dim, hidden_feat_dim, class_count, num_layer, num_head, model_type
):
gnn_layers = torch.nn.ModuleList()
global framework_name
for i in range(num_layer):
layer_output_dim = (
hidden_feat_dim // num_head if i != num_layer - 1 else class_count
)
layer_input_dim = in_feat_dim if i == 0 else hidden_feat_dim
mean_output = True if i == num_layer - 1 else False
if framework_name == "pyg":
if model_type == "sage":
gnn_layers.append(SAGEConv(layer_input_dim, layer_output_dim))
elif model_type == "gat":
concat = not mean_output
gnn_layers.append(
GATConv(
layer_input_dim, layer_output_dim, heads=num_head, concat=concat
)
)
else:
assert model_type == "gcn"
gnn_layers.append(
SAGEConv(layer_input_dim, layer_output_dim, root_weight=False)
)
elif framework_name == "dgl":
if model_type == "sage":
gnn_layers.append(SAGEConv(layer_input_dim, layer_output_dim, "mean"))
elif model_type == "gat":
gnn_layers.append(
GATConv(
layer_input_dim,
layer_output_dim,
num_heads=num_head,
allow_zero_in_degree=True,
)
)
else:
assert model_type == "gcn"
gnn_layers.append(SAGEConv(layer_input_dim, layer_output_dim, "gcn"))
elif framework_name == "wg":
if model_type == "sage":
gnn_layers.append(SAGEConv(layer_input_dim, layer_output_dim))
elif model_type == "gat":
gnn_layers.append(
GATConv(
layer_input_dim,
layer_output_dim,
num_heads=num_head,
mean_output=mean_output,
)
)
else:
assert model_type == "gcn"
gnn_layers.append(
SAGEConv(layer_input_dim, layer_output_dim, aggregator="gcn")
)
elif framework_name == "cugraph":
assert model_type == "sage" or model_type == "gat"
if model_type == "sage":
gnn_layers.append(SAGEConv(layer_input_dim, layer_output_dim))
elif model_type == "gat":
concat = not mean_output
gnn_layers.append(
GATConv(
layer_input_dim, layer_output_dim, heads=num_head, concat=concat
)
)
return gnn_layers
def create_sub_graph(
target_gid,
target_gid_1,
edge_data,
csr_row_ptr,
csr_col_ind,
max_num_neighbors: int,
add_self_loop: bool,
):
global framework_name
if framework_name == "pyg":
neighboor_dst_unique_ids = csr_col_ind
neighboor_src_unique_ids = edge_data[1]
target_neighbor_count = target_gid.size()[0]
if add_self_loop:
self_loop_ids = torch.arange(
0,
target_gid_1.size()[0],
dtype=neighboor_dst_unique_ids.dtype,
device=target_gid.device,
)
edge_index = SparseTensor(
row=torch.cat([neighboor_src_unique_ids, self_loop_ids]).long(),
col=torch.cat([neighboor_dst_unique_ids, self_loop_ids]).long(),
sparse_sizes=(target_gid_1.size()[0], target_neighbor_count),
)
else:
edge_index = SparseTensor(
row=neighboor_src_unique_ids.long(),
col=neighboor_dst_unique_ids.long(),
sparse_sizes=(target_gid_1.size()[0], target_neighbor_count),
)
return edge_index
elif framework_name == "dgl":
if add_self_loop:
csr_row_ptr, csr_col_ind = add_csr_self_loop(csr_row_ptr, csr_col_ind)
block = dgl.create_block(
(
'csc',
(
csr_row_ptr,
csr_col_ind,
torch.empty(0, dtype=torch.int),
),
),
num_src_nodes=target_gid.size(0),
num_dst_nodes=target_gid_1.size(0),
)
return block
elif framework_name == "cugraph":
if add_self_loop:
csr_row_ptr, csr_col_ind = add_csr_self_loop(csr_row_ptr, csr_col_ind)
max_num_neighbors = max_num_neighbors + 1
return [csr_row_ptr, csr_col_ind, max_num_neighbors]
else:
assert framework_name == "wg"
return [csr_row_ptr, csr_col_ind]
return None
def layer_forward(layer, x_feat, x_target_feat, sub_graph):
global framework_name
if framework_name == "pyg":
x_feat = layer((x_feat, x_target_feat), sub_graph)
elif framework_name == "dgl":
x_feat = layer(sub_graph, (x_feat, x_target_feat))
elif framework_name == "cugraph":
x_feat = layer(x_feat, sub_graph[0], sub_graph[1], sub_graph[2])
elif framework_name == "wg":
x_feat = layer(sub_graph[0], sub_graph[1], x_feat, x_target_feat)
return x_feat
class HomoGNNModel(torch.nn.Module):
def __init__(
self,
graph_structure: GraphStructure,
node_embedding: WholeMemoryEmbedding,
args,
):
super().__init__()
hidden_feat_dim = args.hiddensize
self.graph_structure = graph_structure
self.node_embedding = node_embedding
self.num_layer = args.layernum
self.hidden_feat_dim = args.hiddensize
num_head = args.heads if (args.model == "gat") else 1
assert hidden_feat_dim % num_head == 0
in_feat_dim = self.node_embedding.shape[1]
self.gnn_layers = create_gnn_layers(
in_feat_dim,
hidden_feat_dim,
args.classnum,
args.layernum,
num_head,
args.model,
)
self.mean_output = True if args.model == "gat" else False
self.add_self_loop = True if args.model == "gat" else False
self.gather_fn = WholeMemoryEmbeddingModule(self.node_embedding)
self.dropout = args.dropout
self.max_neighbors = parse_max_neighbors(args.layernum, args.neighbors)
self.max_inference_neighbors = parse_max_neighbors(args.layernum, args.inferencesample)
def forward(self, ids):
global framework_name
max_neighbors = self.max_neighbors if self.training else self.max_inference_neighbors
ids = ids.to(self.graph_structure.csr_col_ind.dtype).cuda()
(
target_gids,
edge_indice,
csr_row_ptrs,
csr_col_inds,
) = self.graph_structure.multilayer_sample_without_replacement(
ids, max_neighbors
)
x_feat = self.gather_fn(target_gids[0], force_dtype=torch.float32)
for i in range(self.num_layer):
x_target_feat = x_feat[: target_gids[i + 1].numel()]
sub_graph = create_sub_graph(
target_gids[i],
target_gids[i + 1],
edge_indice[i],
csr_row_ptrs[i],
csr_col_inds[i],
max_neighbors[self.num_layer - 1 - i],
self.add_self_loop,
)
x_feat = layer_forward(
self.gnn_layers[i],
x_feat,
x_target_feat,
sub_graph,
)
if i != self.num_layer - 1:
if framework_name == "dgl":
x_feat = x_feat.flatten(1)
x_feat = F.relu(x_feat)
x_feat = F.dropout(x_feat, self.dropout, training=self.training)
if framework_name == "dgl" and self.mean_output:
out_feat = x_feat.mean(1)
else:
out_feat = x_feat
return out_feat
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch/tensor.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylibwholegraph.binding.wholememory_binding as wmb
import torch
from .utils import (
torch_dtype_to_wholememory_dtype,
wholememory_dtype_to_torch_dtype,
get_file_size,
)
from .utils import str_to_wmb_wholememory_memory_type, str_to_wmb_wholememory_location
from .utils import get_part_file_name, get_part_file_list
from .comm import WholeMemoryCommunicator
from typing import Union, List
from .dlpack_utils import torch_import_from_dlpack
from .wholegraph_env import wrap_torch_tensor, get_wholegraph_env_fns, get_stream
WholeMemoryMemoryType = wmb.WholeMemoryMemoryType
WholeMemoryMemoryLocation = wmb.WholeMemoryMemoryLocation
class WholeMemoryTensor(object):
r"""WholeMemory Tensor"""
def __init__(self, wmb_tensor: wmb.PyWholeMemoryTensor):
self.wmb_tensor = wmb_tensor
@property
def dtype(self):
return wholememory_dtype_to_torch_dtype(self.wmb_tensor.dtype)
def dim(self):
return self.wmb_tensor.dim()
@property
def shape(self):
return self.wmb_tensor.shape
def stride(self):
return self.wmb_tensor.stride()
def storage_offset(self):
return self.wmb_tensor.storage_offset()
def get_comm(self):
return WholeMemoryCommunicator(
self.wmb_tensor.get_wholememory_handle().get_communicator()
)
def gather(self,
indice: torch.Tensor,
*,
force_dtype: Union[torch.dtype, None] = None):
assert indice.dim() == 1
embedding_dim = self.shape[1]
embedding_count = indice.shape[0]
current_cuda_device = "cuda:%d" % (torch.cuda.current_device(),)
output_dtype = (
            force_dtype if force_dtype is not None else self.dtype
)
output_tensor = torch.empty(
[embedding_count, embedding_dim],
device=current_cuda_device,
dtype=output_dtype,
requires_grad=False,
)
wmb.wholememory_gather_op(self.wmb_tensor,
wrap_torch_tensor(indice),
wrap_torch_tensor(output_tensor),
get_wholegraph_env_fns(),
get_stream())
return output_tensor
def scatter(self,
input_tensor: torch.Tensor,
indice: torch.Tensor):
assert indice.dim() == 1
assert input_tensor.dim() == 2
assert indice.shape[0] == input_tensor.shape[0]
assert input_tensor.shape[1] == self.shape[1]
wmb.wholememory_scatter_op(wrap_torch_tensor(input_tensor),
wrap_torch_tensor(indice),
self.wmb_tensor,
get_wholegraph_env_fns(),
get_stream())
def get_sub_tensor(self, starts, ends):
"""
Get sub tensor of WholeMemory Tensor
:param starts: An array of the start indices of each dim
:param ends: An array of the end indices of each dim, -1 means to the last element
:return: WholeMemory Tensor
"""
return WholeMemoryTensor(self.wmb_tensor.get_sub_tensor(starts, ends))
def get_local_tensor(self, host_view: bool = False):
"""Get local tensor of WholeMemory Tensor
:param host_view: Get host view or not, if True, return host tensor, else return device tensor
:return: Tuple of DLPack Tensor and element offset.
"""
if host_view:
return self.wmb_tensor.get_local_tensor(
torch_import_from_dlpack, WholeMemoryMemoryLocation.MlHost, -1
)
else:
return self.wmb_tensor.get_local_tensor(
torch_import_from_dlpack,
WholeMemoryMemoryLocation.MlDevice,
torch.cuda.current_device(),
)
def get_global_tensor(self, host_view: bool = False):
"""Get global tensor of WholeMemory Tensor
:param host_view: Get host view or not, if True, return host tensor, else return device tensor
:return: Tuple of DLPack Tensor and element offset (0 for global tensor).
"""
if host_view:
return self.wmb_tensor.get_global_tensor(
torch_import_from_dlpack, WholeMemoryMemoryLocation.MlHost, -1
)
else:
return self.wmb_tensor.get_global_tensor(
torch_import_from_dlpack,
WholeMemoryMemoryLocation.MlDevice,
torch.cuda.current_device(),
)
def get_all_chunked_tensor(self, host_view: bool = False):
"""Get all chunked tensor of WholeMemory Tensor
:param host_view: Get host view or not, if True, return host tensor, else return device tensor
:return: Tuple of DLPack Tensors and element offsets.
"""
if host_view:
            return self.wmb_tensor.get_all_chunked_tensor(
torch_import_from_dlpack, WholeMemoryMemoryLocation.MlHost, -1
)
else:
            return self.wmb_tensor.get_all_chunked_tensor(
torch_import_from_dlpack,
WholeMemoryMemoryLocation.MlDevice,
torch.cuda.current_device(),
)
def from_filelist(self, filelist: Union[List[str], str]):
"""
Load WholeMemory Tensor from file lists
:param filelist: file list to load from
:return: None
"""
if isinstance(filelist, str):
filelist = [filelist]
self.wmb_tensor.from_filelist(filelist)
def from_file_prefix(self, file_prefix: str, part_count: Union[int, None] = None):
"""
        Load WholeMemory Tensor from files that share the same prefix; each file is named
        "%s_part_%d_of_%d" % (prefix, part_id, part_count)
:param file_prefix: file name prefix
:param part_count: part count of file
:return: None
"""
if part_count is None:
part_count = self.get_comm().get_size()
file_list = get_part_file_list(file_prefix, part_count)
self.from_filelist(file_list)
def local_to_file(self, filename: str):
"""
        Store the local tensor of this WholeMemory Tensor to a file.
        All ranks should call this together, each with a different filename.
:param filename: file name of local tensor file.
:return: None
"""
self.wmb_tensor.to_file(filename)
def to_file_prefix(self, file_prefix: str):
"""
Store WholeMemory Tensor to files with same prefix.
:param file_prefix: file name prefix
:return: None
"""
wm_comm = self.get_comm()
filename = get_part_file_name(
file_prefix, wm_comm.get_rank(), wm_comm.get_size()
)
self.local_to_file(filename)
def create_wholememory_tensor(
comm: WholeMemoryCommunicator,
memory_type: str,
memory_location: str,
sizes: List[int],
dtype: torch.dtype,
strides: List[int],
):
"""
    Create an empty WholeMemory Tensor. Only dim = 1 or 2 is supported for now.
:param comm: WholeMemoryCommunicator
:param memory_type: WholeMemory type, should be continuous, chunked or distributed
:param memory_location: WholeMemory location, should be cpu or cuda
:param sizes: size of the tensor
:param dtype: data type of the tensor
:param strides: strides of the tensor
:return: Allocated WholeMemoryTensor
"""
dim = len(sizes)
if dim < 1 or dim > 2:
raise ValueError("Only dim 1 or 2 is supported now.")
if strides is None:
strides = [1] * dim
strides[0] = sizes[1] if dim == 2 else 1
else:
assert len(strides) == dim
assert strides[-1] == 1
if dim == 2:
assert strides[0] >= sizes[1]
td = wmb.PyWholeMemoryTensorDescription()
td.set_shape(sizes)
td.set_stride(strides)
td.set_dtype(torch_dtype_to_wholememory_dtype(dtype))
wm_memory_type = str_to_wmb_wholememory_memory_type(memory_type)
wm_location = str_to_wmb_wholememory_location(memory_location)
return WholeMemoryTensor(
wmb.create_wholememory_tensor(td, comm.wmb_comm, wm_memory_type, wm_location)
)
def create_wholememory_tensor_from_filelist(
comm: WholeMemoryCommunicator,
memory_type: str,
memory_location: str,
filelist: Union[List[str], str],
dtype: torch.dtype,
last_dim_size: int = 0,
last_dim_strides: int = -1,
):
"""
Create WholeMemory Tensor from list of binary files.
:param comm: WholeMemoryCommunicator
:param memory_type: WholeMemory type, should be continuous, chunked or distributed
:param memory_location: WholeMemory location, should be cpu or cuda
:param filelist: list of binary files
:param dtype: data type of the tensor
    :param last_dim_size: 0 to create a 1-D array; a positive value is the column size of the created matrix
:param last_dim_strides: stride of last_dim, -1 for same as size of last dim.
:return: WholeMemoryTensor
"""
if isinstance(filelist, str):
filelist = [filelist]
element_size = torch.tensor([], dtype=dtype).element_size()
if last_dim_strides == -1:
last_dim_strides = last_dim_size if last_dim_size > 0 else 1
file_entry_size = (
element_size * last_dim_size if last_dim_size > 0 else element_size
)
total_file_size = 0
for filename in filelist:
file_size = get_file_size(filename)
if file_size % file_entry_size != 0:
raise ValueError(
"File %s size is %d not mutlple of %d"
% (filename, file_size, file_entry_size)
)
total_file_size += file_size
total_entry_count = total_file_size // file_entry_size
if last_dim_size == 0:
sizes = [total_entry_count]
strides = [1]
else:
sizes = [total_entry_count, last_dim_size]
strides = [last_dim_strides, 1]
wm_tensor = create_wholememory_tensor(
comm, memory_type, memory_location, sizes, dtype, strides
)
wm_tensor.from_filelist(filelist)
return wm_tensor
def destroy_wholememory_tensor(wm_tensor: WholeMemoryTensor):
"""
Destroy allocated WholeMemory Tensor
:param wm_tensor: WholeMemory Tensor
:return: None
"""
wmb.destroy_wholememory_tensor(wm_tensor.wmb_tensor)
wm_tensor.wmb_tensor = None
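# Minimal usage sketch (comments only, not executed here). It assumes WholeMemory and the
# torch distributed environment have already been initialized and that a
# WholeMemoryCommunicator `comm` has been obtained from the .comm module:
#
#   wm_t = create_wholememory_tensor(comm, "chunked", "cuda", [100000, 128], torch.float32, None)
#   idx = torch.arange(16, dtype=torch.int64, device="cuda")
#   feats = wm_t.gather(idx)          # [16, 128] tensor on the current GPU
#   wm_t.scatter(feats * 2.0, idx)    # write modified rows back
#   destroy_wholememory_tensor(wm_t)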
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch/comm.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed as dist
import torch.utils.dlpack
import pylibwholegraph.binding.wholememory_binding as wmb
from .utils import str_to_wmb_wholememory_memory_type, str_to_wmb_wholememory_location
global_communicator = None
local_node_communicator = None
local_device_communicator = None
all_comm_world_rank = 0
all_comm_world_size = 1
all_comm_local_rank = 0
all_comm_local_size = 1
def set_world_info(world_rank: int, world_size: int, local_rank: int, local_size: int):
"""
    Set the global world's information. This is used to create the commonly used communicators, such as the local
    node communicator, the global communicator, or the local device communicator.
:param world_rank: world rank of current process.
:param world_size: world size
:param local_rank: local rank of current process in current machine node.
:param local_size: local size of each machine node
:return: None
"""
global all_comm_world_rank, all_comm_world_size, all_comm_local_rank, all_comm_local_size
all_comm_world_rank = world_rank
all_comm_world_size = world_size
all_comm_local_rank = local_rank
all_comm_local_size = local_size
class WholeMemoryCommunicator(object):
"""
WholeMemory Communicator.
    You should not create objects of this class directly; use create_group_communicator, get_global_communicator,
    get_local_node_communicator or get_local_device_communicator instead.
"""
def __init__(self, wmb_comm: wmb.PyWholeMemoryComm):
super().__init__()
self.wmb_comm = wmb_comm
def get_rank(self):
"""Get rank of current process in this communicator"""
return self.wmb_comm.get_rank()
def get_size(self):
"""Get world size of this communicator"""
return self.wmb_comm.get_size()
def barrier(self):
"""
Barrier on WholeMemory Communicator.
        This function uses the CUDA stream associated with the internal communicator and synchronizes with the host.
        If you have work on another CUDA stream that you expect to finish before the barrier, you may need to
        synchronize that stream before calling this function.
"""
return self.wmb_comm.barrier()
def support_type_location(self,
memory_type: str,
memory_location: str):
"""
Return True if Communicator supports combination of memory_type and memory_location.
"""
wm_memory_type = str_to_wmb_wholememory_memory_type(memory_type)
wm_location = str_to_wmb_wholememory_location(memory_location)
return self.wmb_comm.support_type_location(wm_memory_type, wm_location)
def destroy(self):
wmb.destroy_communicator(self.wmb_comm)
self.wmb_comm = None
def create_group_communicator(group_size: int = -1, comm_stride: int = 1):
"""Create WholeMemory Communicator.
    For example, 24 ranks with group_size = 4 and comm_stride = 2 will create the following groups:
[0, 2, 4, 6], [1, 3, 5, 7], [8, 10, 12, 14], [9, 11, 13, 15], [16, 18, 20, 22], [17, 19, 21, 23]
:param group_size: Size of each group, -1 means to use all ranks in just one single group.
:param comm_stride: Stride of each rank in each group
:return: WholeMemoryCommunicator
"""
world_size = dist.get_world_size()
if group_size == -1:
group_size = world_size
strided_group_size = group_size * comm_stride
assert world_size % strided_group_size == 0
strided_group_count = world_size // strided_group_size
world_rank = dist.get_rank()
strided_group_idx = world_rank // strided_group_size
idx_in_strided_group = world_rank % strided_group_size
inner_group_idx = idx_in_strided_group % comm_stride
idx_in_group = idx_in_strided_group // comm_stride
wm_uid = wmb.PyWholeMemoryUniqueID()
for strided_group in range(strided_group_count):
for inner_group in range(comm_stride):
group_root_rank = strided_group * strided_group_size + inner_group
if world_rank == group_root_rank:
tmp_wm_uid = wmb.create_unique_id()
else:
tmp_wm_uid = wmb.PyWholeMemoryUniqueID()
uid_th = torch.utils.dlpack.from_dlpack(tmp_wm_uid.__dlpack__())
uid_th_cuda = uid_th.cuda()
dist.broadcast(uid_th_cuda, group_root_rank)
uid_th.copy_(uid_th_cuda.cpu())
if strided_group_idx == strided_group and inner_group_idx == inner_group:
wm_uid_th = torch.utils.dlpack.from_dlpack(wm_uid.__dlpack__())
wm_uid_th.copy_(uid_th)
wm_comm = wmb.create_communicator(wm_uid, idx_in_group, group_size)
return WholeMemoryCommunicator(wm_comm)
def destroy_communicator(wm_comm: WholeMemoryCommunicator):
"""
Destroy WholeMemoryCommunicator
:param wm_comm: WholeMemoryCommunicator to destroy
:return: None
"""
if wm_comm is not None and wm_comm.wmb_comm is not None:
wmb.destroy_communicator(wm_comm.wmb_comm)
wm_comm.wmb_comm = None
def get_global_communicator():
"""
Get the global communicator of this job
:return: WholeMemoryCommunicator that has all GPUs in it.
"""
global global_communicator, local_node_communicator, local_device_communicator
global all_comm_local_size, all_comm_world_size
if global_communicator is None:
global_communicator = create_group_communicator()
if all_comm_local_size == all_comm_world_size:
assert local_node_communicator is None
local_node_communicator = global_communicator
if all_comm_world_size == 1:
assert local_device_communicator is None
local_device_communicator = global_communicator
return global_communicator
def get_local_node_communicator():
"""
Get the local node communicator of this job
:return: WholeMemoryCommunicator that has GPUs in the same node.
"""
global global_communicator, local_node_communicator, local_device_communicator
global all_comm_local_size, all_comm_world_size
if local_node_communicator is None:
local_node_communicator = create_group_communicator(all_comm_local_size)
if all_comm_local_size == all_comm_world_size:
assert global_communicator is None
global_communicator = local_node_communicator
if all_comm_local_size == 1:
assert local_device_communicator is None
local_device_communicator = local_node_communicator
return local_node_communicator
def get_local_device_communicator():
"""
Get the local device communicator of this job
:return: WholeMemoryCommunicator that has only the GPU belonging to current process.
"""
global global_communicator, local_node_communicator, local_device_communicator
global all_comm_local_size, all_comm_world_size
if local_device_communicator is None:
local_device_communicator = create_group_communicator(1)
if all_comm_local_size == 1:
assert local_node_communicator is None
local_node_communicator = local_device_communicator
if all_comm_world_size == 1:
assert global_communicator is None
global_communicator = local_device_communicator
return local_device_communicator
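# Minimal usage sketch (comments only; world_rank/world_size/local_rank/local_size are
# placeholders). It assumes torch.distributed has already been initialized for the job:
#
#   set_world_info(world_rank, world_size, local_rank, local_size)
#   global_comm = get_global_communicator()    # spans all GPUs in the job
#   node_comm = get_local_node_communicator()  # spans the GPUs on this machine node
#   print(global_comm.get_rank(), global_comm.get_size())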
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch/wholegraph_ops.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pylibwholegraph.binding.wholememory_binding as wmb
from .wholegraph_env import (
get_stream,
TorchMemoryContext,
get_wholegraph_env_fns,
wrap_torch_tensor,
)
from typing import Union
import random
def unweighted_sample_without_replacement(
wm_csr_row_ptr_tensor: wmb.PyWholeMemoryTensor,
wm_csr_col_ptr_tensor: wmb.PyWholeMemoryTensor,
center_nodes_tensor: torch.Tensor,
max_sample_count: int,
random_seed: Union[int, None] = None,
need_center_local_output: bool = False,
need_edge_output: bool = False,
):
"""
Unweighted neighborhood sample in CSR WholeGraph
"""
assert wm_csr_row_ptr_tensor.dim() == 1
assert wm_csr_col_ptr_tensor.dim() == 1
assert center_nodes_tensor.dim() == 1
if random_seed is None:
random_seed = random.getrandbits(64)
output_sample_offset_tensor = torch.empty(
center_nodes_tensor.shape[0] + 1, device="cuda", dtype=torch.int
)
output_dest_context = TorchMemoryContext()
output_dest_c_context = output_dest_context.get_c_context()
output_center_localid_context = None
output_center_localid_c_context = 0
output_edge_gid_context = None
output_edge_gid_c_context = 0
if need_center_local_output:
output_center_localid_context = TorchMemoryContext()
output_center_localid_c_context = output_center_localid_context.get_c_context()
if need_edge_output:
output_edge_gid_context = TorchMemoryContext()
output_edge_gid_c_context = output_edge_gid_context.get_c_context()
wmb.csr_unweighted_sample_without_replacement(
wm_csr_row_ptr_tensor,
wm_csr_col_ptr_tensor,
wrap_torch_tensor(center_nodes_tensor),
max_sample_count,
wrap_torch_tensor(output_sample_offset_tensor),
output_dest_c_context,
output_center_localid_c_context,
output_edge_gid_c_context,
random_seed,
get_wholegraph_env_fns(),
get_stream(),
)
if need_edge_output and need_center_local_output:
return (
output_sample_offset_tensor,
output_dest_context.get_tensor(),
output_center_localid_context.get_tensor(),
output_edge_gid_context.get_tensor(),
)
elif need_center_local_output:
return (
output_sample_offset_tensor,
output_dest_context.get_tensor(),
output_center_localid_context.get_tensor(),
)
elif need_edge_output:
return (
output_sample_offset_tensor,
output_dest_context.get_tensor(),
output_edge_gid_context.get_tensor(),
)
else:
return output_sample_offset_tensor, output_dest_context.get_tensor()
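# Illustrative sketch of how the outputs above are typically consumed (assumption, based
# on the "+ 1" sizing: output_sample_offset_tensor holds CSR-style offsets of length
# len(center_nodes) + 1 into the flat sampled-neighbor tensor):
#
#   offsets, neighbors = unweighted_sample_without_replacement(
#       wm_csr_row_ptr, wm_csr_col_ind, center_nodes, max_sample_count=10)
#   # neighbors sampled for center node i: neighbors[offsets[i]:offsets[i + 1]]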
def weighted_sample_without_replacement(
wm_csr_row_ptr_tensor: wmb.PyWholeMemoryTensor,
wm_csr_col_ptr_tensor: wmb.PyWholeMemoryTensor,
wm_csr_weight_ptr_tensor: wmb.PyWholeMemoryTensor,
center_nodes_tensor: torch.Tensor,
max_sample_count: int,
random_seed: Union[int, None] = None,
need_center_local_output: bool = False,
need_edge_output: bool = False,
):
"""
Weighted neighborhood sample in CSR WholeGraph
"""
assert wm_csr_row_ptr_tensor.dim() == 1
assert wm_csr_col_ptr_tensor.dim() == 1
assert wm_csr_weight_ptr_tensor.dim() == 1
assert wm_csr_weight_ptr_tensor.shape[0] == wm_csr_col_ptr_tensor.shape[0]
assert center_nodes_tensor.dim() == 1
if random_seed is None:
random_seed = random.getrandbits(64)
output_sample_offset_tensor = torch.empty(
center_nodes_tensor.shape[0] + 1, device="cuda", dtype=torch.int
)
output_dest_context = TorchMemoryContext()
output_dest_c_context = output_dest_context.get_c_context()
output_center_localid_context = None
output_center_localid_c_context = 0
output_edge_gid_context = None
output_edge_gid_c_context = 0
if need_center_local_output:
output_center_localid_context = TorchMemoryContext()
output_center_localid_c_context = output_center_localid_context.get_c_context()
if need_edge_output:
output_edge_gid_context = TorchMemoryContext()
output_edge_gid_c_context = output_edge_gid_context.get_c_context()
wmb.csr_weighted_sample_without_replacement(
wm_csr_row_ptr_tensor,
wm_csr_col_ptr_tensor,
wm_csr_weight_ptr_tensor,
wrap_torch_tensor(center_nodes_tensor),
max_sample_count,
wrap_torch_tensor(output_sample_offset_tensor),
output_dest_c_context,
output_center_localid_c_context,
output_edge_gid_c_context,
random_seed,
get_wholegraph_env_fns(),
get_stream(),
)
if need_edge_output and need_center_local_output:
return (
output_sample_offset_tensor,
output_dest_context.get_tensor(),
output_center_localid_context.get_tensor(),
output_edge_gid_context.get_tensor(),
)
elif need_center_local_output:
return (
output_sample_offset_tensor,
output_dest_context.get_tensor(),
output_center_localid_context.get_tensor(),
)
elif need_edge_output:
return (
output_sample_offset_tensor,
output_dest_context.get_tensor(),
output_edge_gid_context.get_tensor(),
)
else:
return output_sample_offset_tensor, output_dest_context.get_tensor()
def generate_random_positive_int_cpu(
random_seed, sub_sequence, output_random_value_count
):
output = torch.empty((output_random_value_count,), dtype=torch.int)
wmb.host_generate_random_positive_int(
random_seed, sub_sequence, wrap_torch_tensor(output)
)
return output
def generate_exponential_distribution_negative_float_cpu(
random_seed: int, sub_sequence: int, output_random_value_count: int
):
output = torch.empty((output_random_value_count,), dtype=torch.float)
wmb.host_generate_exponential_distribution_negative_float(
random_seed, sub_sequence, wrap_torch_tensor(output)
)
return output
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch/common_options.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
def add_training_options(argparser: ArgumentParser):
argparser.add_argument(
"-e", "--epochs", type=int, dest="epochs", default=24, help="number of epochs"
)
argparser.add_argument(
"-b",
"--batchsize",
type=int,
dest="batchsize",
default=1024,
help="batch size",
)
argparser.add_argument(
"--lr", type=float, dest="lr", default=0.003, help="learning rate"
)
argparser.add_argument(
"--embedding-memory-type",
dest="embedding_memory_type",
default="chunked",
help="Embedding memory type, should be: continuous, chunked or distributed",
)
argparser.add_argument(
"--cache-type",
dest="cache_type",
default="none",
help="Embedding cache type, should be: none, local_device, local_node or all_devices",
)
argparser.add_argument(
"--cache-ratio",
type=float,
dest="cache_ratio",
default=0.5,
help="cache ratio",
)
argparser.add_argument(
"--use-cpp-ext",
action="store_true",
dest="use_cpp_ext",
default=False,
help="Whether to use cpp extension for pytorch"
)
argparser.add_argument(
"--train-embedding",
action="store_true",
dest="train_embedding",
default=False,
help="Whether to train embedding",
)
def add_common_graph_options(argparser: ArgumentParser):
argparser.add_argument(
"-r",
"--root-dir",
dest="root_dir",
default="dataset",
help="graph dataset root directory.",
)
argparser.add_argument(
"--use-global-embedding",
action="store_true",
dest="use_global_embedding",
default=False,
help="Store embedding across all ranks or only in local node.",
)
argparser.add_argument(
"--feat-dim",
type=int,
dest="feat_dim",
default=100,
help="default feature dim",
)
def add_common_model_options(argparser: ArgumentParser):
argparser.add_argument(
"--hiddensize", type=int, dest="hiddensize", default=256, help="hidden size"
)
argparser.add_argument(
"-l", "--layernum", type=int, dest="layernum", default=3, help="layer number"
)
argparser.add_argument(
"-m",
"--model",
dest="model",
default="sage",
help="model type, valid values are: sage, gcn, gat",
)
argparser.add_argument(
"-f",
"--framework",
dest="framework",
default="cugraph",
help="framework type, valid values are: dgl, pyg, wg, cugraph",
)
argparser.add_argument("--heads", type=int, dest="heads", default=4, help="num heads")
argparser.add_argument(
"-d", "--dropout", type=float, dest="dropout", default=0.5, help="dropout"
)
def add_common_sampler_options(argparser: ArgumentParser):
argparser.add_argument(
"-n",
"--neighbors",
dest="neighbors",
default="30,30,30",
help="train neighboor sample count",
)
argparser.add_argument(
"-s",
"--inferencesample",
type=int,
dest="inferencesample",
default="30",
help="inference sample count, -1 is all",
)
def add_node_classfication_options(argparser: ArgumentParser):
argparser.add_argument(
"-c",
"--classnum",
type=int,
dest="classnum",
default=172,
help="class number",
)
def add_dataloader_options(argparser: ArgumentParser):
argparser.add_argument(
"--pickle-data-path",
dest="pickle_data_path",
default="",
help="training data file path, should be pickled dict",
)
argparser.add_argument(
"-w",
"--dataloaderworkers",
type=int,
dest="dataloaderworkers",
default=0,
help="number of workers for dataloader",
)
def parse_max_neighbors(num_layer, neighbor_str):
neighbor_str_vec = neighbor_str.split(",")
max_neighbors = []
for ns in neighbor_str_vec:
max_neighbors.append(int(ns))
assert len(max_neighbors) == 1 or len(max_neighbors) == num_layer
if len(max_neighbors) != num_layer:
for i in range(1, num_layer):
max_neighbors.append(max_neighbors[0])
# max_neighbors.reverse()
return max_neighbors
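# Quick illustration of parse_max_neighbors (not part of the module API):
#   parse_max_neighbors(3, "10,20,30") -> [10, 20, 30]
#   parse_max_neighbors(3, "30")       -> [30, 30, 30]  (a single value is repeated for every layer)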
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch/__init__.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .comm import (
WholeMemoryCommunicator,
create_group_communicator,
destroy_communicator,
)
from .comm import (
get_global_communicator,
get_local_node_communicator,
get_local_device_communicator,
)
from .embedding import (
WholeMemoryOptimizer,
create_wholememory_optimizer,
destroy_wholememory_optimizer,
)
from .embedding import (
WholeMemoryCachePolicy,
create_builtin_cache_policy,
create_wholememory_cache_policy,
destroy_wholememory_cache_policy,
)
from .embedding import (
WholeMemoryEmbedding,
create_embedding,
create_embedding_from_filelist,
destroy_embedding,
)
from .embedding import WholeMemoryEmbeddingModule
from .initialize import init, init_torch_env, init_torch_env_and_create_wm_comm, finalize
from .tensor import (
WholeMemoryTensor,
create_wholememory_tensor,
create_wholememory_tensor_from_filelist,
destroy_wholememory_tensor,
)
from .graph_structure import GraphStructure
from .utils import get_part_file_name, get_part_file_list
from .distributed_launch import add_distributed_launch_options, distributed_launch
from .distributed_launch import get_rank, get_world_size, get_local_rank, get_local_size
from .common_options import (
add_common_graph_options,
add_common_model_options,
add_common_sampler_options,
)
from .common_options import (
add_training_options,
add_dataloader_options,
add_node_classfication_options,
)
from .gnn_model import set_framework, create_gnn_layers, create_sub_graph, HomoGNNModel
from .data_loader import (
create_node_claffication_datasets,
get_train_dataloader,
get_valid_test_dataloader,
)
from .wholegraph_env import compile_cpp_extension
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch/dlpack_utils.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.utils.dlpack
def torch_import_from_dlpack(dp):
return torch.utils.dlpack.from_dlpack(dp.__dlpack__())
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch/utils.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylibwholegraph.binding.wholememory_binding as wmb
import torch
import os
WholeMemoryDataType = wmb.WholeMemoryDataType
def torch_dtype_to_wholememory_dtype(torch_dtype: torch.dtype):
"""
Convert torch.dtype to WholeMemoryDataType
:param torch_dtype: torch.dtype
:return: WholeMemoryDataType
"""
if torch_dtype == torch.float:
return WholeMemoryDataType.DtFloat
elif torch_dtype == torch.half:
return WholeMemoryDataType.DtHalf
elif torch_dtype == torch.double:
return WholeMemoryDataType.DtDouble
elif torch_dtype == torch.bfloat16:
return WholeMemoryDataType.DtBF16
elif torch_dtype == torch.int:
return WholeMemoryDataType.DtInt
elif torch_dtype == torch.int64:
return WholeMemoryDataType.DtInt64
elif torch_dtype == torch.int16:
return WholeMemoryDataType.DtInt16
elif torch_dtype == torch.int8:
return WholeMemoryDataType.DtInt8
else:
raise ValueError("torch_dtype: %s not supported" % (torch_dtype,))
def wholememory_dtype_to_torch_dtype(wm_dtype: WholeMemoryDataType):
"""
Convert WholeMemoryDataType to torch.dtype
:param wm_dtype: WholeMemoryDataType
:return: torch.dtype
"""
if wm_dtype == WholeMemoryDataType.DtFloat:
return torch.float
elif wm_dtype == WholeMemoryDataType.DtHalf:
return torch.half
elif wm_dtype == WholeMemoryDataType.DtDouble:
return torch.double
elif wm_dtype == WholeMemoryDataType.DtBF16:
return torch.bfloat16
elif wm_dtype == WholeMemoryDataType.DtInt:
return torch.int
elif wm_dtype == WholeMemoryDataType.DtInt64:
return torch.int64
elif wm_dtype == WholeMemoryDataType.DtInt16:
return torch.int16
elif wm_dtype == WholeMemoryDataType.DtInt8:
return torch.int8
else:
raise ValueError("WholeMemoryMemory: %s not supported" % (int(wm_dtype),))
def get_file_size(filename: str):
"""
Get file size.
:param filename: file name
:return: size of file
"""
if not os.path.isfile(filename):
raise ValueError("File %s not found or not file" % (filename,))
if not os.access(filename, os.R_OK):
raise ValueError("File %s not readable" % (filename,))
file_size = os.path.getsize(filename)
return file_size
def str_to_wmb_wholememory_memory_type(str_wmb_type: str):
if str_wmb_type == "continuous":
return wmb.WholeMemoryMemoryType.MtContinuous
elif str_wmb_type == "chunked":
return wmb.WholeMemoryMemoryType.MtChunked
elif str_wmb_type == "distributed":
return wmb.WholeMemoryMemoryType.MtDistributed
else:
raise ValueError(
"WholeMemory type %s not supported, should be (continuous, chunked, distributed)"
% (str_wmb_type,)
)
def str_to_wmb_wholememory_location(str_wmb_location: str):
if str_wmb_location == "cuda":
return wmb.WholeMemoryMemoryLocation.MlDevice
elif str_wmb_location == "cpu":
return wmb.WholeMemoryMemoryLocation.MlHost
else:
raise ValueError(
"WholeMemory location %s not supported, should be (cuda, cpu)"
% (str_wmb_location,)
)
def str_to_wmb_wholememory_access_type(str_wmb_access: str):
if str_wmb_access == "readonly" or str_wmb_access == "ro":
return wmb.WholeMemoryAccessType.AtReadOnly
elif str_wmb_access == "readwrite" or str_wmb_access == "rw":
return wmb.WholeMemoryAccessType.AtReadWrite
else:
raise ValueError(
"WholeMemory access %s not supported, should be (readonly, ro, readwrite, rw)"
% (str_wmb_access,)
)
def str_to_wmb_wholememory_optimizer_type(str_wmb_optimizer: str):
if str_wmb_optimizer == "sgd":
return wmb.WholeMemoryOptimizerType.OptSgd
elif str_wmb_optimizer == "adam":
return wmb.WholeMemoryOptimizerType.OptLazyAdam
elif str_wmb_optimizer == "adagrad":
return wmb.WholeMemoryOptimizerType.OptAdaGrad
elif str_wmb_optimizer == "rmsprop":
return wmb.WholeMemoryOptimizerType.OptRmsProp
else:
raise ValueError(
"WholeMemory optimizer %s not supported, should be (sgd, adam, adagrad, rmsprop)"
% (str_wmb_optimizer,)
)
def get_part_file_name(prefix: str, part_id: int, part_count: int):
return "%s_part_%d_of_%d" % (prefix, part_id, part_count)
def get_part_file_list(prefix: str, part_count: int):
filelist = []
for part_id in range(part_count):
filelist.append("%s_part_%d_of_%d" % (prefix, part_id, part_count))
return filelist
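# Quick illustration of the part-file naming helpers above (not part of the module API):
#   get_part_file_name("node_feat.bin", 0, 4) -> "node_feat.bin_part_0_of_4"
#   get_part_file_list("node_feat.bin", 2)    -> ["node_feat.bin_part_0_of_2",
#                                                 "node_feat.bin_part_1_of_2"]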
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch/wholememory_ops.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pylibwholegraph.binding.wholememory_binding as wmb
from .wholegraph_env import (
get_stream,
get_wholegraph_env_fns,
wrap_torch_tensor,
)
from .utils import wholememory_dtype_to_torch_dtype
def wholememory_gather_forward_functor(
wholememory_tensor: wmb.PyWholeMemoryTensor,
indices_tensor: torch.Tensor,
requires_grad=False,
torch_output_dtype=None,
):
"""
Wrapper functor for gather op of WholeMemory Tensor
:param wholememory_tensor: PyWholeMemoryTensor
:param indices_tensor: Indices to gather from
:param requires_grad: if requires gradients
:param torch_output_dtype: output dtype, None for same as wholememory_tensor
:return: Gathered tensor
"""
assert indices_tensor.dim() == 1
assert indices_tensor.dtype == torch.int32 or indices_tensor.dtype == torch.int64
if torch_output_dtype is None:
torch_output_dtype = wholememory_dtype_to_torch_dtype(wholememory_tensor.dtype)
output_tensor = torch.empty(
[indices_tensor.shape[0], wholememory_tensor.shape[1]],
device="cuda",
dtype=torch_output_dtype,
requires_grad=requires_grad,
)
wmb.wholememory_gather_op(
wholememory_tensor,
wrap_torch_tensor(indices_tensor),
wrap_torch_tensor(output_tensor),
get_wholegraph_env_fns(),
get_stream(),
)
return output_tensor
def wholememory_scatter_functor(
input_tensor: torch.Tensor,
indices_tensor: torch.Tensor,
wholememory_tensor: wmb.PyWholeMemoryTensor,
):
"""
Wrapper functor for scatter op of WholeMemory Tensor
    :param input_tensor: Input tensor to scatter to WholeMemory Tensor
:param indices_tensor: Indices to scatter to
:param wholememory_tensor: WholeMemory Tensor
:return: None
"""
assert indices_tensor.dim() == 1
assert indices_tensor.dtype == torch.int32 or indices_tensor.dtype == torch.int64
wmb.wholememory_scatter_op(
wrap_torch_tensor(input_tensor),
wrap_torch_tensor(indices_tensor),
wholememory_tensor,
get_wholegraph_env_fns(),
get_stream(),
)
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch/data_loader.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import pickle
from torch.utils.data import Dataset
class NodeClassificationDataset(Dataset):
def __init__(self, raw_dataset):
self.dataset = raw_dataset
def __getitem__(self, index):
return self.dataset[index]
def __len__(self):
return len(self.dataset)
def create_node_claffication_datasets(pickle_data_filename: str):
with open(pickle_data_filename, "rb") as f:
data_and_label = pickle.load(f)
train_data = {
"idx": data_and_label["train_idx"],
"label": data_and_label["train_label"],
}
valid_data = {
"idx": data_and_label["valid_idx"],
"label": data_and_label["valid_label"],
}
test_data = {
"idx": data_and_label["test_idx"],
"label": data_and_label["test_label"],
}
train_dataset = list(
list(zip(train_data["idx"], train_data["label"].astype(np.int64)))
)
valid_dataset = list(
list(zip(valid_data["idx"], valid_data["label"].astype(np.int64)))
)
test_dataset = list(
list(zip(test_data["idx"], test_data["label"].astype(np.int64)))
)
return (
NodeClassificationDataset(train_dataset),
NodeClassificationDataset(valid_dataset),
NodeClassificationDataset(test_dataset),
)
def get_train_dataloader(
train_dataset,
batch_size: int,
*,
replica_id: int = 0,
num_replicas: int = 1,
num_workers: int = 0
):
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset,
num_replicas=num_replicas,
rank=replica_id,
shuffle=True,
drop_last=True,
)
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
        persistent_workers=True if num_workers > 0 else False,
sampler=train_sampler,
)
return train_dataloader
def get_valid_test_dataloader(
valid_test_dataset, batch_size: int, *, num_workers: int = 0
):
valid_test_sampler = torch.utils.data.distributed.DistributedSampler(
valid_test_dataset, num_replicas=1, rank=0, shuffle=False, drop_last=False
)
valid_test_dataloader = torch.utils.data.DataLoader(
valid_test_dataset,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
sampler=valid_test_sampler,
)
return valid_test_dataloader
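# Minimal usage sketch (comments only; the pickle path, rank and world_size are
# placeholders). The pickle file is expected to contain train/valid/test indices and
# labels as produced by the dataset conversion script:
#
#   train_ds, valid_ds, test_ds = create_node_claffication_datasets("ogbn_papers100M_data_and_label.pkl")
#   train_loader = get_train_dataloader(train_ds, batch_size=1024,
#                                       replica_id=rank, num_replicas=world_size)
#   valid_loader = get_valid_test_dataloader(valid_ds, batch_size=1024)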
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch/cugraphops/sage_conv.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.nn import Linear
from pylibcugraphops.pytorch.operators import agg_concat_n2n as SAGEConvAgg
from pylibcugraphops.pytorch import SampledCSC
class CuGraphSAGEConv(torch.nn.Module): # pragma: no cover
r"""The GraphSAGE operator from the `"Inductive Representation Learning on
Large Graphs" <https://arxiv.org/abs/1706.02216>`_ paper.
    :class:`CuGraphSAGEConv` is an optimized version of
    :class:`~torch_geometric.nn.conv.SAGEConv` based on the :obj:`cugraph-ops`
    package that fuses message passing computation for accelerated execution
and lower memory footprint.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
aggr: str = "mean",
normalize: bool = False,
root_weight: bool = True,
project: bool = False,
bias: bool = True,
):
super().__init__()
if aggr not in ["mean", "sum", "min", "max"]:
raise ValueError(
f"Aggregation function must be either 'mean', "
f"'sum', 'min' or 'max' (got '{aggr}')"
)
self.in_channels = in_channels
self.out_channels = out_channels
self.aggr = aggr
self.normalize = normalize
self.root_weight = root_weight
self.project = project
if self.project:
self.pre_lin = Linear(in_channels, in_channels, bias=True)
if self.root_weight:
self.lin = Linear(2 * in_channels, out_channels, bias=bias)
else:
self.lin = Linear(in_channels, out_channels, bias=bias)
self.reset_parameters()
def reset_parameters(self):
gain = torch.nn.init.calculate_gain("relu")
torch.nn.init.xavier_uniform_(self.lin.weight, gain=gain)
if self.project:
torch.nn.init.xavier_uniform_(self.pre_lin.weight, gain=gain)
torch.nn.init.xavier_uniform_(self.lin.weight, gain=gain)
def forward(
self,
x: Tensor,
csr_row_ptr: Tensor,
csr_col_ind: Tensor,
max_num_neighbors: int,
) -> Tensor:
graph = SampledCSC(csr_row_ptr, csr_col_ind, max_num_neighbors, x.shape[0])
if self.project:
x = self.pre_lin(x).relu()
out = SAGEConvAgg(x, graph, self.aggr)
if self.root_weight:
out = self.lin(out)
else:
out = self.lin(out[:, : self.in_channels])
if self.normalize:
out = F.normalize(out, p=2.0, dim=-1)
return out
def __repr__(self) -> str:
return (
f"{self.__class__.__name__}({self.in_channels}, "
f"{self.out_channels}, aggr={self.aggr})"
)
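# Minimal usage sketch (comments only; shapes are illustrative and assume the cugraph-ops
# sampled-CSC convention that csr_row_ptr has one entry per destination node plus one and
# csr_col_ind indexes rows of the source feature matrix x):
#
#   conv = CuGraphSAGEConv(in_channels=128, out_channels=64).cuda()
#   x = torch.randn(num_src_nodes, 128, device="cuda")
#   out = conv(x, csr_row_ptr, csr_col_ind, max_num_neighbors=10)
#   # out: [num_dst_nodes, 64]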
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/pylibwholegraph/torch/cugraphops/gat_conv.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import Tensor
from torch.nn import Linear, Parameter
from pylibcugraphops.pytorch.operators import mha_gat_n2n as GATConvAgg
from pylibcugraphops.pytorch import SampledCSC
class CuGraphGATConv(torch.nn.Module): # pragma: no cover
r"""The graph attentional operator from the `"Graph Attention Networks"
<https://arxiv.org/abs/1710.10903>`_ paper.
:class:`CuGraphGATConv` is an optimized version of
:class:`~torch_geometric.nn.conv.GATConv` based on the :obj:`cugraph-ops`
package that fuses message passing computation for accelerated execution
and lower memory footprint.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
heads: int = 1,
concat: bool = True,
negative_slope: float = 0.2,
bias: bool = True,
):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.heads = heads
self.concat = concat
self.negative_slope = negative_slope
self.lin = Linear(in_channels, heads * out_channels, bias=False)
self.att = Parameter(torch.Tensor(2 * heads * out_channels))
if bias and concat:
self.bias = Parameter(torch.Tensor(heads * out_channels))
elif bias and not concat:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter("bias", None)
self.reset_parameters()
def reset_parameters(self):
gain = torch.nn.init.calculate_gain("relu")
torch.nn.init.xavier_normal_(self.lin.weight, gain=gain)
torch.nn.init.xavier_normal_(
self.att.view(2, self.heads, self.out_channels)[0, :, :], gain=gain
)
torch.nn.init.xavier_normal_(
self.att.view(2, self.heads, self.out_channels)[1, :, :], gain=gain
)
        if self.bias is not None:
            torch.nn.init.zeros_(self.bias)
def forward(
self,
x: Tensor,
csr_row_ptr: Tensor,
csr_col_ind: Tensor,
max_num_neighbors: int,
) -> Tensor:
graph = SampledCSC(csr_row_ptr, csr_col_ind, max_num_neighbors, x.shape[0])
x = self.lin(x)
out = GATConvAgg(
x,
self.att,
graph,
self.heads,
"LeakyReLU",
self.negative_slope,
self.concat,
)
if self.bias is not None:
out = out + self.bias
return out
def __repr__(self) -> str:
return (
f"{self.__class__.__name__}({self.in_channels}, "
f"{self.out_channels}, heads={self.heads})"
)
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/examples/ogbn_papers100m_convert.py
|
import argparse
import os
import numpy as np
from scipy.sparse import coo_matrix
import pickle
from ogb.nodeproppred import NodePropPredDataset
def save_array(np_array, save_path, array_file_name):
array_full_path = os.path.join(save_path, array_file_name)
with open(array_full_path, 'wb') as f:
np_array.tofile(f)
def convert_papers100m_dataset(args):
ogb_root = args.ogb_root_dir
dataset = NodePropPredDataset(name='ogbn-papers100M', root=ogb_root)
graph, label = dataset[0]
split_idx = dataset.get_idx_split()
train_idx, valid_idx, test_idx = (
split_idx["train"],
split_idx["valid"],
split_idx["test"],
)
train_label = label[train_idx]
valid_label = label[valid_idx]
test_label = label[test_idx]
data_and_label = {
"train_idx": train_idx,
"valid_idx": valid_idx,
"test_idx": test_idx,
"train_label": train_label,
"valid_label": valid_label,
"test_label": test_label,
}
num_nodes = graph["num_nodes"]
edge_index = graph["edge_index"]
node_feat = graph["node_feat"].astype(np.dtype(args.node_feat_format))
if not os.path.exists(args.convert_dir):
print(f"creating directory {args.convert_dir}...")
os.makedirs(args.convert_dir)
print("saving idx and labels...")
with open(
os.path.join(args.convert_dir, 'ogbn_papers100M_data_and_label.pkl'), "wb"
) as f:
pickle.dump(data_and_label, f)
print("saving node feature...")
with open(
os.path.join(args.convert_dir, 'node_feat.bin'), "wb"
) as f:
node_feat.tofile(f)
print("converting graph to csr...")
assert len(edge_index.shape) == 2
assert edge_index.shape[0] == 2
coo_src_ids = edge_index[0, :].astype(np.int32)
coo_dst_ids = edge_index[1, :].astype(np.int32)
if args.add_reverse_edges:
arg_graph_src = np.concatenate([coo_src_ids, coo_dst_ids])
arg_graph_dst = np.concatenate([coo_dst_ids, coo_src_ids])
else:
arg_graph_src = coo_src_ids
arg_graph_dst = coo_dst_ids
values = np.arange(len(arg_graph_src), dtype='int64')
coo_graph = coo_matrix((values, (arg_graph_src, arg_graph_dst)), shape=(num_nodes, num_nodes))
csr_graph = coo_graph.tocsr()
csr_row_ptr = csr_graph.indptr.astype(dtype='int64')
csr_col_ind = csr_graph.indices.astype(dtype='int32')
print("saving csr graph...")
save_array(csr_row_ptr, args.convert_dir, 'homograph_csr_row_ptr')
save_array(csr_col_ind, args.convert_dir, 'homograph_csr_col_idx')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--ogb_root_dir', type=str, default='dataset',
                        help='root dir containing the ogb datasets')
parser.add_argument('--convert_dir', type=str, default='dataset_papers100m_converted',
help='output dir containing converted datasets')
parser.add_argument('--node_feat_format', type=str, default='float32',
choices=['float32', 'float16'],
help='save format of node feature')
    parser.add_argument('--add_reverse_edges',
                        type=lambda x: str(x).lower() in ('true', '1', 'yes'),
                        default=True,
                        help='whether to add reverse edges')
args = parser.parse_args()
convert_papers100m_dataset(args)
| 0 |
rapidsai_public_repos/wholegraph/python/pylibwholegraph
|
rapidsai_public_repos/wholegraph/python/pylibwholegraph/examples/node_classfication.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import time
import argparse
import apex
import torch
from apex.parallel import DistributedDataParallel as DDP
import pylibwholegraph.torch as wgth
argparser = argparse.ArgumentParser()
wgth.add_distributed_launch_options(argparser)
wgth.add_training_options(argparser)
wgth.add_common_graph_options(argparser)
wgth.add_common_model_options(argparser)
wgth.add_common_sampler_options(argparser)
wgth.add_node_classfication_options(argparser)
wgth.add_dataloader_options(argparser)
argparser.add_argument(
"--fp16_embedding", action="store_true", dest="fp16_mbedding", default=False, help="Whether to use fp16 embedding"
)
args = argparser.parse_args()
def valid_test(dataloader, model, name):
total_correct = 0
total_valid_sample = 0
if wgth.get_rank() == 0:
print("%s..." % (name,))
for i, (idx, label) in enumerate(dataloader):
label = torch.reshape(label, (-1,)).cuda()
model.eval()
logits = model(idx)
pred = torch.argmax(logits, 1)
correct = (pred == label).sum()
total_correct += correct.cpu()
total_valid_sample += label.shape[0]
if wgth.get_rank() == 0:
print(
"[%s] [%s] accuracy=%5.2f%%"
% (
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
name,
100.0 * total_correct / total_valid_sample,
)
)
def valid(valid_dataloader, model):
valid_test(valid_dataloader, model, "VALID")
def test(test_dataset, model):
test_dataloader = wgth.get_valid_test_dataloader(test_dataset, args.batchsize)
valid_test(test_dataloader, model, "TEST")
def train(train_data, valid_data, model, optimizer, wm_optimizer, global_comm):
if wgth.get_rank() == 0:
print("start training...")
train_dataloader = wgth.get_train_dataloader(
train_data,
args.batchsize,
replica_id=wgth.get_rank(),
num_replicas=wgth.get_world_size(),
num_workers=args.dataloaderworkers,
)
valid_dataloader = wgth.get_valid_test_dataloader(valid_data, args.batchsize)
valid(valid_dataloader, model)
train_step = 0
epoch = 0
loss_fcn = torch.nn.CrossEntropyLoss()
train_start_time = time.time()
while epoch < args.epochs:
for i, (idx, label) in enumerate(train_dataloader):
label = torch.reshape(label, (-1,)).cuda()
optimizer.zero_grad()
model.train()
logits = model(idx)
loss = loss_fcn(logits, label)
loss.backward()
optimizer.step()
if wm_optimizer is not None:
wm_optimizer.step(args.lr * 0.1)
if wgth.get_rank() == 0 and train_step % 100 == 0:
print(
"[%s] [LOSS] step=%d, loss=%f"
% (
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
train_step,
loss.cpu().item(),
)
)
train_step = train_step + 1
epoch = epoch + 1
global_comm.barrier()
train_end_time = time.time()
train_time = train_end_time - train_start_time
if wgth.get_rank() == 0:
print(
"[%s] [TRAIN_TIME] train time is %.2f seconds"
% (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), train_time)
)
print(
"[EPOCH_TIME] %.2f seconds."
% ((train_end_time - train_start_time) / args.epochs,)
)
valid(valid_dataloader, model)
def main_func():
print(f"Rank={wgth.get_rank()}, local_rank={wgth.get_local_rank()}")
global_comm, local_comm = wgth.init_torch_env_and_create_wm_comm(
wgth.get_rank(),
wgth.get_world_size(),
wgth.get_local_rank(),
wgth.get_local_size(),
)
if args.use_cpp_ext:
wgth.compile_cpp_extension()
train_ds, valid_ds, test_ds = wgth.create_node_claffication_datasets(
args.pickle_data_path
)
graph_structure = wgth.GraphStructure()
graph_structure_wholememory_type = "chunked"
graph_structure_wholememory_location = "cuda"
graph_comm = local_comm
if global_comm.get_size() != local_comm.get_size() and global_comm.support_type_location("continuous", "cuda"):
print("Using global communicator for graph structure.")
graph_comm = global_comm
graph_structure_wholememory_type = "continuous"
graph_structure_wholememory_location = "cuda"
if not args.use_global_embedding:
args.use_global_embedding = True
print("Changing to using global communicator for embedding...")
if args.embedding_memory_type == "chunked":
print("Changing to continuous wholememory for embedding...")
args.embedding_memory_type = "continuous"
csr_row_ptr_wm_tensor = wgth.create_wholememory_tensor_from_filelist(
graph_comm,
graph_structure_wholememory_type,
graph_structure_wholememory_location,
os.path.join(args.root_dir, "homograph_csr_row_ptr"),
torch.int64,
)
csr_col_ind_wm_tensor = wgth.create_wholememory_tensor_from_filelist(
graph_comm,
graph_structure_wholememory_type,
graph_structure_wholememory_location,
os.path.join(args.root_dir, "homograph_csr_col_idx"),
torch.int,
)
graph_structure.set_csr_graph(csr_row_ptr_wm_tensor, csr_col_ind_wm_tensor)
feature_comm = global_comm if args.use_global_embedding else local_comm
embedding_wholememory_type = args.embedding_memory_type
embedding_wholememory_location = (
"cpu" if args.cache_type != "none" or args.cache_ratio == 0.0 else "cuda"
)
if args.cache_ratio == 0.0:
args.cache_type = "none"
access_type = "readonly" if args.train_embedding is False else "readwrite"
if wgth.get_rank() == 0:
print(
f"graph_structure: type={graph_structure_wholememory_type}, "
f"location={graph_structure_wholememory_location}\n"
f"embedding: type={embedding_wholememory_type}, location={embedding_wholememory_location}, "
f"cache_type={args.cache_type}, cache_ratio={args.cache_ratio}, "
f"trainable={args.train_embedding}"
)
cache_policy = wgth.create_builtin_cache_policy(
args.cache_type,
embedding_wholememory_type,
embedding_wholememory_location,
access_type,
args.cache_ratio,
)
wm_optimizer = (
None
if args.train_embedding is False
else wgth.create_wholememory_optimizer("adam", {})
)
    embedding_dtype = torch.float32 if not args.fp16_embedding else torch.float16
if wm_optimizer is None:
node_feat_wm_embedding = wgth.create_embedding_from_filelist(
feature_comm,
embedding_wholememory_type,
embedding_wholememory_location,
os.path.join(args.root_dir, "node_feat.bin"),
embedding_dtype,
args.feat_dim,
optimizer=wm_optimizer,
cache_policy=cache_policy,
)
else:
node_feat_wm_embedding = wgth.create_embedding(
feature_comm,
embedding_wholememory_type,
embedding_wholememory_location,
embedding_dtype,
[graph_structure.node_count, args.feat_dim],
optimizer=wm_optimizer,
cache_policy=cache_policy,
random_init=True,
)
wgth.set_framework(args.framework)
model = wgth.HomoGNNModel(graph_structure, node_feat_wm_embedding, args)
model.cuda()
model = DDP(model, delay_allreduce=True)
optimizer = apex.optimizers.FusedAdam(model.parameters(), lr=args.lr)
train(train_ds, valid_ds, model, optimizer, wm_optimizer, global_comm)
test(test_ds, model)
wgth.finalize()
if __name__ == "__main__":
wgth.distributed_launch(args, main_func)
| 0 |
rapidsai_public_repos/wholegraph/conda
|
rapidsai_public_repos/wholegraph/conda/environments/all_cuda-120_arch-x86_64.yaml
|
# This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- pytorch
- conda-forge
- nvidia
dependencies:
- breathe
- c-compiler
- clang-tools=16.0.0
- clangxx=16.0.0
- cmake>=3.26.4
- cuda-cudart-dev
- cuda-nvcc
- cuda-nvtx
- cuda-version=12.0
- cudnn=8.4
- cxx-compiler
- cython
- doxygen=1.8.20
- gcc_linux-64=11.*
- gitpython
- graphviz
- ipykernel
- ipython
- libraft-headers==23.12.*
- librmm==23.12.*
- nanobind>=0.2.0
- nbsphinx
- nccl
- ninja
- numpy>=1.17
- numpydoc
- pre-commit
- pydata-sphinx-theme
- pytest
- pytest-forked
- pytest-xdist
- python>=3.9,<3.11
- recommonmark
- scikit-build
- sphinx-copybutton
- sphinx-markdown-tables
- sphinx<6
- sphinxcontrib-websupport
- sysroot_linux-64=2.17
name: all_cuda-120_arch-x86_64
| 0 |
rapidsai_public_repos/wholegraph/conda
|
rapidsai_public_repos/wholegraph/conda/environments/all_cuda-118_arch-x86_64.yaml
|
# This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- pytorch
- conda-forge
- nvidia
dependencies:
- breathe
- c-compiler
- clang-tools=16.0.0
- clangxx=16.0.0
- cmake>=3.26.4
- cuda-nvtx=11.8
- cudatoolkit=11.8
- cudnn=8.4
- cxx-compiler
- cython
- doxygen=1.8.20
- gcc_linux-64=11.*
- gitpython
- graphviz
- ipykernel
- ipython
- libraft-headers==23.12.*
- librmm==23.12.*
- nanobind>=0.2.0
- nbsphinx
- nccl
- ninja
- numpy>=1.17
- numpydoc
- nvcc_linux-64=11.8
- pre-commit
- pydata-sphinx-theme
- pytest
- pytest-forked
- pytest-xdist
- python>=3.9,<3.11
- pytorch-cuda=11.8
- pytorch=2.0.0
- recommonmark
- scikit-build
- sphinx-copybutton
- sphinx-markdown-tables
- sphinx<6
- sphinxcontrib-websupport
- sysroot_linux-64=2.17
name: all_cuda-118_arch-x86_64
| 0 |
rapidsai_public_repos/wholegraph/conda/recipes
|
rapidsai_public_repos/wholegraph/conda/recipes/pylibwholegraph/conda_build_config.yaml
|
c_compiler_version:
- 11
cxx_compiler_version:
- 11
cuda_compiler:
- cuda-nvcc
cuda11_compiler:
- nvcc
cmake_version:
- ">=3.26.4"
scikit_build_version:
- ">=0.13.1"
sysroot_version:
- "2.17"
| 0 |
rapidsai_public_repos/wholegraph/conda/recipes
|
rapidsai_public_repos/wholegraph/conda/recipes/pylibwholegraph/build.sh
|
#!/usr/bin/env bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
CMAKE_EXTRA_ARGS="--cmake-args=\"-DBUILD_OPS_WITH_TORCH_C10_API=OFF\""
./build.sh pylibwholegraph --allgpuarch -v ${CMAKE_EXTRA_ARGS}
| 0 |
rapidsai_public_repos/wholegraph/conda/recipes
|
rapidsai_public_repos/wholegraph/conda/recipes/pylibwholegraph/meta.yaml
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') + environ.get('VERSION_SUFFIX', '') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %}
{% set cuda_major = cuda_version.split('.')[0] %}
{% set py_version = environ['CONDA_PY'] %}
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: pylibwholegraph
version: {{ version }}
source:
path: ../../..
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_py{{ py_version }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
script_env:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
- AWS_SESSION_TOKEN
- CMAKE_C_COMPILER_LAUNCHER
- CMAKE_CUDA_COMPILER_LAUNCHER
- CMAKE_CXX_COMPILER_LAUNCHER
- CMAKE_GENERATOR
- PARALLEL_LEVEL
- RAPIDS_BUILD_TYPE
- SCCACHE_BUCKET
- SCCACHE_IDLE_TIMEOUT
- SCCACHE_REGION
- SCCACHE_S3_KEY_PREFIX=pylibwholegraph-aarch64 # [aarch64]
- SCCACHE_S3_KEY_PREFIX=pylibwholegraph-linux64 # [linux64]
- SCCACHE_S3_USE_SSL
- SCCACHE_S3_NO_CREDENTIALS
ignore_run_exports_from:
- {{ compiler('cuda') }}
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- cmake {{ cmake_version }}
- ninja
- doxygen =1.8.20
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- cython
- libwholegraph ={{ version }}
- python
- scikit-build {{ scikit_build_version }}
run:
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- libwholegraph ={{ version }}
- python
about:
home: https://rapids.ai/
summary: pylibwholegraph library
| 0 |
rapidsai_public_repos/wholegraph/conda/recipes
|
rapidsai_public_repos/wholegraph/conda/recipes/libwholegraph/install_libwholegraph_tests.sh
|
#!/bin/bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
cmake --install cpp/build --component testing
| 0 |
rapidsai_public_repos/wholegraph/conda/recipes
|
rapidsai_public_repos/wholegraph/conda/recipes/libwholegraph/conda_build_config.yaml
|
c_compiler_version:
- 11
cxx_compiler_version:
- 11
cuda_compiler:
- cuda-nvcc
cuda11_compiler:
- nvcc
cmake_version:
- ">=3.26.4"
doxygen_version:
- ">=1.8.11"
nccl_version:
- ">=2.9.9"
gtest_version:
- ">=1.13.0"
gmock_version:
- ">=1.13.0"
sysroot_version:
- "2.17"
| 0 |
rapidsai_public_repos/wholegraph/conda/recipes
|
rapidsai_public_repos/wholegraph/conda/recipes/libwholegraph/install_libwholegraph.sh
|
#!/bin/bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
cmake --install cpp/build
| 0 |
rapidsai_public_repos/wholegraph/conda/recipes
|
rapidsai_public_repos/wholegraph/conda/recipes/libwholegraph/build.sh
|
#!/usr/bin/env bash
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
./build.sh -n libwholegraph tests -v --allgpuarch
| 0 |
rapidsai_public_repos/wholegraph/conda/recipes
|
rapidsai_public_repos/wholegraph/conda/recipes/libwholegraph/meta.yaml
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') + environ.get('VERSION_SUFFIX', '') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %}
{% set cuda_major = cuda_version.split('.')[0] %}
{% set cuda_spec = ">=" + cuda_major ~ ",<" + (cuda_major | int + 1) ~ ".0a0" %} # i.e. >=11,<12.0a0
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: libwholegraph-split
source:
path: ../../..
build:
script_env:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
- AWS_SESSION_TOKEN
- BUILD_ABI
- CMAKE_C_COMPILER_LAUNCHER
- CMAKE_CUDA_COMPILER_LAUNCHER
- CMAKE_CXX_COMPILER_LAUNCHER
- CMAKE_GENERATOR
- PARALLEL_LEVEL
- SCCACHE_BUCKET
- SCCACHE_IDLE_TIMEOUT
- SCCACHE_REGION
- SCCACHE_S3_KEY_PREFIX=libwholegraph-aarch64 # [aarch64]
- SCCACHE_S3_KEY_PREFIX=libwholegraph-linux64 # [linux64]
- SCCACHE_S3_USE_SSL
- SCCACHE_S3_NO_CREDENTIALS
# Here we duplicate the dependencies so conda will populate both _build_env
# and _h_env_placeholder directories. This is needed to run clang-tidy.
# We need both build and host, or conda will only use _h_env_placeholder
# directory, which is removed after a build. Having only _build_env is not
# enough as CMake will not search for dependencies in this directory.
  # This needs to be removed once CI supports from-source builds.
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- cmake {{ cmake_version }}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
{% if cuda_major == "11" %}
- cudatoolkit
{% else %}
- cuda-cudart-dev
- cuda-driver-dev
{% endif %}
- cuda-version ={{ cuda_version }}
- doxygen {{ doxygen_version }}
- gmock {{ gtest_version }}
- gtest {{ gtest_version }}
- libraft ={{ minor_version }}
- libraft-headers ={{ minor_version }}
- librmm ={{ minor_version }}
- nccl {{ nccl_version }}
outputs:
- name: libwholegraph
version: {{ version }}
script: install_libwholegraph.sh
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- cmake {{ cmake_version }}
host:
- cuda-version ={{ cuda_version }}
{% if cuda_major != "11" %}
- cuda-cudart-dev
- cuda-driver-dev
{% endif %}
run:
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- libraft ={{ minor_version }}
- nccl {{ nccl_version }}
about:
home: https://rapids.ai/
license: Apache-2.0
license_file: ../../../LICENSE
summary: libwholegraph library
- name: libwholegraph-tests
version: {{ version }}
script: install_libwholegraph_tests.sh
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- cmake {{ cmake_version }}
host:
- cuda-version ={{ cuda_version }}
{% if cuda_major != "11" %}
- cuda-cudart-dev
- cuda-driver-dev
{% endif %}
run:
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
- {{ pin_subpackage('libwholegraph', exact=True) }}
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- gmock {{ gtest_version }}
- gtest {{ gtest_version }}
about:
home: https://rapids.ai/
license: Apache-2.0
license_file: ../../../LICENSE
summary: libwholegraph tests
| 0 |
rapidsai_public_repos/wholegraph/scripts
|
rapidsai_public_repos/wholegraph/scripts/checks/fileutils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import os
import git
DEFAULT_DIRS = ["cpp", "pylibwholegraph"]
HEADER_SUB_DIRS = ["benchmarks", "include", "src", "tests", "cpp"]
ALWAYS_IGNORED_DIRS = ["build", "_skbuild"]
HEADER_EXT = ["h", "hpp", "cuh"]
SRC_EXT_RE = r"[.](cu|cuh|h|hpp|cpp)$"
def get_default_dirs_with_sources(roots=None):
if roots is None:
roots = ["."]
dirs = set()
for r in roots:
rr = os.path.realpath(os.path.expanduser(r))
for d in DEFAULT_DIRS:
for h in HEADER_SUB_DIRS:
dd = os.path.join(rr, d, h)
if os.path.isdir(dd):
dirs.add(dd)
return dirs
def list_all_src_files(srcdirs=None, file_re=None, ignore_re=None):
all_files = []
if srcdirs is None:
# we always assume that these scripts are run from repo root
srcdirs = [
os.path.realpath(os.path.expanduser(d)) for d in DEFAULT_DIRS
]
if file_re is None:
file_re = re.compile(SRC_EXT_RE)
for srcdir in srcdirs:
for root, dirs, files in os.walk(srcdir):
if (any(d in root.split(os.sep) for d in ALWAYS_IGNORED_DIRS)):
continue
for f in files:
if re.search(file_re, f):
src = os.path.join(root, f)
if ignore_re is not None and re.search(ignore_re, src):
continue
all_files.append(src)
return all_files
def modifiedFiles():
"""Get a set of all modified files, as Diff objects.
The files returned have been modified in git since the merge base of HEAD
and the upstream of the target branch. We return the Diff objects so that
we can read only the staged changes.
"""
repo = git.Repo()
# Use the environment variable TARGET_BRANCH or RAPIDS_BASE_BRANCH
# (defined in CI) if possible
target_branch = os.environ.get(
"TARGET_BRANCH", os.environ.get("RAPIDS_BASE_BRANCH")
)
if target_branch is None:
# Fall back to the closest branch if not on CI
target_branch = repo.git.describe(
all=True, tags=True, match=["branch-*", "main"], abbrev=0
).lstrip("heads/")
upstream_target_branch = None
if target_branch in repo.heads:
# Use the tracking branch of the local reference if it exists. This
# returns None if no tracking branch is set.
upstream_target_branch = repo.heads[target_branch].tracking_branch()
if upstream_target_branch is None:
# Fall back to the remote with the newest target_branch. This code
# path is used on CI because the only local branch reference is
# current-pr-branch, and thus target_branch is not in repo.heads.
# This also happens if no tracking branch is defined for the local
# target_branch. We use the remote with the latest commit if
# multiple remotes are defined.
candidate_branches = [
remote.refs[target_branch]
for remote in repo.remotes
if target_branch in remote.refs
]
if len(candidate_branches) > 0:
upstream_target_branch = sorted(
candidate_branches,
key=lambda branch: branch.commit.committed_datetime,
)[-1]
else:
# If no remotes are defined, try to use the local version of the
# target_branch. If this fails, the repo configuration must be very
# strange and we can fix this script on a case-by-case basis.
upstream_target_branch = repo.heads[target_branch]
merge_base = repo.merge_base("HEAD", upstream_target_branch.commit)[0]
diff = merge_base.diff()
changed_files = {f for f in diff if f.b_path is not None}
return changed_files
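# A minimal usage sketch (an illustrative assumption, not part of the checker
# flow): the returned git.Diff objects let a caller read staged blob contents
# directly, without touching the working tree.
#
#   for diff in modifiedFiles():
#       staged_text = diff.b_blob.data_stream.read().decode()
#       print(f"{diff.b_path}: {len(staged_text.splitlines())} staged lines")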
| 0 |
rapidsai_public_repos/wholegraph/scripts
|
rapidsai_public_repos/wholegraph/scripts/checks/clangutils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import os
import shutil
import subprocess
DEFAULT_DIRS = ["cpp"]
ALWAYS_IGNORED_DIRS = ["build", "_skbuild", "_cython_build", "cmake-build-debug"]
HEADER_SUB_DIRS = ["cpp/include", "cpp/src"]
EXCLUDED_HEADER_NAMES = set(["dlpack.h"])
HEADER_EXT = ["h", "hpp", "cuh"]
SRC_EXT_RE = r"[.](cu|cuh|h|hpp|cpp)$"
CLANG_COMPILER = "clang++"
EXPECTED_VERSIONS = ("16.0.0",)
CLANG_VERSION_RE = re.compile(r"(Ubuntu |Debian )?clang version ([0-9.]+)(-[0-9]+)?(~ubuntu[0-9.]+)?")
CLANG_FMT_VERSION_RE = re.compile(r"(Ubuntu |Debian )?clang-format version ([0-9.]+)(-[0-9]+)?(~ubuntu[0-9.]+)?")
GNU_DEFAULT_COMPILER = "g++"
CMAKE_COMPILER_REGEX = re.compile(
r"^\s*CMAKE_CXX_COMPILER:FILEPATH=(.+)\s*$", re.MULTILINE)
def check_clang_version(compiler_name):
ret = subprocess.check_output(
"%s --version" % compiler_name, shell=True
)
ret = ret.decode("utf-8")
version = CLANG_VERSION_RE.match(ret)
if version is None:
raise Exception("Failed to figure out clang compiler version!")
version = version.group(2)
if version not in EXPECTED_VERSIONS:
raise Exception("clang compiler version must be in %s found '%s'" %
(EXPECTED_VERSIONS, version))
def check_clang_format_version(exe_name):
ret = subprocess.check_output("%s --version" % exe_name, shell=True)
ret = ret.decode("utf-8")
version = CLANG_FMT_VERSION_RE.match(ret)
if version is None:
raise Exception("Failed to figure out clang-format version!")
version = version.group(2)
if version not in EXPECTED_VERSIONS:
raise Exception("clang-format version must be in %s found '%s'" %
(EXPECTED_VERSIONS, version))
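# For illustration (a hypothetical version banner, not taken from a real run),
# CLANG_VERSION_RE is meant to accept both distro-patched and vanilla banners,
# with group(2) capturing the bare version compared against EXPECTED_VERSIONS:
#   >>> CLANG_VERSION_RE.match("Ubuntu clang version 16.0.0-1~ubuntu22.04").group(2)
#   '16.0.0'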
def get_default_dirs_with_sources(roots=None):
if roots is None:
roots = ["."]
dirs = set()
for r in roots:
rr = os.path.realpath(os.path.expanduser(r))
for d in DEFAULT_DIRS:
for h in HEADER_SUB_DIRS:
dd = os.path.join(rr, d, h)
if os.path.isdir(dd):
dirs.add(dd)
return dirs
def list_all_src_files(srcdirs=None, file_re=None, ignore_re=None):
all_files = []
if srcdirs is None:
# we always assume that these scripts are run from repo root
srcdirs = [
os.path.realpath(os.path.expanduser(d)) for d in DEFAULT_DIRS
]
if file_re is None:
file_re = re.compile(SRC_EXT_RE)
for srcdir in srcdirs:
for root, dirs, files in os.walk(srcdir):
if (any(d in root.split(os.sep) for d in ALWAYS_IGNORED_DIRS)):
continue
for f in files:
if re.search(file_re, f):
src = os.path.join(root, f)
if ignore_re is not None and re.search(ignore_re, src):
continue
all_files.append(src)
return all_files
def list_all_headers(srcdirs=None):
header_re = re.compile("[.]({})$".format("|".join(HEADER_EXT)))
h_excl = "|".join(re.escape(h) for h in EXCLUDED_HEADER_NAMES)
excl_re = re.compile("{}({})$".format(re.escape(os.sep), h_excl))
return list_all_src_files(
srcdirs=srcdirs, file_re=header_re, ignore_re=excl_re)
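# For illustration (hypothetical file names): with HEADER_EXT and
# EXCLUDED_HEADER_NAMES above, "cpp/include/foo.cuh" would be listed while
# "cpp/include/dlpack.h" would be dropped by the exclusion regex.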
def get_gcc_root(args, build_dir):
# first try to determine GCC based on CMakeCache
cmake_cache = os.path.join(build_dir, "CMakeCache.txt")
if os.path.isfile(cmake_cache):
with open(cmake_cache) as f:
content = f.read()
match = CMAKE_COMPILER_REGEX.search(content)
if match:
return os.path.dirname(os.path.dirname(match.group(1)))
# fall-back to g++ install. Note that this might fail on OSes other than
# Linux, but our build assumes a Linux OS anyway (such as in CI)
default_gxx = shutil.which(GNU_DEFAULT_COMPILER)
if default_gxx:
return os.path.dirname(os.path.dirname(default_gxx))
raise Exception("Cannot find any g++ install on the system.")
| 0 |
rapidsai_public_repos/wholegraph/scripts
|
rapidsai_public_repos/wholegraph/scripts/checks/__clang_cuda_additional_intrinsics.h
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __CLANG_CUDA_ADDITIONAL_INTRINSICS_H__
#define __CLANG_CUDA_ADDITIONAL_INTRINSICS_H__
#ifndef __CUDA__
#error "This file is for CUDA compilation only."
#endif
// for some of these macros, see cuda_fp16.hpp
#if defined(__cplusplus) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 320))
#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)
#define __LDG_PTR "l"
#define __LBITS "64"
#else
#define __LDG_PTR "r"
#define __LBITS "32"
#endif // (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)
#define __NOARG
#define __MAKE_LD(cop, c_typ, int_typ, ptx_typ, inl_typ, mem) \
__device__ __forceinline__ c_typ __ld ## cop (const c_typ* addr) { \
int_typ out; \
asm("ld." #cop "." ptx_typ " %0, [%1];" \
: "=" inl_typ(out) : __LDG_PTR(addr)mem); \
return (c_typ)out; \
}
#define __MAKE_LD2(cop, c_typ, int_typ, ptx_typ, inl_typ, mem) \
__device__ __forceinline__ c_typ __ld ## cop (const c_typ* addr) { \
int_typ out1, out2; \
asm("ld." #cop ".v2." ptx_typ " {%0, %1}, [%2];" \
: "=" inl_typ(out1), "=" inl_typ(out2) : __LDG_PTR(addr)mem); \
c_typ out; \
out.x = out1; \
out.y = out2; \
return out; \
}
#define __MAKE_LD4(cop, c_typ, int_typ, ptx_typ, inl_typ, mem) \
__device__ __forceinline__ c_typ __ld ## cop (const c_typ* addr) { \
int_typ out1, out2, out3, out4; \
asm("ld." #cop".v4." ptx_typ " {%0, %1, %2, %3}, [%4];" \
: "=" inl_typ(out1), "=" inl_typ(out2), \
"=" inl_typ(out3), "=" inl_typ(out4) : __LDG_PTR(addr)mem); \
c_typ out; \
out.x = out1; \
out.y = out2; \
out.z = out3; \
out.w = out4; \
return out; \
}
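// For example (a rough, hand-expanded sketch of one instantiation below, on a
// 64-bit build where __LDG_PTR is "l"), __MAKE_LD(cg, int, int, "s32", "r", __NOARG)
// produces approximately:
//   __device__ __forceinline__ int __ldcg(const int* addr) {
//     int out;
//     asm("ld.cg.s32 %0, [%1];" : "=r"(out) : "l"(addr));
//     return (int)out;
//   }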
__MAKE_LD(cg, char, short, "s8", "h", __NOARG)
__MAKE_LD(cg, signed char, short, "s8", "h", __NOARG)
__MAKE_LD(cg, unsigned char, short, "u8", "h", __NOARG)
__MAKE_LD(cg, short, short, "s16", "h", __NOARG)
__MAKE_LD(cg, unsigned short, unsigned short, "u16", "h", __NOARG)
__MAKE_LD(cg, int, int, "s32", "r", __NOARG)
__MAKE_LD(cg, unsigned int, unsigned int, "u32", "r", __NOARG)
__MAKE_LD(cg, long, long, "s" __LBITS, __LDG_PTR, __NOARG)
__MAKE_LD(cg, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR, __NOARG)
__MAKE_LD(cg, long long, long long, "s64", "l", __NOARG)
__MAKE_LD(cg, unsigned long long, unsigned long long, "u64", "l", __NOARG)
__MAKE_LD(cg, float, float, "f32", "f", __NOARG)
__MAKE_LD(cg, double, double, "f64", "d", __NOARG)
__MAKE_LD2(cg, char2, short, "s8", "h", __NOARG)
__MAKE_LD2(cg, uchar2, short, "u8", "h", __NOARG)
__MAKE_LD2(cg, short2, short, "s16", "h", __NOARG)
__MAKE_LD2(cg, ushort2, unsigned short, "u16", "h", __NOARG)
__MAKE_LD2(cg, int2, int, "s32", "r", __NOARG)
__MAKE_LD2(cg, uint2, unsigned int, "u32", "r", __NOARG)
__MAKE_LD2(cg, longlong2, long long, "s64", "l", __NOARG)
__MAKE_LD2(cg, ulonglong2, unsigned long long, "u64", "l", __NOARG)
__MAKE_LD2(cg, float2, float, "f32", "f", __NOARG)
__MAKE_LD2(cg, double2, double, "f64", "d", __NOARG)
__MAKE_LD4(cg, char4, short, "s8", "h", __NOARG)
__MAKE_LD4(cg, uchar4, short, "u8", "h", __NOARG)
__MAKE_LD4(cg, short4, short, "s16", "h", __NOARG)
__MAKE_LD4(cg, ushort4, unsigned short, "u16", "h", __NOARG)
__MAKE_LD4(cg, int4, int, "s32", "r", __NOARG)
__MAKE_LD4(cg, uint4, unsigned int, "u32", "r", __NOARG)
__MAKE_LD4(cg, float4, float, "f32", "f", __NOARG)
__MAKE_LD(ca, char, short, "s8", "h", __NOARG)
__MAKE_LD(ca, signed char, short, "s8", "h", __NOARG)
__MAKE_LD(ca, unsigned char, short, "u8", "h", __NOARG)
__MAKE_LD(ca, short, short, "s16", "h", __NOARG)
__MAKE_LD(ca, unsigned short, unsigned short, "u16", "h", __NOARG)
__MAKE_LD(ca, int, int, "s32", "r", __NOARG)
__MAKE_LD(ca, unsigned int, unsigned int, "u32", "r", __NOARG)
__MAKE_LD(ca, long, long, "s" __LBITS, __LDG_PTR, __NOARG)
__MAKE_LD(ca, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR, __NOARG)
__MAKE_LD(ca, long long, long long, "s64", "l", __NOARG)
__MAKE_LD(ca, unsigned long long, unsigned long long, "u64", "l", __NOARG)
__MAKE_LD(ca, float, float, "f32", "f", __NOARG)
__MAKE_LD(ca, double, double, "f64", "d", __NOARG)
__MAKE_LD2(ca, char2, short, "s8", "h", __NOARG)
__MAKE_LD2(ca, uchar2, short, "u8", "h", __NOARG)
__MAKE_LD2(ca, short2, short, "s16", "h", __NOARG)
__MAKE_LD2(ca, ushort2, unsigned short, "u16", "h", __NOARG)
__MAKE_LD2(ca, int2, int, "s32", "r", __NOARG)
__MAKE_LD2(ca, uint2, unsigned int, "u32", "r", __NOARG)
__MAKE_LD2(ca, longlong2, long long, "s64", "l", __NOARG)
__MAKE_LD2(ca, ulonglong2, unsigned long long, "u64", "l", __NOARG)
__MAKE_LD2(ca, float2, float, "f32", "f", __NOARG)
__MAKE_LD2(ca, double2, double, "f64", "d", __NOARG)
__MAKE_LD4(ca, char4, short, "s8", "h", __NOARG)
__MAKE_LD4(ca, uchar4, short, "u8", "h", __NOARG)
__MAKE_LD4(ca, short4, short, "s16", "h", __NOARG)
__MAKE_LD4(ca, ushort4, unsigned short, "u16", "h", __NOARG)
__MAKE_LD4(ca, int4, int, "s32", "r", __NOARG)
__MAKE_LD4(ca, uint4, unsigned int, "u32", "r", __NOARG)
__MAKE_LD4(ca, float4, float, "f32", "f", __NOARG)
__MAKE_LD(cs, char, short, "s8", "h", __NOARG)
__MAKE_LD(cs, signed char, short, "s8", "h", __NOARG)
__MAKE_LD(cs, unsigned char, short, "u8", "h", __NOARG)
__MAKE_LD(cs, short, short, "s16", "h", __NOARG)
__MAKE_LD(cs, unsigned short, unsigned short, "u16", "h", __NOARG)
__MAKE_LD(cs, int, int, "s32", "r", __NOARG)
__MAKE_LD(cs, unsigned int, unsigned int, "u32", "r", __NOARG)
__MAKE_LD(cs, long, long, "s" __LBITS, __LDG_PTR, __NOARG)
__MAKE_LD(cs, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR, __NOARG)
__MAKE_LD(cs, long long, long long, "s64", "l", __NOARG)
__MAKE_LD(cs, unsigned long long, unsigned long long, "u64", "l", __NOARG)
__MAKE_LD(cs, float, float, "f32", "f", __NOARG)
__MAKE_LD(cs, double, double, "f64", "d", __NOARG)
__MAKE_LD2(cs, char2, short, "s8", "h", __NOARG)
__MAKE_LD2(cs, uchar2, short, "u8", "h", __NOARG)
__MAKE_LD2(cs, short2, short, "s16", "h", __NOARG)
__MAKE_LD2(cs, ushort2, unsigned short, "u16", "h", __NOARG)
__MAKE_LD2(cs, int2, int, "s32", "r", __NOARG)
__MAKE_LD2(cs, uint2, unsigned int, "u32", "r", __NOARG)
__MAKE_LD2(cs, longlong2, long long, "s64", "l", __NOARG)
__MAKE_LD2(cs, ulonglong2, unsigned long long, "u64", "l", __NOARG)
__MAKE_LD2(cs, float2, float, "f32", "f", __NOARG)
__MAKE_LD2(cs, double2, double, "f64", "d", __NOARG)
__MAKE_LD4(cs, char4, short, "s8", "h", __NOARG)
__MAKE_LD4(cs, uchar4, short, "u8", "h", __NOARG)
__MAKE_LD4(cs, short4, short, "s16", "h", __NOARG)
__MAKE_LD4(cs, ushort4, unsigned short, "u16", "h", __NOARG)
__MAKE_LD4(cs, int4, int, "s32", "r", __NOARG)
__MAKE_LD4(cs, uint4, unsigned int, "u32", "r", __NOARG)
__MAKE_LD4(cs, float4, float, "f32", "f", __NOARG)
__MAKE_LD(lu, char, short, "s8", "h", : "memory")
__MAKE_LD(lu, signed char, short, "s8", "h", : "memory")
__MAKE_LD(lu, unsigned char, short, "u8", "h", : "memory")
__MAKE_LD(lu, short, short, "s16", "h", : "memory")
__MAKE_LD(lu, unsigned short, unsigned short, "u16", "h", : "memory")
__MAKE_LD(lu, int, int, "s32", "r", : "memory")
__MAKE_LD(lu, unsigned int, unsigned int, "u32", "r", : "memory")
__MAKE_LD(lu, long, long, "s" __LBITS, __LDG_PTR, : "memory")
__MAKE_LD(lu, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR, : "memory")
__MAKE_LD(lu, long long, long long, "s64", "l", : "memory")
__MAKE_LD(lu, unsigned long long, unsigned long long, "u64", "l", : "memory")
__MAKE_LD(lu, float, float, "f32", "f", : "memory")
__MAKE_LD(lu, double, double, "f64", "d", : "memory")
__MAKE_LD2(lu, char2, short, "s8", "h", : "memory")
__MAKE_LD2(lu, uchar2, short, "u8", "h", : "memory")
__MAKE_LD2(lu, short2, short, "s16", "h", : "memory")
__MAKE_LD2(lu, ushort2, unsigned short, "u16", "h", : "memory")
__MAKE_LD2(lu, int2, int, "s32", "r", : "memory")
__MAKE_LD2(lu, uint2, unsigned int, "u32", "r", : "memory")
__MAKE_LD2(lu, longlong2, long long, "s64", "l", : "memory")
__MAKE_LD2(lu, ulonglong2, unsigned long long, "u64", "l", : "memory")
__MAKE_LD2(lu, float2, float, "f32", "f", : "memory")
__MAKE_LD2(lu, double2, double, "f64", "d", : "memory")
__MAKE_LD4(lu, char4, short, "s8", "h", : "memory")
__MAKE_LD4(lu, uchar4, short, "u8", "h", : "memory")
__MAKE_LD4(lu, short4, short, "s16", "h", : "memory")
__MAKE_LD4(lu, ushort4, unsigned short, "u16", "h", : "memory")
__MAKE_LD4(lu, int4, int, "s32", "r", : "memory")
__MAKE_LD4(lu, uint4, unsigned int, "u32", "r", : "memory")
__MAKE_LD4(lu, float4, float, "f32", "f", : "memory")
__MAKE_LD(cv, char, short, "s8", "h", : "memory")
__MAKE_LD(cv, signed char, short, "s8", "h", : "memory")
__MAKE_LD(cv, unsigned char, short, "u8", "h", : "memory")
__MAKE_LD(cv, short, short, "s16", "h", : "memory")
__MAKE_LD(cv, unsigned short, unsigned short, "u16", "h", : "memory")
__MAKE_LD(cv, int, int, "s32", "r", : "memory")
__MAKE_LD(cv, unsigned int, unsigned int, "u32", "r", : "memory")
__MAKE_LD(cv, long, long, "s" __LBITS, __LDG_PTR, : "memory")
__MAKE_LD(cv, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR, : "memory")
__MAKE_LD(cv, long long, long long, "s64", "l", : "memory")
__MAKE_LD(cv, unsigned long long, unsigned long long, "u64", "l", : "memory")
__MAKE_LD(cv, float, float, "f32", "f", : "memory")
__MAKE_LD(cv, double, double, "f64", "d", : "memory")
__MAKE_LD2(cv, char2, short, "s8", "h", : "memory")
__MAKE_LD2(cv, uchar2, short, "u8", "h", : "memory")
__MAKE_LD2(cv, short2, short, "s16", "h", : "memory")
__MAKE_LD2(cv, ushort2, unsigned short, "u16", "h", : "memory")
__MAKE_LD2(cv, int2, int, "s32", "r", : "memory")
__MAKE_LD2(cv, uint2, unsigned int, "u32", "r", : "memory")
__MAKE_LD2(cv, longlong2, long long, "s64", "l", : "memory")
__MAKE_LD2(cv, ulonglong2, unsigned long long, "u64", "l", : "memory")
__MAKE_LD2(cv, float2, float, "f32", "f", : "memory")
__MAKE_LD2(cv, double2, double, "f64", "d", : "memory")
__MAKE_LD4(cv, char4, short, "s8", "h", : "memory")
__MAKE_LD4(cv, uchar4, short, "u8", "h", : "memory")
__MAKE_LD4(cv, short4, short, "s16", "h", : "memory")
__MAKE_LD4(cv, ushort4, unsigned short, "u16", "h", : "memory")
__MAKE_LD4(cv, int4, int, "s32", "r", : "memory")
__MAKE_LD4(cv, uint4, unsigned int, "u32", "r", : "memory")
__MAKE_LD4(cv, float4, float, "f32", "f", : "memory")
#define __MAKE_ST(cop, c_typ, int_typ, ptx_typ, inl_typ) \
__device__ __forceinline__ void __st ## cop (c_typ* addr, c_typ v) { \
asm("st." #cop "." ptx_typ " [%0], %1;" \
:: __LDG_PTR(addr), inl_typ((int_typ)v) : "memory"); \
}
#define __MAKE_ST2(cop, c_typ, int_typ, ptx_typ, inl_typ) \
__device__ __forceinline__ void __st ## cop (c_typ* addr, c_typ v) { \
int_typ v1 = v.x, v2 = v.y; \
asm("st." #cop ".v2." ptx_typ " [%0], {%1, %2};" \
:: __LDG_PTR(addr), inl_typ(v1), inl_typ(v2) : "memory"); \
}
#define __MAKE_ST4(cop, c_typ, int_typ, ptx_typ, inl_typ) \
__device__ __forceinline__ void __st ## cop (c_typ* addr, c_typ v) { \
int_typ v1 = v.x, v2 = v.y, v3 = v.z, v4 = v.w; \
asm("st." #cop ".v4." ptx_typ " [%0], {%1, %2, %3, %4};" \
:: __LDG_PTR(addr), inl_typ(v1), inl_typ(v2), \
inl_typ(v3), inl_typ(v4) : "memory"); \
}
__MAKE_ST(wb, char, short, "s8", "h")
__MAKE_ST(wb, signed char, short, "s8", "h")
__MAKE_ST(wb, unsigned char, short, "u8", "h")
__MAKE_ST(wb, short, short, "s16", "h")
__MAKE_ST(wb, unsigned short, unsigned short, "u16", "h")
__MAKE_ST(wb, int, int, "s32", "r")
__MAKE_ST(wb, unsigned int, unsigned int, "u32", "r")
__MAKE_ST(wb, long, long, "s" __LBITS, __LDG_PTR)
__MAKE_ST(wb, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR)
__MAKE_ST(wb, long long, long long, "s64", "l")
__MAKE_ST(wb, unsigned long long, unsigned long long, "u64", "l")
__MAKE_ST(wb, float, float, "f32", "f")
__MAKE_ST(wb, double, double, "f64", "d")
__MAKE_ST2(wb, char2, short, "s8", "h")
__MAKE_ST2(wb, uchar2, short, "u8", "h")
__MAKE_ST2(wb, short2, short, "s16", "h")
__MAKE_ST2(wb, ushort2, unsigned short, "u16", "h")
__MAKE_ST2(wb, int2, int, "s32", "r")
__MAKE_ST2(wb, uint2, unsigned int, "u32", "r")
__MAKE_ST2(wb, longlong2, long long, "s64", "l")
__MAKE_ST2(wb, ulonglong2, unsigned long long, "u64", "l")
__MAKE_ST2(wb, float2, float, "f32", "f")
__MAKE_ST2(wb, double2, double, "f64", "d")
__MAKE_ST4(wb, char4, short, "s8", "h")
__MAKE_ST4(wb, uchar4, short, "u8", "h")
__MAKE_ST4(wb, short4, short, "s16", "h")
__MAKE_ST4(wb, ushort4, unsigned short, "u16", "h")
__MAKE_ST4(wb, int4, int, "s32", "r")
__MAKE_ST4(wb, uint4, unsigned int, "u32", "r")
__MAKE_ST4(wb, float4, float, "f32", "f")
__MAKE_ST(cg, char, short, "s8", "h")
__MAKE_ST(cg, signed char, short, "s8", "h")
__MAKE_ST(cg, unsigned char, short, "u8", "h")
__MAKE_ST(cg, short, short, "s16", "h")
__MAKE_ST(cg, unsigned short, unsigned short, "u16", "h")
__MAKE_ST(cg, int, int, "s32", "r")
__MAKE_ST(cg, unsigned int, unsigned int, "u32", "r")
__MAKE_ST(cg, long, long, "s" __LBITS, __LDG_PTR)
__MAKE_ST(cg, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR)
__MAKE_ST(cg, long long, long long, "s64", "l")
__MAKE_ST(cg, unsigned long long, unsigned long long, "u64", "l")
__MAKE_ST(cg, float, float, "f32", "f")
__MAKE_ST(cg, double, double, "f64", "d")
__MAKE_ST2(cg, char2, short, "s8", "h")
__MAKE_ST2(cg, uchar2, short, "u8", "h")
__MAKE_ST2(cg, short2, short, "s16", "h")
__MAKE_ST2(cg, ushort2, unsigned short, "u16", "h")
__MAKE_ST2(cg, int2, int, "s32", "r")
__MAKE_ST2(cg, uint2, unsigned int, "u32", "r")
__MAKE_ST2(cg, longlong2, long long, "s64", "l")
__MAKE_ST2(cg, ulonglong2, unsigned long long, "u64", "l")
__MAKE_ST2(cg, float2, float, "f32", "f")
__MAKE_ST2(cg, double2, double, "f64", "d")
__MAKE_ST4(cg, char4, short, "s8", "h")
__MAKE_ST4(cg, uchar4, short, "u8", "h")
__MAKE_ST4(cg, short4, short, "s16", "h")
__MAKE_ST4(cg, ushort4, unsigned short, "u16", "h")
__MAKE_ST4(cg, int4, int, "s32", "r")
__MAKE_ST4(cg, uint4, unsigned int, "u32", "r")
__MAKE_ST4(cg, float4, float, "f32", "f")
__MAKE_ST(cs, char, short, "s8", "h")
__MAKE_ST(cs, signed char, short, "s8", "h")
__MAKE_ST(cs, unsigned char, short, "u8", "h")
__MAKE_ST(cs, short, short, "s16", "h")
__MAKE_ST(cs, unsigned short, unsigned short, "u16", "h")
__MAKE_ST(cs, int, int, "s32", "r")
__MAKE_ST(cs, unsigned int, unsigned int, "u32", "r")
__MAKE_ST(cs, long, long, "s" __LBITS, __LDG_PTR)
__MAKE_ST(cs, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR)
__MAKE_ST(cs, long long, long long, "s64", "l")
__MAKE_ST(cs, unsigned long long, unsigned long long, "u64", "l")
__MAKE_ST(cs, float, float, "f32", "f")
__MAKE_ST(cs, double, double, "f64", "d")
__MAKE_ST2(cs, char2, short, "s8", "h")
__MAKE_ST2(cs, uchar2, short, "u8", "h")
__MAKE_ST2(cs, short2, short, "s16", "h")
__MAKE_ST2(cs, ushort2, unsigned short, "u16", "h")
__MAKE_ST2(cs, int2, int, "s32", "r")
__MAKE_ST2(cs, uint2, unsigned int, "u32", "r")
__MAKE_ST2(cs, longlong2, long long, "s64", "l")
__MAKE_ST2(cs, ulonglong2, unsigned long long, "u64", "l")
__MAKE_ST2(cs, float2, float, "f32", "f")
__MAKE_ST2(cs, double2, double, "f64", "d")
__MAKE_ST4(cs, char4, short, "s8", "h")
__MAKE_ST4(cs, uchar4, short, "u8", "h")
__MAKE_ST4(cs, short4, short, "s16", "h")
__MAKE_ST4(cs, ushort4, unsigned short, "u16", "h")
__MAKE_ST4(cs, int4, int, "s32", "r")
__MAKE_ST4(cs, uint4, unsigned int, "u32", "r")
__MAKE_ST4(cs, float4, float, "f32", "f")
__MAKE_ST(wt, char, short, "s8", "h")
__MAKE_ST(wt, signed char, short, "s8", "h")
__MAKE_ST(wt, unsigned char, short, "u8", "h")
__MAKE_ST(wt, short, short, "s16", "h")
__MAKE_ST(wt, unsigned short, unsigned short, "u16", "h")
__MAKE_ST(wt, int, int, "s32", "r")
__MAKE_ST(wt, unsigned int, unsigned int, "u32", "r")
__MAKE_ST(wt, long, long, "s" __LBITS, __LDG_PTR)
__MAKE_ST(wt, unsigned long, unsigned long, "u" __LBITS, __LDG_PTR)
__MAKE_ST(wt, long long, long long, "s64", "l")
__MAKE_ST(wt, unsigned long long, unsigned long long, "u64", "l")
__MAKE_ST(wt, float, float, "f32", "f")
__MAKE_ST(wt, double, double, "f64", "d")
__MAKE_ST2(wt, char2, short, "s8", "h")
__MAKE_ST2(wt, uchar2, short, "u8", "h")
__MAKE_ST2(wt, short2, short, "s16", "h")
__MAKE_ST2(wt, ushort2, unsigned short, "u16", "h")
__MAKE_ST2(wt, int2, int, "s32", "r")
__MAKE_ST2(wt, uint2, unsigned int, "u32", "r")
__MAKE_ST2(wt, longlong2, long long, "s64", "l")
__MAKE_ST2(wt, ulonglong2, unsigned long long, "u64", "l")
__MAKE_ST2(wt, float2, float, "f32", "f")
__MAKE_ST2(wt, double2, double, "f64", "d")
__MAKE_ST4(wt, char4, short, "s8", "h")
__MAKE_ST4(wt, uchar4, short, "u8", "h")
__MAKE_ST4(wt, short4, short, "s16", "h")
__MAKE_ST4(wt, ushort4, unsigned short, "u16", "h")
__MAKE_ST4(wt, int4, int, "s32", "r")
__MAKE_ST4(wt, uint4, unsigned int, "u32", "r")
__MAKE_ST4(wt, float4, float, "f32", "f")
#undef __MAKE_ST4
#undef __MAKE_ST2
#undef __MAKE_ST
#undef __MAKE_LD4
#undef __MAKE_LD2
#undef __MAKE_LD
#undef __NOARG
#undef __LBITS
#undef __LDG_PTR
#endif // defined(__cplusplus) && (!defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 320))
#endif // defined(__CLANG_CUDA_ADDITIONAL_INTRINSICS_H__)
| 0 |
rapidsai_public_repos/wholegraph/scripts
|
rapidsai_public_repos/wholegraph/scripts/checks/gitutils.py
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess
import os
import re
def isFileEmpty(f):
return os.stat(f).st_size == 0
def __git(*opts):
"""Runs a git command and returns its output"""
cmd = "git " + " ".join(list(opts))
ret = subprocess.check_output(cmd, shell=True)
return ret.decode("UTF-8").rstrip("\n")
def __gitdiff(*opts):
"""Runs a git diff command with no pager set"""
return __git("--no-pager", "diff", *opts)
def branch():
"""Returns the name of the current branch"""
name = __git("rev-parse", "--abbrev-ref", "HEAD")
name = name.rstrip()
return name
def repo_version():
"""
Determines the version of the repo by using `git describe`
Returns
-------
str
The full version of the repo in the format 'v#.#.#{a|b|rc}'
"""
return __git("describe", "--tags", "--abbrev=0")
def repo_version_major_minor():
"""
Determines the version of the repo using `git describe` and returns only
the major and minor portion
Returns
-------
str
The partial version of the repo in the format '{major}.{minor}'
"""
full_repo_version = repo_version()
match = re.match(r"^v?(?P<major>[0-9]+)(?:\.(?P<minor>[0-9]+))?",
full_repo_version)
if (match is None):
print(" [DEBUG] Could not determine repo major minor version. "
f"Full repo version: {full_repo_version}.")
return None
out_version = match.group("major")
if (match.group("minor")):
out_version += "." + match.group("minor")
return out_version
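# For illustration (hypothetical tags, not from this repo's history): a
# repo_version() of "v23.12.00" yields "23.12" here, while a bare "v23"
# yields just "23".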
def determine_merge_commit(current_branch="HEAD"):
"""
When running outside of CI, this will estimate the target merge commit hash
    of `current_branch` by finding a common ancestor with the remote branch
'branch-{major}.{minor}' where {major} and {minor} are determined from the
repo version.
Parameters
----------
current_branch : str, optional
Which branch to consider as the current branch, by default "HEAD"
Returns
-------
str
The common commit hash ID
"""
try:
# Try to determine the target branch from the most recent tag
head_branch = __git("describe",
"--all",
"--tags",
"--match='branch-*'",
"--abbrev=0")
except subprocess.CalledProcessError:
print(" [DEBUG] Could not determine target branch from most recent "
"tag. Falling back to 'branch-{major}.{minor}.")
head_branch = None
if (head_branch is not None):
# Convert from head to branch name
head_branch = __git("name-rev", "--name-only", head_branch)
else:
# Try and guess the target branch as "branch-<major>.<minor>"
version = repo_version_major_minor()
if (version is None):
return None
head_branch = "branch-{}".format(version)
try:
# Now get the remote tracking branch
remote_branch = __git("rev-parse",
"--abbrev-ref",
"--symbolic-full-name",
head_branch + "@{upstream}")
except subprocess.CalledProcessError:
print(" [DEBUG] Could not remote tracking reference for "
f"branch {head_branch}.")
remote_branch = None
if (remote_branch is None):
return None
print(f" [DEBUG] Determined TARGET_BRANCH as: '{remote_branch}'. "
"Finding common ancestor.")
common_commit = __git("merge-base", remote_branch, current_branch)
return common_commit
def uncommittedFiles():
"""
Returns a list of all changed files that are not yet committed. This
means both untracked/unstaged as well as uncommitted files too.
"""
files = __git("status", "-u", "-s")
ret = []
for f in files.splitlines():
f = f.strip(" ")
f = re.sub("\s+", " ", f) # noqa: W605
tmp = f.split(" ", 1)
# only consider staged files or uncommitted files
# in other words, ignore untracked files
if tmp[0] == "M" or tmp[0] == "A":
ret.append(tmp[1])
return ret
def changedFilesBetween(baseName, branchName, commitHash):
"""
Returns a list of files changed between branches baseName and latest commit
of branchName.
"""
current = branch()
# checkout "base" branch
__git("checkout", "--force", baseName)
# checkout branch for comparing
__git("checkout", "--force", branchName)
# checkout latest commit from branch
__git("checkout", "-fq", commitHash)
files = __gitdiff("--name-only",
"--ignore-submodules",
f"{baseName}..{branchName}")
# restore the original branch
__git("checkout", "--force", current)
return files.splitlines()
def changesInFileBetween(file, b1, b2, filter=None):
"""Filters the changed lines to a file between the branches b1 and b2"""
current = branch()
__git("checkout", "--quiet", b1)
__git("checkout", "--quiet", b2)
diffs = __gitdiff("--ignore-submodules",
"-w",
"--minimal",
"-U0",
"%s...%s" % (b1, b2),
"--",
file)
__git("checkout", "--quiet", current)
lines = []
for line in diffs.splitlines():
if filter is None or filter(line):
lines.append(line)
return lines
def modifiedFiles(pathFilter=None):
"""
    If inside a CI environment (i.e. TARGET_BRANCH and COMMIT_HASH are defined,
    and the current branch is "current-pr-branch"), then lists out all files
    modified between these 2 branches. Locally, TARGET_BRANCH is determined,
    when possible, from the current repo version by finding a corresponding
    branch named 'branch-{major}.{minor}'. If this fails, this function will
    list out all the uncommitted files in the current branch.
    Such a utility function is helpful when wiring checker scripts into cmake
    as well as into the CI process. This way, during development, only the
    files touched (but not yet committed) by devs are checked, while during
    the CI process ALL files modified by the dev, as submitted in the PR, are
    checked. Both cases use the same script.
"""
targetBranch = os.environ.get("TARGET_BRANCH")
commitHash = os.environ.get("COMMIT_HASH")
currentBranch = branch()
print(
f" [DEBUG] TARGET_BRANCH={targetBranch}, COMMIT_HASH={commitHash}, "
f"currentBranch={currentBranch}")
if targetBranch and commitHash and (
currentBranch == "current-pr-branch" or "/pr/" in commitHash):
print(" [DEBUG] Assuming a CI environment.")
allFiles = changedFilesBetween(targetBranch, currentBranch, commitHash)
else:
print(" [DEBUG] Did not detect CI environment. "
"Determining TARGET_BRANCH locally.")
common_commit = determine_merge_commit(currentBranch)
if (common_commit is not None):
# Now get the diff. Use --staged to get both diff between
# common_commit..HEAD and any locally staged files
allFiles = __gitdiff("--name-only",
"--ignore-submodules",
"--staged",
f"{common_commit}").splitlines()
else:
# Fallback to just uncommitted files
allFiles = uncommittedFiles()
files = []
for f in allFiles:
if pathFilter is None or pathFilter(f):
files.append(f)
filesToCheckString = "\n\t".join(files) if files else "<None>"
print(f" [DEBUG] Found files to check:\n\t{filesToCheckString}\n")
return files
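# A minimal usage sketch (hypothetical call site, mirroring how the checker
# scripts consume this helper): restrict the modified-file list to C++ sources.
#
#   cpp_files = modifiedFiles(
#       pathFilter=lambda f: re.search(r"[.](cu|cuh|h|hpp|cpp)$", f))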
def listAllFilesInDir(folder):
"""Utility function to list all files/subdirs in the input folder"""
allFiles = []
for root, dirs, files in os.walk(folder):
for name in files:
allFiles.append(os.path.join(root, name))
return allFiles
def listFilesToCheck(filesDirs, filter=None):
"""
Utility function to filter the input list of files/dirs based on the input
filter method and returns all the files that need to be checked
"""
allFiles = []
for f in filesDirs:
if os.path.isfile(f):
if filter is None or filter(f):
allFiles.append(f)
elif os.path.isdir(f):
files = listAllFilesInDir(f)
for f_ in files:
if filter is None or filter(f_):
allFiles.append(f_)
return allFiles
| 0 |
rapidsai_public_repos/wholegraph/scripts
|
rapidsai_public_repos/wholegraph/scripts/checks/run-clang-format.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import argparse
import os
import re
import subprocess
import sys
import tempfile
import clangutils
def parse_args():
argparser = argparse.ArgumentParser("Runs clang-format on a C++ project")
argparser.add_argument(
"-dstdir", type=str, default=None,
help="Directory to store the temporary outputs of clang-format. If"
" nothing is passed for this, then a temporary dir will be created"
" using `mkdtemp`")
argparser.add_argument(
"-exe", type=str, default="clang-format",
help="Path to clang-format exe. By default it is expected to be"
" available via the `PATH` env-var.")
argparser.add_argument(
"-inplace", default=False, action="store_true",
help="Replace the source files itself with the format corrections.")
argparser.add_argument(
"-regex", type=str, default=clangutils.SRC_EXT_RE,
help="Regex string to filter in sources to be checked for formatting.")
argparser.add_argument(
"-ignore", type=str, default=None,
help="Regex used to ignore files from matched list")
argparser.add_argument(
"-v", dest="verbose", action="store_true",
help="Print verbose messages")
argparser.add_argument(
"dirs", type=str, nargs="*",
help="List of dirs where to find sources. "
"Must be relative paths to working directory of this script. "
"By default, we assume that the script is run from repo root and "
"search for sources in all C++ source folders.")
args = argparser.parse_args()
args.regex_compiled = re.compile(args.regex)
args.ignore_compiled = re.compile(args.ignore) if args.ignore else None
if args.dstdir is None:
args.dstdir = tempfile.mkdtemp()
clangutils.check_clang_format_version(args.exe)
if len(args.dirs) == 0:
args.dirs = clangutils.DEFAULT_DIRS
return args
def run_clang_format(src, dst, exe, verbose):
dstdir = os.path.dirname(dst)
if not os.path.exists(dstdir):
os.makedirs(dstdir)
# run the clang format command itself
if src == dst:
cmd = "%s -i %s" % (exe, src)
else:
cmd = "%s %s > %s" % (exe, src, dst)
try:
subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError:
print("Failed to run clang-format! Maybe your env is not proper?")
raise
# run the diff to check if there are any formatting issues
cmd = "diff -q %s %s >/dev/null" % (src, dst)
try:
subprocess.check_call(cmd, shell=True)
if verbose:
print("%s passed" % os.path.basename(src))
except subprocess.CalledProcessError:
src_real = os.path.realpath(os.path.expanduser(src))
print("%s failed! 'diff %s %s' will show formatting violations!" %
(os.path.basename(src), src_real, dst))
return False
return True
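# A minimal invocation sketch (paths are assumptions based on this file's
# location in the repo, not taken from project docs), run from the repo root:
#   python scripts/checks/run-clang-format.py -v          # report violations
#   python scripts/checks/run-clang-format.py -inplace    # bulk-fix sources
# With no positional dirs, clangutils.DEFAULT_DIRS is searched.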
def main():
args = parse_args()
    # Attempt to make sure that we always run this script from the repo root
    if not os.path.exists(".git"):
        print("Error!! This needs to always be run from the root of the repo")
sys.exit(-1)
all_files = clangutils.list_all_src_files(
args.dirs, args.regex_compiled, args.ignore_compiled
)
# actual format checker
status = True
for src in all_files:
if args.inplace:
_dir = os.path.dirname(src)
else:
_dir = os.path.join(args.dstdir, os.path.relpath(src))
dst = os.path.join(_dir, os.path.basename(src))
if not run_clang_format(src, dst, args.exe, args.verbose):
status = False
if not status:
print("clang-format failed! You have 2 options:")
print(" 1. Look at formatting differences above and fix them manually")
print(" 2. Or run the below command to bulk-fix all these at once")
print("Bulk-fix command: ")
print(" python ./scripts/run-clang-format.py %s -inplace" %
" ".join(sys.argv[1:]))
sys.exit(-1)
return
if __name__ == "__main__":
main()
| 0 |
rapidsai_public_repos/wholegraph/scripts
|
rapidsai_public_repos/wholegraph/scripts/checks/copyright.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import datetime
import os
import re
import sys
import git
from fileutils import modifiedFiles
FilesToCheck = [
re.compile(r"[.](cmake|cpp|cu|cuh|h|hpp|sh|pxd|py|pyx)$"),
re.compile(r"CMakeLists[.]txt$"),
re.compile(r"setup[.]cfg$"),
re.compile(r"meta[.]yaml$"),
]
ExemptFiles = [
re.compile(r"versioneer[.]py"),
re.compile(r".*[.]json$"),
re.compile(r"src/io/gzstream[.]hpp$"),
]
# this will break starting at year 10000, which is probably OK :)
CheckSimple = re.compile(
r"Copyright *(?:\(c\))? *(\d{4}),? *NVIDIA C(?:ORPORATION|orporation)"
)
CheckDouble = re.compile(
r"Copyright *(?:\(c\))? *(\d{4})-(\d{4}),? *NVIDIA C(?:ORPORATION|orporation)" # noqa: E501
)
def checkThisFile(f):
if isinstance(f, git.Diff):
if f.deleted_file or f.b_blob.size == 0:
return False
f = f.b_path
elif not os.path.exists(f) or os.stat(f).st_size == 0:
# This check covers things like symlinks which point to files that DNE
return False
for exempt in ExemptFiles:
if exempt.search(f):
return False
for checker in FilesToCheck:
if checker.search(f):
return True
return False
def getCopyrightYears(line):
res = CheckSimple.search(line)
if res:
return int(res.group(1)), int(res.group(1))
res = CheckDouble.search(line)
if res:
return int(res.group(1)), int(res.group(2))
return None, None
def replaceCurrentYear(line, start, end):
# first turn a simple regex into double (if applicable). then update years
res = CheckSimple.sub(r"Copyright (c) \1-\1, NVIDIA CORPORATION", line)
res = CheckDouble.sub(
rf"Copyright (c) {start:04d}-{end:04d}, NVIDIA CORPORATION",
res,
)
return res
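# For illustration (made-up header lines, not from any tracked file):
#   >>> getCopyrightYears("# Copyright (c) 2019, NVIDIA CORPORATION.")
#   (2019, 2019)
#   >>> getCopyrightYears("# Copyright (c) 2019-2022, NVIDIA CORPORATION.")
#   (2019, 2022)
#   >>> replaceCurrentYear("# Copyright (c) 2019, NVIDIA CORPORATION.", 2019, 2023)
#   '# Copyright (c) 2019-2023, NVIDIA CORPORATION.'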
def checkCopyright(f, update_current_year):
"""Checks for copyright headers and their years."""
errs = []
thisYear = datetime.datetime.now().year
lineNum = 0
crFound = False
yearMatched = False
if isinstance(f, git.Diff):
path = f.b_path
lines = f.b_blob.data_stream.read().decode().splitlines(keepends=True)
else:
path = f
with open(f, encoding="utf-8") as fp:
lines = fp.readlines()
for line in lines:
lineNum += 1
start, end = getCopyrightYears(line)
if start is None:
continue
crFound = True
if start > end:
e = [
path,
lineNum,
"First year after second year in the copyright "
"header (manual fix required)",
None,
]
errs.append(e)
elif thisYear < start or thisYear > end:
e = [
path,
lineNum,
f"Current year {thisYear} not included in the copyright header {start}-{end}",
None,
]
if thisYear < start:
e[-1] = replaceCurrentYear(line, thisYear, end)
if thisYear > end:
e[-1] = replaceCurrentYear(line, start, thisYear)
errs.append(e)
else:
yearMatched = True
# copyright header itself not found
if not crFound:
e = [
path,
0,
"Copyright header missing or formatted incorrectly "
"(manual fix required)",
None,
]
errs.append(e)
# even if the year matches a copyright header, make the check pass
if yearMatched:
errs = []
if update_current_year:
errs_update = [x for x in errs if x[-1] is not None]
if len(errs_update) > 0:
lines_changed = ", ".join(str(x[1]) for x in errs_update)
print(f"File: {path}. Changing line(s) {lines_changed}")
for _, lineNum, __, replacement in errs_update:
lines[lineNum - 1] = replacement
with open(path, "w", encoding="utf-8") as out_file:
out_file.writelines(lines)
return errs
def getAllFilesUnderDir(root, pathFilter=None):
retList = []
for dirpath, dirnames, filenames in os.walk(root):
for fn in filenames:
filePath = os.path.join(dirpath, fn)
if pathFilter(filePath):
retList.append(filePath)
return retList
def checkCopyright_main():
"""
    Checks for copyright headers in all the modified files. For a local repo,
    this script just looks at uncommitted files; in CI it compares the
    branches "$PR_TARGET_BRANCH" and "current-pr-branch".
"""
retVal = 0
argparser = argparse.ArgumentParser(
"Checks for a consistent copyright header in git's modified files"
)
argparser.add_argument(
"--update-current-year",
dest="update_current_year",
action="store_true",
required=False,
help="If set, "
"update the current year if a header is already "
"present and well formatted.",
)
argparser.add_argument(
"--git-modified-only",
dest="git_modified_only",
action="store_true",
required=False,
help="If set, " "only files seen as modified by git will be " "processed.",
)
args, dirs = argparser.parse_known_args()
if args.git_modified_only:
files = [f for f in modifiedFiles() if checkThisFile(f)]
else:
files = []
for d in [os.path.abspath(d) for d in dirs]:
if not os.path.isdir(d):
raise ValueError(f"{d} is not a directory.")
files += getAllFilesUnderDir(d, pathFilter=checkThisFile)
errors = []
for f in files:
errors += checkCopyright(f, args.update_current_year)
if len(errors) > 0:
if any(e[-1] is None for e in errors):
print("Copyright headers incomplete in some of the files!")
for e in errors:
print(" %s:%d Issue: %s" % (e[0], e[1], e[2]))
print("")
n_fixable = sum(1 for e in errors if e[-1] is not None)
file_from_repo = os.path.relpath(os.path.abspath(__file__))
if n_fixable > 0 and not args.update_current_year:
print(
f"You can run `python {file_from_repo} --git-modified-only "
"--update-current-year` and stage the results in git to "
f"fix {n_fixable} of these errors.\n"
)
retVal = 1
return retVal
if __name__ == "__main__":
sys.exit(checkCopyright_main())
| 0 |
rapidsai_public_repos/wholegraph/scripts
|
rapidsai_public_repos/wholegraph/scripts/checks/run-clang-tidy.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import argparse
import json
import multiprocessing as mp
import os
import re
import shutil
import subprocess
import clangutils
from gitutils import modifiedFiles
GPU_ARCH_REGEX = re.compile(r"sm_(\d+)")
SPACES = re.compile(r"\s+")
# depfiles are tricky. We first replace '\[any space char]' with \0 which
# cannot appear in any valid file-name. Then split, and replace \0 with space
DEP_FILE_ESCAPE = re.compile(r"\\\s")
DEP_FILE_REPL = "\0"
DEP_FILE_ESCAPE_ANY = re.compile(r"\\(.)")
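# For illustration (hypothetical depfile line): "foo.o: src/my\ file.cu include/a.h"
# first becomes "foo.o: src/my\0file.cu include/a.h", then splits into three tokens,
# and the \0 placeholder is turned back into a regular space when each path is recovered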
XCOMPILER_FLAG = re.compile(r"-((Xcompiler)|(-compiler-options))=?")
XPTXAS_FLAG = re.compile(r"-((Xptxas)|(-ptxas-options))=?")
# any options that may have equal signs in nvcc but not in clang
# add those options here if you find any
OPTIONS_NO_EQUAL_SIGN = ['-isystem']
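# For illustration (hypothetical flag): nvcc may emit "-isystem=/opt/include", which is
# later split into the clang-friendly pair "-isystem" "/opt/include" (see get_tidy_args)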
SEPARATOR = "-" * 8
END_SEPARATOR = "*" * 64
FP_INSTANCE = re.compile(r"_(bf|fp)(8|16|32|64|128)")
I_INSTANCE = re.compile(r"_(s|u)(8|16|32|64|128)")
EIDX_INSTANCE = re.compile(r"_eidx")
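# For illustration (hypothetical file names): with -check_once, "gather_fp32.cu" and
# "gather_fp16.cu" both reduce to "gather.cu" (see get_no_instance_path), so only one
# of these instantiation files is checked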
def parse_args():
argparser = argparse.ArgumentParser("Runs clang-tidy on a cmake project")
argparser.add_argument(
"cdb", nargs='+',
help="Path to cmake-generated compilation database(s)")
argparser.add_argument(
"-exe", type=str, default="clang-tidy", help="Path to clang-tidy exe")
argparser.add_argument(
"-ignore", type=str, default=None,
help="Regex used to ignore files from checking")
argparser.add_argument(
"-select", type=str, default=None,
help="Regex used to select files for checking")
argparser.add_argument(
"-j", type=int, default=-1,
help="Number of parallel jobs to launch. "
"If this is <= 0, it is set to CPU core count")
argparser.add_argument(
"-root", type=str, default=None,
help="Root path to cmake build files, which can be separate from "
"repo root. It must be a common root for all compilation databases. "
"By default, the working directory of this script "
"(which must be the git repo root).")
argparser.add_argument(
"-git_modified_only", action="store_true",
help="If set, only check files that were modified in the current PR "
"(CI environment) or uncommited files (non-CI environment).")
argparser.add_argument(
"-check_once", action="store_true",
help="If set, we attempt to check instantiations at most once. "
"Useful for local development, since it is much faster and only "
"warnings in specific template instantiations will not be caught.")
argparser.add_argument("-launcher", type=str, default=None,
help="Compiler launcher such as ccache or sccache. By default, none.")
argparser.add_argument("-header", nargs="*", required=False,
help="Name(s) - not entire paths - of headers to include. "
"They must be part of the default sources except a select list "
"(see clangutils script for details and config).")
argparser.add_argument("-warn", choices=["none", "all", "same"],
default="all",
help="If set to 'none', do not report any compiler warnings. "
"If set to 'all' (current default), report all compiler warnings. "
"If set to 'same', report as in command from compilation database.")
argparser.add_argument(
"-v", action="store_true", help="Verbose output.")
args = argparser.parse_args()
if args.j <= 0:
args.j = mp.cpu_count()
args.ignore_compiled = re.compile(args.ignore) if args.ignore else None
args.select_compiled = re.compile(args.select) if args.select else None
args.compiler = shutil.which(clangutils.CLANG_COMPILER)
if args.compiler is None:
raise Exception(
"Unable to find clang compiler %s" % clangutils.CLANG_COMPILER
)
args.exe = shutil.which(args.exe)
if args.exe is None:
raise Exception("Unable to find clang-tidy %s" % args.exe)
# we check clang's version so that it will work in CI
clangutils.check_clang_version(args.compiler)
for cdb in args.cdb:
if not os.path.isfile(cdb):
raise Exception("Compilation database '%s' missing" % cdb)
# by default, CDB root is also the repo root (current working directory)
if args.root is None:
args.root = os.getcwd()
args.root = os.path.realpath(os.path.expanduser(args.root))
if args.header:
args.headers = set(args.header)
else:
args.headers = set(
os.path.basename(h) for h in clangutils.list_all_headers()
)
# get modified files if necessary
args.modified_files = dict()
if args.git_modified_only:
args.modified_files = {
os.path.realpath(
os.path.expanduser(os.path.join(args.root, f))
): True
for f in modifiedFiles()
}
elif args.check_once:
args.modified_files = {
f: True for f in clangutils.list_all_src_files()
}
if args.v:
print("Using {} ({} processes)".format(
args.compiler, args.j))
print("Using git modified files only: {}".format(
args.git_modified_only))
print("ROOT dir: {}\nLauncher: {}".format(args.root, args.launcher))
print("Ignore regex: {}\nSelect regex: {}".format(
args.ignore_compiled, args.select_compiled))
return args
def get_no_instance_path(f):
f_dir, f_name = os.path.split(f)
f_name_base = FP_INSTANCE.sub("", f_name)
f_name_base = I_INSTANCE.sub("", f_name_base)
f_name_base = EIDX_INSTANCE.sub("", f_name_base)
return os.path.join(f_dir, f_name_base)
def update_include_search_dirs(command, root):
# first we extract (and remove) paths from CPATH, C_INCLUDE_PATH and
# CPLUS_INCLUDE_PATH
env = os.environ.copy()
dirs = []
for var in ["CPATH", "C_INCLUDE_PATH", "CPLUS_INCLUDE_PATH"]:
p = env.pop(var, "")
dirs.extend(d for d in p.split(os.pathsep) if d and os.path.isdir(d))
to_remove = []
for i, flag in enumerate(command):
if flag.startswith("-I") and os.path.isdir(flag[2:]):
to_remove.append(i)
dirs.append(flag[2:])
for i in sorted(to_remove, reverse=True):
del command[i]
default_dirs = clangutils.get_default_dirs_with_sources(
[root, os.getcwd()]
)
new_dir_cmds = []
for d in dirs:
if any(os.path.commonpath([dd, d]) == dd for dd in default_dirs):
new_dir_cmds.append("-I" + d)
else:
new_dir_cmds.extend(["-isystem", d])
command[1:1] = new_dir_cmds
return env
def get_clang_arch_flag(command):
# clang only accepts a single architecture, and does not distinguish
# between virtual and physical architecture.
# So we just list all architecture numbers, then get the minimum value
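    # For illustration (hypothetical flags): "-gencode arch=compute_70,code=sm_70
    # -gencode arch=compute_80,code=sm_80" yields "--cuda-gpu-arch=sm_70"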
archs = []
for loc in range(len(command)):
if (command[loc] != "-gencode" and command[loc] != "--generate-code"
and not command[loc].startswith("--generate-code=")):
continue
if command[loc].startswith("--generate-code="):
arch_flag = command[loc][len("--generate-code="):]
else:
arch_flag = command[loc + 1]
match = GPU_ARCH_REGEX.search(arch_flag)
if match is not None:
archs.append(int(match.group(1)))
return "--cuda-gpu-arch=sm_%d" % min(archs)
def get_index(arr, item_options):
return set(i for i, s in enumerate(arr) for item in item_options
if s == item)
def remove_items(arr, item_options):
for i in sorted(get_index(arr, item_options), reverse=True):
del arr[i]
def remove_items_plus_one(arr, item_options):
for i in sorted(get_index(arr, item_options), reverse=True):
if i < len(arr) - 1:
del arr[i + 1]
del arr[i]
idx = set(i for i, s in enumerate(arr) for item in item_options
if s.startswith(item + "="))
for i in sorted(idx, reverse=True):
del arr[i]
def add_cuda_path(command, nvcc):
    # Check if we are using conda compilers. If yes, we need to use the cuda-gdb
    # path, as the nvcc path refers to the fake nvcc shell script in the conda env.
    # Modified in WholeGraph: always use the cuda-gdb path.
# if "_build_env" in nvcc:
# nvcc_path = shutil.which("cuda-gdb")
# else:
# nvcc_path = shutil.which(nvcc)
nvcc_path = shutil.which("cuda-gdb")
if not nvcc_path:
raise Exception("Command %s has invalid compiler %s" % (command, nvcc))
cuda_root = os.path.dirname(os.path.dirname(nvcc_path))
command.append('--cuda-path=%s' % cuda_root)
def get_tidy_args(cmd, gcc_root, launcher, compiler, warn, root):
command, f_path = cmd["command"], cmd["file"]
is_cuda = f_path.endswith(".cu")
command = SPACES.split(command)
# get and replace original compiler
cc_orig = command[0]
command[0] = compiler
# either add -Werror, remove -Werror flags or keep command as-is
if warn == "all":
# treat all compiler warnings as errors
        # however, we never want to warn about the CUDA version or the command
        # line itself, so that we can support newer CUDA features
command[1:1] = [
"-Werror",
"-Wno-error=unknown-cuda-version",
"-Wno-error=unused-command-line-argument"
]
elif warn == "none":
# remove any -Werror flags
for i, x in reversed(list(enumerate(command))):
if x.startswith("-Werror"):
del command[i]
# in any case, move -I to -isystem if the paths are not below our default
# dirs to avoid reporting warnings from other libraries
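    # For illustration (hypothetical paths): "-I<repo>/cpp/include" would stay an -I flag,
    # while "-I/opt/conda/include" would be demoted to "-isystem /opt/conda/include"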
env = update_include_search_dirs(command, root)
if launcher:
command.insert(0, launcher)
# remove compilation and output targets from the original command
remove_items_plus_one(command, ["--compile", "-c"])
remove_items_plus_one(command, ["--output-file", "-o"])
if is_cuda:
# replace nvcc's "-gencode ..." with clang's "--cuda-gpu-arch ..."
# also, clang only supports single arch, so we use the lowest one
command.append(get_clang_arch_flag(command))
# provide proper cuda path to clang
add_cuda_path(command, cc_orig)
# remove all kinds of nvcc flags clang doesn't know about
remove_items_plus_one(command, [
"--generate-code",
"-gencode",
"--x",
"-x",
"--compiler-bindir",
"-ccbin",
"--diag_suppress",
"-diag-suppress",
"--default-stream",
"-default-stream",
"--Werror",
])
remove_items(command, [
"-extended-lambda",
"--extended-lambda",
"-expt-extended-lambda",
"--expt-extended-lambda",
"-expt-relaxed-constexpr",
"--expt-relaxed-constexpr",
"--device-debug",
"-G",
"--generate-line-info",
"-lineinfo",
])
# "-x cuda" is the right usage in clang
command.extend(["-x", "cuda"])
# we remove -Xcompiler flags: here we basically have to hope for the
# best that clang++ will accept any flags which nvcc passed to gcc
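        # For illustration (hypothetical flags): "-Xcompiler=-Wall,-Wno-unknown-pragmas"
        # becomes the two separate flags "-Wall" and "-Wno-unknown-pragmas"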
for i, c in reversed(list(enumerate(command))):
new_c = XCOMPILER_FLAG.sub('', c)
if new_c == c:
continue
command[i:i + 1] = new_c.split(',')
# we also change -Xptxas to -Xcuda-ptxas, always adding space here
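        # For illustration (hypothetical flag): "-Xptxas=-v" is intended to become
        # the pair "-Xcuda-ptxas" "-v"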
for i, c in reversed(list(enumerate(command))):
if XPTXAS_FLAG.search(c):
if not c.endswith("=") and i < len(command) - 1:
del command[i + 1]
command[i] = '-Xcuda-ptxas'
command.insert(i + 1, XPTXAS_FLAG.sub('', c))
# several options like isystem don't expect `=`
for opt in OPTIONS_NO_EQUAL_SIGN:
opt_eq = opt + '='
# make sure that we iterate from back to front here for insert
for i, c in reversed(list(enumerate(command))):
if not c.startswith(opt_eq):
continue
x = c.split('=')
# we only care about the first `=`
command[i] = x[0]
command.insert(i + 1, '='.join(x[1:]))
# use extensible whole program, to avoid ptx resolution/linking
command.extend(["-Xcuda-ptxas", "-ewp"])
# for libcudacxx, we need to allow variadic functions
command.extend(["-Xclang", "-fcuda-allow-variadic-functions"])
# add some additional CUDA intrinsics
cuda_intrinsics_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"__clang_cuda_additional_intrinsics.h")
command.extend(["-include", cuda_intrinsics_file])
# remove flags for NVCC/GCC that clang doesn't know about
remove_items(command, [
"--forward-unknown-to-host-compiler",
"-forward-unknown-to-host-compiler",
"-fvar-tracking-assignments"
])
# try to figure out which GCC CMAKE used, and tell clang all about it
command.append("--gcc-toolchain=%s" % gcc_root)
return command, is_cuda, env
def check_output_for_errors(output):
# there shouldn't really be any allowed errors
warnings_found = 0
errors = []
for line in output.splitlines():
if line.find("error:") >= 0:
errors.append(line)
if line.find("warning:") >= 0:
warnings_found += 1
return warnings_found, errors
def run_clang_tidy_command(tidy_cmd, cwd, env):
cmd = " ".join(tidy_cmd)
result = subprocess.run(cmd, check=False, shell=True, cwd=cwd, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
result.stdout = result.stdout.decode("utf-8").strip()
out = "CMD: " + cmd + "\n"
out += "EXIT-CODE: %d\n" % result.returncode
n_warnings, errors = check_output_for_errors(result.stdout)
status = n_warnings == 0 and not errors
out += result.stdout
return status, out, errors
def get_dependencies(clang_cmd, f_path, cwd):
# -MM prints user dependency files (not system) and stops after
# pre-processor. We also set the name of main file output, just to make
# sure it will be included in the dependencies itself
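    # For illustration (hypothetical output): "src/foo.cu: src/foo.cu include/a.h include/b.h"
    # would be parsed into the set of absolute paths of those three files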
dep_cmd = " ".join(clang_cmd + ["-MM", "-MT" + f_path, f_path])
# we cannot capture warnings/errors here since parsing output is difficult
# if any error happens, we have to rely on return code and will just
# re-run things
result = subprocess.run(dep_cmd, check=False, shell=True, cwd=cwd,
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
if result.returncode != 0:
subprocess.check_call(dep_cmd, shell=True, cwd=cwd)
# make sure that we raise here no matter what
e = ("Got EXIT-CODE %d while trying to get dependencies of file "
"%s\nCMD: %s\nCWD: %s")
raise Exception(e % (result.returncode, f_path, clang_cmd, cwd))
# first replace all escaped spaces with special character
result = DEP_FILE_ESCAPE.sub(DEP_FILE_REPL, result.stdout.decode("utf-8"))
deps = set()
# simply split on spaces
for dep_name in result.split():
# replace special character with regular space again
dep = dep_name.replace(DEP_FILE_REPL, " ")
# strip and check if anything is left
dep = dep.strip()
if not dep:
continue
# remove colon if we have one at the end of the file-name
# also un-escape any other characters
dep = dep.rstrip(":")
dep = DEP_FILE_ESCAPE_ANY.sub(r"\1", dep)
dep = os.path.realpath(os.path.expanduser(os.path.join(cwd, dep)))
deps.add(dep)
return deps
class LockContext(object):
def __init__(self, lock=None) -> None:
self._lock = lock
def __enter__(self):
if self._lock:
self._lock.acquire()
return self
def __exit__(self, _, __, ___):
if self._lock:
self._lock.release()
return False # we don't handle exceptions
def print_result(passed, stdout, f_name, errors, verbose):
if any(errors):
raise Exception(
"File %s: got %d errors:\n%s" % (f_name, len(errors), stdout))
status_str = "PASSED" if passed else "FAILED"
print("%s File:%s %s %s" % (SEPARATOR, f_name, status_str, SEPARATOR))
if (verbose or not passed) and stdout:
print(stdout)
print("%s\n" % END_SEPARATOR)
return stdout.splitlines() if stdout else []
def run_clang_tidy(lock, modified_files, args, build_dir, db_cmd):
f_path = db_cmd["file"]
gcc_root = clangutils.get_gcc_root(args, build_dir)
cmd, is_cuda, env = get_tidy_args(
db_cmd, gcc_root, args.launcher, args.compiler, args.warn, args.root
)
rel_path = os.path.relpath(f_path, start=args.root)
# first check if we should skip this file entirely
if modified_files:
deps = get_dependencies(cmd, f_path, build_dir)
mod = set(f for f, valid in modified_files.items() if valid)
dep_mod = deps.intersection(mod)
if not dep_mod:
print("%s File:%s %s %s" % (
SEPARATOR, rel_path, "SKIPPED", SEPARATOR)
)
return True, []
# remove the intersection + files with same name except fp32/s32 etc.
if args.check_once:
f_path_no_inst = get_no_instance_path(f_path)
for f in mod:
if f_path_no_inst == get_no_instance_path(f):
dep_mod.add(f)
modified_files.update({f: False for f in dep_mod})
sub_dirs = "|".join(clangutils.HEADER_SUB_DIRS)
sep = re.escape(os.sep)
sub_dirs = sep + "[^/]+" + sep + "(" + sub_dirs + ")" + sep + ".*" + sep
cpp_modernize = "--extra-arg=-std=c++17"
header_filter = "--header-filter='.*%s[^%s]+[.](cuh|h|hpp)$'" % (
os.path.basename(args.root) + sub_dirs, sep
)
filter_headers = [{"name": h} for h in args.headers]
line_filter = "--line-filter='%s'" % json.dumps(filter_headers)
tidy_cmd = [args.exe, cpp_modernize, header_filter, line_filter, f_path, "--"]
tidy_cmd.extend(cmd)
status = True
out = ""
if is_cuda:
tidy_cmd.append("--cuda-device-only")
tidy_cmd.append(f_path)
ret, out1, errors1 = run_clang_tidy_command(tidy_cmd, build_dir, env)
out += out1
out += "\n%s\n" % SEPARATOR
status = status and ret
tidy_cmd[-2] = "--cuda-host-only"
ret, out1, errors2 = run_clang_tidy_command(tidy_cmd, build_dir, env)
status = status and ret
out += out1
errors = errors1 + errors2
else:
tidy_cmd.append(f_path)
ret, out1, errors = run_clang_tidy_command(tidy_cmd, build_dir, env)
status = status and ret
out += out1
# we immediately print the result since this is more interactive for user
with lock:
lines = print_result(status, out, rel_path, errors, args.v)
return status, lines
def parse_results(results):
return all(r[0] for r in results), [s for r in results for s in r[1]]
# mostly used for debugging purposes
def run_sequential(args, build_dir, all_cmds):
lock = LockContext()
results = []
# actual tidy checker
for cmd in all_cmds:
results.append(
run_clang_tidy(lock, args.modified_files, args, build_dir, cmd)
)
return parse_results(results)
def run_parallel(args, build_dir, all_cmds):
results = []
with mp.Manager() as manager:
lock = manager.Lock()
if args.check_once:
modified_files = manager.dict(args.modified_files)
else:
modified_files = args.modified_files
with manager.Pool(args.j) as pool:
for cmd in all_cmds:
results.append(
pool.apply_async(
run_clang_tidy, args=(
lock, modified_files, args, build_dir, cmd
)
)
)
results_final = [r.get() for r in results]
return parse_results(results_final)
def list_all_cmds(args, cdb):
with open(cdb, "r") as fp:
all_cmds = json.load(fp)
to_remove = []
# ensure that we use only the real paths, filter and get the clang commands
for i, cmd in enumerate(all_cmds):
cmd["file"] = os.path.realpath(os.path.expanduser(cmd["file"]))
if os.path.commonpath([args.root, cmd["file"]]) != args.root:
# this may happen with dependencies that we build into our
# libraries/executables like nanobind
if args.v:
print(
"%s File:%s ignored (not in root %s) %s" % (
SEPARATOR, cmd["file"], args.root, SEPARATOR)
)
to_remove.append(i)
if args.ignore_compiled is not None and \
re.search(args.ignore_compiled, cmd["file"]) is not None:
to_remove.append(i)
if args.select_compiled is not None and \
re.search(args.select_compiled, cmd["file"]) is None:
to_remove.append(i)
for i in sorted(to_remove, reverse=True):
del all_cmds[i]
return all_cmds
def main():
args = parse_args()
if args.git_modified_only and not args.modified_files:
print("No modified files detected. Nothing to do.")
return
    # Attempt to make sure that this script is always run from the root of the repo
    if not os.path.exists(".git"):
        raise Exception("This needs to always be run from the root of the repo")
for cdb in args.cdb:
build_dir = os.path.dirname(cdb)
all_cmds = list_all_cmds(args, cdb)
print("Checking %d files/compilation commands" % len(all_cmds))
if args.j == 1:
status, lines = run_sequential(args, build_dir, all_cmds)
else:
status, lines = run_parallel(args, build_dir, all_cmds)
if not status:
# first get a list of all checks that were run
ret = subprocess.check_output(
args.exe + " --list-checks", shell=True
)
ret = ret.decode("utf-8")
checks = [line.strip() for line in ret.splitlines()
if line.startswith(' ' * 4)]
max_check_len = max(len(c) for c in checks)
check_counts = dict()
content = os.linesep.join(lines)
for check in checks:
check_counts[check] = content.count(check)
sorted_counts = sorted(
check_counts.items(), key=lambda x: x[1], reverse=True)
print("Failed {} check(s) in total. Counts as per below:".format(
sum(1 for _, count in sorted_counts if count > 0)))
for check, count in sorted_counts:
if count <= 0:
break
n_space = max_check_len - len(check) + 4
print("{}:{}{}".format(check, ' ' * n_space, count))
raise Exception("clang-tidy failed! Refer to the errors above.")
if __name__ == "__main__":
main()
| 0 |
rapidsai_public_repos/wholegraph
|
rapidsai_public_repos/wholegraph/cpp/CMakeLists.txt
|
#=============================================================================
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
set(RAPIDS_VERSION "23.12")
set(WHOLEGRAPH_VERSION "${RAPIDS_VERSION}.00")
cmake_minimum_required(VERSION 3.23.1 FATAL_ERROR)
include(FetchContent)
FetchContent_Declare(
rapids-cmake
GIT_REPOSITORY https://github.com/rapidsai/rapids-cmake.git
GIT_TAG origin/branch-${RAPIDS_VERSION}
)
FetchContent_MakeAvailable(rapids-cmake)
include(rapids-cmake)
include(rapids-cpm)
include(rapids-cuda)
include(rapids-export)
include(rapids-find)
if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
set(CMAKE_CUDA_ARCHITECTURES 70-real 80-real 86)
endif ()
rapids_cuda_init_architectures(WHOLEGRAPH)
project(WHOLEGRAPH VERSION ${WHOLEGRAPH_VERSION} LANGUAGES CXX CUDA)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_C_USE_RESPONSE_FILE_FOR_INCLUDES FALSE)
set(CMAKE_CXX_USE_RESPONSE_FILE_FOR_INCLUDES FALSE)
set(CMAKE_CUDA_USE_RESPONSE_FILE_FOR_INCLUDES FALSE)
# Write the version header
rapids_cmake_write_version_file(include/wholegraph/version_config.hpp)
##############################################################################
# - User Options ------------------------------------------------------------
option(BUILD_SHARED_LIBS "Build libwholegraph shared libraries" ON)
option(CMAKE_CUDA_LINEINFO "Enable the -lineinfo option for nvcc (useful for cuda-memcheck / profiler)" OFF)
option(BUILD_TESTS "Configure CMake to build tests" ON)
option(CUDA_STATIC_RUNTIME "Statically link the CUDA toolkit runtime and libraries" OFF)
option(BUILD_BENCHMARKS "Configure CMake to build benchmarks" ON)
##############################################################################
# - Set options based on user defined one -----------------------------------
set(_ctk_static_suffix "")
if(CUDA_STATIC_RUNTIME)
set(_ctk_static_suffix "_static")
endif()
##############################################################################
# - Base rapids options ------------------------------------------------------
# default build type
#rapids_cmake_build_type(Release)
rapids_cmake_build_type(RelWithDebInfo)
# CUDA runtime
rapids_cuda_init_runtime(USE_STATIC ${CUDA_STATIC_RUNTIME})
message("CMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX}")
if(DETECT_CONDA_ENV)
rapids_cmake_support_conda_env( conda_env MODIFY_PREFIX_PATH )
if (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT AND DEFINED ENV{CONDA_PREFIX})
message(STATUS "No CMAKE_INSTALL_PREFIX argument detected, setting to: $ENV{CONDA_PREFIX}")
set(CMAKE_INSTALL_PREFIX "$ENV{CONDA_PREFIX}")
endif()
endif()
################################################################################
# - compiler options -----------------------------------------------------------
# * find CUDAToolkit package
# * determine GPU architectures
# * enable the CMake CUDA language
# * set other CUDA compilation flags
rapids_find_package(CUDAToolkit REQUIRED
BUILD_EXPORT_SET wholegraph-exports
INSTALL_EXPORT_SET wholegraph-exports
)
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${CMAKE_COMMAND} -E time")
set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK "${CMAKE_COMMAND} -E time")
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
set(WHOLEGRAPH_CXX_FLAGS "")
set(WHOLEGRAPH_CUDA_FLAGS "")
if(CMAKE_COMPILER_IS_GNUCXX)
list(APPEND WHOLEGRAPH_CXX_FLAGS -Werror -Wno-error=deprecated-declarations)
endif(CMAKE_COMPILER_IS_GNUCXX)
message("-- Building for GPU_ARCHS = ${CMAKE_CUDA_ARCHITECTURES}")
#list(APPEND WHOLEGRAPH_CUDA_FLAGS --expt-extended-lambda --expt-relaxed-constexpr)
#list(APPEND WHOLEGRAPH_CUDA_FLAGS -Werror=cross-execution-space-call -Wno-deprecated-declarations -Xptxas=--disable-warnings)
#list(APPEND WHOLEGRAPH_CUDA_FLAGS -Xcompiler=-Wall,-Wno-error=sign-compare,-Wno-error=unused-but-set-variable)
#list(APPEND WHOLEGRAPH_CUDA_FLAGS -Xfatbin=-compress-all)
# Option to enable line info in CUDA device compilation to allow introspection when profiling /
# memchecking
if (CMAKE_CUDA_LINEINFO)
list(APPEND WHOLEGRAPH_CUDA_FLAGS -lineinfo)
endif()
# Debug options
if(CMAKE_BUILD_TYPE MATCHES Debug)
message(STATUS "Building with debugging flags")
list(APPEND WHOLEGRAPH_CUDA_FLAGS -G -Xcompiler=-rdynamic)
endif()
################################################################################
# - find CPM based dependencies -----------------------------------------------
rapids_cpm_init()
include(./cmake/thirdparty/get_raft.cmake)
include(./cmake/thirdparty/get_nccl.cmake)
file(GLOB WHOLEGRAPH_SOURCES
"src/*.cpp"
"src/wholememory/*.cpp"
"src/wholememory_ops/*.cpp"
"src/wholememory_ops/*.cu"
"src/wholememory_ops/functions/*.cu"
"src/wholegraph_ops/*.cpp"
"src/wholegraph_ops/*.cu"
"src/graph_ops/*.cu"
"src/graph_ops/*.cpp")
add_library(wholegraph)
add_library(wholegraph::wholegraph ALIAS wholegraph)
target_sources(wholegraph PRIVATE ${WHOLEGRAPH_SOURCES})
set_target_properties(wholegraph
PROPERTIES BUILD_RPATH "\$ORIGIN"
INSTALL_RPATH "\$ORIGIN"
# set target compile options
CXX_STANDARD 17
CXX_STANDARD_REQUIRED ON
CUDA_STANDARD 17
CUDA_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON
INTERFACE_POSITION_INDEPENDENT_CODE ON
)
target_compile_options(wholegraph
PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:${WHOLEGRAPH_CXX_FLAGS}>"
"$<$<COMPILE_LANGUAGE:CUDA>:${WHOLEGRAPH_CUDA_FLAGS}>"
)
#target_link_libraries(wholegraph PRIVATE -static-libgcc -static-libstdc++)
################################################################################
# - include paths --------------------------------------------------------------
set(WHOLEGRAPH_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include CACHE STRING
"Path to wholegraph include directory")
target_include_directories(wholegraph
PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}/src"
PUBLIC
"$<BUILD_INTERFACE:${NCCL_INCLUDE_DIR}>"
"$<BUILD_INTERFACE:${WHOLEGRAPH_INCLUDE_DIR}>"
"$<INSTALL_INTERFACE:include>"
)
################################################################################
# - link libraries -------------------------------------------------------------
target_link_libraries(wholegraph
PUBLIC
CUDA::cuda_driver
CUDA::cudart${_ctk_static_suffix}
raft::raft
PRIVATE
NCCL::NCCL
)
# optionally build tests
if(BUILD_TESTS AND CMAKE_PROJECT_NAME STREQUAL PROJECT_NAME)
include(./cmake/thirdparty/get_gtest.cmake)
include(CTest) # calls enable_testing()
add_subdirectory(tests)
endif()
# optionally build benchmark
if (BUILD_BENCHMARKS AND CMAKE_PROJECT_NAME STREQUAL PROJECT_NAME)
add_subdirectory(bench)
endif()
##############################################################################
# - code checker -------------------------------------------------------------
include(./cmake/CodeChecker.cmake)
set(CLANG_FORMAT_EXE "clang-format")
set(CLANG_TIDY_EXE "clang-tidy")
set(FLAKE8_EXE "flake8")
if(CLANG_TOOL_PATH)
set(CLANG_FORMAT_EXE ${CLANG_TOOL_PATH}/clang-format)
set(CLANG_TIDY_EXE ${CLANG_TOOL_PATH}/clang-tidy)
endif()
add_code_checks(
CWD ${PROJECT_SOURCE_DIR}
CLANG_FORMAT ${CLANG_FORMAT_EXE}
CLANG_TIDY ${CLANG_TIDY_EXE}
FLAKE8 ${FLAKE8_EXE})
##############################################################################
# - install targets ----------------------------------------------------------
set(doc_string [=[
]=])
set(code_string
[=[
]=])
set(WHOLEGRAPH_PUBLIC_HEADERS
wholememory/device_reference.cuh
wholememory/embedding.h
wholememory/env_func_ptrs.h
wholememory/global_reference.h
wholememory/tensor_description.h
wholememory/wholememory.h
wholememory/wholememory_tensor.h
wholememory/wholememory_op.h
wholememory/wholegraph_op.h
wholememory/graph_op.h
)
foreach(file IN LISTS WHOLEGRAPH_PUBLIC_HEADERS)
cmake_path(GET file PARENT_PATH file_out_)
string(JOIN "/" out_path "include" ${file_out_})
install(FILES "${WHOLEGRAPH_INCLUDE_DIR}/${file}"
DESTINATION "${out_path}")
#install(FILES "${WHOLEGRAPH_INCLUDE_DIR}/${file}"
# DESTINATION "${file}")
endforeach()
rapids_cmake_install_lib_dir( lib_dir )
set(PROGRAM_PERMISSIONS_DEFAULT
OWNER_WRITE OWNER_READ OWNER_EXECUTE
GROUP_READ GROUP_EXECUTE
WORLD_READ WORLD_EXECUTE)
install(
TARGETS wholegraph
DESTINATION ${lib_dir}
PERMISSIONS ${PROGRAM_PERMISSIONS_DEFAULT}
EXPORT wholegraph-exports)
rapids_export(
INSTALL wholegraph
EXPORT_SET wholegraph-exports
GLOBAL_TARGETS wholegraph
NAMESPACE wholegraph::
DOCUMENTATION doc_string
FINAL_CODE_BLOCK code_string)
# build export targets
rapids_export(
BUILD wholegraph
EXPORT_SET wholegraph-exports
GLOBAL_TARGETS wholegraph
NAMESPACE wholegraph::
DOCUMENTATION doc_string
FINAL_CODE_BLOCK code_string)
################################################################################
# - make documentation ---------------------------------------------------------
# requires doxygen and graphviz to be installed
# from build directory, run make doxygen
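# for illustration (hypothetical paths), e.g.: mkdir -p build && cd build && cmake .. && make doxygen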
##############################################################################
# - doxygen docs -------------------------------------------------------------
find_package(Doxygen 1.8.11)
if(Doxygen_FOUND)
add_custom_command(OUTPUT WHOLEGRAPH_DOXYGEN
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMAND doxygen Doxyfile
VERBATIM)
add_custom_target(doxygen DEPENDS WHOLEGRAPH_DOXYGEN)
endif()
| 0 |
rapidsai_public_repos/wholegraph
|
rapidsai_public_repos/wholegraph/cpp/.clang-tidy
|
---
# Refer to the following link for the explanation of each parameter:
# https://releases.llvm.org/11.0.0/tools/clang/tools/extra/docs/clang-tidy/checks/list.html
#
# Checks we don't consider because they are irrelevant to this project:
# abseil-*, android-*, boost-*, clang-analyzer-*, darwin-*, fuchsia-*, hicpp-*,
# linuxkernel-*, mpi-*, objc-*, openmp-*, zircon-*
#
# Add all bugprone checks, except:
# bugprone-easily-swappable-parameters:
# this seems unavailable in clang-tidy 11 but in any case, we have many
# internal and external APIs that rely on order of arguments,
# so we remove this by default.
# bugprone-misplaced-widening-cast:
# there are too many instances where we want to shorten things like
# int64_t(int32_t * int32_t) for performance if we know that int32_t * int32_t
# is small enough through asserts.
# It may be possible to introduce this rule at a later time.
# bugprone-narrowing-conversions:
# there is no way to ignore this for blockIdx/threadIdx which should be
# converted to int for optimization purposes by default.
# If at some point, we can specify special variables to ignore (such as
# blockIdx/threadIdx), or we can run it only on host code, we can add it back.
# bugprone-unhandled-self-assignment:
# removed as alias of cert-oop54-cpp (which is included and slightly more
# restrictive since WarnOnlyIfThisHasSuspiciousField=0)
#
# Add all cert checks, except:
# cert-dcl03-c:
# removed as alias of misc-static-assert (which is included)
# cert-dcl16-c:
# removed as alias of readability-uppercase-literal-suffix (which is included
# and more restrictive since it checks for more suffixes)
# cert-dcl37-c:
# removed as alias of bugprone-reserved-identifier (which is included)
# cert-dcl51-cpp:
# removed as alias of bugprone-reserved-identifier (which is included)
# cert-dcl54-cpp:
# removed as alias of misc-new-delete-overloads (which is included)
# cert-dcl59-cpp:
# removed as alias of google-build-namespaces (which is included)
# cert-err09-cpp & cert-err61-cpp:
# removed as alias of misc-throw-by-value-catch-by-reference (which is included)
# cert-fio38-c:
# removed as alias of misc-non-copyable-objects (which is included)
# cert-msc32-c:
# removed as alias of cert-msc51-cpp (which is included)
# cert-oop11-cpp:
# removed as alias of performance-move-constructor-init (which is included)
# cert-str34-c:
# removed as alias of bugprone-signed-char-misuse (which is included and
# slightly more restrictive since DiagnoseSignedUnsignedCharComparisons=1)
#
# Add all cppcoreguidelines checks, except:
# cppcoreguidelines-avoid-c-arrays:
# in general, I'd discourage use of C-style arrays, but there are simply too
# many reasonable uses in device code, for registers/shared memory, and we
# cannot easily parameterize this rule / only run it for host code.
# cppcoreguidelines-avoid-magic-numbers:
# removed as alias of readability-magic-numbers (which is included)
# cppcoreguidelines-narrowing-conversions:
# removed as alias of bugprone-narrowing-conversions (which is excluded)
# cppcoreguidelines-init-variables:
# in device code, I think the default should be not to initialize variables
# when it makes sense, since this can enable compiler optimizations relying
# on undefined behavior (similar to using signed int vs unsigned int).
# For now, this should be excluded.
# cppcoreguidelines-non-private-member-variables-in-classes:
# this warns about all protected members, which I do think have reasonable
# uses, so this is excluded
# cppcoreguidelines-pro-bounds-array-to-pointer-decay:
# this would only make sense if on host only, C-style arrays and pointer
# arithmetic make device code more readable IMO
# cppcoreguidelines-pro-bounds-constant-array-index:
# excluded for same reason as cppcoreguidelines-pro-bounds-array-to-pointer-decay
# cppcoreguidelines-pro-bounds-pointer-arithmetic:
# excluded for same reason as cppcoreguidelines-pro-bounds-array-to-pointer-decay
# cppcoreguidelines-pro-type-reinterpret-cast:
# there are many reasonable uses of reinterpret cast, as long as it's explicit
# cppcoreguidelines-pro-type-vararg:
# this may be included once we eliminate all macros using vararg, but for now
# we exclude it
#
# Add all google checks, except:
# google-readability-braces-around-statements:
# this is handled by clang-format, leave it to clang-format
# google-runtime-references:
# this is obsolete as it has been removed from Google's style guide in 2020
#
# Add all llvm checks, except:
# llvm-else-after-return:
# removed as alias of readability-else-after-return (which is included)
# llvm-header-guard:
# we use #pragma once instead
# llvm-qualified-auto:
# removed as alias of readability-qualified-auto (which is included)
# llvm-namespace-comment:
# removed as alias of google-readability-namespace-comments (which is included)
#
# Add all misc checks, except:
# misc-non-private-member-variables-in-classes:
# removed as alias of cppcoreguidelines-non-private-member-variables-in-classes
# (which is excluded)
#
# Add all modernize checks, except:
# modernize-avoid-c-arrays:
# removed as alias of cppcoreguidelines-avoid-c-arrays (which is excluded)
# modernize-use-trailing-return-type:
# IMO, this only adds characters, and it doesn't make code look better.
# It also isn't part of Google's style guide, so we exclude this and
# encourage the C-style declaration of functions.
#
# Add all performance checks
#
# Add all readability checks, except:
# readability-braces-around-statements:
# removed as alias of google-readability-braces-around-statements (which is excluded)
# readability-function-cognitive-complexity:
# readability-function-size already does something similar
# readability-identifier-length:
# this should be up to programmers, IMO
# readability-isolate-declaration:
# this should be up to programmers, IMO
# readability-static-accessed-through-instance:
# incompatible with CUDA, since blockIdx/threadIdx are static and we cannot
# easily parameterize this rule / only run it for host code
#
# The following checks are removed because they don't seem relevant
# to this particular project and only delay CI and development in general:
# bugprone-bad-signal-to-kill-thread
# bugprone-dynamic-static-initializers
# bugprone-no-escape
# bugprone-posix-return
# bugprone-spuriously-wake-up-functions
# cert-env33-c
# cert-err52-cpp
# cppcoreguidelines-owning-memory
# google-objc-*
# TODO(mjoux) re-enable the next 2 if we move to gtest
# google-readability-avoid-underscore-in-googletest-name
# google-upgrade-googletest-case
# llvm-prefer-isa-or-dyn-cast-in-conditionals
# llvm-prefer-register-over-unsigned
# llvm-twine-local
# modernize-avoid-bind
# modernize-deprecated-ios-base-aliases
# modernize-replace-disallow-copy-and-assign-macro
# performance-inefficient-algorithm
#
# Because we add whole check groups, future clang versions may use more
# checks. For now, the only supported clang version is 11; however, it's
# likely that more recent versions work fine.
Checks: >-
-*,
altera-struct-pack-align,
bugprone-*,
-bugprone-easily-swappable-parameters,
-bugprone-misplaced-widening-cast,
-bugprone-narrowing-conversions,
-bugprone-unhandled-self-assignment,
cert-*,
-cert-dcl03-c,
-cert-dcl16-c,
-cert-dcl37-c,
-cert-dcl51-cpp,
-cert-dcl54-cpp,
-cert-dcl59-cpp,
-cert-err09-cpp,
-cert-err61-cpp,
-cert-fio38-c,
-cert-msc32-c,
-cert-oop11-cpp,
  -cert-str34-c,
cppcoreguidelines-*,
-cppcoreguidelines-avoid-c-arrays,
-cppcoreguidelines-avoid-magic-numbers,
-cppcoreguidelines-narrowing-conversions,
-cppcoreguidelines-init-variables,
-cppcoreguidelines-non-private-member-variables-in-classes,
-cppcoreguidelines-pro-bounds-array-to-pointer-decay,
-cppcoreguidelines-pro-bounds-constant-array-index,
-cppcoreguidelines-pro-bounds-pointer-arithmetic,
-cppcoreguidelines-pro-type-reinterpret-cast,
-cppcoreguidelines-pro-type-vararg,
google-*,
-google-readability-braces-around-statements,
-google-runtime-references,
llvm-*,
-llvm-else-after-return,
-llvm-header-guard,
-llvm-namespace-comment,
-llvm-qualified-auto,
misc-*,
-misc-non-private-member-variables-in-classes,
modernize-*,
-modernize-avoid-c-arrays,
-modernize-use-trailing-return-type,
performance-*,
readability-*,
-readability-braces-around-statements,
-readability-function-cognitive-complexity,
-readability-identifier-length,
-readability-isolate-declaration,
-readability-static-accessed-through-instance,
-bugprone-bad-signal-to-kill-thread,
-bugprone-dynamic-static-initializers,
-bugprone-no-escape,
-bugprone-posix-return,
-bugprone-spuriously-wake-up-functions,
-cert-env33-c,
-cert-err52-cpp,
-cppcoreguidelines-owning-memory,
-google-objc-*,
-google-readability-avoid-underscore-in-googletest-name,
-google-upgrade-googletest-case,
-llvm-prefer-isa-or-dyn-cast-in-conditionals,
-llvm-prefer-register-over-unsigned,
-llvm-twine-local,
-modernize-avoid-bind,
-modernize-deprecated-ios-base-aliases,
-modernize-replace-disallow-copy-and-assign-macro,
-performance-inefficient-algorithm,
WarningsAsErrors: ''
HeaderFilterRegex: ''
AnalyzeTemporaryDtors: false
FormatStyle: none
User: wholegraph
CheckOptions:
- key: cppcoreguidelines-macro-usage.AllowedRegexp
value: WHOLEMEMORY*|WHOLEGRAPH*|ASSERT|SET_ERROR_MSG|THROW|RAFT_*
- key: cppcoreguidelines-special-member-functions.AllowMissingMoveFunctions
value: true
- key: cppcoreguidelines-special-member-functions.AllowSoleDefaultDtor
value: true
# in case we re-include this rule, we definitely ignore classes with public-only members (structs)
- key: misc-non-private-member-variables-in-classes.IgnoreClassesWithAllMemberVariablesBeingPublic
value: true
  # several numbers immediately preceding powers of 2 (e.g. 31, 63) are added here as they are useful for bit-wise ops
# powers of 10 are typically used in tests
- key: readability-magic-numbers.IgnoredIntegerValues
value: 1;2;3;4;5;6;7;8;9;24;31;63;2147483647;4294967295;10;100;1000;10000;100000;1000000
# any powers of 2 are typically useful for number of threads, warps, etc.
- key: readability-magic-numbers.IgnorePowersOf2IntegerValues
value: true
# useful for simple constants (sqrt(2), etc.)
- key: readability-magic-numbers.IgnoredFloatingPointValues
value: 1.0;2.0;3.0;4.0;5.0;6.0;7.0;8.0;9.0;10.0;0.5;0.25
# only force-replace very long names with auto
- key: modernize-use-auto.MinTypeNameLength
value: 9
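  # for illustration (hypothetical snippet): a declaration like
  # "std::vector<int>::const_iterator it = v.begin();" would be flagged to use auto,
  # while short type names such as "int64_t x = 0;" are left alone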
- key: readability-identifier-naming.AbstractClassCase
value: lower_case
- key: readability-identifier-naming.AbstractClassPrefix
value: ''
- key: readability-identifier-naming.AbstractClassSuffix
value: ''
- key: readability-identifier-naming.ClassCase
value: lower_case
- key: readability-identifier-naming.ClassPrefix
value: ''
- key: readability-identifier-naming.ClassSuffix
value: ''
- key: readability-identifier-naming.ClassConstantCase
value: CamelCase
- key: readability-identifier-naming.ClassConstantPrefix
value: 'k'
- key: readability-identifier-naming.ClassConstantSuffix
value: ''
- key: readability-identifier-naming.ClassMemberCase
value: lower_case
- key: readability-identifier-naming.ClassMemberPrefix
value: ''
- key: readability-identifier-naming.ClassMemberSuffix
value: ''
- key: readability-identifier-naming.ClassMethodCase
value: lower_case
- key: readability-identifier-naming.ClassMethodPrefix
value: ''
- key: readability-identifier-naming.ClassMethodSuffix
value: ''
- key: readability-identifier-naming.ConstexprFunctionCase
value: lower_case
- key: readability-identifier-naming.ConstexprFunctionPrefix
value: ''
- key: readability-identifier-naming.ConstexprFunctionSuffix
value: ''
- key: readability-identifier-naming.ConstexprMethodCase
value: lower_case
- key: readability-identifier-naming.ConstexprMethodPrefix
value: ''
- key: readability-identifier-naming.ConstexprMethodSuffix
value: ''
- key: readability-identifier-naming.ConstexprVariableCase
value: UPPER_CASE
- key: readability-identifier-naming.ConstexprVariablePrefix
value: ''
- key: readability-identifier-naming.ConstexprVariableSuffix
value: ''
- key: readability-identifier-naming.EnumCase
value: CamelCase
- key: readability-identifier-naming.EnumPrefix
value: ''
- key: readability-identifier-naming.EnumSuffix
value: ''
- key: readability-identifier-naming.EnumConstantCase
value: CamelCase
- key: readability-identifier-naming.EnumConstantPrefix
value: 'k'
- key: readability-identifier-naming.EnumConstantSuffix
value: ''
- key: readability-identifier-naming.FunctionCase
value: lower_case
- key: readability-identifier-naming.FunctionPrefix
value: ''
- key: readability-identifier-naming.FunctionSuffix
value: ''
- key: readability-identifier-naming.GlobalConstantCase
value: UPPER_CASE
- key: readability-identifier-naming.GlobalConstantPrefix
value: ''
- key: readability-identifier-naming.GlobalConstantSuffix
value: ''
- key: readability-identifier-naming.LocalVariableCase
value: 'lower_case'
- key: readability-identifier-naming.LocalVariablePrefix
value: ''
- key: readability-identifier-naming.LocalVariableSuffix
value: ''
- key: readability-identifier-naming.MemberCase
value: lower_case
- key: readability-identifier-naming.MemberPrefix
value: ''
- key: readability-identifier-naming.MemberSuffix
value: ''
- key: readability-identifier-naming.NamespaceCase
value: lower_case
- key: readability-identifier-naming.NamespacePrefix
value: ''
- key: readability-identifier-naming.NamespaceSuffix
value: ''
- key: readability-identifier-naming.PrivateMemberCase
value: lower_case
- key: readability-identifier-naming.PrivateMemberPrefix
value: ''
- key: readability-identifier-naming.PrivateMemberSuffix
value: '_'
- key: readability-identifier-naming.ProtectedMemberCase
value: lower_case
- key: readability-identifier-naming.ProtectedMemberPrefix
value: ''
- key: readability-identifier-naming.ProtectedMemberSuffix
value: '_'
- key: readability-identifier-naming.StaticConstantCase
value: CamelCase
- key: readability-identifier-naming.StaticConstantPrefix
value: 'k'
- key: readability-identifier-naming.StaticConstantSuffix
value: ''
- key: readability-identifier-naming.StructCase
value: lower_case
- key: readability-identifier-naming.StructPrefix
value: ''
- key: readability-identifier-naming.StructSuffix
value: ''
- key: readability-identifier-naming.TemplateParameterCase
value: UPPER_CASE
- key: readability-identifier-naming.TemplateParameterPrefix
value: ''
- key: readability-identifier-naming.TemplateParameterSuffix
value: ''
- key: readability-identifier-naming.TypeAliasCase
value: lower_case
- key: readability-identifier-naming.TypeAliasPrefix
value: ''
- key: readability-identifier-naming.TypeAliasSuffix
value: '_t'
- key: readability-identifier-naming.TypeTemplateParameterCase
value: CamelCase
- key: readability-identifier-naming.TypeTemplateParameterPrefix
value: ''
- key: readability-identifier-naming.TypeTemplateParameterSuffix
value: 'T'
- key: readability-identifier-naming.TemplateTemplateParameterCase
value: CamelCase
- key: readability-identifier-naming.TemplateTemplateParameterPrefix
value: ''
- key: readability-identifier-naming.TemplateTemplateParameterSuffix
value: 'T'
- key: readability-identifier-naming.TypedefCase
value: lower_case
- key: readability-identifier-naming.TypedefPrefix
value: ''
- key: readability-identifier-naming.TypedefSuffix
value: '_t'
- key: readability-identifier-naming.VariableCase
value: lower_case
- key: readability-identifier-naming.VariablePrefix
value: ''
- key: readability-identifier-naming.VariableSuffix
value: ''
- key: bugprone-suspicious-include.HeaderFileExtensions
value: ';h;hh;hpp;hxx;cuh'
- key: bugprone-suspicious-include.ImplementationFileExtensions
value: 'c;cc;cpp;cxx;cu'
- key: google-build-namespaces.HeaderFileExtensions
value: ';h;hh;hpp;hxx;cuh'
- key: google-global-names-in-headers.HeaderFileExtensions
value: ';h;hh;hpp;hxx;cuh'
- key: misc-definitions-in-headers.HeaderFileExtensions
value: ';h;hh;hpp;hxx;cuh'
...
| 0 |
rapidsai_public_repos/wholegraph
|
rapidsai_public_repos/wholegraph/cpp/Doxyfile
|
# Doxyfile 1.8.20
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
#
# All text after a double hash (##) is considered a comment and is placed in
# front of the TAG it is preceding.
#
# All text after a single hash (#) is considered a comment and will be ignored.
# The format is:
# TAG = value [value, ...]
# For lists, items can also be appended using:
# TAG += value [value, ...]
# Values that contain spaces should be placed between quotes (\" \").
#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------
# This tag specifies the encoding used for all characters in the configuration
# file that follow. The default is UTF-8 which is also the encoding used for all
# text before the first occurrence of this tag. Doxygen uses libiconv (or the
# iconv built into libc) for the transcoding. See
# https://www.gnu.org/software/libiconv/ for the list of possible encodings.
# The default value is: UTF-8.
DOXYFILE_ENCODING = UTF-8
# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
# double-quotes, unless you are using Doxywizard) that should identify the
# project for which the documentation is generated. This name is used in the
# title of most generated pages and in a few other places.
# The default value is: My Project.
PROJECT_NAME = "WholeGraph C API"
# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
# could be handy for archiving the generated documentation or if some version
# control system is used.
PROJECT_NUMBER = 23.12
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a
# quick idea about the purpose of the project. Keep the description short.
PROJECT_BRIEF =
# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
# the logo to the output directory.
PROJECT_LOGO =
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
# into which the generated documentation will be written. If a relative path is
# entered, it will be relative to the location where doxygen was started. If
# left blank the current directory will be used.
OUTPUT_DIRECTORY =
# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
# directories (in 2 levels) under the output directory of each output format and
# will distribute the generated files over these directories. Enabling this
# option can be useful when feeding doxygen a huge amount of source files, where
# putting all generated files in the same directory would otherwise cause
# performance problems for the file system.
# The default value is: NO.
CREATE_SUBDIRS = NO
# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
# characters to appear in the names of generated files. If set to NO, non-ASCII
# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
# U+3044.
# The default value is: NO.
ALLOW_UNICODE_NAMES = NO
# The OUTPUT_LANGUAGE tag is used to specify the language in which all
# documentation generated by doxygen is written. Doxygen will use this
# information to generate all constant output in the proper language.
# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
# Ukrainian and Vietnamese.
# The default value is: English.
OUTPUT_LANGUAGE = English
# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all
# documentation generated by doxygen is written. Doxygen will use this
# information to generate all generated output in the proper direction.
# Possible values are: None, LTR, RTL and Context.
# The default value is: None.
OUTPUT_TEXT_DIRECTION = None
# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
# descriptions after the members that are listed in the file and class
# documentation (similar to Javadoc). Set to NO to disable this.
# The default value is: YES.
BRIEF_MEMBER_DESC = YES
# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
# description of a member or function before the detailed description
#
# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
# brief descriptions will be completely suppressed.
# The default value is: YES.
REPEAT_BRIEF = YES
# This tag implements a quasi-intelligent brief description abbreviator that is
# used to form the text in various listings. Each string in this list, if found
# as the leading text of the brief description, will be stripped from the text
# and the result, after processing the whole list, is used as the annotated
# text. Otherwise, the brief description is used as-is. If left blank, the
# following values are used ($name is automatically replaced with the name of
# the entity):The $name class, The $name widget, The $name file, is, provides,
# specifies, contains, represents, a, an and the.
ABBREVIATE_BRIEF =
# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
# doxygen will generate a detailed section even if there is only a brief
# description.
# The default value is: NO.
ALWAYS_DETAILED_SEC = NO
# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
# inherited members of a class in the documentation of that class as if those
# members were ordinary class members. Constructors, destructors and assignment
# operators of the base classes will not be shown.
# The default value is: NO.
INLINE_INHERITED_MEMB = NO
# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
# before files name in the file list and in the header files. If set to NO the
# shortest path that makes the file name unique will be used
# The default value is: YES.
FULL_PATH_NAMES = YES
# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
# Stripping is only done if one of the specified strings matches the left-hand
# part of the path. The tag can be used to show relative paths in the file list.
# If left blank the directory from which doxygen is run is used as the path to
# strip.
#
# Note that you can specify absolute paths here, but also relative paths, which
# will be relative from the directory where doxygen is started.
# This tag requires that the tag FULL_PATH_NAMES is set to YES.
STRIP_FROM_PATH =
# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
# path mentioned in the documentation of a class, which tells the reader which
# header file to include in order to use a class. If left blank only the name of
# the header file containing the class definition is used. Otherwise one should
# specify the list of include paths that are normally passed to the compiler
# using the -I flag.
STRIP_FROM_INC_PATH =
# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
# less readable) file names. This can be useful if your file system doesn't
# support long names like on DOS, Mac, or CD-ROM.
# The default value is: NO.
SHORT_NAMES = NO
# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
# first line (until the first dot) of a Javadoc-style comment as the brief
# description. If set to NO, the Javadoc-style will behave just like regular Qt-
# style comments (thus requiring an explicit @brief command for a brief
# description.)
# The default value is: NO.
JAVADOC_AUTOBRIEF = NO
# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line
# such as
# /***************
# as being the beginning of a Javadoc-style comment "banner". If set to NO, the
# Javadoc-style will behave just like regular comments and it will not be
# interpreted by doxygen.
# The default value is: NO.
JAVADOC_BANNER = NO
# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
# line (until the first dot) of a Qt-style comment as the brief description. If
# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
# requiring an explicit \brief command for a brief description.)
# The default value is: NO.
QT_AUTOBRIEF = NO
# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
# a brief description. This used to be the default behavior. The new default is
# to treat a multi-line C++ comment block as a detailed description. Set this
# tag to YES if you prefer the old behavior instead.
#
# Note that setting this tag to YES also means that rational rose comments are
# not recognized any more.
# The default value is: NO.
MULTILINE_CPP_IS_BRIEF = NO
# By default Python docstrings are displayed as preformatted text and doxygen's
# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the
# doxygen's special commands can be used and the contents of the docstring
# documentation blocks is shown as doxygen documentation.
# The default value is: YES.
PYTHON_DOCSTRING = YES
# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
# documentation from any documented member that it re-implements.
# The default value is: YES.
INHERIT_DOCS = YES
# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
# page for each member. If set to NO, the documentation of a member will be part
# of the file/class/namespace that contains it.
# The default value is: NO.
SEPARATE_MEMBER_PAGES = NO
# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
# uses this value to replace tabs by spaces in code fragments.
# Minimum value: 1, maximum value: 16, default value: 4.
TAB_SIZE = 4
# This tag can be used to specify a number of aliases that act as commands in
# the documentation. An alias has the form:
# name=value
# For example adding
# "sideeffect=@par Side Effects:\n"
# will allow you to put the command \sideeffect (or @sideeffect) in the
# documentation, which will result in a user-defined paragraph with heading
# "Side Effects:". You can put \n's in the value part of an alias to insert
# newlines (in the resulting output). You can put ^^ in the value part of an
# alias to insert a newline as if a physical newline was in the original file.
# When you need a literal { or } or , in the value part of an alias you have to
# escape them by means of a backslash (\), this can lead to conflicts with the
# commands \{ and \} for these it is advised to use the version @{ and @} or use
# a double escape (\\{ and \\})
ALIASES =
# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
# only. Doxygen will then generate output that is more tailored for C. For
# instance, some of the names that are used will be different. The list of all
# members will be omitted, etc.
# The default value is: NO.
OPTIMIZE_OUTPUT_FOR_C = NO
# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
# Python sources only. Doxygen will then generate output that is more tailored
# for that language. For instance, namespaces will be presented as packages,
# qualified scopes will look different, etc.
# The default value is: NO.
OPTIMIZE_OUTPUT_JAVA = NO
# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
# sources. Doxygen will then generate output that is tailored for Fortran.
# The default value is: NO.
OPTIMIZE_FOR_FORTRAN = NO
# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
# sources. Doxygen will then generate output that is tailored for VHDL.
# The default value is: NO.
OPTIMIZE_OUTPUT_VHDL = NO
# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice
# sources only. Doxygen will then generate output that is more tailored for that
# language. For instance, namespaces will be presented as modules, types will be
# separated into more groups, etc.
# The default value is: NO.
OPTIMIZE_OUTPUT_SLICE = NO
# Doxygen selects the parser to use depending on the extension of the files it
# parses. With this tag you can assign which parser to use for a given
# extension. Doxygen has a built-in mapping, but you can override or extend it
# using this tag. The format is ext=language, where ext is a file extension, and
# language is one of the parsers supported by doxygen: IDL, Java, JavaScript,
# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL,
# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
# FortranFree, unknown formatted Fortran: Fortran. In the latter case the parser
# tries to guess whether the code is fixed or free formatted code, this is the
# default for Fortran type files). For instance to make doxygen treat .inc files
# as Fortran files (default is PHP), and .f files as C (default is Fortran),
# use: inc=Fortran f=C.
#
# Note: For files without extension you can use no_extension as a placeholder.
#
# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
# the files are not read by doxygen.
EXTENSION_MAPPING =
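# Illustration only (kept commented out): if doxygen's built-in mapping did not
# already handle the CUDA extensions listed in FILE_PATTERNS below, they could
# be assigned to the C++ parser with a line such as:
# EXTENSION_MAPPING = cu=C++ cuh=C++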
# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
# according to the Markdown format, which allows for more readable
# documentation. See https://daringfireball.net/projects/markdown/ for details.
# The output of markdown processing is further processed by doxygen, so you can
# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
# case of backward compatibility issues.
# The default value is: YES.
MARKDOWN_SUPPORT = YES
# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
# to that level are automatically included in the table of contents, even if
# they do not have an id attribute.
# Note: This feature currently applies only to Markdown headings.
# Minimum value: 0, maximum value: 99, default value: 5.
# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
TOC_INCLUDE_HEADINGS = 5
# When enabled doxygen tries to link words that correspond to documented
# classes, or namespaces to their corresponding documentation. Such a link can
# be prevented in individual cases by putting a % sign in front of the word or
# globally by setting AUTOLINK_SUPPORT to NO.
# The default value is: YES.
AUTOLINK_SUPPORT = YES
# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
# to include (a tag file for) the STL sources as input, then you should set this
# tag to YES in order to let doxygen match functions declarations and
# definitions whose arguments contain STL classes (e.g. func(std::string);
# versus func(std::string) {}). This also makes the inheritance and collaboration
# diagrams that involve STL classes more complete and accurate.
# The default value is: NO.
BUILTIN_STL_SUPPORT = NO
# If you use Microsoft's C++/CLI language, you should set this option to YES to
# enable parsing support.
# The default value is: NO.
CPP_CLI_SUPPORT = NO
# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen
# will parse them like normal C++ but will assume all classes use public instead
# of private inheritance when no explicit protection keyword is present.
# The default value is: NO.
SIP_SUPPORT = NO
# For Microsoft's IDL there are propget and propput attributes to indicate
# getter and setter methods for a property. Setting this option to YES will make
# doxygen replace the get and set methods by a property in the documentation.
# This will only work if the methods are indeed getting or setting a simple
# type. If this is not the case, or you want to show the methods anyway, you
# should set this option to NO.
# The default value is: YES.
IDL_PROPERTY_SUPPORT = YES
# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
# tag is set to YES then doxygen will reuse the documentation of the first
# member in the group (if any) for the other members of the group. By default
# all members of a group must be documented explicitly.
# The default value is: NO.
DISTRIBUTE_GROUP_DOC = NO
# If one adds a struct or class to a group and this option is enabled, then also
# any nested class or struct is added to the same group. By default this option
# is disabled and one has to add nested compounds explicitly via \ingroup.
# The default value is: NO.
GROUP_NESTED_COMPOUNDS = NO
# Set the SUBGROUPING tag to YES to allow class member groups of the same type
# (for instance a group of public functions) to be put as a subgroup of that
# type (e.g. under the Public Functions section). Set it to NO to prevent
# subgrouping. Alternatively, this can be done per class using the
# \nosubgrouping command.
# The default value is: YES.
SUBGROUPING = YES
# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
# are shown inside the group in which they are included (e.g. using \ingroup)
# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
# and RTF).
#
# Note that this feature does not work in combination with
# SEPARATE_MEMBER_PAGES.
# The default value is: NO.
INLINE_GROUPED_CLASSES = NO
# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
# with only public data fields or simple typedef fields will be shown inline in
# the documentation of the scope in which they are defined (i.e. file,
# namespace, or group documentation), provided this scope is documented. If set
# to NO, structs, classes, and unions are shown on a separate page (for HTML and
# Man pages) or section (for LaTeX and RTF).
# The default value is: NO.
INLINE_SIMPLE_STRUCTS = NO
# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
# enum is documented as struct, union, or enum with the name of the typedef. So
# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
# with name TypeT. When disabled the typedef will appear as a member of a file,
# namespace, or class. And the struct will be named TypeS. This can typically be
# useful for C code in case the coding convention dictates that all compound
# types are typedef'ed and only the typedef is referenced, never the tag name.
# The default value is: NO.
TYPEDEF_HIDES_STRUCT = NO
# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
# cache is used to resolve symbols given their name and scope. Since this can be
# an expensive process and often the same symbol appears multiple times in the
# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
# doxygen will become slower. If the cache is too large, memory is wasted. The
# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
# symbols. At the end of a run doxygen will report the cache usage and suggest
# the optimal cache size from a speed point of view.
# Minimum value: 0, maximum value: 9, default value: 0.
LOOKUP_CACHE_SIZE = 0
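# For instance (illustration only), a value of 2 would correspond to a cache of
# 2^(16+2) = 262144 symbols.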
# The NUM_PROC_THREADS specifies the number of threads doxygen is allowed to use
# during processing. When set to 0 doxygen will base this on the number of
# cores available in the system. You can set it explicitly to a value larger
# than 0 to get more control over the balance between CPU load and processing
# speed. At this moment only the input processing can be done using multiple
# threads. Since this is still an experimental feature the default is set to 1,
# which effectively disables parallel processing. Please report any issues you
# encounter. Generating dot graphs in parallel is controlled by the
# DOT_NUM_THREADS setting.
# Minimum value: 0, maximum value: 32, default value: 1.
NUM_PROC_THREADS = 1
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------
# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
# documentation are documented, even if no documentation was available. Private
# class members and static file members will be hidden unless the
# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
# Note: This will also disable the warnings about undocumented members that are
# normally produced when WARNINGS is set to YES.
# The default value is: NO.
EXTRACT_ALL = YES
# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
# be included in the documentation.
# The default value is: NO.
EXTRACT_PRIVATE = NO
# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual
# methods of a class will be included in the documentation.
# The default value is: NO.
EXTRACT_PRIV_VIRTUAL = NO
# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
# scope will be included in the documentation.
# The default value is: NO.
EXTRACT_PACKAGE = NO
# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
# included in the documentation.
# The default value is: NO.
EXTRACT_STATIC = NO
# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
# locally in source files will be included in the documentation. If set to NO,
# only classes defined in header files are included. Does not have any effect
# for Java sources.
# The default value is: YES.
EXTRACT_LOCAL_CLASSES = NO
# This flag is only useful for Objective-C code. If set to YES, local methods,
# which are defined in the implementation section but not in the interface are
# included in the documentation. If set to NO, only methods in the interface are
# included.
# The default value is: NO.
EXTRACT_LOCAL_METHODS = NO
# If this flag is set to YES, the members of anonymous namespaces will be
# extracted and appear in the documentation as a namespace called
# 'anonymous_namespace{file}', where file will be replaced with the base name of
# the file that contains the anonymous namespace. By default anonymous namespaces
# are hidden.
# The default value is: NO.
EXTRACT_ANON_NSPACES = NO
# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
# undocumented members inside documented classes or files. If set to NO these
# members will be included in the various overviews, but no documentation
# section is generated. This option has no effect if EXTRACT_ALL is enabled.
# The default value is: NO.
HIDE_UNDOC_MEMBERS = NO
# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
# undocumented classes that are normally visible in the class hierarchy. If set
# to NO, these classes will be included in the various overviews. This option
# has no effect if EXTRACT_ALL is enabled.
# The default value is: NO.
HIDE_UNDOC_CLASSES = NO
# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
# declarations. If set to NO, these declarations will be included in the
# documentation.
# The default value is: NO.
HIDE_FRIEND_COMPOUNDS = NO
# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
# documentation blocks found inside the body of a function. If set to NO, these
# blocks will be appended to the function's detailed documentation block.
# The default value is: NO.
HIDE_IN_BODY_DOCS = NO
# The INTERNAL_DOCS tag determines if documentation that is typed after a
# \internal command is included. If the tag is set to NO then the documentation
# will be excluded. Set it to YES to include the internal documentation.
# The default value is: NO.
INTERNAL_DOCS = NO
# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
# names in lower-case letters. If set to YES, upper-case letters are also
# allowed. This is useful if you have classes or files whose names only differ
# in case and if your file system supports case sensitive file names. Windows
# (including Cygwin) and Mac users are advised to set this option to NO.
# The default value is: system dependent.
CASE_SENSE_NAMES = YES
# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
# their full class and namespace scopes in the documentation. If set to YES, the
# scope will be hidden.
# The default value is: NO.
HIDE_SCOPE_NAMES = NO
# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
# append additional text to a page's title, such as Class Reference. If set to
# YES the compound reference will be hidden.
# The default value is: NO.
HIDE_COMPOUND_REFERENCE= NO
# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
# the files that are included by a file in the documentation of that file.
# The default value is: YES.
SHOW_INCLUDE_FILES = YES
# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
# grouped member an include statement to the documentation, telling the reader
# which file to include in order to use the member.
# The default value is: NO.
SHOW_GROUPED_MEMB_INC = NO
# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
# files with double quotes in the documentation rather than with sharp brackets.
# The default value is: NO.
FORCE_LOCAL_INCLUDES = NO
# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
# documentation for inline members.
# The default value is: YES.
INLINE_INFO = YES
# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
# (detailed) documentation of file and class members alphabetically by member
# name. If set to NO, the members will appear in declaration order.
# The default value is: YES.
SORT_MEMBER_DOCS = YES
# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
# descriptions of file, namespace and class members alphabetically by member
# name. If set to NO, the members will appear in declaration order. Note that
# this will also influence the order of the classes in the class list.
# The default value is: NO.
SORT_BRIEF_DOCS = NO
# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
# (brief and detailed) documentation of class members so that constructors and
# destructors are listed first. If set to NO the constructors will appear in the
# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
# member documentation.
# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
# detailed member documentation.
# The default value is: NO.
SORT_MEMBERS_CTORS_1ST = NO
# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
# of group names into alphabetical order. If set to NO the group names will
# appear in their defined order.
# The default value is: NO.
SORT_GROUP_NAMES = NO
# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
# fully-qualified names, including namespaces. If set to NO, the class list will
# be sorted only by class name, not including the namespace part.
# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
# Note: This option applies only to the class list, not to the alphabetical
# list.
# The default value is: NO.
SORT_BY_SCOPE_NAME = NO
# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
# type resolution of all parameters of a function it will reject a match between
# the prototype and the implementation of a member function even if there is
# only one candidate or it is obvious which candidate to choose by doing a
# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
# accept a match between prototype and implementation in such cases.
# The default value is: NO.
STRICT_PROTO_MATCHING = NO
# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
# list. This list is created by putting \todo commands in the documentation.
# The default value is: YES.
GENERATE_TODOLIST = YES
# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
# list. This list is created by putting \test commands in the documentation.
# The default value is: YES.
GENERATE_TESTLIST = YES
# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
# list. This list is created by putting \bug commands in the documentation.
# The default value is: YES.
GENERATE_BUGLIST = YES
# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
# the deprecated list. This list is created by putting \deprecated commands in
# the documentation.
# The default value is: YES.
GENERATE_DEPRECATEDLIST= YES
# The ENABLED_SECTIONS tag can be used to enable conditional documentation
# sections, marked by \if <section_label> ... \endif and \cond <section_label>
# ... \endcond blocks.
ENABLED_SECTIONS =
# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
# initial value of a variable or macro / define can have for it to appear in the
# documentation. If the initializer consists of more lines than specified here
# it will be hidden. Use a value of 0 to hide initializers completely. The
# appearance of the value of individual variables and macros / defines can be
# controlled using \showinitializer or \hideinitializer command in the
# documentation regardless of this setting.
# Minimum value: 0, maximum value: 10000, default value: 30.
MAX_INITIALIZER_LINES = 30
# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
# the bottom of the documentation of classes and structs. If set to YES, the
# list will mention the files that were used to generate the documentation.
# The default value is: YES.
SHOW_USED_FILES = YES
# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
# will remove the Files entry from the Quick Index and from the Folder Tree View
# (if specified).
# The default value is: YES.
SHOW_FILES = YES
# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
# page. This will remove the Namespaces entry from the Quick Index and from the
# Folder Tree View (if specified).
# The default value is: YES.
SHOW_NAMESPACES = YES
# The FILE_VERSION_FILTER tag can be used to specify a program or script that
# doxygen should invoke to get the current version for each file (typically from
# the version control system). Doxygen will invoke the program by executing (via
# popen()) the command: <command> <input-file>, where <command> is the value of
# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
# provided by doxygen. Whatever the program writes to standard output is used as
# the file version. For an example see the documentation.
FILE_VERSION_FILTER =
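# Illustration only (kept commented out; assumes the sources live in a git
# checkout and that git is on the search path): a command such as the following
# would report the abbreviated hash of the last commit touching each input file:
# FILE_VERSION_FILTER = "git log -1 --format=%h --"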
# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
# by doxygen. The layout file controls the global structure of the generated
# output files in an output format independent way. To create the layout file
# that represents doxygen's defaults, run doxygen with the -l option. You can
# optionally specify a file name after the option, if omitted DoxygenLayout.xml
# will be used as the name of the layout file.
#
# Note that if you run doxygen from a directory containing a file called
# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
# tag is left empty.
LAYOUT_FILE =
# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
# the reference definitions. This must be a list of .bib files. The .bib
# extension is automatically appended if omitted. This requires the bibtex tool
# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info.
# For LaTeX the style of the bibliography can be controlled using
# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
# search path. See also \cite for info how to create references.
CITE_BIB_FILES =
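# Illustration only (kept commented out; references.bib is a hypothetical
# file name):
# CITE_BIB_FILES = doxygen/references.bib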
#---------------------------------------------------------------------------
# Configuration options related to warning and progress messages
#---------------------------------------------------------------------------
# The QUIET tag can be used to turn on/off the messages that are generated to
# standard output by doxygen. If QUIET is set to YES this implies that the
# messages are off.
# The default value is: NO.
QUIET = NO
# The WARNINGS tag can be used to turn on/off the warning messages that are
# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
# this implies that the warnings are on.
#
# Tip: Turn warnings on while writing the documentation.
# The default value is: YES.
WARNINGS = YES
# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
# will automatically be disabled.
# The default value is: YES.
WARN_IF_UNDOCUMENTED = YES
# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
# potential errors in the documentation, such as not documenting some parameters
# in a documented function, or documenting parameters that don't exist or using
# markup commands wrongly.
# The default value is: YES.
WARN_IF_DOC_ERROR = YES
# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
# are documented, but have no documentation for their parameters or return
# value. If set to NO, doxygen will only warn about wrong or incomplete
# parameter documentation, but not about the absence of documentation. If
# EXTRACT_ALL is set to YES then this flag will automatically be disabled.
# The default value is: NO.
WARN_NO_PARAMDOC = YES
# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
# a warning is encountered.
# The default value is: NO.
WARN_AS_ERROR = YES
# The WARN_FORMAT tag determines the format of the warning messages that doxygen
# can produce. The string should contain the $file, $line, and $text tags, which
# will be replaced by the file and line number from which the warning originated
# and the warning text. Optionally the format may contain $version, which will
# be replaced by the version of the file (if it could be obtained via
# FILE_VERSION_FILTER)
# The default value is: $file:$line: $text.
WARN_FORMAT = "$file:$line: $text"
# The WARN_LOGFILE tag can be used to specify a file to which warning and error
# messages should be written. If left blank the output is written to standard
# error (stderr).
WARN_LOGFILE =
#---------------------------------------------------------------------------
# Configuration options related to the input files
#---------------------------------------------------------------------------
# The INPUT tag is used to specify the files and/or directories that contain
# documented source files. You may enter file names like myfile.cpp or
# directories like /usr/src/myproject. Separate the files or directories with
# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
# Note: If this tag is empty the current directory is searched.
INPUT = ./include ./src
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
# documentation (see: https://www.gnu.org/software/libiconv/) for the list of
# possible encodings.
# The default value is: UTF-8.
INPUT_ENCODING = UTF-8
# If the value of the INPUT tag contains directories, you can use the
# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
# *.h) to filter out the source-files in the directories.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# read by doxygen.
#
# If left blank the following patterns are tested: *.c, *.cc, *.cxx, *.cpp,
# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment),
# *.doc (to be provided as doxygen C comment), *.txt (to be provided as doxygen
# C comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd,
# *.vhdl, *.ucf, *.qsf and *.ice.
FILE_PATTERNS = *.cpp \
*.h \
*.hpp \
*.hxx \
*.cu \
*.cuh
# The RECURSIVE tag can be used to specify whether or not subdirectories should
# be searched for input files as well.
# The default value is: NO.
RECURSIVE = YES
# The EXCLUDE tag can be used to specify files and/or directories that should be
# excluded from the INPUT source files. This way you can easily exclude a
# subdirectory from a directory tree whose root is specified with the INPUT tag.
#
# Note that relative paths are relative to the directory from which doxygen is
# run.
EXCLUDE =
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
# from the input.
# The default value is: NO.
EXCLUDE_SYMLINKS = NO
# If the value of the INPUT tag contains directories, you can use the
# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
# certain files from those directories.
#
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories for example use the pattern */test/*
EXCLUDE_PATTERNS = columnWiseSort.cuh \
smoblocksolve.h
# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the
# output. The symbol name can be a fully qualified name, a word, or if the
# wildcard * is used, a substring. Examples: ANamespace, AClass,
# AClass::ANamespace, ANamespace::*Test
#
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories use the pattern */test/*
EXCLUDE_SYMBOLS =
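# Illustration only (kept commented out; detail is a hypothetical
# implementation namespace): hiding such a namespace everywhere could look
# like:
# EXCLUDE_SYMBOLS = *::detail::*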
# The EXAMPLE_PATH tag can be used to specify one or more files or directories
# that contain example code fragments that are included (see the \include
# command).
EXAMPLE_PATH =
# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
# *.h) to filter out the source-files in the directories. If left blank all
# files are included.
EXAMPLE_PATTERNS =
# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
# searched for input files to be used with the \include or \dontinclude commands
# irrespective of the value of the RECURSIVE tag.
# The default value is: NO.
EXAMPLE_RECURSIVE = NO
# The IMAGE_PATH tag can be used to specify one or more files or directories
# that contain images that are to be included in the documentation (see the
# \image command).
IMAGE_PATH = # @CMAKE_CURRENT_SOURCE_DIR@/doxygen/images
# The INPUT_FILTER tag can be used to specify a program that doxygen should
# invoke to filter for each input file. Doxygen will invoke the filter program
# by executing (via popen()) the command:
#
# <filter> <input-file>
#
# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
# name of an input file. Doxygen will then use the output that the filter
# program writes to standard output. If FILTER_PATTERNS is specified, this tag
# will be ignored.
#
# Note that the filter must not add or remove lines; it is applied before the
# code is scanned, but not when the output code is generated. If lines are added
# or removed, the anchors will not be placed correctly.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# properly processed by doxygen.
INPUT_FILTER =
# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
# basis. Doxygen will compare the file name with each pattern and apply the
# filter if there is a match. The filters are a list of the form: pattern=filter
# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
# patterns match the file name, INPUT_FILTER is applied.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# properly processed by doxygen.
FILTER_PATTERNS =
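# Illustration only (kept commented out; my_cu_filter.py is a hypothetical
# script): the pattern=filter form described above could be used like this:
# FILTER_PATTERNS = *.cu=my_cu_filter.py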
# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
# INPUT_FILTER) will also be used to filter the input files that are used for
# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
# The default value is: NO.
FILTER_SOURCE_FILES = NO
# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
# it is also possible to disable source filtering for a specific pattern using
# *.ext= (so without naming a filter).
# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
FILTER_SOURCE_PATTERNS =
# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
# is part of the input, its contents will be placed on the main page
# (index.html). This can be useful if you have a project on for instance GitHub
# and want to reuse the introduction page also for the doxygen output.
USE_MDFILE_AS_MAINPAGE =
#---------------------------------------------------------------------------
# Configuration options related to source browsing
#---------------------------------------------------------------------------
# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
# generated. Documented entities will be cross-referenced with these sources.
#
# Note: To get rid of all source code in the generated output, make sure that
# also VERBATIM_HEADERS is set to NO.
# The default value is: NO.
SOURCE_BROWSER = NO
# Setting the INLINE_SOURCES tag to YES will include the body of functions,
# classes and enums directly into the documentation.
# The default value is: NO.
INLINE_SOURCES = NO
# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
# special comment blocks from generated source code fragments. Normal C, C++ and
# Fortran comments will always remain visible.
# The default value is: YES.
STRIP_CODE_COMMENTS = YES
# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
# entity all documented functions referencing it will be listed.
# The default value is: NO.
REFERENCED_BY_RELATION = NO
# If the REFERENCES_RELATION tag is set to YES then for each documented function
# all documented entities called/used by that function will be listed.
# The default value is: NO.
REFERENCES_RELATION = NO
# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
# to YES then the hyperlinks from functions in REFERENCES_RELATION and
# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
# link to the documentation.
# The default value is: YES.
REFERENCES_LINK_SOURCE = YES
# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
# source code will show a tooltip with additional information such as prototype,
# brief description and links to the definition and documentation. Since this
# will make the HTML file larger and loading of large files a bit slower, you
# can opt to disable this feature.
# The default value is: YES.
# This tag requires that the tag SOURCE_BROWSER is set to YES.
SOURCE_TOOLTIPS = YES
# If the USE_HTAGS tag is set to YES then the references to source code will
# point to the HTML generated by the htags(1) tool instead of doxygen's built-in
# source browser. The htags tool is part of GNU's global source tagging system
# (see https://www.gnu.org/software/global/global.html). You will need version
# 4.8.6 or higher.
#
# To use it do the following:
# - Install the latest version of global
# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file
# - Make sure the INPUT points to the root of the source tree
# - Run doxygen as normal
#
# Doxygen will invoke htags (and that will in turn invoke gtags), so these
# tools must be available from the command line (i.e. in the search path).
#
# The result: instead of the source browser generated by doxygen, the links to
# source code will now point to the output of htags.
# The default value is: NO.
# This tag requires that the tag SOURCE_BROWSER is set to YES.
USE_HTAGS = NO
# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
# verbatim copy of the header file for each class for which an include is
# specified. Set to NO to disable this.
# See also: Section \class.
# The default value is: YES.
VERBATIM_HEADERS = YES
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
# compounds will be generated. Enable this if the project contains a lot of
# classes, structs, unions or interfaces.
# The default value is: YES.
ALPHABETICAL_INDEX = YES
# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
# which the alphabetical index list will be split.
# Minimum value: 1, maximum value: 20, default value: 5.
# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
COLS_IN_ALPHA_INDEX = 5
# In case all classes in a project start with a common prefix, all classes will
# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
# can be used to specify a prefix (or a list of prefixes) that should be ignored
# while generating the index headers.
# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
IGNORE_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the HTML output
#---------------------------------------------------------------------------
# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
# The default value is: YES.
GENERATE_HTML = NO
# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: html.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_OUTPUT = html
# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
# generated HTML page (for example: .htm, .php, .asp).
# The default value is: .html.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FILE_EXTENSION = .html
# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
# each generated HTML page. If the tag is left blank doxygen will generate a
# standard header.
#
# To get valid HTML, the header file must include any scripts and style sheets
# that doxygen needs, which depend on the configuration options used (e.g.
# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
# default header using
# doxygen -w html new_header.html new_footer.html new_stylesheet.css
# YourConfigFile
# and then modify the file new_header.html. See also section "Doxygen usage"
# for information on how to generate the default header that doxygen normally
# uses.
# Note: The header is subject to change so you typically have to regenerate the
# default header when upgrading to a newer version of doxygen. For a description
# of the possible markers and block names see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_HEADER =
# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
# generated HTML page. If the tag is left blank doxygen will generate a standard
# footer. See HTML_HEADER for more information on how to generate a default
# footer and what special commands can be used inside the footer. See also
# section "Doxygen usage" for information on how to generate the default footer
# that doxygen normally uses.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FOOTER =
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
# sheet that is used by each HTML page. It can be used to fine-tune the look of
# the HTML output. If left blank doxygen will generate a default style sheet.
# See also section "Doxygen usage" for information on how to generate the style
# sheet that doxygen normally uses.
# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
# it is more robust and this tag (HTML_STYLESHEET) will in the future become
# obsolete.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_STYLESHEET =
# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
# cascading style sheets that are included after the standard style sheets
# created by doxygen. Using this option one can overrule certain style aspects.
# This is preferred over using HTML_STYLESHEET since it does not replace the
# standard style sheet and is therefore more robust against future updates.
# Doxygen will copy the style sheet files to the output directory.
# Note: The order of the extra style sheet files is of importance (e.g. the last
# style sheet in the list overrules the setting of the previous ones in the
# list). For an example see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_EXTRA_STYLESHEET =
# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the HTML output directory. Note
# that these files will be copied to the base HTML output directory. Use the
# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
# files will be copied as-is; there are no commands or markers available.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_EXTRA_FILES =
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
# will adjust the colors in the style sheet and background images according to
# this color. Hue is specified as an angle on a colorwheel, see
# https://en.wikipedia.org/wiki/Hue for more information. For instance the value
# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
# purple, and 360 is red again.
# Minimum value: 0, maximum value: 359, default value: 220.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_HUE = 220
# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
# in the HTML output. For a value of 0 the output will use grayscales only. A
# value of 255 will produce the most vivid colors.
# Minimum value: 0, maximum value: 255, default value: 100.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_SAT = 100
# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
# luminance component of the colors in the HTML output. Values below 100
# gradually make the output lighter, whereas values above 100 make the output
# darker. The value divided by 100 is the actual gamma applied, so 80 represents
# a gamma of 0.8. The value 220 represents a gamma of 2.2, and 100 does not
# change the gamma.
# Minimum value: 40, maximum value: 240, default value: 80.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_GAMMA = 80
# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
# page will contain the date and time when the page was generated. Setting this
# to YES can help to show when doxygen was last run and thus if the
# documentation is up to date.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_TIMESTAMP = NO
# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML
# documentation will contain a main index with vertical navigation menus that
# are dynamically created via JavaScript. If disabled, the navigation index will
# consist of multiple levels of tabs that are statically embedded in every HTML
# page. Disable this option to support browsers that do not have JavaScript,
# like the Qt help browser.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_DYNAMIC_MENUS = YES
# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
# documentation will contain sections that can be hidden and shown after the
# page has loaded.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_DYNAMIC_SECTIONS = NO
# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
# shown in the various tree structured indices initially; the user can expand
# and collapse entries dynamically later on. Doxygen will expand the tree to
# such a level that at most the specified number of entries are visible (unless
# a fully collapsed tree already exceeds this amount). So setting the number of
# entries to 1 will produce a fully collapsed tree by default. 0 is a special value
# representing an infinite number of entries and will result in a fully expanded
# tree by default.
# Minimum value: 0, maximum value: 9999, default value: 100.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_INDEX_NUM_ENTRIES = 100
# If the GENERATE_DOCSET tag is set to YES, additional index files will be
# generated that can be used as input for Apple's Xcode 3 integrated development
# environment (see: https://developer.apple.com/xcode/), introduced with OSX
# 10.5 (Leopard). To create a documentation set, doxygen will generate a
# Makefile in the HTML output directory. Running make will produce the docset in
# that directory and running make install will install the docset in
# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy
# genXcode/_index.html for more information.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_DOCSET = NO
# This tag determines the name of the docset feed. A documentation feed provides
# an umbrella under which multiple documentation sets from a single provider
# (such as a company or product suite) can be grouped.
# The default value is: Doxygen generated docs.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_FEEDNAME = "Doxygen generated docs"
# This tag specifies a string that should uniquely identify the documentation
# set bundle. This should be a reverse domain-name style string, e.g.
# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_BUNDLE_ID = org.doxygen.Project
# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
# the documentation publisher. This should be a reverse domain-name style
# string, e.g. com.mycompany.MyDocSet.documentation.
# The default value is: org.doxygen.Publisher.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_PUBLISHER_ID = org.doxygen.Publisher
# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
# The default value is: Publisher.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_PUBLISHER_NAME = Publisher
# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
# (see: https://www.microsoft.com/en-us/download/details.aspx?id=21138) on
# Windows.
#
# The HTML Help Workshop contains a compiler that can convert all HTML output
# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
# files are now used as the Windows 98 help format, and will replace the old
# Windows help format (.hlp) on all Windows platforms in the future. Compressed
# HTML files also contain an index, a table of contents, and you can search for
# words in the documentation. The HTML workshop also contains a viewer for
# compressed HTML files.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_HTMLHELP = NO
# The CHM_FILE tag can be used to specify the file name of the resulting .chm
# file. You can add a path in front of the file if the result should not be
# written to the html output directory.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_FILE =
# The HHC_LOCATION tag can be used to specify the location (absolute path
# including file name) of the HTML help compiler (hhc.exe). If non-empty,
# doxygen will try to run the HTML help compiler on the generated index.hhp.
# The file has to be specified with full path.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
HHC_LOCATION =
# The GENERATE_CHI flag controls if a separate .chi index file is generated
# (YES) or that it should be included in the main .chm file (NO).
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
GENERATE_CHI = NO
# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
# and project file content.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_INDEX_ENCODING =
# The BINARY_TOC flag controls whether a binary table of contents is generated
# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
# enables the Previous and Next buttons.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
BINARY_TOC = NO
# The TOC_EXPAND flag can be set to YES to add extra items for group members to
# the table of contents of the HTML help documentation and to the tree view.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
TOC_EXPAND = NO
# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
# (.qch) of the generated HTML documentation.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_QHP = NO
# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
# the file name of the resulting .qch file. The path specified is relative to
# the HTML output folder.
# This tag requires that the tag GENERATE_QHP is set to YES.
QCH_FILE =
# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
# Project output. For more information please see Qt Help Project / Namespace
# (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace).
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_NAMESPACE = org.doxygen.Project
# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
# Help Project output. For more information please see Qt Help Project / Virtual
# Folders (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-
# folders).
# The default value is: doc.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_VIRTUAL_FOLDER = doc
# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
# filter to add. For more information please see Qt Help Project / Custom
# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-
# filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_NAME =
# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
# custom filter to add. For more information please see Qt Help Project / Custom
# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-
# filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_ATTRS =
# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
# project's filter section matches. Qt Help Project / Filter Attributes (see:
# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_SECT_FILTER_ATTRS =
# The QHG_LOCATION tag can be used to specify the location of Qt's
# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
# generated .qhp file.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHG_LOCATION =
# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
# generated, together with the HTML files, they form an Eclipse help plugin. To
# install this plugin and make it available under the help contents menu in
# Eclipse, the contents of the directory containing the HTML and XML files need
# to be copied into the plugins directory of Eclipse. The name of the directory
# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
# After copying Eclipse needs to be restarted before the help appears.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_ECLIPSEHELP = NO
# A unique identifier for the Eclipse help plugin. When installing the plugin
# the directory name containing the HTML and XML files should also have this
# name. Each documentation set should have its own identifier.
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
ECLIPSE_DOC_ID = org.doxygen.Project
# If you want full control over the layout of the generated HTML pages it might
# be necessary to disable the index and replace it with your own. The
# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
# of each HTML page. A value of NO enables the index and the value YES disables
# it. Since the tabs in the index contain the same information as the navigation
# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
DISABLE_INDEX = NO
# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
# structure should be generated to display hierarchical information. If the tag
# value is set to YES, a side panel will be generated containing a tree-like
# index structure (just like the one that is generated for HTML Help). For this
# to work a browser that supports JavaScript, DHTML, CSS and frames is required
# (i.e. any modern browser). Windows users are probably better off using the
# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
# further fine-tune the look of the index. As an example, the default style
# sheet generated by doxygen has an example that shows how to put an image at
# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
# the same information as the tab index, you could consider setting
# DISABLE_INDEX to YES when enabling this option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_TREEVIEW = NO
# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
# doxygen will group on one line in the generated HTML documentation.
#
# Note that a value of 0 will completely suppress the enum values from appearing
# in the overview section.
# Minimum value: 0, maximum value: 20, default value: 4.
# This tag requires that the tag GENERATE_HTML is set to YES.
ENUM_VALUES_PER_LINE = 4
# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
# to set the initial width (in pixels) of the frame in which the tree is shown.
# Minimum value: 0, maximum value: 1500, default value: 250.
# This tag requires that the tag GENERATE_HTML is set to YES.
TREEVIEW_WIDTH = 250
# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
# external symbols imported via tag files in a separate window.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
EXT_LINKS_IN_WINDOW = NO
# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg
# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see
# https://inkscape.org) to generate formulas as SVG images instead of PNGs for
# the HTML output. These images will generally look nicer at scaled resolutions.
# Possible values are: png (the default) and svg (looks nicer but requires the
# pdf2svg or inkscape tool).
# The default value is: png.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FORMULA_FORMAT = png
# Use this tag to change the font size of LaTeX formulas included as images in
# the HTML documentation. When you change the font size after a successful
# doxygen run you need to manually remove any form_*.png images from the HTML
# output directory to force them to be regenerated.
# Minimum value: 8, maximum value: 50, default value: 10.
# This tag requires that the tag GENERATE_HTML is set to YES.
FORMULA_FONTSIZE = 10
# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
# generated for formulas are transparent PNGs. Transparent PNGs are not
# supported properly for IE 6.0, but are supported on all modern browsers.
#
# Note that when changing this option you need to delete any form_*.png files in
# the HTML output directory before the changes have effect.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
FORMULA_TRANSPARENT = YES
# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands
# to create new LaTeX commands to be used in formulas as building blocks. See
# the section "Including formulas" for details.
FORMULA_MACROFILE =
# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
# https://www.mathjax.org) which uses client side JavaScript for the rendering
# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
# installed or if you want the formulas to look prettier in the HTML output. When
# enabled you may also need to install MathJax separately and configure the path
# to it using the MATHJAX_RELPATH option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
USE_MATHJAX = YES
# When MathJax is enabled you can set the default output format to be used for
# the MathJax output. See the MathJax site (see:
# http://docs.mathjax.org/en/latest/output.html) for more details.
# Possible values are: HTML-CSS (which is slower, but has the best
# compatibility), NativeMML (i.e. MathML) and SVG.
# The default value is: HTML-CSS.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_FORMAT = HTML-CSS
# When MathJax is enabled you need to specify the location relative to the HTML
# output directory using the MATHJAX_RELPATH option. The destination directory
# should contain the MathJax.js script. For instance, if the mathjax directory
# is located at the same level as the HTML output directory, then
# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
# Content Delivery Network so you can quickly see the result without installing
# MathJax. However, it is strongly recommended to install a local copy of
# MathJax from https://www.mathjax.org before deployment.
# The default value is: https://cdn.jsdelivr.net/npm/mathjax@2.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
# extension names that should be enabled during MathJax rendering. For example
# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_EXTENSIONS =
# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
# of code that will be used on startup of the MathJax code. See the MathJax site
# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
# example see the documentation.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_CODEFILE =
# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
# the HTML output. The underlying search engine uses javascript and DHTML and
# should work on any modern browser. Note that when using HTML help
# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
# there is already a search function so this one should typically be disabled.
# For large projects the javascript based search engine can be slow; in that case
# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
# search using the keyboard; to jump to the search box use <access key> + S
# (what the <access key> is depends on the OS and browser, but it is typically
# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
# key> to jump into the search results window, the results can be navigated
# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
# the search. The filter options can be selected when the cursor is inside the
# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
# to select a filter and <Enter> or <escape> to activate or cancel the filter
# option.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
SEARCHENGINE = YES
# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
# implemented using a web server instead of a web client using JavaScript. There
# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
# setting. When disabled, doxygen will generate a PHP script for searching and
# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
# and searching needs to be provided by external tools. See the section
# "External Indexing and Searching" for details.
# The default value is: NO.
# This tag requires that the tag SEARCHENGINE is set to YES.
SERVER_BASED_SEARCH = NO
# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
# script for searching. Instead the search results are written to an XML file
# which needs to be processed by an external indexer. Doxygen will invoke an
# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
# search results.
#
# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see: https://xapian.org/).
#
# See the section "External Indexing and Searching" for details.
# The default value is: NO.
# This tag requires that the tag SEARCHENGINE is set to YES.
EXTERNAL_SEARCH = NO
# The SEARCHENGINE_URL should point to a search engine hosted by a web server
# which will return the search results when EXTERNAL_SEARCH is enabled.
#
# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see: https://xapian.org/). See the section "External Indexing and
# Searching" for details.
# This tag requires that the tag SEARCHENGINE is set to YES.
SEARCHENGINE_URL =
# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
# search data is written to a file for indexing by an external tool. With the
# SEARCHDATA_FILE tag the name of this file can be specified.
# The default file is: searchdata.xml.
# This tag requires that the tag SEARCHENGINE is set to YES.
SEARCHDATA_FILE = searchdata.xml
# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
# projects and redirect the results back to the right project.
# This tag requires that the tag SEARCHENGINE is set to YES.
EXTERNAL_SEARCH_ID =
# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
# projects other than the one defined by this configuration file, but that are
# all added to the same external search index. Each project needs to have a
# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of a
# project to a relative location where the documentation can be found. The format is:
# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
# This tag requires that the tag SEARCHENGINE is set to YES.
EXTRA_SEARCH_MAPPINGS =
#---------------------------------------------------------------------------
# Configuration options related to the LaTeX output
#---------------------------------------------------------------------------
# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
# The default value is: YES.
GENERATE_LATEX = NO
# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: latex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_OUTPUT = latex
# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
# invoked.
#
# Note that when not enabling USE_PDFLATEX the default is latex; when enabling
# USE_PDFLATEX the default is pdflatex, and if in the latter case latex is
# chosen it is overwritten by pdflatex. For specific output languages the
# default can have been set differently, this depends on the implementation of
# the output language.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_CMD_NAME = latex
# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
# index for LaTeX.
# Note: This tag is used in the Makefile / make.bat.
# See also: LATEX_MAKEINDEX_CMD for the part in the generated output file
# (.tex).
# The default file is: makeindex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
MAKEINDEX_CMD_NAME = makeindex
# The LATEX_MAKEINDEX_CMD tag can be used to specify the command name to
# generate index for LaTeX. In case there is no backslash (\) as first character
# it will be automatically added in the LaTeX code.
# Note: This tag is used in the generated output file (.tex).
# See also: MAKEINDEX_CMD_NAME for the part in the Makefile / make.bat.
# The default value is: makeindex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_MAKEINDEX_CMD = makeindex
# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
# documents. This may be useful for small projects and may help to save some
# trees in general.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
COMPACT_LATEX = NO
# The PAPER_TYPE tag can be used to set the paper type that is used by the
# printer.
# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
# 14 inches) and executive (7.25 x 10.5 inches).
# The default value is: a4.
# This tag requires that the tag GENERATE_LATEX is set to YES.
PAPER_TYPE = a4
# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
# that should be included in the LaTeX output. The package can be specified just
# by its name or with the correct syntax as to be used with the LaTeX
# \usepackage command. To get the times font for instance you can specify :
# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
# To use the option intlimits with the amsmath package you can specify:
# EXTRA_PACKAGES=[intlimits]{amsmath}
# If left blank no extra packages will be included.
# This tag requires that the tag GENERATE_LATEX is set to YES.
EXTRA_PACKAGES =
# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
# generated LaTeX document. The header should contain everything until the first
# chapter. If it is left blank doxygen will generate a standard header. See
# section "Doxygen usage" for information on how to let doxygen write the
# default header to a separate file.
#
# Note: Only use a user-defined header if you know what you are doing! The
# following commands have a special meaning inside the header: $title,
# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
# string, for the replacement values of the other commands the user is referred
# to HTML_HEADER.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_HEADER =
# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
# generated LaTeX document. The footer should contain everything after the last
# chapter. If it is left blank doxygen will generate a standard footer. See
# LATEX_HEADER for more information on how to generate a default footer and what
# special commands can be used inside the footer.
#
# Note: Only use a user-defined footer if you know what you are doing!
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_FOOTER =
# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
# LaTeX style sheets that are included after the standard style sheets created
# by doxygen. Using this option one can overrule certain style aspects. Doxygen
# will copy the style sheet files to the output directory.
# Note: The order of the extra style sheet files is of importance (e.g. the last
# style sheet in the list overrules the setting of the previous ones in the
# list).
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_EXTRA_STYLESHEET =
# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the LATEX_OUTPUT output
# directory. Note that the files will be copied as-is; there are no commands or
# markers available.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_EXTRA_FILES =
# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
# contain links (just like the HTML output) instead of page references. This
# makes the output suitable for online browsing using a PDF viewer.
# The default value is: YES.
# This tag requires that the tag GENERATE_LATEX is set to YES.
PDF_HYPERLINKS = YES
# If the USE_PDFLATEX tag is set to YES, doxygen will use the engine as
# specified with LATEX_CMD_NAME to generate the PDF file directly from the LaTeX
# files. Set this option to YES, to get a higher quality PDF documentation.
#
# See also section LATEX_CMD_NAME for selecting the engine.
# The default value is: YES.
# This tag requires that the tag GENERATE_LATEX is set to YES.
USE_PDFLATEX = YES
# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
# command to the generated LaTeX files. This will instruct LaTeX to keep running
# if errors occur, instead of asking the user for help. This option is also used
# when generating formulas in HTML.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_BATCHMODE = NO
# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
# index chapters (such as File Index, Compound Index, etc.) in the output.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_HIDE_INDICES = NO
# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
# code with syntax highlighting in the LaTeX output.
#
# Note that which sources are shown also depends on other settings such as
# SOURCE_BROWSER.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_SOURCE_CODE = NO
# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
# bibliography, e.g. plainnat, or ieeetr. See
# https://en.wikipedia.org/wiki/BibTeX and \cite for more info.
# The default value is: plain.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_BIB_STYLE = plain
# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
# page will contain the date and time when the page was generated. Setting this
# to NO can help when comparing the output of multiple runs.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_TIMESTAMP = NO
# The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute)
# path from which the emoji images will be read. If a relative path is entered,
# it will be relative to the LATEX_OUTPUT directory. If left blank the
# LATEX_OUTPUT directory will be used.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_EMOJI_DIRECTORY =
#---------------------------------------------------------------------------
# Configuration options related to the RTF output
#---------------------------------------------------------------------------
# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
# RTF output is optimized for Word 97 and may not look too pretty with other RTF
# readers/editors.
# The default value is: NO.
GENERATE_RTF = NO
# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: rtf.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_OUTPUT = rtf
# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
# documents. This may be useful for small projects and may help to save some
# trees in general.
# The default value is: NO.
# This tag requires that the tag GENERATE_RTF is set to YES.
COMPACT_RTF = NO
# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
# contain hyperlink fields. The RTF file will contain links (just like the HTML
# output) instead of page references. This makes the output suitable for online
# browsing using Word or some other Word compatible readers that support those
# fields.
#
# Note: WordPad (write) and others do not support links.
# The default value is: NO.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_HYPERLINKS = NO
# Load stylesheet definitions from file. Syntax is similar to doxygen's
# configuration file, i.e. a series of assignments. You only have to provide
# replacements, missing definitions are set to their default value.
#
# See also section "Doxygen usage" for information on how to generate the
# default style sheet that doxygen normally uses.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_STYLESHEET_FILE =
# Set optional variables used in the generation of an RTF document. Syntax is
# similar to doxygen's configuration file. A template extensions file can be
# generated using doxygen -e rtf extensionFile.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_EXTENSIONS_FILE =
# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
# with syntax highlighting in the RTF output.
#
# Note that which sources are shown also depends on other settings such as
# SOURCE_BROWSER.
# The default value is: NO.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_SOURCE_CODE = NO
#---------------------------------------------------------------------------
# Configuration options related to the man page output
#---------------------------------------------------------------------------
# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
# classes and files.
# The default value is: NO.
GENERATE_MAN = NO
# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it. A directory man3 will be created inside the directory specified by
# MAN_OUTPUT.
# The default directory is: man.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_OUTPUT = man
# The MAN_EXTENSION tag determines the extension that is added to the generated
# man pages. In case the manual section does not start with a number, the number
# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
# optional.
# The default value is: .3.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_EXTENSION = .3
# The MAN_SUBDIR tag determines the name of the directory created within
# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
# MAN_EXTENSION with the initial . removed.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_SUBDIR =
# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
# will generate one additional man file for each entity documented in the real
# man page(s). These additional files only source the real man page, but without
# them the man command would be unable to find the correct page.
# The default value is: NO.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_LINKS = NO
#---------------------------------------------------------------------------
# Configuration options related to the XML output
#---------------------------------------------------------------------------
# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
# captures the structure of the code including all documentation.
# The default value is: NO.
GENERATE_XML = YES
# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: xml.
# This tag requires that the tag GENERATE_XML is set to YES.
XML_OUTPUT = xml
# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
# listings (including syntax highlighting and cross-referencing information) to
# the XML output. Note that enabling this will significantly increase the size
# of the XML output.
# The default value is: YES.
# This tag requires that the tag GENERATE_XML is set to YES.
XML_PROGRAMLISTING = YES
# If the XML_NS_MEMB_FILE_SCOPE tag is set to YES, doxygen will include
# namespace members in file scope as well, matching the HTML output.
# The default value is: NO.
# This tag requires that the tag GENERATE_XML is set to YES.
XML_NS_MEMB_FILE_SCOPE = NO
#---------------------------------------------------------------------------
# Configuration options related to the DOCBOOK output
#---------------------------------------------------------------------------
# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
# that can be used to generate PDF.
# The default value is: NO.
GENERATE_DOCBOOK = NO
# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
# front of it.
# The default directory is: docbook.
# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
DOCBOOK_OUTPUT = docbook
# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
# program listings (including syntax highlighting and cross-referencing
# information) to the DOCBOOK output. Note that enabling this will significantly
# increase the size of the DOCBOOK output.
# The default value is: NO.
# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
DOCBOOK_PROGRAMLISTING = NO
#---------------------------------------------------------------------------
# Configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
# AutoGen Definitions (see http://autogen.sourceforge.net/) file that captures
# the structure of the code including all documentation. Note that this feature
# is still experimental and incomplete at the moment.
# The default value is: NO.
GENERATE_AUTOGEN_DEF = NO
#---------------------------------------------------------------------------
# Configuration options related to the Perl module output
#---------------------------------------------------------------------------
# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
# file that captures the structure of the code including all documentation.
#
# Note that this feature is still experimental and incomplete at the moment.
# The default value is: NO.
GENERATE_PERLMOD = NO
# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
# output from the Perl module output.
# The default value is: NO.
# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_LATEX = NO
# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
# formatted so it can be parsed by a human reader. This is useful if you want to
# understand what is going on. On the other hand, if this tag is set to NO, the
# size of the Perl module output will be much smaller and Perl will parse it
# just the same.
# The default value is: YES.
# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_PRETTY = YES
# The names of the make variables in the generated doxyrules.make file are
# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
# so different doxyrules.make files included by the same Makefile don't
# overwrite each other's variables.
# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_MAKEVAR_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------
# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
# C-preprocessor directives found in the sources and include files.
# The default value is: YES.
ENABLE_PREPROCESSING = YES
# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
# in the source code. If set to NO, only conditional compilation will be
# performed. Macro expansion can be done in a controlled way by setting
# EXPAND_ONLY_PREDEF to YES.
# The default value is: NO.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
MACRO_EXPANSION = NO
# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
# the macro expansion is limited to the macros specified with the PREDEFINED and
# EXPAND_AS_DEFINED tags.
# The default value is: NO.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
EXPAND_ONLY_PREDEF = NO
# If the SEARCH_INCLUDES tag is set to YES, the include files in the
# INCLUDE_PATH will be searched if a #include is found.
# The default value is: YES.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
SEARCH_INCLUDES = YES
# The INCLUDE_PATH tag can be used to specify one or more directories that
# contain include files that are not input files but should be processed by the
# preprocessor.
# This tag requires that the tag SEARCH_INCLUDES is set to YES.
INCLUDE_PATH =
# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
# patterns (like *.h and *.hpp) to filter out the header-files in the
# directories. If left blank, the patterns specified with FILE_PATTERNS will be
# used.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
INCLUDE_FILE_PATTERNS =
# The PREDEFINED tag can be used to specify one or more macro names that are
# defined before the preprocessor is started (similar to the -D option of e.g.
# gcc). The argument of the tag is a list of macros of the form: name or
# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
# is assumed. To prevent a macro definition from being undefined via #undef or
# recursively expanded use the := operator instead of the = operator.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
PREDEFINED =
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
# tag can be used to specify a list of macro names that should be expanded. The
# macro definition that is found in the sources will be used. Use the PREDEFINED
# tag if you want to use a different macro definition that overrules the
# definition found in the source code.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
EXPAND_AS_DEFINED =
# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
# remove all references to function-like macros that are alone on a line, have
# an all uppercase name, and do not end with a semicolon. Such function macros
# are typically used for boiler-plate code, and will confuse the parser if not
# removed.
# The default value is: YES.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
SKIP_FUNCTION_MACROS = YES
#---------------------------------------------------------------------------
# Configuration options related to external references
#---------------------------------------------------------------------------
# The TAGFILES tag can be used to specify one or more tag files. For each tag
# file the location of the external documentation should be added. The format of
# a tag file without this location is as follows:
# TAGFILES = file1 file2 ...
# Adding location for the tag files is done as follows:
# TAGFILES = file1=loc1 "file2 = loc2" ...
# where loc1 and loc2 can be relative or absolute paths or URLs. See the
# section "Linking to external documentation" for more information about the use
# of tag files.
# Note: Each tag file must have a unique name (where the name does NOT include
# the path). If a tag file is not located in the directory in which doxygen is
# run, you must also specify the path to the tagfile here.
TAGFILES =
# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
# tag file that is based on the input files it reads. See section "Linking to
# external documentation" for more information about the usage of tag files.
GENERATE_TAGFILE =
# If the ALLEXTERNALS tag is set to YES, all external class will be listed in
# the class index. If set to NO, only the inherited external classes will be
# listed.
# The default value is: NO.
ALLEXTERNALS = NO
# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
# in the modules index. If set to NO, only the current project's groups will be
# listed.
# The default value is: YES.
EXTERNAL_GROUPS = YES
# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
# the related pages index. If set to NO, only the current project's pages will
# be listed.
# The default value is: YES.
EXTERNAL_PAGES = YES
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram
# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
# NO turns the diagrams off. Note that this option also works with HAVE_DOT
# disabled, but it is recommended to install and use dot, since it yields more
# powerful graphs.
# The default value is: YES.
CLASS_DIAGRAMS = YES
# You can include diagrams made with dia in doxygen documentation. Doxygen will
# then run dia to produce the diagram and insert it in the documentation. The
# DIA_PATH tag allows you to specify the directory where the dia binary resides.
# If left empty dia is assumed to be found in the default search path.
DIA_PATH =
# If set to YES the inheritance and collaboration graphs will hide inheritance
# and usage relations if the target is undocumented or is not a class.
# The default value is: YES.
HIDE_UNDOC_RELATIONS = YES
# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
# available from the path. This tool is part of Graphviz (see:
# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
# Bell Labs. The other options in this section have no effect if this option is
# set to NO
# The default value is: NO.
HAVE_DOT = YES
# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
# to run in parallel. When set to 0 doxygen will base this on the number of
# processors available in the system. You can set it explicitly to a value
# larger than 0 to get control over the balance between CPU load and processing
# speed.
# Minimum value: 0, maximum value: 32, default value: 0.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_NUM_THREADS = 0
# When you want a differently looking font in the dot files that doxygen
# generates you can specify the font name using DOT_FONTNAME. You need to make
# sure dot is able to find the font, which can be done by putting it in a
# standard location or by setting the DOTFONTPATH environment variable or by
# setting DOT_FONTPATH to the directory containing the font.
# The default value is: Helvetica.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTNAME = Helvetica
# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
# dot graphs.
# Minimum value: 4, maximum value: 24, default value: 10.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTSIZE = 10
# By default doxygen will tell dot to use the default font as specified with
# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
# the path where dot can find it using this tag.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTPATH =
# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
# each documented class showing the direct and indirect inheritance relations.
# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
CLASS_GRAPH = YES
# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
# graph for each documented class showing the direct and indirect implementation
# dependencies (inheritance, containment, and class references variables) of the
# class with other documented classes.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
COLLABORATION_GRAPH = YES
# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
# groups, showing the direct groups dependencies.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
GROUP_GRAPHS = YES
# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and
# collaboration diagrams in a style similar to the OMG's Unified Modeling
# Language.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
UML_LOOK = YES
# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
# class node. If there are many fields or methods and many nodes the graph may
# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
# number of items for each type to make the size more manageable. Set this to 0
# for no limit. Note that the threshold may be exceeded by 50% before the limit
# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
# but if the number exceeds 15, the total amount of fields shown is limited to
# 10.
# Minimum value: 0, maximum value: 100, default value: 10.
# This tag requires that the tag HAVE_DOT is set to YES.
UML_LIMIT_NUM_FIELDS = 10
# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
# collaboration graphs will show the relations between templates and their
# instances.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
TEMPLATE_RELATIONS = NO
# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
# YES then doxygen will generate a graph for each documented file showing the
# direct and indirect include dependencies of the file with other documented
# files.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
INCLUDE_GRAPH = YES
# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
# set to YES then doxygen will generate a graph for each documented file showing
# the direct and indirect include dependencies of the file with other documented
# files.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
INCLUDED_BY_GRAPH = YES
# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
# dependency graph for every global function or class method.
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable call graphs for selected
# functions only using the \callgraph command. Disabling a call graph can be
# accomplished by means of the command \hidecallgraph.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
CALL_GRAPH = NO
# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
# dependency graph for every global function or class method.
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable caller graphs for selected
# functions only using the \callergraph command. Disabling a caller graph can be
# accomplished by means of the command \hidecallergraph.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
CALLER_GRAPH = NO
# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a graphical
# hierarchy of all classes instead of a textual one.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
GRAPHICAL_HIERARCHY = YES
# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
# dependencies a directory has on other directories in a graphical way. The
# dependency relations are determined by the #include relations between the
# files in the directories.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
DIRECTORY_GRAPH = YES
# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
# generated by dot. For an explanation of the image formats see the section
# output formats in the documentation of the dot tool (Graphviz (see:
# http://www.graphviz.org/)).
# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
# to make the SVG files visible in IE 9+ (other browsers do not have this
# requirement).
# Possible values are: png, jpg, gif, svg, png:gd, png:gd:gd, png:cairo,
# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
# png:gdiplus:gdiplus.
# The default value is: png.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_IMAGE_FORMAT = png
# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
# enable generation of interactive SVG images that allow zooming and panning.
#
# Note that this requires a modern browser other than Internet Explorer. Tested
# and working are Firefox, Chrome, Safari, and Opera.
# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
# the SVG files visible. Older versions of IE do not have SVG support.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
INTERACTIVE_SVG = NO
# The DOT_PATH tag can be used to specify the path where the dot tool can be
# found. If left blank, it is assumed the dot tool can be found in the path.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_PATH =
# The DOTFILE_DIRS tag can be used to specify one or more directories that
# contain dot files that are included in the documentation (see the \dotfile
# command).
# This tag requires that the tag HAVE_DOT is set to YES.
DOTFILE_DIRS =
# The MSCFILE_DIRS tag can be used to specify one or more directories that
# contain msc files that are included in the documentation (see the \mscfile
# command).
MSCFILE_DIRS =
# The DIAFILE_DIRS tag can be used to specify one or more directories that
# contain dia files that are included in the documentation (see the \diafile
# command).
DIAFILE_DIRS =
# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
# path where java can find the plantuml.jar file. If left blank, it is assumed
# PlantUML is not used or called during a preprocessing step. Doxygen will
# generate a warning when it encounters a \startuml command in this case and
# will not generate output for the diagram.
PLANTUML_JAR_PATH =
# When using plantuml, the PLANTUML_CFG_FILE tag can be used to specify a
# configuration file for plantuml.
PLANTUML_CFG_FILE =
# When using plantuml, the specified paths are searched for files specified by
# the !include statement in a plantuml block.
PLANTUML_INCLUDE_PATH =
# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
# that will be shown in the graph. If the number of nodes in a graph becomes
# larger than this value, doxygen will truncate the graph, which is visualized
# by representing a node as a red box. Note that if the number of direct
# children of the root node in a graph is already larger than
# DOT_GRAPH_MAX_NODES, doxygen will not show the graph at all. Also note that
# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
# Minimum value: 0, maximum value: 10000, default value: 50.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_GRAPH_MAX_NODES = 50
# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
# generated by dot. A depth value of 3 means that only nodes reachable from the
# root by following a path via at most 3 edges will be shown. Nodes that lay
# further from the root node will be omitted. Note that setting this option to 1
# or 2 may greatly reduce the computation time needed for large code bases. Also
# note that the size of a graph can be further restricted by
# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
# Minimum value: 0, maximum value: 1000, default value: 0.
# This tag requires that the tag HAVE_DOT is set to YES.
MAX_DOT_GRAPH_DEPTH = 0
# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
# background. This is disabled by default, because dot on Windows does not seem
# to support this out of the box.
#
# Warning: Depending on the platform used, enabling this option may lead to
# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
# read).
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_TRANSPARENT = NO
# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
# files in one run (i.e. multiple -o and -T options on the command line). This
# makes dot run faster, but since only newer versions of dot (>1.8.10) support
# this, this feature is disabled by default.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_MULTI_TARGETS = NO
# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
# explaining the meaning of the various boxes and arrows in the dot generated
# graphs.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
GENERATE_LEGEND = YES
# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot
# files that are used to generate the various graphs.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_CLEANUP = YES
| 0 |
rapidsai_public_repos/wholegraph
|
rapidsai_public_repos/wholegraph/cpp/.clang-format
|
---
# Refer to the following link for the explanation of each params:
# http://releases.llvm.org/8.0.0/tools/clang/docs/ClangFormatStyleOptions.html
Language: Cpp
# BasedOnStyle: Google
AccessModifierOffset: -1
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: true
AlignConsecutiveBitFields: true
AlignConsecutiveDeclarations: false
AlignConsecutiveMacros: true
AlignEscapedNewlines: Left
AlignOperands: true
AlignTrailingComments: true
AllowAllArgumentsOnNextLine: true
AllowAllConstructorInitializersOnNextLine: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: true
AllowShortEnumsOnASingleLine: true
AllowShortFunctionsOnASingleLine: All
AllowShortIfStatementsOnASingleLine: true
AllowShortLambdasOnASingleLine: true
AllowShortLoopsOnASingleLine: false
# This is deprecated
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: Yes
BinPackArguments: false
BinPackParameters: false
BraceWrapping:
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: false
AfterNamespace: false
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
AfterExternBlock: false
BeforeCatch: false
BeforeElse: false
IndentBraces: false
# disabling the below splits, else, they'll just add to the vertical length of source files!
SplitEmptyFunction: false
SplitEmptyRecord: false
SplitEmptyNamespace: false
BreakAfterJavaFieldAnnotations: false
BreakBeforeBinaryOperators: None
BreakBeforeBraces: WebKit
BreakBeforeInheritanceComma: false
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakConstructorInitializers: BeforeColon
BreakInheritanceList: BeforeColon
BreakStringLiterals: true
ColumnLimit: 100
CommentPragmas: '^ IWYU pragma:'
CompactNamespaces: false
ConstructorInitializerAllOnOneLineOrOnePerLine: true
# Kept the below 2 to be the same as `IndentWidth` to keep everything uniform
ConstructorInitializerIndentWidth: 2
ContinuationIndentWidth: 2
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros:
- foreach
- Q_FOREACH
- BOOST_FOREACH
IncludeBlocks: Preserve
IncludeIsMainRegex: '([-_](test|unittest))?$'
IndentCaseLabels: true
IndentPPDirectives: None
IndentWidth: 2
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: false
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBinPackProtocolList: Never
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakAssignment: 2
PenaltyBreakBeforeFirstCallParameter: 1
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyBreakTemplateDeclaration: 10
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Left
RawStringFormats:
- Language: Cpp
Delimiters:
- cc
- CC
- cpp
- Cpp
- CPP
- 'c++'
- 'C++'
CanonicalDelimiter: ''
- Language: TextProto
Delimiters:
- pb
- PB
- proto
- PROTO
EnclosingFunctions:
- EqualsProto
- EquivToProto
- PARSE_PARTIAL_TEXT_PROTO
- PARSE_TEST_PROTO
- PARSE_TEXT_PROTO
- ParseTextOrDie
- ParseTextProtoOrDie
CanonicalDelimiter: ''
BasedOnStyle: google
# Enabling comment reflow causes doxygen comments to be messed up in their formats!
ReflowComments: true
SortIncludes: true
SortUsingDeclarations: true
SpaceAfterCStyleCast: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeCpp11BracedList: false
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatements
SpaceBeforeRangeBasedForLoopColon: true
SpaceBeforeSquareBrackets: false
SpaceInEmptyBlock: false
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 2
SpacesInAngles: false
SpacesInConditionalStatement: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: c++17
StatementMacros:
- Q_UNUSED
- QT_REQUIRE_VERSION
# Be consistent with indent-width, even for people who use tab for indentation!
TabWidth: 2
UseTab: Never
| 0 |
rapidsai_public_repos/wholegraph/cpp/include
|
rapidsai_public_repos/wholegraph/cpp/include/wholememory/graph_op.h
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda_runtime_api.h>
#include <wholememory/env_func_ptrs.h>
#include <wholememory/wholememory.h>
#include <wholememory/wholememory_tensor.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* Append Unique op
 * @param target_nodes_tensor : Wholememory Tensor of target nodes
 * @param neighbor_nodes_tensor : Wholememory Tensor of neighbor nodes
 * @param output_unique_node_memory_context : memory context to output the unique nodes
 * @param output_neighbor_raw_to_unique_mapping_tensor : Wholememory Tensor mapping each raw
 * neighbor node to its position in the unique node output, optional output
* @param p_env_fns : pointers to environment functions.
* @param stream : CUDA stream to use
* @return : wholememory_error_code_t
*/
wholememory_error_code_t graph_append_unique(
wholememory_tensor_t target_nodes_tensor,
wholememory_tensor_t neighbor_nodes_tensor,
void* output_unique_node_memory_context,
wholememory_tensor_t output_neighbor_raw_to_unique_mapping_tensor,
wholememory_env_func_t* p_env_fns,
void* stream);
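/*
 * Illustrative usage sketch (added for clarity; not part of the original header).
 * It assumes the caller already owns valid target/neighbor node tensors, a memory
 * context created through the environment functions, and a CUDA stream; every
 * variable name below is a hypothetical placeholder.
 *
 *   wholememory_tensor_t target_nodes;    // node ids to expand
 *   wholememory_tensor_t neighbor_nodes;  // sampled neighbor node ids
 *   void* unique_nodes_context;           // memory context receiving the unique nodes
 *   wholememory_tensor_t raw_to_unique;   // optional mapping output
 *   wholememory_env_func_t* env_fns;      // environment function table from the caller
 *   cudaStream_t stream;
 *
 *   wholememory_error_code_t st = graph_append_unique(
 *     target_nodes, neighbor_nodes, unique_nodes_context, raw_to_unique,
 *     env_fns, (void*)stream);
 *   // st reports success or the specific failure reason.
 */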
/**
* Csr Add Self Loop Op
* @param csr_row_ptr_tensor : Wholememory Tensor of local graph csr_row_ptr
* @param csr_col_ptr_tensor : Wholememory Tensor of csr_col_ptr
* @param output_csr_row_ptr_tensor : Wholememory Tensor of output_csr_row_ptr
* @param output_csr_col_ptr_tensor : Wholememory Tensor of output_csr_col_ptr
* @param stream : CUDA stream to use
* @return : wholememory_error_code_t
*/
wholememory_error_code_t csr_add_self_loop(wholememory_tensor_t csr_row_ptr_tensor,
wholememory_tensor_t csr_col_ptr_tensor,
wholememory_tensor_t output_csr_row_ptr_tensor,
wholememory_tensor_t output_csr_col_ptr_tensor,
void* stream);
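/*
 * Illustrative usage sketch (added for clarity; not part of the original header).
 * Adding self loops grows each CSR row by one entry, so the output column tensor
 * typically needs nnz + num_rows elements (an assumption, not stated by the header).
 * The names below are hypothetical placeholders.
 *
 *   wholememory_tensor_t csr_row_ptr, csr_col_ptr;          // input local CSR graph
 *   wholememory_tensor_t out_csr_row_ptr, out_csr_col_ptr;  // preallocated outputs
 *   cudaStream_t stream;
 *
 *   wholememory_error_code_t st = csr_add_self_loop(
 *     csr_row_ptr, csr_col_ptr, out_csr_row_ptr, out_csr_col_ptr, (void*)stream);
 */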
#ifdef __cplusplus
}
#endif
| 0 |
rapidsai_public_repos/wholegraph/cpp/include
|
rapidsai_public_repos/wholegraph/cpp/include/wholememory/wholememory_op.h
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <wholememory/env_func_ptrs.h>
#include <wholememory/wholememory.h>
#include <wholememory/wholememory_tensor.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* Gather Op
* @param wholememory_tensor : WholeMemory Tensor of embedding table.
* @param indices_tensor : indices to gather from, should NOT be WholeMemory Tensor
* @param output_tensor : output tensor to gather to, should NOT be WholeMemoryTensor
* @param p_env_fns : pointers to environment functions.
* @param stream : cudaStream_t to use.
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_gather(wholememory_tensor_t wholememory_tensor,
wholememory_tensor_t indices_tensor,
wholememory_tensor_t output_tensor,
wholememory_env_func_t* p_env_fns,
void* stream);
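/*
 * Illustrative usage sketch (added for clarity; not part of the original header).
 * Gathers rows of a WholeMemory embedding table into a local output tensor. Tensor
 * construction is omitted and all names are hypothetical; see wholememory_tensor.h
 * for the actual tensor creation APIs.
 *
 *   wholememory_tensor_t embedding;  // WholeMemory Tensor, shape [num_rows, dim]
 *   wholememory_tensor_t indices;    // local tensor of row indices to read
 *   wholememory_tensor_t output;     // local tensor, shape [num_indices, dim]
 *   wholememory_env_func_t* env_fns;
 *   cudaStream_t stream;
 *
 *   wholememory_error_code_t st =
 *     wholememory_gather(embedding, indices, output, env_fns, (void*)stream);
 */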
/**
* Scatter Op
 * @param input_tensor : input tensor to scatter from, should NOT be WholeMemory Tensor
* @param indices_tensor : indices to scatter to, should NOT be WholeMemory Tensor
* @param wholememory_tensor : WholeMemory Tensor of embedding table.
* @param p_env_fns : pointers to environment functions.
* @param stream : cudaStream_t to use.
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_scatter(wholememory_tensor_t input_tensor,
wholememory_tensor_t indices_tensor,
wholememory_tensor_t wholememory_tensor,
wholememory_env_func_t* p_env_fns,
void* stream);
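/*
 * Illustrative usage sketch (added for clarity; not part of the original header).
 * Scatter is the opposite direction of gather: rows of a local input tensor are
 * written into the WholeMemory embedding table at the given indices. Names are
 * hypothetical placeholders.
 *
 *   wholememory_tensor_t input;      // local tensor, shape [num_indices, dim]
 *   wholememory_tensor_t indices;    // local tensor of destination row indices
 *   wholememory_tensor_t embedding;  // WholeMemory Tensor, shape [num_rows, dim]
 *   wholememory_env_func_t* env_fns;
 *   cudaStream_t stream;
 *
 *   wholememory_error_code_t st =
 *     wholememory_scatter(input, indices, embedding, env_fns, (void*)stream);
 */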
/**
 * Just a test function.
* @param input_tensor : input tensor
* @param output_fixed_tensor : fixed size tensor of output
* @param output_variable_device_tensor_handle : device version variable tensor
* @param output_variable_pinned_tensor_handle : pinned version variable tensor
* @param output_variable_host_tensor_handle : host version variable tensor
* @param output_variable_entry_count : output entry count
* @param p_env_fns : pointers to environment functions.
* @param stream : cudaStream_t to use.
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_env_test_op(wholememory_tensor_t input_tensor,
wholememory_tensor_t output_fixed_tensor,
void* output_variable_device_tensor_handle,
void* output_variable_pinned_tensor_handle,
void* output_variable_host_tensor_handle,
int64_t output_variable_entry_count,
wholememory_env_func_t* p_env_fns,
void* stream);
#ifdef __cplusplus
}
#endif
| 0 |
rapidsai_public_repos/wholegraph/cpp/include
|
rapidsai_public_repos/wholegraph/cpp/include/wholememory/tensor_description.h
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <stdint.h>
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @enum wholememory_dtype_t
* @brief defines WholeMemory data type for tensors
*/
enum wholememory_dtype_t {
WHOLEMEMORY_DT_UNKNOWN = 0, /*!< Unknown type */
WHOLEMEMORY_DT_FLOAT, /*!< 32-bit float type */
WHOLEMEMORY_DT_HALF, /*!< 16-bit half float type */
WHOLEMEMORY_DT_DOUBLE, /*!< 64-bit double type */
WHOLEMEMORY_DT_BF16, /*!< 16-bit bfloat type */
WHOLEMEMORY_DT_INT, /*!< 32-bit signed integer type */
WHOLEMEMORY_DT_INT64, /*!< 64-bit signed integer type */
WHOLEMEMORY_DT_INT16, /*!< 16-bit signed integer type */
WHOLEMEMORY_DT_INT8, /*!< 8-bit signed integer type */
  WHOLEMEMORY_DT_COUNT, /*!< total count of types */
};
/**
* Get element size of wholememory_dtype_t
* @param dtype : wholememory_dtype_t
* @return : element size of dtype, -1 on invalid dtype.
*/
size_t wholememory_dtype_get_element_size(wholememory_dtype_t dtype);
/**
* Check if dtype is floating number
* @param dtype : wholememory_dtype_t
* @return : True if dtype is WHOLEMEMORY_DT_FLOAT, WHOLEMEMORY_DT_HALF, WHOLEMEMORY_DT_DOUBLE or
* WHOLEMEMORY_DT_BF16. False otherwise.
*/
bool wholememory_dtype_is_floating_number(wholememory_dtype_t dtype);
/**
* Check if dtype is integer number
* @param dtype : wholememory_dtype_t
* @return : True if dtype is WHOLEMEMORY_DT_INT, WHOLEMEMORY_DT_INT64, WHOLEMEMORY_DT_INT16 or
* WHOLEMEMORY_DT_INT8, False otherwise.
*/
bool wholememory_dtype_is_integer_number(wholememory_dtype_t dtype);
/**
* @struct wholememory_array_description_t
* @brief wrapper for array in WholeMemory
*/
struct wholememory_array_description_t {
int64_t size; /*!< size of the array in elements. */
int64_t storage_offset; /*!< offset in number of elements, NOT in bytes. */
wholememory_dtype_t dtype; /*!< data type of the array */
};
/**
* @struct wholememory_matrix_description_t
* @brief wrapper for matrix in WholeMemory
*/
struct wholememory_matrix_description_t {
int64_t sizes[2]; /*!< sizes[0] is row of the matrix, sizes[1] is column of the matrix */
int64_t stride; /*!< stride of first dimension, in number of elements */
int64_t storage_offset; /*!< offset in number of elements, NOT in bytes. */
wholememory_dtype_t dtype; /*!< data type of the matrix */
};
#define WHOLEMEMORY_MAX_TENSOR_DIM (8)
/**
* @struct wholememory_tensor_description_t
* @brief Tensor description in WholeMemory, dimension 0 is the slowest changed dimension
*/
struct wholememory_tensor_description_t {
int64_t sizes[WHOLEMEMORY_MAX_TENSOR_DIM]; /*!< size of each dimension of the tensor, in number of
elements */
int64_t strides[WHOLEMEMORY_MAX_TENSOR_DIM]; /*!< stride of the tensor, in number of elements */
int64_t storage_offset; /*!< offset in number of elements, NOT in bytes. */
int dim; /*!< dim of the tensor */
wholememory_dtype_t dtype; /*!< data type of the tensor */
};
/*!
* Create wholememory_array_description_t object
* @param size : array size in number of elements
* @param storage_offset : storage offset in number of elements
* @param dtype : data type of array elements
* @return created wholememory_array_description_t
*/
wholememory_array_description_t wholememory_create_array_desc(int64_t size,
int64_t storage_offset,
wholememory_dtype_t dtype);
/*!
* Create wholememory_matrix_description_t object
* @param sizes : matrix sizes array, counted in number of elements, sizes[1] changes fastest.
* @param stride : stride of first dimension(slower changed dimension), stride is counted in number
* of elements
* @param storage_offset : storage offset in number of elements
* @param dtype : data type of matrix elements
* @return created wholememory_matrix_description_t
*/
wholememory_matrix_description_t wholememory_create_matrix_desc(int64_t sizes[2],
int64_t stride,
int64_t storage_offset,
wholememory_dtype_t dtype);
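/*
 * Illustrative usage sketch (added for clarity; not part of the original header).
 * Creates an array description for 1000 int64 ids and a matrix description for a
 * 1000 x 128 float table whose rows are stored contiguously (stride equal to the
 * number of columns). The sizes are arbitrary example values.
 *
 *   wholememory_array_description_t ids_desc =
 *     wholememory_create_array_desc(1000, 0, WHOLEMEMORY_DT_INT64);
 *
 *   int64_t sizes[2] = {1000, 128};
 *   wholememory_matrix_description_t emb_desc =
 *     wholememory_create_matrix_desc(sizes, 128, 0, WHOLEMEMORY_DT_FLOAT);
 */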
/*!
* Initialize wholememory_tensor_description_t, set sizes and strides to all ones, and set
* storage_offset to 0, set dtype to WHOLEMEMORY_DT_UNKNOWN, set dim to 0.
* @param p_tensor_description : pointer to wholememory_tensor_description_t.
*/
void wholememory_initialize_tensor_desc(wholememory_tensor_description_t* p_tensor_description);
/**
 * Copy array description to matrix description
* @param p_matrix_description : pointer to wholememory_matrix_description_t.
* @param p_array_description : pointer to wholememory_array_description_t.
*/
void wholememory_copy_array_desc_to_matrix(wholememory_matrix_description_t* p_matrix_description,
wholememory_array_description_t* p_array_description);
/*!
* Copy array description to tensor description
* @param p_tensor_description : pointer to wholememory_tensor_description_t.
* @param p_array_description : pointer to wholememory_array_description_t.
*/
void wholememory_copy_array_desc_to_tensor(wholememory_tensor_description_t* p_tensor_description,
wholememory_array_description_t* p_array_description);
/*!
* Copy matrix description to tensor description
* @param p_tensor_description : pointer to wholememory_tensor_description_t.
* @param p_matrix_description : pointer to wholememory_matrix_description_t.
*/
void wholememory_copy_matrix_desc_to_tensor(wholememory_tensor_description_t* p_tensor_description,
wholememory_matrix_description_t* p_matrix_description);
/*!
* Convert tensor description to array description
* @param p_array_description : pointer to wholememory_array_description_t.
* @param p_tensor_description : pointer to wholememory_tensor_description_t.
* @return : Return true if convertible else false.
*/
bool wholememory_convert_tensor_desc_to_array(
wholememory_array_description_t* p_array_description,
wholememory_tensor_description_t* p_tensor_description);
/*!
* Convert tensor description to matrix description
* @param p_matrix_description : pointer to wholememory_matrix_description_t.
* @param p_tensor_description : pointer to wholememory_tensor_description_t.
* @return : Return true if convertible else false.
*/
bool wholememory_convert_tensor_desc_to_matrix(
wholememory_matrix_description_t* p_matrix_description,
wholememory_tensor_description_t* p_tensor_description);
/*!
* Get total element count from array description.
* @param p_array_description : pointer to wholememory_array_description_t.
* @return : Return element count.
*/
int64_t wholememory_get_memory_element_count_from_array(
wholememory_array_description_t* p_array_description);
/*!
* Get total memory size from array description.
* @param p_array_description : pointer to wholememory_array_description_t.
* @return : Return memory size.
*/
int64_t wholememory_get_memory_size_from_array(
wholememory_array_description_t* p_array_description);
/*!
* Get total element count from matrix description.
* @param p_matrix_description : pointer to wholememory_matrix_description_t.
* @return : Return element count.
*/
int64_t wholememory_get_memory_element_count_from_matrix(
wholememory_matrix_description_t* p_matrix_description);
/*!
* Get total memory size from matrix description.
* @param p_matrix_description : pointer to wholememory_matrix_description_t.
* @return : Return memory size.
*/
int64_t wholememory_get_memory_size_from_matrix(
wholememory_matrix_description_t* p_matrix_description);
/*!
* Get total element count from tensor description.
* @param p_tensor_description : pointer to wholememory_tensor_description_t.
* @return : Return element count.
*/
int64_t wholememory_get_memory_element_count_from_tensor(
wholememory_tensor_description_t* p_tensor_description);
/*!
* Get total memory size from tensor description.
* @param p_tensor_description : pointer to wholememory_tensor_description_t.
* @return : Return memory size.
*/
int64_t wholememory_get_memory_size_from_tensor(
wholememory_tensor_description_t* p_tensor_description);
/**
* Squeeze tensor
* @param p_tensor_description : pointer to wholememory_tensor_description_t
* @param dim : which dim to squeeze
* @return : true if success else false
*/
bool wholememory_squeeze_tensor(wholememory_tensor_description_t* p_tensor_description, int dim);
/**
* Unsqueeze tensor
* @param p_tensor_description : pointer to wholememory_tensor_description_t
* @param dim : unsqueeze at which dim
* @return : true if success else false
*/
bool wholememory_unsqueeze_tensor(wholememory_tensor_description_t* p_tensor_description, int dim);
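/*
 * Usage sketch (illustrative only, not part of this header): build a matrix description and derive
 * a tensor description from it. The concrete sizes and the WHOLEMEMORY_DT_FLOAT enumerator are
 * example assumptions.
 *
 *   int64_t sizes[2] = {1024, 256};
 *   wholememory_matrix_description_t mat_desc =
 *     wholememory_create_matrix_desc(sizes, 256, 0, WHOLEMEMORY_DT_FLOAT);
 *   int64_t total_bytes = wholememory_get_memory_size_from_matrix(&mat_desc);
 *
 *   wholememory_tensor_description_t tensor_desc;
 *   wholememory_initialize_tensor_desc(&tensor_desc);
 *   wholememory_copy_matrix_desc_to_tensor(&tensor_desc, &mat_desc);
 *   // Append a trailing dimension of size 1, then remove it again.
 *   wholememory_unsqueeze_tensor(&tensor_desc, 2);
 *   wholememory_squeeze_tensor(&tensor_desc, 2);
 */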
#ifdef __cplusplus
}
#endif
| 0 |
rapidsai_public_repos/wholegraph/cpp/include
|
rapidsai_public_repos/wholegraph/cpp/include/wholememory/global_reference.h
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Global reference of a WholeMemory object
*
 * A global reference is for Continuous or Chunked WholeMemory types; in these types, each rank can
 * directly access all memory from all ranks. The global reference is used to perform this direct access.
*/
struct wholememory_gref_t {
void* pointer; /*!< pointer to data for CONTINUOUS WholeMemory or pointer to data pointer array
for CHUNKED WholeMemory */
size_t
stride; /*!< must be 0 for CONTINUOUS WholeMemory or memory size in byte for each pointer */
};
/**
* @brief Create global reference for continuous memory
* @param ptr : pointer to the memory
* @return : wholememory_gref_t
*/
wholememory_gref_t wholememory_create_continuous_global_reference(void* ptr);
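/*
 * Usage sketch (illustrative only): wrap a caller-owned continuous buffer in a global reference.
 * The helper name is hypothetical; `data` is assumed to point to memory this rank can access
 * directly (e.g. obtained from wholememory_get_global_pointer). For CONTINUOUS WholeMemory the
 * stride is 0, as described above.
 *
 *   wholememory_gref_t make_gref_example(void* data)
 *   {
 *     // gref.pointer == data, gref.stride == 0 for continuous memory
 *     return wholememory_create_continuous_global_reference(data);
 *   }
 */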
#ifdef __cplusplus
}
#endif
| 0 |
rapidsai_public_repos/wholegraph/cpp/include
|
rapidsai_public_repos/wholegraph/cpp/include/wholememory/wholememory_tensor.h
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <wholememory/tensor_description.h>
#include <wholememory/wholememory.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Opaque handle to WholeMemoryTensor
*
* An Opaque handle to WholeMemoryTensor
*/
typedef struct wholememory_tensor_* wholememory_tensor_t;
/**
* Create WholeMemory Tensor
* @param wholememory_tensor : returned WholeMemory Tensor handle
* @param tensor_description : description of the WholeMemory Tensor, should be 1-D or 2-D
* continuous tensor without offset.
* @param comm : WholeMemory Communicator
* @param memory_type : Memory Type of the underlying WholeMemory
* @param memory_location : Memory Location of the underlying WholeMemory
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_create_tensor(
wholememory_tensor_t* wholememory_tensor,
wholememory_tensor_description_t* tensor_description,
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location);
/**
* Destroy WholeMemory Tensor
* @param wholememory_tensor : WholeMemory Tensor to destroy
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_destroy_tensor(wholememory_tensor_t wholememory_tensor);
/**
* Make WholeMemory Tensor from local memory
* @param wholememory_tensor : returned WholeMemory Tensor handle
 * @param storage_ptr : pointer to underlying storage memory. Note: the storage pointer may not be
 * the same as the data pointer.
* @param tensor_description : description of the WholeMemory Tensor, should be 1-D or 2-D
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_make_tensor_from_pointer(
wholememory_tensor_t* wholememory_tensor,
void* storage_ptr,
wholememory_tensor_description_t* tensor_description);
/**
 * Make WholeMemory Tensor from an existing WholeMemory Handle
* @param wholememory_tensor : returned WholeMemory Tensor handle
* @param wholememory_handle : WholeMemory Handle
* @param tensor_description : description of the WholeMemory Tensor, should be 1-D or 2-D
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_make_tensor_from_handle(
wholememory_tensor_t* wholememory_tensor,
wholememory_handle_t wholememory_handle,
wholememory_tensor_description_t* tensor_description);
/**
 * Check if the WholeMemory Tensor has a WholeMemory Handle. A WholeMemory Tensor created by
 * wholememory_make_tensor_from_pointer has no Handle.
 * @param wholememory_tensor : WholeMemory Tensor
 * @return : true if it has a WholeMemory Handle
*/
bool wholememory_tensor_has_handle(wholememory_tensor_t wholememory_tensor);
/**
* Get WholeMemory handle from WholeMemory Tensor
* @param wholememory_tensor : WholeMemory Tensor
* @return : WholeMemory handle
*/
wholememory_handle_t wholememory_tensor_get_memory_handle(wholememory_tensor_t wholememory_tensor);
/**
* Get tensor description from WholeMemory Tensor
* @param wholememory_tensor : WholeMemory Tensor
* @return : pointer to the underlying wholememory_tensor_description_t
*/
wholememory_tensor_description_t* wholememory_tensor_get_tensor_description(
wholememory_tensor_t wholememory_tensor);
/**
* Get global reference from WholeMemory Tensor
* @param wholememory_tensor : WholeMemory Tensor
* @param wholememory_gref : global reference
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_tensor_get_global_reference(
wholememory_tensor_t wholememory_tensor, wholememory_gref_t* wholememory_gref);
/**
* Map local tensor of WholeMemory Tensor.
 * Only 1D and 2D tensors with a WholeMemory Handle are supported.
 * For a 1D tensor, storage_offset should be 0.
 * For a 2D tensor, storage_offset + size[1] should be <= stride[0].
*
* @param wholememory_tensor : WholeMemory Tensor.
* @param local_tensor : returned local tensor, need to be destroyed.
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_tensor_map_local_tensor(
wholememory_tensor_t wholememory_tensor, wholememory_tensor_t* local_tensor);
/**
* Get data pointer from WholeMemory Tensor
* @param wholememory_tensor : WholeMemory Tensor
 * @return : Pointer to the first data element, for CONTINUOUS WholeMemory or tensors not backed by WholeMemory.
*/
void* wholememory_tensor_get_data_pointer(wholememory_tensor_t wholememory_tensor);
/**
* Get entry count per rank of a WholeMemory Tensor
* @param wholememory_tensor : WholeMemory Tensor
* @return : entry count per rank
*/
size_t wholememory_tensor_get_entry_per_partition(wholememory_tensor_t wholememory_tensor);
/**
* Get sub tensor of a WholeMemory Tensor
* @param wholememory_tensor : WholeMemory Tensor
* @param starts : starts of each dim, length should be the dim of wholememory_tensor.
* @param ends : ends of each dim, length should be the dim of wholememory_tensor
* @param sub_wholememory_tensor : pointer to returned sub tensor
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_tensor_get_subtensor(
wholememory_tensor_t wholememory_tensor,
int64_t* starts,
int64_t* ends,
wholememory_tensor_t* sub_wholememory_tensor);
/**
 * Get the root tensor of a WholeMemory Tensor; the root is a tensor that is not a sub-tensor of any
 * other WholeMemory Tensor.
 * @param wholememory_tensor : WholeMemory Tensor
 * @return : the root of the current WholeMemory Tensor, which may be the same as wholememory_tensor.
*/
wholememory_tensor_t wholememory_tensor_get_root(wholememory_tensor_t wholememory_tensor);
#define WM_TENSOR_COUNT_DEBUG
int64_t get_wholememory_tensor_count();
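/*
 * Usage sketch (illustrative only): create a 2-D WholeMemory Tensor on an existing communicator,
 * map its local part, and clean up. `comm` is assumed to be a previously created communicator,
 * WHOLEMEMORY_DT_FLOAT is an assumed wholememory_dtype_t enumerator, and the sizes are example
 * values.
 *
 *   wholememory_error_code_t tensor_example(wholememory_comm_t comm)
 *   {
 *     wholememory_tensor_description_t desc;
 *     wholememory_initialize_tensor_desc(&desc);
 *     desc.dim        = 2;
 *     desc.sizes[0]   = 100000;
 *     desc.sizes[1]   = 128;
 *     desc.strides[0] = 128;
 *     desc.strides[1] = 1;
 *     desc.dtype      = WHOLEMEMORY_DT_FLOAT;
 *
 *     wholememory_tensor_t wm_tensor;
 *     WHOLEMEMORY_RETURN_ON_FAIL(wholememory_create_tensor(
 *       &wm_tensor, &desc, comm, WHOLEMEMORY_MT_CHUNKED, WHOLEMEMORY_ML_DEVICE));
 *
 *     wholememory_tensor_t local_tensor;
 *     WHOLEMEMORY_RETURN_ON_FAIL(wholememory_tensor_map_local_tensor(wm_tensor, &local_tensor));
 *     // ... use wholememory_tensor_get_data_pointer(local_tensor) here ...
 *
 *     WHOLEMEMORY_RETURN_ON_FAIL(wholememory_destroy_tensor(local_tensor));
 *     return wholememory_destroy_tensor(wm_tensor);
 *   }
 */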
#ifdef __cplusplus
}
#endif
| 0 |
rapidsai_public_repos/wholegraph/cpp/include
|
rapidsai_public_repos/wholegraph/cpp/include/wholememory/env_func_ptrs.h
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda_runtime_api.h>
#include <wholememory/tensor_description.h>
/**
* Function pointers for memory allocation.
 * Input tensor memory should be allocated by the caller and passed as a void* pointer to the memory,
 * with a wholememory_array_description_t or wholememory_matrix_description_t specifying the shape.
 * Output tensors with a fixed size are handled the same way as input tensors. Output tensors whose
 * shape is determined by the Op should have a void* memory_context input and be allocated by
 * wholememory_malloc_func_t functions.
*/
#ifdef __cplusplus
extern "C" {
#endif
enum wholememory_memory_allocation_type_t {
WHOLEMEMORY_MA_NONE = 0,
WHOLEMEMORY_MA_DEVICE,
WHOLEMEMORY_MA_HOST,
WHOLEMEMORY_MA_PINNED,
};
/**
* Function pointer to create temporary memory context.
*/
typedef void (*wholememory_create_memory_context_func_t)(void** memory_context,
void* global_context);
typedef void (*wholememory_destroy_memory_context_func_t)(void* memory_context,
void* global_context);
typedef void* (*wholememory_malloc_func_t)(
wholememory_tensor_description_t* desc,
wholememory_memory_allocation_type_t memory_allocation_type,
void* memory_context,
void* global_context);
typedef void (*wholememory_free_func_t)(void* memory_context, void* global_context);
struct wholememory_temp_memory_func_t {
wholememory_create_memory_context_func_t create_memory_context_fn;
wholememory_destroy_memory_context_func_t destroy_memory_context_fn;
wholememory_malloc_func_t malloc_fn;
wholememory_free_func_t free_fn;
void* global_context;
};
struct wholememory_output_memory_func_t {
wholememory_malloc_func_t malloc_fn;
wholememory_free_func_t free_fn;
void* global_context;
};
struct wholememory_env_func_t {
wholememory_temp_memory_func_t temporary_fns; /* function pointers to create temporary memory */
wholememory_output_memory_func_t output_fns; /* function pointers to create Op output memory */
};
cudaDeviceProp* get_device_prop(int dev_id);
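/*
 * Implementation sketch (illustrative only, not the library's built-in allocator): a minimal
 * output-memory malloc function backed by cudaMalloc / cudaMallocHost / malloc. It stores the raw
 * pointer in the caller-provided memory_context so a matching free function can release it later.
 * The function name is hypothetical and error handling is omitted for brevity.
 *
 *   static void* example_output_malloc_fn(wholememory_tensor_description_t* desc,
 *                                         wholememory_memory_allocation_type_t alloc_type,
 *                                         void* memory_context,
 *                                         void* global_context)
 *   {
 *     size_t bytes = (size_t)wholememory_get_memory_size_from_tensor(desc);
 *     void* ptr    = NULL;
 *     if (alloc_type == WHOLEMEMORY_MA_DEVICE) {
 *       cudaMalloc(&ptr, bytes);
 *     } else if (alloc_type == WHOLEMEMORY_MA_PINNED) {
 *       cudaMallocHost(&ptr, bytes);
 *     } else {
 *       ptr = malloc(bytes);
 *     }
 *     *(void**)memory_context = ptr;  // remembered so the paired free_fn can release it
 *     return ptr;
 *   }
 */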
#ifdef __cplusplus
}
#endif
| 0 |
rapidsai_public_repos/wholegraph/cpp/include
|
rapidsai_public_repos/wholegraph/cpp/include/wholememory/device_reference.cuh
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <assert.h>
#include "global_reference.h"
namespace wholememory {
template <typename DataTypeT>
class device_reference {
public:
__device__ __forceinline__ explicit device_reference(const wholememory_gref_t& gref)
: pointer_(static_cast<DataTypeT*>(gref.pointer)),
typed_stride_(gref.stride / sizeof(DataTypeT))
{
assert(gref.stride % sizeof(DataTypeT) == 0);
}
__device__ device_reference() = delete;
__device__ __forceinline__ DataTypeT& operator[](size_t index)
{
if (typed_stride_ == 0) { return pointer_[index]; }
size_t rank = index / typed_stride_;
return static_cast<DataTypeT**>(
static_cast<void*>(pointer_))[rank][index - rank * typed_stride_];
}
private:
DataTypeT* pointer_;
size_t typed_stride_;
};
} // namespace wholememory
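/*
 * Usage sketch (illustrative only): a CUDA kernel that reads a CONTINUOUS or CHUNKED WholeMemory
 * buffer through device_reference. The kernel name and launch configuration are example
 * assumptions; the gref would normally come from wholememory_get_global_reference.
 *
 *   __global__ void gather_example_kernel(wholememory_gref_t gref, const int64_t* indices,
 *                                         float* output, int64_t count)
 *   {
 *     wholememory::device_reference<float> ref(gref);
 *     int64_t i = blockIdx.x * (int64_t)blockDim.x + threadIdx.x;
 *     if (i < count) { output[i] = ref[indices[i]]; }
 *   }
 */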
| 0 |
rapidsai_public_repos/wholegraph/cpp/include
|
rapidsai_public_repos/wholegraph/cpp/include/wholememory/embedding.h
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <wholememory/env_func_ptrs.h>
#include <wholememory/wholememory_tensor.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Opaque handle to WholeMemory Embedding Cache Policy
*
* An Opaque handle to WholeMemory Embedding Cache Policy
*/
typedef struct wholememory_embedding_cache_policy_* wholememory_embedding_cache_policy_t;
/**
* @brief Opaque handle to WholeMemory Embedding Optimizer
*
* An Opaque handle to WholeMemory Embedding Optimizer
*/
typedef struct wholememory_embedding_optimizer_* wholememory_embedding_optimizer_t;
/**
* @brief Opaque handle to WholeMemory Embedding
*
* An Opaque handle to WholeMemory Embedding
*/
typedef struct wholememory_embedding_* wholememory_embedding_t;
/**
* @enum wholememory_access_type_t
* @brief defines access type of WholeMemory Embedding
*/
enum wholememory_access_type_t {
WHOLEMEMORY_AT_NONE = 0, /*!< Not defined */
WHOLEMEMORY_AT_READONLY, /*!< Only have readonly access to the WholeMemory */
WHOLEMEMORY_AT_READWRITE, /*!< May have write access to the WholeMemory */
};
/**
* @enum wholememory_optimizer_type_t
* @brief defines optimizer type for WholeMemory Embedding
*/
enum wholememory_optimizer_type_t {
WHOLEMEMORY_OPT_NONE = 0, /*!< No optimizer needed */
WHOLEMEMORY_OPT_SGD, /*!< Use SGD optimizer */
WHOLEMEMORY_OPT_LAZY_ADAM, /*!< Use Lazy Adam optimizer */
WHOLEMEMORY_OPT_RMSPROP, /*!< Use RMSProp optimizer */
WHOLEMEMORY_OPT_ADAGRAD, /*!< Use AdaGrad optimizer */
};
/**
* Create Optimizer
* @param optimizer : Returned wholememory_embedding_optimizer_t
* @param optimizer_type : Optimizer type
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_create_embedding_optimizer(
wholememory_embedding_optimizer_t* optimizer, wholememory_optimizer_type_t optimizer_type);
/**
* Set parameter for optimizer.
* @param optimizer : Optimizer to set parameter
* @param parameter_name : parameter name
* @param value : parameter value
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_optimizer_set_parameter(
wholememory_embedding_optimizer_t optimizer, const char* parameter_name, void* value);
/**
* Destroy optimizer
* @param optimizer : optimizer to destroy.
*/
void wholememory_destroy_embedding_optimizer(wholememory_embedding_optimizer_t optimizer);
/**
* Create WholeMemory Embedding Cache Policy
* @param cache_policy : Returned wholememory_embedding_cache_policy_t
 * @param cache_level_comm : At which level to cache the full embedding. In most cases it should be
 * the same as the wholememory_embedding_t's comm. If access_type is WHOLEMEMORY_AT_READONLY, it can
 * be different for multiple readonly caches. E.g. a multi-node WHOLEMEMORY_MT_DISTRIBUTED
 * WHOLEMEMORY_AT_READONLY embedding can have an intra-node WHOLEMEMORY_MT_CHUNKED cache or a
 * multi-node WHOLEMEMORY_MT_DISTRIBUTED cache.
* @param memory_type : Memory Type of the underlying WholeMemory for cache
* @param memory_location : Memory Location of the underlying WholeMemory for cache
* @param access_type : ReadOnly or ReadWrite
* @param cache_ratio : suggested cache ratio, values should be in range [1.0 / 512, 1.0]
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_create_embedding_cache_policy(
wholememory_embedding_cache_policy_t* cache_policy,
wholememory_comm_t cache_level_comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location,
wholememory_access_type_t access_type,
float cache_ratio);
/**
* Destroy WholeMemory Embedding Cache Policy
* @param cache_policy : WholeMemory Embedding Cache Policy to destroy.
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_destroy_embedding_cache_policy(
wholememory_embedding_cache_policy_t cache_policy);
/**
* Create WholeMemory Embedding
* @param wholememory_embedding : Returned wholememory_embedding_t
 * @param embedding_tensor_description : Description of the embedding; sizes and dtype are used,
 * stride and storage_offset are ignored. Must be a matrix.
* @param comm : WholeMemory Communicator
* @param memory_type : Memory Type of the underlying WholeMemory
* @param memory_location : Memory Location of the underlying WholeMemory
 * @param optimizer : Optimizer to use for training; use nullptr if the embedding is not trained
 * @param cache_policy : Cache policy for this embedding; use nullptr if no cache is used
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_create_embedding(
wholememory_embedding_t* wholememory_embedding,
wholememory_tensor_description_t* embedding_tensor_description,
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location,
wholememory_embedding_optimizer_t optimizer,
wholememory_embedding_cache_policy_t cache_policy);
/**
* Destroy WholeMemory Embedding
* @param wholememory_embedding : WholeMemory Embedding to destroy
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_destroy_embedding(
wholememory_embedding_t wholememory_embedding);
/**
* Get WholeMemory Tensor from WholeMemory Embedding.
* @param wholememory_embedding : WholeMemory Embedding
* @return : WholeMemory Tensor
*/
wholememory_tensor_t wholememory_embedding_get_embedding_tensor(
wholememory_embedding_t wholememory_embedding);
/**
* Gather from WholeMemory Embedding
* @param wholememory_embedding : WholeMemory Embedding
* @param indices : indices to gather
* @param output : output tensor
* @param adjust_cache : if we should adjust cache in this gather
* @param p_env_fns : env fns
* @param stream_int : CUDA stream to use
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_embedding_gather(wholememory_embedding_t wholememory_embedding,
wholememory_tensor_t indices,
wholememory_tensor_t output,
bool adjust_cache,
wholememory_env_func_t* p_env_fns,
int64_t stream_int);
/**
* Gather backward for WholeMemory Embedding
* @param wholememory_embedding : WholeMemory Embedding
* @param indices : indices to gather
* @param grads : gradient of output tensor
* @param adjust_cache : if we should adjust cache in this gather
* @param lr : learning rate of current step.
* @param p_env_fns : env fns
* @param stream_int : CUDA stream to use
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_embedding_gather_gradient_apply(
wholememory_embedding_t wholememory_embedding,
wholememory_tensor_t indices,
wholememory_tensor_t grads,
bool adjust_cache,
float lr,
wholememory_env_func_t* p_env_fns,
int64_t stream_int);
/**
* Get optimizer internal state names
* @param wholememory_embedding : WholeMemory Embedding
 * @return : nullptr-terminated array of state names.
*/
const char* const* wholememory_embedding_get_optimizer_state_names(
wholememory_embedding_t wholememory_embedding);
/**
* Get optimizer internal state
* @param wholememory_embedding : WholeMemory Embedding
* @param name : state name
 * @return : internal state tensor, or nullptr if it does not exist.
*/
wholememory_tensor_t wholememory_embedding_get_optimizer_state(
wholememory_embedding_t wholememory_embedding, const char* name);
/**
 * Write back all cache of a WholeMemory Embedding
* @param wholememory_embedding : WholeMemory Embedding
* @param stream_int : CUDA stream to use.
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_embedding_writeback_cache(
wholememory_embedding_t wholememory_embedding, int64_t stream_int);
/**
* Drop all cache in WholeMemory Embedding
* @param wholememory_embedding : WholeMemory Embedding
* @param stream_int : CUDA stream to use.
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_embedding_drop_all_cache(
wholememory_embedding_t wholememory_embedding, int64_t stream_int);
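/*
 * Usage sketch (illustrative only): create a read-only cached embedding and gather from it.
 * `comm`, `emb_desc`, `indices`, `output`, `env_fns` and `stream` are assumed to be prepared by
 * the caller; the 0.1 cache ratio is an arbitrary example value.
 *
 *   wholememory_embedding_cache_policy_t cache_policy;
 *   WHOLEMEMORY_RETURN_ON_FAIL(wholememory_create_embedding_cache_policy(
 *     &cache_policy, comm, WHOLEMEMORY_MT_CHUNKED, WHOLEMEMORY_ML_DEVICE,
 *     WHOLEMEMORY_AT_READONLY, 0.1f));
 *
 *   wholememory_embedding_t embedding;
 *   WHOLEMEMORY_RETURN_ON_FAIL(wholememory_create_embedding(
 *     &embedding, &emb_desc, comm, WHOLEMEMORY_MT_DISTRIBUTED, WHOLEMEMORY_ML_DEVICE,
 *     nullptr,        // no optimizer, inference only
 *     cache_policy));
 *
 *   WHOLEMEMORY_RETURN_ON_FAIL(wholememory_embedding_gather(
 *     embedding, indices, output, true, env_fns, (int64_t)stream));
 *
 *   WHOLEMEMORY_RETURN_ON_FAIL(wholememory_destroy_embedding(embedding));
 *   WHOLEMEMORY_RETURN_ON_FAIL(wholememory_destroy_embedding_cache_policy(cache_policy));
 */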
#ifdef __cplusplus
}
#endif
| 0 |
rapidsai_public_repos/wholegraph/cpp/include
|
rapidsai_public_repos/wholegraph/cpp/include/wholememory/wholememory.h
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <stdio.h>
#include <unistd.h>
#include <wholememory/global_reference.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief WholeMemory Error Code definition
*
* Defines error code of WholeMemory library.
*/
enum wholememory_error_code_t {
WHOLEMEMORY_SUCCESS = 0, /*!< success */
WHOLEMEMORY_UNKNOW_ERROR, /*!< unknown error */
WHOLEMEMORY_NOT_IMPLEMENTED, /*!< method is not implemented */
WHOLEMEMORY_LOGIC_ERROR, /*!< logic error */
WHOLEMEMORY_CUDA_ERROR, /*!< CUDA error */
WHOLEMEMORY_COMMUNICATION_ERROR, /*!< communication error */
WHOLEMEMORY_INVALID_INPUT, /*!< input is invalid, e.g. nullptr */
WHOLEMEMORY_INVALID_VALUE, /*!< input value is invalid */
WHOLEMEMORY_OUT_OF_MEMORY, /*!< out of memory */
WHOLEMEMORY_NOT_SUPPORTED, /*!< not supported */
};
#define WHOLEMEMORY_RETURN_ON_FAIL(X) \
do { \
auto err = X; \
if (err != WHOLEMEMORY_SUCCESS) { \
const char* error_str = #X; \
fprintf(stderr, "File %s line %d %s failed.\n", __FILE__, __LINE__, error_str); \
return err; \
} \
} while (0)
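/*
 * Usage sketch: propagate failures from nested WholeMemory calls. `example_init` is a hypothetical
 * caller-side helper, not part of this header.
 *
 *   static wholememory_error_code_t example_init()
 *   {
 *     WHOLEMEMORY_RETURN_ON_FAIL(wholememory_init(0));
 *     return WHOLEMEMORY_SUCCESS;
 *   }
 */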
/**
* @brief Memory Type of WholeMemory
*
* Memory Type is the Memory Address Mapping Type of WholeMemory
*/
enum wholememory_memory_type_t {
WHOLEMEMORY_MT_NONE = 0, /*!< Not defined. */
WHOLEMEMORY_MT_CONTINUOUS, /*!< Memory from all ranks are mapped in continuous address space */
WHOLEMEMORY_MT_CHUNKED, /*!< Memory from all ranks are mapped in chunked address space */
WHOLEMEMORY_MT_DISTRIBUTED, /*!< Memory from other ranks are not mapped. */
};
/**
* @brief Memory Location of WholeMemory
*
* Memory Location of WholeMemory can be host or device.
*/
enum wholememory_memory_location_t {
WHOLEMEMORY_ML_NONE = 0, /*!< Not defined */
WHOLEMEMORY_ML_DEVICE, /*!< Device Memory */
WHOLEMEMORY_ML_HOST, /*!< Host Memory */
};
/**
* Initialize WholeMemory library
* @param flags : reserved should be 0
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_init(unsigned int flags);
/**
* Finalize WholeMemory library
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_finalize();
/**
* @brief Opaque handle to communicator
*
* An Opaque handle to communicator
*/
typedef struct wholememory_comm_* wholememory_comm_t;
#define WHOLEMEMORY_UNIQUE_ID_BYTES (128)
/**
* @brief Unique ID for WholeMemory Communicators
*
 * An Opaque handle to WholeMemory Communicators, exposed as a char array.
 * The underlying implementation may be ncclUniqueId_t.
*/
struct wholememory_unique_id_t {
char internal[WHOLEMEMORY_UNIQUE_ID_BYTES];
};
/**
* Create UniqueID for WholeMemory Communicator
* @param unique_id : returned UniqueID
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_create_unique_id(wholememory_unique_id_t* unique_id);
/**
* Create WholeMemory Communicator
* @param comm : returned WholeMemory Communicator
* @param unique_id : UniqueID
* @param rank : rank of this process.
* @param size : number of processes in this Communicator
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_create_communicator(wholememory_comm_t* comm,
wholememory_unique_id_t unique_id,
int rank,
int size);
/**
* Destroy WholeMemory Communicator
* @param comm : WholeMemory Communicator to destroy
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_destroy_communicator(wholememory_comm_t comm);
/**
* Check if combination of WholeMemory type and location is supported in the communicator
* @param comm : WholeMemory Communicator
* @param memory_type : WholeMemory type
* @param memory_location : WholeMemory Location
* @return WHOLEMEMORY_SUCCESS if supported else WHOLEMEMORY_NOT_SUPPORTED
*/
wholememory_error_code_t wholememory_communicator_support_type_location(
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location);
/**
* Get the rank of current process in the WholeMemory Communicator
* @param rank : returned rank
* @param comm : WholeMemory Communicator
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_communicator_get_rank(int* rank, wholememory_comm_t comm);
/**
* Get the size of WholeMemory Communicator
* @param size : returned size
* @param comm : WholeMemory Communicator
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_communicator_get_size(int* size, wholememory_comm_t comm);
/**
* Barrier on WholeMemory Communicator
* @param comm : WholeMemory Communicator
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_communicator_barrier(wholememory_comm_t comm);
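/*
 * Usage sketch (illustrative only): rank 0 creates the unique id and shares it with the other
 * ranks out of band (for example via an MPI broadcast, not shown); every rank then creates the
 * communicator with its own rank and the world size. `my_rank` and `world_size` are assumed
 * caller-provided values.
 *
 *   wholememory_unique_id_t unique_id;
 *   if (my_rank == 0) { WHOLEMEMORY_RETURN_ON_FAIL(wholememory_create_unique_id(&unique_id)); }
 *   // ... broadcast unique_id from rank 0 to all ranks here ...
 *
 *   wholememory_comm_t comm;
 *   WHOLEMEMORY_RETURN_ON_FAIL(
 *     wholememory_create_communicator(&comm, unique_id, my_rank, world_size));
 *   WHOLEMEMORY_RETURN_ON_FAIL(wholememory_communicator_barrier(comm));
 */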
/**
* @brief Opaque handle to WholeMemory
*
* An Opaque handle to WholeMemory
*/
typedef struct wholememory_handle_* wholememory_handle_t;
/**
* Malloc WholeMemory
* @param wholememory_handle_ptr : returned WholeMemory Handle
* @param total_size : total allocated size in bytes.
* @param comm : WholeMemory Communicator
* @param memory_type : WholeMemory type
* @param memory_location : memory location, host or device
* @param data_granularity : granularity size of data, which is guaranteed not to be partitioned.
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_malloc(wholememory_handle_t* wholememory_handle_ptr,
size_t total_size,
wholememory_comm_t comm,
wholememory_memory_type_t memory_type,
wholememory_memory_location_t memory_location,
size_t data_granularity);
/**
* Free allocated WholeMemory Handle
* @param wholememory_handle : WholeMemory Handle to free
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_free(wholememory_handle_t wholememory_handle);
/**
* Get underlying WholeMemory Communicator from WholeMemory Handle
* @param comm : returned WholeMemory Communicator
* @param wholememory_handle : WholeMemory Handle
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_get_communicator(wholememory_comm_t* comm,
wholememory_handle_t wholememory_handle);
/**
* Get WholeMemory Type
* @param wholememory_handle : WholeMemory Handle
* @return : WholeMemory Type
*/
wholememory_memory_type_t wholememory_get_memory_type(wholememory_handle_t wholememory_handle);
/**
* Get WholeMemory Location
* @param wholememory_handle : WholeMemory Handle
* @return : WholeMemory Location
*/
wholememory_memory_location_t wholememory_get_memory_location(
wholememory_handle_t wholememory_handle);
/**
* Get total size of WholeMemory
* @param wholememory_handle : WholeMemory Handle
* @return : total size
*/
size_t wholememory_get_total_size(wholememory_handle_t wholememory_handle);
/**
* Get data granularity of WholeMemory Handle
* @param wholememory_handle : WholeMemory Handle
* @return : data granularity size
*/
size_t wholememory_get_data_granularity(wholememory_handle_t wholememory_handle);
/**
 * Get the local memory of the current rank from a WholeMemory Handle; the rank has direct access to
 * this memory, but the local memory does not have to be on the local GPU.
* @param local_ptr : returned local memory pointer
* @param local_size : returned local memory size
* @param local_offset : returned local memory offset from WholeMemory
* @param wholememory_handle : WholeMemory Handle
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_get_local_memory(void** local_ptr,
size_t* local_size,
size_t* local_offset,
wholememory_handle_t wholememory_handle);
/**
* Get local memory of specified rank from WholeMemory Handle
* @param rank_memory_ptr : returned local memory pointer of specified rank
* @param rank_memory_size : returned local memory size of specified rank
* @param rank_memory_offset : returned local memory offset of specified rank from WholeMemory
* @param rank : rank specified
* @param wholememory_handle : WholeMemory Handle
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_get_rank_memory(void** rank_memory_ptr,
size_t* rank_memory_size,
size_t* rank_memory_offset,
int rank,
wholememory_handle_t wholememory_handle);
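/*
 * Usage sketch (illustrative only): allocate a chunked device WholeMemory of 1 GiB with
 * float-sized granularity and query the local partition owned by this rank. `comm` is assumed to
 * be an existing communicator.
 *
 *   wholememory_handle_t handle;
 *   WHOLEMEMORY_RETURN_ON_FAIL(wholememory_malloc(
 *     &handle, (size_t)1 << 30, comm, WHOLEMEMORY_MT_CHUNKED, WHOLEMEMORY_ML_DEVICE,
 *     sizeof(float)));
 *
 *   void* local_ptr = NULL;
 *   size_t local_size = 0, local_offset = 0;
 *   WHOLEMEMORY_RETURN_ON_FAIL(
 *     wholememory_get_local_memory(&local_ptr, &local_size, &local_offset, handle));
 *   // ... fill or read the local partition here ...
 *
 *   WHOLEMEMORY_RETURN_ON_FAIL(wholememory_free(handle));
 */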
/**
* Get global memory pointer from WholeMemory Handle.
 * Only the Continuous memory type or Chunked Host memory has a global pointer.
* @param global_ptr : returned pointer of WholeMemory
* @param wholememory_handle : WholeMemory Handle
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_get_global_pointer(void** global_ptr,
wholememory_handle_t wholememory_handle);
/**
* Get global reference from WholeMemory Handle
* WholeMemory global reference is common data structure for Continuous and Chunked Memory Types.
* @param wholememory_gref : returned WholeMemory global reference
* @param wholememory_handle : WholeMemory Handle
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_get_global_reference(wholememory_gref_t* wholememory_gref,
wholememory_handle_t wholememory_handle);
/**
* Get the partition plan WholeMemory will use
* @param size_per_rank : returned size per rank
* @param total_size : total size
* @param data_granularity : data granularity
* @param world_size : communicator world size
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_determine_partition_plan(size_t* size_per_rank,
size_t total_size,
size_t data_granularity,
int world_size);
/**
* Get the partition plan WholeMemory will use based on entry count.
* Entry is number of data granularity
* @param entry_per_rank : returned entry count per rank
* @param total_entry_count : total entry count
* @param world_size : communicator world size
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_determine_entry_partition_plan(size_t* entry_per_rank,
size_t total_entry_count,
int world_size);
/**
* Get the partition plan used in WholeMemory Handle
* @param size_per_rank : returned size per rank
* @param wholememory_handle : WholeMemory Handle
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_get_partition_plan(size_t* size_per_rank,
wholememory_handle_t wholememory_handle);
/**
 * Fork a new process and get the device count. Should be called before any other CUDA call.
* @return : CUDA device count, -1 on error
*/
int fork_get_device_count();
/**
 * Load WholeMemory from binary files; this should be called by all ranks together.
* @param wholememory_handle : WholeMemory Handle
* @param memory_offset : load to memory offset
* @param memory_entry_size : entry size of WholeMemory
* @param file_entry_size : entry size in file, should be less than or equal to memory_entry_size
* @param file_names : file names, all binary files will be logically concatenated and loaded.
* @param file_count : number of files.
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_load_from_file(wholememory_handle_t wholememory_handle,
size_t memory_offset,
size_t memory_entry_size,
size_t file_entry_size,
const char** file_names,
int file_count);
/**
* Store local WholeMemory to file, this should be called by all ranks, with different
* local_file_name.
* @param wholememory_handle : WholeMemory Handle
* @param memory_offset : memory offset to store
* @param memory_entry_stride : entry size of WholeMemory
 * @param file_entry_size : entry size in file, should be less than or equal to memory_entry_stride
* @param local_file_name : local file to store to
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholememory_store_to_file(wholememory_handle_t wholememory_handle,
size_t memory_offset,
size_t memory_entry_stride,
size_t file_entry_size,
const char* local_file_name);
#ifdef __cplusplus
}
#endif
| 0 |
rapidsai_public_repos/wholegraph/cpp/include
|
rapidsai_public_repos/wholegraph/cpp/include/wholememory/wholegraph_op.h
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <wholememory/env_func_ptrs.h>
#include <wholememory/wholememory.h>
#include <wholememory/wholememory_tensor.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* Unweighted sample without replacement kernel op
* @param wm_csr_row_ptr_tensor : Wholememory Tensor of graph csr_row_ptr
* @param wm_csr_col_ptr_tensor : Wholememory Tensor of graph csr_col_ptr
 * @param center_nodes_tensor : non-WholeMemory Tensor of center nodes to sample
* @param max_sample_count : maximum sample count
* @param output_sample_offset_tensor : pointer to output sample offset
* @param output_dest_memory_context : memory context to output dest nodes
* @param output_center_localid_memory_context : memory context to output center local id
* @param output_edge_gid_memory_context : memory context to output edge global id
* @param random_seed: random number generator seed
* @param p_env_fns : pointers to environment functions.
* @param stream : CUDA stream to use
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholegraph_csr_unweighted_sample_without_replacement(
wholememory_tensor_t wm_csr_row_ptr_tensor,
wholememory_tensor_t wm_csr_col_ptr_tensor,
wholememory_tensor_t center_nodes_tensor,
int max_sample_count,
wholememory_tensor_t output_sample_offset_tensor,
void* output_dest_memory_context,
void* output_center_localid_memory_context,
void* output_edge_gid_memory_context,
unsigned long long random_seed,
wholememory_env_func_t* p_env_fns,
void* stream);
/**
 * Weighted sample without replacement kernel op
* @param wm_csr_row_ptr_tensor : Wholememory Tensor of graph csr_row_ptr
* @param wm_csr_col_ptr_tensor : Wholememory Tensor of graph csr_col_ptr
* @param wm_csr_weight_ptr_tensor : Wholememory Tensor of graph edge weight
 * @param center_nodes_tensor : non-WholeMemory Tensor of center nodes to sample
* @param max_sample_count : maximum sample count
* @param output_sample_offset_tensor : pointer to output sample offset
* @param output_dest_memory_context : memory context to output dest nodes
* @param output_center_localid_memory_context : memory context to output center local id
* @param output_edge_gid_memory_context : memory context to output edge global id
* @param random_seed: random number generator seed
* @param p_env_fns : pointers to environment functions.
* @param stream : CUDA stream to use
* @return : wholememory_error_code_t
*/
wholememory_error_code_t wholegraph_csr_weighted_sample_without_replacement(
wholememory_tensor_t wm_csr_row_ptr_tensor,
wholememory_tensor_t wm_csr_col_ptr_tensor,
wholememory_tensor_t wm_csr_weight_ptr_tensor,
wholememory_tensor_t center_nodes_tensor,
int max_sample_count,
wholememory_tensor_t output_sample_offset_tensor,
void* output_dest_memory_context,
void* output_center_localid_memory_context,
void* output_edge_gid_memory_context,
unsigned long long random_seed,
wholememory_env_func_t* p_env_fns,
void* stream);
/**
* raft_pcg_generator_random_int cpu op
* @param random_seed : random seed
* @param subsequence : subsequence for generating random value
* @param output : Wholememory Tensor of output
* @return : wholememory_error_code_t
*/
wholememory_error_code_t generate_random_positive_int_cpu(int64_t random_seed,
int64_t subsequence,
wholememory_tensor_t output);
/**
* raft_pcg_generator_random_float cpu op
* @param random_seed : random seed
* @param subsequence : subsequence for generating random value
* @param output : Wholememory Tensor of output
* @return : wholememory_error_code_t
*/
wholememory_error_code_t generate_exponential_distribution_negative_float_cpu(
int64_t random_seed, int64_t subsequence, wholememory_tensor_t output);
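/*
 * Usage sketch (illustrative only): generate a small batch of random positive integers on the CPU
 * into a caller-owned buffer wrapped as a WholeMemory Tensor. WHOLEMEMORY_DT_INT is an assumed
 * wholememory_dtype_t enumerator; the seed and subsequence are arbitrary example values.
 *
 *   int values[128];
 *   wholememory_tensor_description_t desc;
 *   wholememory_initialize_tensor_desc(&desc);
 *   desc.dim        = 1;
 *   desc.sizes[0]   = 128;
 *   desc.strides[0] = 1;
 *   desc.dtype      = WHOLEMEMORY_DT_INT;
 *
 *   wholememory_tensor_t output;
 *   WHOLEMEMORY_RETURN_ON_FAIL(wholememory_make_tensor_from_pointer(&output, values, &desc));
 *   WHOLEMEMORY_RETURN_ON_FAIL(generate_random_positive_int_cpu(1234, 0, output));
 *   WHOLEMEMORY_RETURN_ON_FAIL(wholememory_destroy_tensor(output));
 */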
#ifdef __cplusplus
}
#endif
| 0 |
rapidsai_public_repos/wholegraph/cpp
|
rapidsai_public_repos/wholegraph/cpp/tests/CMakeLists.txt
|
#=============================================================================
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
# Build options
option(DISABLE_DEPRECATION_WARNING "Disable warnings generated from deprecated declarations." OFF)
option(CODE_COVERAGE "Enable generating code coverage with gcov." OFF)
# This function takes in a test name and test source and handles setting all of the associated
# properties and linking to build the test
function(ConfigureTestInternal TEST_NAME)
add_executable(${TEST_NAME} ${ARGN})
target_include_directories(${TEST_NAME} PRIVATE "$<BUILD_INTERFACE:${WHOLEGRAPH_SOURCE_DIR}>/src")
target_link_libraries(${TEST_NAME} GTest::gmock GTest::gtest GTest::gmock_main GTest::gtest_main
wholegraph raft::raft rmm::rmm pthread)
set_target_properties(
${TEST_NAME}
PROPERTIES POSITION_INDEPENDENT_CODE ON
RUNTIME_OUTPUT_DIRECTORY "$<BUILD_INTERFACE:${WHOLEGRAPH_BINARY_DIR}/gtests>"
CUDA_ARCHITECTURES "${CMAKE_CUDA_ARCHITECTURES}"
INSTALL_RPATH "\$ORIGIN/../../../lib")
target_compile_definitions(${TEST_NAME}
PUBLIC "SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_${WHOLEGRAPH_LOGGING_LEVEL}")
target_compile_options(${TEST_NAME} PUBLIC $<$<COMPILE_LANG_AND_ID:CXX,GNU,Clang>:-Wall -Werror
-Wno-error=deprecated-declarations>)
if(DISABLE_DEPRECATION_WARNING)
target_compile_options(
${TEST_NAME} PUBLIC $<$<COMPILE_LANGUAGE:CUDA>:-Xcompiler=-Wno-deprecated-declarations>)
target_compile_options(${TEST_NAME}
PUBLIC $<$<COMPILE_LANGUAGE:CXX>:-Wno-deprecated-declarations>)
endif()
if(CODE_COVERAGE)
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set(KEEP_DIR ${CMAKE_CURRENT_BINARY_DIR}/tmp)
make_directory(${KEEP_DIR})
target_compile_options(${TEST_NAME} PUBLIC $<$<COMPILE_LANGUAGE:CUDA>:--keep
--keep-dir=${KEEP_DIR}>)
target_compile_options(
${TEST_NAME}
PUBLIC
$<$<COMPILE_LANGUAGE:CUDA>:-O0
-Xcompiler=--coverage,-fprofile-abs-path,-fkeep-inline-functions,-fno-elide-constructors>)
target_compile_options(
${TEST_NAME} PUBLIC $<$<COMPILE_LANGUAGE:CXX>:-O0 --coverage -fprofile-abs-path
-fkeep-inline-functions -fno-elide-constructors>)
target_link_options(${TEST_NAME} PRIVATE --coverage)
target_link_libraries(${TEST_NAME} gcov)
endif()
# Add coverage-generated files to clean target
list(APPEND COVERAGE_CLEAN_FILES "**/*.gcno" "**/*.gcda")
set_property(
TARGET ${TEST_NAME}
APPEND
PROPERTY ADDITIONAL_CLEAN_FILES ${COVERAGE_CLEAN_FILES})
endif()
add_test(NAME ${TEST_NAME} COMMAND ${TEST_NAME})
install(
TARGETS ${TEST_NAME}
COMPONENT testing
DESTINATION bin/gtests/libwholegraph
EXCLUDE_FROM_ALL)
endfunction()
# Wrapper around `ConfigureTestInternal` that builds tests both with and without per thread default
# stream
function(ConfigureTest TEST_NAME)
# Test with legacy default stream.
ConfigureTestInternal(${TEST_NAME} ${ARGN})
endfunction()
# parallel_utils tests
ConfigureTest(PARALLEL_UTILS_TEST parallel_utils_tests.cpp)
# wholememory communicator tests
ConfigureTest(WHOLEMEMORY_COMM_TEST wholememory/wholememory_comm_tests.cpp)
# wholememory handle tests
ConfigureTest(WHOLEMEMORY_HANDLE_TEST wholememory/wholememory_handle_tests.cpp)
# wholememory tensor tests
ConfigureTest(WHOLEMEMORY_TENSOR_TEST wholememory/wholememory_tensor_tests.cpp)
# wholememory gather op tests
ConfigureTest(WHOLEMEMORY_GATHER_TEST wholememory_ops/wholememory_gather_tests.cu wholememory_ops/embedding_test_utils.cu)
# wholememory scatter op tests
ConfigureTest(WHOLEMEMORY_SCATTER_TEST wholememory_ops/wholememory_scatter_tests.cu wholememory_ops/embedding_test_utils.cu)
#wholegraph unweighted sampling op tests
ConfigureTest(WHOLEGRAPH_CSR_UNWEIGHTED_SAMPLE_WITHOUT_REPLACEMENT_TEST wholegraph_ops/wholegraph_csr_unweighted_sample_without_replacement_tests.cu wholegraph_ops/graph_sampling_test_utils.cu)
#wholegraph weighted sampling op tests
ConfigureTest(WHOLEGRAPH_CSR_WEIGHTED_SAMPLE_WITHOUT_REPLACEMENT_TEST wholegraph_ops/wholegraph_csr_weighted_sample_without_replacement_tests.cu wholegraph_ops/graph_sampling_test_utils.cu)
#wholegraph cache set tests
ConfigureTest(WHOLEGRAPH_CACHESET_TEST wholememory_ops/cacheset_tests.cu)
#wholegraph embedding tests
ConfigureTest(WHOLEGRAPH_EMBEDDING_TEST wholememory_ops/wholememory_embedding_tests.cu wholememory_ops/embedding_test_utils.cu)
#wholegraph embedding gradient apply tests
ConfigureTest(WHOLEGRAPH_EMBEDDING_GRADIENT_APPLY_TEST wholememory_ops/wholememory_embedding_gradient_apply_tests.cu wholememory_ops/embedding_test_utils.cu)
#graph append unique op tests
ConfigureTest(GRAPH_APPEND_UNIQUE_TEST graph_ops/append_unique_tests.cu graph_ops/append_unique_test_utils.cu wholegraph_ops/graph_sampling_test_utils.cu)
#graph csr add self loop op tests
ConfigureTest(GRAPH_CSR_ADD_SELF_LOOP_TEST graph_ops/csr_add_self_loop_tests.cu graph_ops/csr_add_self_loop_utils.cu wholegraph_ops/graph_sampling_test_utils.cu)
| 0 |