repo_id
stringlengths 21
96
| file_path
stringlengths 31
155
| content
stringlengths 1
92.9M
| __index_level_0__
int64 0
0
|
---|---|---|---|
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/packages/rapids_cpm_gtest.rst
|
.. cmake-module:: ../../rapids-cmake/cpm/gtest.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/packages/rapids_cpm_fmt.rst
|
.. cmake-module:: ../../rapids-cmake/cpm/fmt.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/packages/rapids_cpm_gbench.rst
|
.. cmake-module:: ../../rapids-cmake/cpm/gbench.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/packages/patches.json
|
{
"patches" : [
{
"file" : "Thrust/cub_odr.diff",
"issue" : "cub kernel dispatch ODR [https://github.com/NVIDIA/cub/issues/545]",
"fixed_in" : ""
}
]
}
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/packages/rapids_cpm_thrust.rst
|
.. cmake-module:: ../../rapids-cmake/cpm/thrust.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/packages/rapids_cpm_spdlog.rst
|
.. cmake-module:: ../../rapids-cmake/cpm/spdlog.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/packages/rapids_cpm_rmm.rst
|
.. cmake-module:: ../../rapids-cmake/cpm/rmm.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_cuda_set_architectures.rst
|
.. cmake-module:: ../../rapids-cmake/cuda/set_architectures.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_cmake_parse_version.rst
|
.. cmake-module:: ../../rapids-cmake/cmake/parse_version.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_export_find_package_root.rst
|
.. cmake-module:: ../../rapids-cmake/export/find_package_root.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/supported_cuda_architectures_values.txt
|
``NATIVE`` or ``""``:
When passed as the value for :cmake:variable:`CMAKE_CUDA_ARCHITECTURES <cmake:variable:CMAKE_CUDA_ARCHITECTURES>` or :cmake:envvar:`ENV{CUDAARCHS} <cmake:envvar:CUDAARCHS>`
will compile for all GPU architectures present on the current machine.
``RAPIDS``, ``ALL``, or no value in :cmake:variable:`CMAKE_CUDA_ARCHITECTURES <cmake:variable:CMAKE_CUDA_ARCHITECTURES>` and :cmake:envvar:`ENV{CUDAARCHS} <cmake:envvar:CUDAARCHS>`:
When passed as the value for :cmake:variable:`CMAKE_CUDA_ARCHITECTURES <cmake:variable:CMAKE_CUDA_ARCHITECTURES>` or :cmake:envvar:`ENV{CUDAARCHS} <cmake:envvar:CUDAARCHS>` will compile for all supported RAPIDS GPU architectures.
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_cython_init.rst
|
.. cmake-module:: ../../rapids-cmake/cython/init.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_cython_add_rpath_entries.rst
|
.. cmake-module:: ../../rapids-cmake/cython/add_rpath_entries.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_cpm_find.rst
|
.. cmake-module:: ../../rapids-cmake/cpm/find.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_export_find_package_file.rst
|
.. cmake-module:: ../../rapids-cmake/export/find_package_file.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_cpm_init.rst
|
.. cmake-module:: ../../rapids-cmake/cpm/init.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_cuda_init_architectures.rst
|
.. cmake-module:: ../../rapids-cmake/cuda/init_architectures.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_test_gpu_requirements.rst
|
.. cmake-module:: ../../rapids-cmake/test/gpu_requirements.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_cuda_patch_toolkit.rst
|
.. cmake-module:: ../../rapids-cmake/cuda/patch_toolkit.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_test_add.rst
|
.. cmake-module:: ../../rapids-cmake/test/add.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_cuda_init_runtime.rst
|
.. cmake-module:: ../../rapids-cmake/cuda/init_runtime.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_cython_create_modules.rst
|
.. cmake-module:: ../../rapids-cmake/cython/create_modules.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_cmake_build_type.rst
|
.. cmake-module:: ../../rapids-cmake/cmake/build_type.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_test_init.rst
|
.. cmake-module:: ../../rapids-cmake/test/init.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_cmake_write_version_file.rst
|
.. cmake-module:: ../../rapids-cmake/cmake/write_version_file.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_cmake_install_lib_dir.rst
|
.. cmake-module:: ../../rapids-cmake/cmake/install_lib_dir.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_test_install_relocatable.rst
|
.. cmake-module:: ../../rapids-cmake/test/install_relocatable.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_cmake_write_git_revision_file.rst
|
.. cmake-module:: ../../rapids-cmake/cmake/write_git_revision_file.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_export_write_dependencies.rst
|
.. cmake-module:: ../../rapids-cmake/export/write_dependencies.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_export.rst
|
.. cmake-module:: ../../rapids-cmake/export/export.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_cmake_support_conda_env.rst
|
.. cmake-module:: ../../rapids-cmake/cmake/support_conda_env.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_export_cpm.rst
|
.. cmake-module:: ../../rapids-cmake/export/cpm.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_cuda_set_runtime.rst
|
.. cmake-module:: ../../rapids-cmake/cuda/set_runtime.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_find_generate_module.rst
|
.. cmake-module:: ../../rapids-cmake/find/generate_module.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_export_write_language.rst
|
.. cmake-module:: ../../rapids-cmake/export/write_language.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_cmake_make_global.rst
|
.. cmake-module:: ../../rapids-cmake/cmake/make_global.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_cpm_package_override.rst
|
.. cmake-module:: ../../rapids-cmake/cpm/package_override.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_export_package.rst
|
.. cmake-module:: ../../rapids-cmake/export/package.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_test_generate_resource_spec.rst
|
.. cmake-module:: ../../rapids-cmake/test/generate_resource_spec.cmake
| 0 |
rapidsai_public_repos/rapids-cmake/docs
|
rapidsai_public_repos/rapids-cmake/docs/command/rapids_find_package.rst
|
.. cmake-module:: ../../rapids-cmake/find/package.cmake
| 0 |
rapidsai_public_repos/rapids-cmake
|
rapidsai_public_repos/rapids-cmake/ci/test_cpp.sh
|
#!/bin/bash
# Copyright (c) 2023, NVIDIA CORPORATION.
set -euo pipefail
. /opt/conda/etc/profile.d/conda.sh
rapids-logger "Generate C++ testing dependencies"
rapids-dependency-file-generator \
--output conda \
--file_key test \
--matrix "cuda=${RAPIDS_CUDA_VERSION%.*};arch=$(arch)" | tee env.yaml
rapids-mamba-retry env create --force -f env.yaml -n test
# Temporarily allow unbound variables for conda activation.
set +u
conda activate test
set -u
# Disable `sccache` S3 backend since compile times are negligible
unset SCCACHE_BUCKET
rapids-print-env
rapids-logger "Check GPU usage"
nvidia-smi
rapids-logger "Begin cpp tests"
cmake -S testing -B build
cd build
EXITCODE=0
trap "EXITCODE=1" ERR
set +e
ctest -j20 --schedule-random --output-on-failure
rapids-logger "Test script exiting with value: $EXITCODE"
exit ${EXITCODE}
| 0 |
rapidsai_public_repos/rapids-cmake
|
rapidsai_public_repos/rapids-cmake/ci/check_style.sh
|
#!/bin/bash
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
set -euo pipefail
rapids-logger "Create checks conda environment"
. /opt/conda/etc/profile.d/conda.sh
rapids-dependency-file-generator \
--output conda \
--file_key checks \
--matrix "cuda=${RAPIDS_CUDA_VERSION%.*};arch=$(arch);py=${RAPIDS_PY_VERSION}" | tee env.yaml
rapids-mamba-retry env create --force -f env.yaml -n checks
set +u
conda activate checks
set -u
# Run pre-commit checks
pre-commit run --all-files --show-diff-on-failure
| 0 |
rapidsai_public_repos/rapids-cmake
|
rapidsai_public_repos/rapids-cmake/ci/build_cpp.sh
|
#!/bin/bash
# Copyright (c) 2023, NVIDIA CORPORATION.
set -euo pipefail
source rapids-env-update
export CMAKE_GENERATOR=Ninja
rapids-print-env
rapids-logger "Begin cpp build"
rapids-conda-retry mambabuild conda/recipes/rapids_core_dependencies
rapids-upload-conda-to-s3 cpp
| 0 |
rapidsai_public_repos/rapids-cmake
|
rapidsai_public_repos/rapids-cmake/ci/build_docs.sh
|
#!/bin/bash
# Copyright (c) 2023, NVIDIA CORPORATION.
set -euo pipefail
rapids-logger "Create test conda environment"
. /opt/conda/etc/profile.d/conda.sh
rapids-dependency-file-generator \
--output conda \
--file_key docs \
--matrix "cuda=${RAPIDS_CUDA_VERSION%.*};arch=$(arch);py=${RAPIDS_PY_VERSION}" | tee env.yaml
rapids-mamba-retry env create --force -f env.yaml -n docs
conda activate docs
rapids-print-env
export RAPIDS_VERSION_NUMBER="24.02"
export RAPIDS_DOCS_DIR="$(mktemp -d)"
rapids-logger "Build Sphinx docs"
pushd docs
sphinx-build -b dirhtml . _html -W
sphinx-build -b text . _text -W
mkdir -p "${RAPIDS_DOCS_DIR}/rapids-cmake/"{html,txt}
mv _html/* "${RAPIDS_DOCS_DIR}/rapids-cmake/html"
mv _text/* "${RAPIDS_DOCS_DIR}/rapids-cmake/txt"
popd
rapids-upload-docs
| 0 |
rapidsai_public_repos/rapids-cmake/ci
|
rapidsai_public_repos/rapids-cmake/ci/release/update-version.sh
|
#!/bin/bash
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
################################
# rapids-cmake Version Updater #
################################
## Usage
# bash update-version.sh <new_version>
set -e
# Format is YY.MM.PP - no leading 'v' or trailing 'a'
NEXT_FULL_TAG=$1
# Get current version
CURRENT_TAG=$(git tag --merged HEAD | grep -xE '^v.*' | sort --version-sort | tail -n 1 | tr -d 'v')
CURRENT_MAJOR=$(echo $CURRENT_TAG | awk '{split($0, a, "."); print a[1]}')
CURRENT_MINOR=$(echo $CURRENT_TAG | awk '{split($0, a, "."); print a[2]}')
CURRENT_PATCH=$(echo $CURRENT_TAG | awk '{split($0, a, "."); print a[3]}')
CURRENT_SHORT_TAG=${CURRENT_MAJOR}.${CURRENT_MINOR}
#Get <major>.<minor> for next version
NEXT_MAJOR=$(echo $NEXT_FULL_TAG | awk '{split($0, a, "."); print a[1]}')
NEXT_MINOR=$(echo $NEXT_FULL_TAG | awk '{split($0, a, "."); print a[2]}')
NEXT_SHORT_TAG=${NEXT_MAJOR}.${NEXT_MINOR}
echo "Preparing release $CURRENT_TAG => $NEXT_FULL_TAG"
# Inplace sed replace; workaround for Linux and Mac
function sed_runner() {
sed -i.bak ''"$1"'' $2 && rm -f ${2}.bak
}
sed_runner 's/'"rapids-cmake-version .*)"'/'"rapids-cmake-version ${NEXT_SHORT_TAG})"'/g' RAPIDS.cmake
sed_runner 's/'"rapids-cmake-version .*)"'/'"rapids-cmake-version ${NEXT_SHORT_TAG})"'/g' rapids-cmake/rapids-version.cmake
sed_runner 's/'"version =.*"'/'"version = \"${NEXT_SHORT_TAG}\""'/g' docs/conf.py
sed_runner 's/'"release =.*"'/'"release = \"${NEXT_FULL_TAG}\""'/g' docs/conf.py
sed_runner 's/'"branch-.*\/RAPIDS.cmake"'/'"branch-${NEXT_SHORT_TAG}\/RAPIDS.cmake"'/g' docs/basics.rst
for FILE in .github/workflows/*.yaml; do
sed_runner "/shared-workflows/ s/@.*/@branch-${NEXT_SHORT_TAG}/g" "${FILE}"
done
sed_runner "s/RAPIDS_VERSION_NUMBER=\".*/RAPIDS_VERSION_NUMBER=\"${NEXT_SHORT_TAG}\"/g" ci/build_docs.sh
| 0 |
rapidsai_public_repos/rapids-cmake/ci
|
rapidsai_public_repos/rapids-cmake/ci/checks/cmake_config_format.json
|
{
"format": {
"line_width": 100,
"tab_size": 2,
"command_case": "unchanged",
"dangle_parens": false,
"max_subgroups_hwrap": 4,
"min_prefix_chars": 32,
"max_pargs_hwrap": 999
}
}
| 0 |
rapidsai_public_repos/rapids-cmake/ci
|
rapidsai_public_repos/rapids-cmake/ci/checks/copyright.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import datetime
import os
import re
import sys
import git
FilesToCheck = [
re.compile(r"[.](cmake|cpp|cu|cuh|h|hpp|sh|pxd|py|pyx)$"),
re.compile(r"CMakeLists[.]txt$"),
re.compile(r"meta[.]yaml$"),
]
ExemptFiles = []
# this will break starting at year 10000, which is probably OK :)
CheckSimple = re.compile(
r"Copyright *(?:\(c\))? *(\d{4}),? *NVIDIA C(?:ORPORATION|orporation)"
)
CheckDouble = re.compile(
r"Copyright *(?:\(c\))? *(\d{4})-(\d{4}),? *NVIDIA C(?:ORPORATION|orporation)" # noqa: E501
)
def checkThisFile(f):
if isinstance(f, git.Diff):
if f.deleted_file or f.b_blob.size == 0:
return False
f = f.b_path
elif not os.path.exists(f) or os.stat(f).st_size == 0:
# This check covers things like symlinks which point to files that DNE
return False
for exempt in ExemptFiles:
if exempt.search(f):
return False
for checker in FilesToCheck:
if checker.search(f):
return True
return False
def modifiedFiles():
"""Get a set of all modified files, as Diff objects.
The files returned have been modified in git since the merge base of HEAD
and the upstream of the target branch. We return the Diff objects so that
we can read only the staged changes.
"""
repo = git.Repo()
# Use the environment variable TARGET_BRANCH or RAPIDS_BASE_BRANCH (defined in CI) if possible
target_branch = os.environ.get("TARGET_BRANCH", os.environ.get("RAPIDS_BASE_BRANCH"))
if target_branch is None:
# Fall back to the closest branch if not on CI
target_branch = repo.git.describe(
all=True, tags=True, match="branch-*", abbrev=0
).lstrip("heads/")
upstream_target_branch = None
if target_branch in repo.heads:
# Use the tracking branch of the local reference if it exists. This
# returns None if no tracking branch is set.
upstream_target_branch = repo.heads[target_branch].tracking_branch()
if upstream_target_branch is None:
# Fall back to the remote with the newest target_branch. This code
# path is used on CI because the only local branch reference is
# current-pr-branch, and thus target_branch is not in repo.heads.
# This also happens if no tracking branch is defined for the local
# target_branch. We use the remote with the latest commit if
# multiple remotes are defined.
candidate_branches = [
remote.refs[target_branch] for remote in repo.remotes
if target_branch in remote.refs
]
if len(candidate_branches) > 0:
upstream_target_branch = sorted(
candidate_branches,
key=lambda branch: branch.commit.committed_datetime,
)[-1]
else:
# If no remotes are defined, try to use the local version of the
# target_branch. If this fails, the repo configuration must be very
# strange and we can fix this script on a case-by-case basis.
upstream_target_branch = repo.heads[target_branch]
merge_base = repo.merge_base("HEAD", upstream_target_branch.commit)[0]
diff = merge_base.diff()
changed_files = {f for f in diff if f.b_path is not None}
return changed_files
def getCopyrightYears(line):
res = CheckSimple.search(line)
if res:
return int(res.group(1)), int(res.group(1))
res = CheckDouble.search(line)
if res:
return int(res.group(1)), int(res.group(2))
return None, None
def replaceCurrentYear(line, start, end):
# first turn a simple regex into double (if applicable). then update years
res = CheckSimple.sub(r"Copyright (c) \1-\1, NVIDIA CORPORATION", line)
res = CheckDouble.sub(
rf"Copyright (c) {start:04d}-{end:04d}, NVIDIA CORPORATION",
res,
)
return res
def checkCopyright(f, update_current_year):
"""Checks for copyright headers and their years."""
errs = []
thisYear = datetime.datetime.now().year
lineNum = 0
crFound = False
yearMatched = False
if isinstance(f, git.Diff):
path = f.b_path
lines = f.b_blob.data_stream.read().decode().splitlines(keepends=True)
else:
path = f
with open(f, encoding="utf-8") as fp:
lines = fp.readlines()
for line in lines:
lineNum += 1
start, end = getCopyrightYears(line)
if start is None:
continue
crFound = True
if start > end:
e = [
path,
lineNum,
"First year after second year in the copyright "
"header (manual fix required)",
None,
]
errs.append(e)
elif thisYear < start or thisYear > end:
e = [
path,
lineNum,
"Current year not included in the copyright header",
None,
]
if thisYear < start:
e[-1] = replaceCurrentYear(line, thisYear, end)
if thisYear > end:
e[-1] = replaceCurrentYear(line, start, thisYear)
errs.append(e)
else:
yearMatched = True
# copyright header itself not found
if not crFound:
e = [
path,
0,
"Copyright header missing or formatted incorrectly "
"(manual fix required)",
None,
]
errs.append(e)
# even if the year matches a copyright header, make the check pass
if yearMatched:
errs = []
if update_current_year:
errs_update = [x for x in errs if x[-1] is not None]
if len(errs_update) > 0:
lines_changed = ", ".join(str(x[1]) for x in errs_update)
print(f"File: {path}. Changing line(s) {lines_changed}")
for _, lineNum, __, replacement in errs_update:
lines[lineNum - 1] = replacement
with open(path, "w", encoding="utf-8") as out_file:
out_file.writelines(lines)
return errs
def getAllFilesUnderDir(root, pathFilter=None):
retList = []
for dirpath, dirnames, filenames in os.walk(root):
for fn in filenames:
filePath = os.path.join(dirpath, fn)
if pathFilter(filePath):
retList.append(filePath)
return retList
def checkCopyright_main():
"""
Checks for copyright headers in all the modified files. In case of local
repo, this script will just look for uncommitted files and in case of CI
it compares between branches "$PR_TARGET_BRANCH" and "current-pr-branch"
"""
retVal = 0
argparser = argparse.ArgumentParser(
"Checks for a consistent copyright header in git's modified files"
)
argparser.add_argument(
"--update-current-year",
dest="update_current_year",
action="store_true",
required=False,
help="If set, "
"update the current year if a header is already "
"present and well formatted.",
)
argparser.add_argument(
"--git-modified-only",
dest="git_modified_only",
action="store_true",
required=False,
help="If set, "
"only files seen as modified by git will be "
"processed.",
)
args, dirs = argparser.parse_known_args()
if args.git_modified_only:
files = [f for f in modifiedFiles() if checkThisFile(f)]
else:
files = []
for d in [os.path.abspath(d) for d in dirs]:
if not os.path.isdir(d):
raise ValueError(f"{d} is not a directory.")
files += getAllFilesUnderDir(d, pathFilter=checkThisFile)
errors = []
for f in files:
errors += checkCopyright(f, args.update_current_year)
if len(errors) > 0:
if any(e[-1] is None for e in errors):
print("Copyright headers incomplete in some of the files!")
for e in errors:
print(" %s:%d Issue: %s" % (e[0], e[1], e[2]))
print("")
n_fixable = sum(1 for e in errors if e[-1] is not None)
path_parts = os.path.abspath(__file__).split(os.sep)
file_from_repo = os.sep.join(path_parts[path_parts.index("ci") :])
if n_fixable > 0 and not args.update_current_year:
print(
f"You can run `python {file_from_repo} --git-modified-only "
"--update-current-year` and stage the results in git to "
f"fix {n_fixable} of these errors.\n"
)
retVal = 1
return retVal
if __name__ == "__main__":
sys.exit(checkCopyright_main())
| 0 |
rapidsai_public_repos/rapids-cmake/ci
|
rapidsai_public_repos/rapids-cmake/ci/checks/run-cmake-format.sh
|
#!/bin/bash
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
# This script is a wrapper for cmakelang that may be used with pre-commit. The
# wrapping is necessary because RAPIDS libraries split configuration for
# cmakelang linters between a local config file and a second config file that's
# shared across all of RAPIDS via rapids-cmake. In order to keep it up to date
# this file is only maintained in one place (the rapids-cmake repo) and
# pulled down during builds. We need a way to invoke CMake linting commands
# without causing pre-commit failures (which could block local commits or CI),
# while also being sufficiently flexible to allow users to maintain the config
# file independently of a build directory.
#
# This script provides the minimal functionality to enable those use cases. It
# searches in a number of predefined locations for the rapids-cmake config file
# and exits gracefully if the file is not found. If a user wishes to specify a
# config file at a nonstandard location, they may do so by setting the
# environment variable RAPIDS_CMAKE_FORMAT_FILE.
#
# This script can be invoked directly anywhere within the project repository.
# Alternatively, it may be invoked as a pre-commit hook via
# `pre-commit run (cmake-format)|(cmake-lint)`.
#
# Usage:
# bash run-cmake-format.sh {cmake-format,cmake-lint} infile [infile ...]
RAPIDS_CMAKE_ROOT="$(realpath $(dirname $0)/../..)"
DEFAULT_RAPIDS_CMAKE_FORMAT_FILE="${RAPIDS_CMAKE_ROOT}/cmake-format-rapids-cmake.json"
if [ -z ${RAPIDS_CMAKE_FORMAT_FILE:+PLACEHOLDER} ]; then
RAPIDS_CMAKE_FORMAT_FILE="${DEFAULT_RAPIDS_CMAKE_FORMAT_FILE}"
fi
if [ -z ${RAPIDS_CMAKE_FORMAT_FILE:+PLACEHOLDER} ]; then
echo "The rapids-cmake cmake-format configuration file was not found in the default location: "
echo ""
echo "${DEFAULT_RAPIDS_CMAKE_FORMAT_FILE}"
echo ""
echo "Try setting the environment variable RAPIDS_CMAKE_FORMAT_FILE to the path to the config file."
exit 0
else
echo "Using format file ${RAPIDS_CMAKE_FORMAT_FILE}"
fi
if [[ $1 == "cmake-format" ]]; then
# We cannot pass multiple input files because of a bug in cmake-format.
# See: https://github.com/cheshirekow/cmake_format/issues/284
for cmake_file in "${@:2}"; do
cmake-format --in-place --first-comment-is-literal --config-files ${RAPIDS_CMAKE_FORMAT_FILE} ${RAPIDS_CMAKE_ROOT}/ci/checks/cmake_config_format.json -- ${cmake_file}
done
elif [[ $1 == "cmake-lint" ]]; then
# Since the pre-commit hook is verbose, we have to be careful to only
# present cmake-lint's output (which is quite verbose) if we actually
# observe a failure.
OUTPUT=$(cmake-lint --config-files ${RAPIDS_CMAKE_FORMAT_FILE} ${RAPIDS_CMAKE_ROOT}/ci/checks/cmake_config_format.json ${RAPIDS_CMAKE_ROOT}/ci/checks/cmake_config_lint.json -- ${@:2})
status=$?
if ! [ ${status} -eq 0 ]; then
echo "${OUTPUT}"
fi
exit ${status}
fi
| 0 |
rapidsai_public_repos/rapids-cmake/ci
|
rapidsai_public_repos/rapids-cmake/ci/checks/cmake_config_lint.json
|
{
"lint": {
"disabled_codes": ["C0301", "C0112"],
"function_pattern": "[0-9A-z_]+",
"macro_pattern": "[0-9A-z_]+",
"global_var_pattern": "[A-z][0-9A-z_]+",
"internal_var_pattern": "rapids[_-][A-z][0-9A-z_]+",
"local_var_pattern": "[A-z][A-z0-9_]+",
"private_var_pattern": "rapids[_-][0-9A-z_]+",
"public_var_pattern": "[A-z][0-9A-z_]+",
"argument_var_pattern": "[A-z][A-z0-9_]+",
"keyword_pattern": "[A-z][0-9A-z_]+"
}
}
| 0 |
rapidsai_public_repos/rapids-cmake/ci
|
rapidsai_public_repos/rapids-cmake/ci/checks/cmake_config_testing_lint.json
|
{
"lint": {
"disabled_codes": ["C0301", "C0111","C0112"],
"function_pattern": "[0-9A-z_]+",
"macro_pattern": "[0-9A-z_]+",
"global_var_pattern": "[A-z][0-9A-z_]+",
"internal_var_pattern": "_[A-z][0-9A-z_]+",
"local_var_pattern": "[A-z][A-z0-9_]+",
"private_var_pattern": "_[0-9A-z_]+",
"public_var_pattern": "[A-z][0-9A-z_]+",
"argument_var_pattern": "[A-z][A-z0-9_]+",
"keyword_pattern": "[A-z][0-9A-z_]+"
}
}
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/cuxfilter/.pre-commit-config.yaml
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
repos:
- repo: https://github.com/psf/black
rev: 23.7.0
hooks:
- id: black
files: python/cuxfilter/.*
# Explicitly specify the pyproject.toml at the repo root, not per-project.
args: ["--config", "pyproject.toml"]
- repo: https://github.com/PyCQA/flake8
rev: 6.0.0
hooks:
- id: flake8
args: ["--config=.flake8"]
files: python/.*$
- repo: https://github.com/rapidsai/dependency-file-generator
rev: v1.7.1
hooks:
- id: rapids-dependency-file-generator
args: ["--clean"]
default_language_version:
python: python3
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/cuxfilter/pyproject.toml
|
[tool.black]
line-length = 79
target-version = ["py39"]
include = '\.py?$'
force-exclude = '''
/(
thirdparty |
\.eggs |
\.git |
\.hg |
\.mypy_cache |
\.tox |
\.venv |
_build |
buck-out |
build |
dist
)/
'''
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/cuxfilter/.flake8
|
# Copyright (c) 2023, NVIDIA CORPORATION.
[flake8]
filename = *.py,
exclude = __init__.py, *.egg, build, docs, .git
ignore =
# line break before binary operator
W503,
# whitespace before :
E203
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/cuxfilter/README.md
|
# <div align="left"><img src="https://rapids.ai/assets/images/rapids_logo.png" width="90px"/> cuxfilter
cuxfilter ( ku-cross-filter ) is a [RAPIDS](https://github.com/rapidsai) framework to connect web visualizations to GPU accelerated crossfiltering. Inspired by the javascript version of the [original](https://github.com/crossfilter/crossfilter), it enables interactive and super fast multi-dimensional filtering of 100 million+ row tabular datasets via [cuDF](https://github.com/rapidsai/cudf).
## RAPIDS Viz
cuxfilter is one of the core projects of the “RAPIDS viz” team. Taking the axiom that “a slider is worth a thousand queries” from @lmeyerov to heart, we want to enable fast exploratory data analytics through an easier-to-use pythonic notebook interface.
As there are many fantastic visualization libraries available for the web, our general principle is not to create our own viz library, but to enhance others with faster acceleration, larger datasets, and better dev UX. **Basically, we want to take the headache out of interconnecting multiple charts to a GPU backend, so you can get to visually exploring data faster.**
By the way, cuxfilter is best used to interact with large (1 million+) tabular datasets. GPU’s are fast, but accessing that speedup requires some architecture overhead that isn’t worthwhile for small datasets.
For more detailed requirements, see below.
## cuxfilter Architecture
The current version of cuxfilter leverages jupyter notebook and bokeh server to reduce architecture and installation complexity.

### Open Source Projects
cuxfilter wouldn’t be possible without using these great open source projects:
- [Bokeh](https://docs.bokeh.org/en/latest/)
- [DataShader](http://datashader.org/)
- [Panel](https://panel.pyviz.org/)
- [Falcon](https://github.com/uwdata/falcon)
- [Jupyter](https://jupyter.org/about)
### Where is the original cuxfilter and Mortgage Viz Demo?
The original version (0.2) of cuxfilter, most known for the backend powering the Mortgage Viz Demo, has been moved into the [`GTC-2018-mortgage-visualization branch`](https://github.com/rapidsai/cuxfilter/tree/GTC-2018-mortgage-visualization) branch. As it has a much more complicated backend and javascript API, we’ve decided to focus more on the streamlined notebook focused version here.
## Usage
### Example 1
[](https://studiolab.sagemaker.aws/import/github/rapidsai/cuxfilter/blob/branch-22.02/notebooks/auto_accidents_example.ipynb) [<img src="https://img.shields.io/badge/-Setup Studio Lab Environment-gray.svg">](./notebooks/README.md#amazon-sagemaker-studio-lab)
[](https://colab.research.google.com/github/rapidsai/cuxfilter/blob/branch-22.02/notebooks/auto_accidents_example.ipynb) [<img src="https://img.shields.io/badge/-Setup Colab Environment-gray.svg">](./notebooks/README.md#google-colab)
```python
import cuxfilter
#update data_dir if you have downloaded datasets elsewhere
DATA_DIR = './data'
from cuxfilter.sampledata import datasets_check
datasets_check('auto_accidents', base_dir=DATA_DIR)
cux_df = cuxfilter.DataFrame.from_arrow(DATA_DIR+'/auto_accidents.arrow')
cux_df.data['ST_CASE'] = cux_df.data['ST_CASE'].astype('float64')
label_map = {1: 'Sunday', 2: 'Monday', 3: 'Tuesday', 4: 'Wednesday', 5: 'Thursday', 6: 'Friday', 7: 'Saturday', 9: 'Unknown'}
cux_df.data['DAY_WEEK_STR'] = cux_df.data.DAY_WEEK.map(label_map)
gtc_demo_red_blue_palette = [ "#3182bd", "#6baed6", "#7b8ed8", "#e26798", "#ff0068" , "#323232" ]
#declare charts
chart1 = cuxfilter.charts.scatter(x='dropoff_x', y='dropoff_y', aggregate_col='DAY_WEEK', aggregate_fn='mean',
color_palette=gtc_demo_red_blue_palette, tile_provider='CartoLight', unselected_alpha=0.2,
pixel_shade_type='linear')
chart2 = cuxfilter.charts.multi_select('YEAR')
chart3 = cuxfilter.charts.bar('DAY_WEEK_STR')
chart4 = cuxfilter.charts.bar('MONTH')
#declare dashboard
d = cux_df.dashboard([chart1, chart3, chart4], sidebar=[chart2], layout=cuxfilter.layouts.feature_and_double_base, title='Auto Accident Dataset')
# run the dashboard as a webapp:
# Bokeh and Datashader based charts also have a `save` tool on the side toolbar, which can download and save the individual chart when interacting with the dashboard.
# d.show('jupyter-notebook/lab-url')
#run the dashboard within the notebook cell
d.app()
```

### Example 2
[](https://studiolab.sagemaker.aws/import/github/rapidsai/cuxfilter/blob/branch-22.02/notebooks/Mortgage_example.ipynb) [<img src="https://img.shields.io/badge/-Setup Studio Lab Environment-gray.svg">](./notebooks/README.md#amazon-sagemaker-studio-lab)
[](https://colab.research.google.com/github/rapidsai/cuxfilter/blob/branch-22.02/notebooks/Mortgage_example.ipynb) [<img src="https://img.shields.io/badge/-Setup Colab Environment-gray.svg">](./notebooks/README.md#google-colab)
```python
import cuxfilter
#update data_dir if you have downloaded datasets elsewhere
DATA_DIR = './data'
from cuxfilter.sampledata import datasets_check
datasets_check('mortgage', base_dir=DATA_DIR)
cux_df = cuxfilter.DataFrame.from_arrow(DATA_DIR + '/146M_predictions_v2.arrow')
geoJSONSource='https://raw.githubusercontent.com/rapidsai/cuxfilter/GTC-2018-mortgage-visualization/javascript/demos/GTC%20demo/src/data/zip3-ms-rhs-lessprops.json'
chart0 = cuxfilter.charts.choropleth( x='zip', color_column='delinquency_12_prediction', color_aggregate_fn='mean',
elevation_column='current_actual_upb', elevation_factor=0.00001, elevation_aggregate_fn='sum',
geoJSONSource=geoJSONSource
)
chart2 = cuxfilter.charts.bar('delinquency_12_prediction',data_points=50)
chart3 = cuxfilter.charts.range_slider('borrower_credit_score',data_points=50)
chart1 = cuxfilter.charts.drop_down('dti')
#declare dashboard
d = cux_df.dashboard([chart0, chart2],sidebar=[chart3, chart1], layout=cuxfilter.layouts.feature_and_double_base,theme = cuxfilter.themes.dark, title='Mortgage Dashboard')
# run the dashboard within the notebook cell
# Bokeh and Datashader based charts also have a `save` tool on the side toolbar, which can download and save the individual chart when interacting with the dashboard.
# d.app()
#run the dashboard as a webapp:
# if running on a port other than localhost:8888, run d.show(jupyter-notebook-lab-url:port)
d.show()
```

## Documentation
Full documentation can be found [on the RAPIDS docs page](https://docs.rapids.ai/api/cuxfilter/stable/).
Troubleshooting help can be found [on our troubleshooting page](https://docs.rapids.ai/api/cuxfilter/stable/installation.html#troubleshooting).
## General Dependencies
- python
- cudf
- datashader
- cupy
- panel
- bokeh
- pyproj
- geopandas
- pyppeteer
- jupyter-server-proxy
## Quick Start
Please see the [Demo Docker Repository](https://hub.docker.com/r/rapidsai/rapidsai/), choosing a tag based on the NVIDIA CUDA version you’re running. This provides a ready to run Docker container with example notebooks and data, showcasing how you can utilize cuxfilter, cuDF and other RAPIDS libraries.
## Installation
### CUDA/GPU requirements
- CUDA 11.2+
- NVIDIA driver 450.80.02+
- Pascal architecture or better (Compute Capability >=6.0)
### Conda
cuxfilter can be installed with conda ([miniconda](https://conda.io/miniconda.html), or the full [Anaconda distribution](https://www.anaconda.com/download)) from the `rapidsai` channel:
For nightly version `cuxfilter version == 23.12` :
```bash
# for CUDA 12.0
conda install -c rapidsai-nightly -c conda-forge -c nvidia \
cuxfilter=23.12 python=3.10 cuda-version=12.0
# for CUDA 11.8
conda install -c rapidsai-nightly -c conda-forge -c nvidia \
cuxfilter=23.12 python=3.10 cuda-version=11.8
```
For the stable version of `cuxfilter` :
```bash
# for CUDA 12.0
conda install -c rapidsai -c conda-forge -c nvidia \
cuxfilter python=3.10 cuda-version=12.0
# for CUDA 11.8
conda install -c rapidsai -c conda-forge -c nvidia \
cuxfilter python=3.10 cuda-version=11.8
```
Note: cuxfilter is supported only on Linux, and with Python versions 3.9 and later.
### PyPI
Install cuxfilter from PyPI using pip:
```bash
# for CUDA 12.0
pip install cuxfilter-cu12 --extra-index-url=https://pypi.nvidia.com
# for CUDA 11.8
pip install cuxfilter-cu11 --extra-index-url=https://pypi.nvidia.com
```
See the [Get RAPIDS version picker](https://rapids.ai/start.html) for more OS and version info.
### Build/Install from Source
See [build instructions](CONTRIBUTING.md#setting-up-your-build-environment).
## Troubleshooting
**bokeh server in jupyter lab**
To run the bokeh server in a jupyter lab, install jupyterlab dependencies
```bash
conda install -c conda-forge jupyterlab
jupyter labextension install @pyviz/jupyterlab_pyviz
jupyter labextension install jupyterlab_bokeh
```
## Download Datasets
1. Auto download datasets
The notebooks inside `python/notebooks` already have a check function which verifies whether the example dataset is downloaded, and downloads it if it's not.
2. Download manually
While in the directory you want the datasets to be saved, execute the following
> Note: Auto Accidents dataset has corrupted coordinate data from the years 2012-2014
```bash
#go to the environment where cuxfilter is installed. Skip if in a docker container
source activate test_env
#download and extract the datasets
curl https://s3.amazonaws.com/nyc-tlc/trip+data/yellow_tripdata_2015-01.csv --create-dirs -o ./nyc_taxi.csv
curl https://data.rapids.ai/viz-data/146M_predictions_v2.arrow.gz --create-dirs -o ./146M_predictions_v2.arrow.gz
curl https://data.rapids.ai/viz-data/auto_accidents.arrow.gz --create-dirs -o ./auto_accidents.arrow.gz
python -c "from cuxfilter.sampledata import datasets_check; datasets_check(base_dir='./')"
```
## Guides and Layout Templates
Currently supported layout templates and example code can be found on the [layouts page](https://rapidsai.github.io/cuxfilter/layouts/Layouts.html).
### Currently Supported Charts
| Library | Chart type |
| ------------- | ------------------------------------------------------------------------------------------------ |
| bokeh | bar |
| datashader | scatter, scatter_geo, line, stacked_lines, heatmap, graph |
| panel_widgets | range_slider, date_range_slider, float_slider, int_slider, drop_down, multi_select, card, number |
| custom | view_dataframe |
| deckgl | choropleth(3d and 2d) |
## Contributing Developers Guide
cuxfilter acts like a connector library and it is easy to add support for new libraries. The `python/cuxfilter/charts/core` directory has all the core chart classes which can be inherited and used to implement a few (viz related) functions and support dashboarding in cuxfilter directly.
You can see the examples to implement viz libraries in the bokeh and cudatashader directories. Let us know if you would like to add a chart by opening a feature request issue or submitting a PR.
For more details, check out the [contributing guide](./CONTRIBUTING.md).
## Future Work
cuxfilter development is in early stages and on going. See what we are planning next on the [projects page](https://github.com/rapidsai/cuxfilter/projects).
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/cuxfilter/CHANGELOG.md
|
# cuXfilter 23.10.00 (11 Oct 2023)
## 🐛 Bug Fixes
- fix external workflow ([#537](https://github.com/rapidsai/cuxfilter/pull/537)) [@AjayThorve](https://github.com/AjayThorve)
- Use `conda mambabuild` not `mamba mambabuild` ([#535](https://github.com/rapidsai/cuxfilter/pull/535)) [@bdice](https://github.com/bdice)
## 📖 Documentation
- Update docs ([#530](https://github.com/rapidsai/cuxfilter/pull/530)) [@AjayThorve](https://github.com/AjayThorve)
- Add str support to dropdown ([#529](https://github.com/rapidsai/cuxfilter/pull/529)) [@AjayThorve](https://github.com/AjayThorve)
- Manually merge Branch 23.08 into 23.10 ([#518](https://github.com/rapidsai/cuxfilter/pull/518)) [@AjayThorve](https://github.com/AjayThorve)
- Branch 23.10 merge 23.08 ([#510](https://github.com/rapidsai/cuxfilter/pull/510)) [@vyasr](https://github.com/vyasr)
## 🛠️ Improvements
- Update image names ([#540](https://github.com/rapidsai/cuxfilter/pull/540)) [@AyodeAwe](https://github.com/AyodeAwe)
- Simplify wheel build scripts and allow alphas of RAPIDS dependencies ([#534](https://github.com/rapidsai/cuxfilter/pull/534)) [@divyegala](https://github.com/divyegala)
- Use `copy-pr-bot` ([#531](https://github.com/rapidsai/cuxfilter/pull/531)) [@ajschmidt8](https://github.com/ajschmidt8)
- Improve external tests ([#520](https://github.com/rapidsai/cuxfilter/pull/520)) [@AjayThorve](https://github.com/AjayThorve)
# cuXfilter 23.08.00 (9 Aug 2023)
## 🐛 Bug Fixes
- Fix dependencies - pyproj ([#514](https://github.com/rapidsai/cuxfilter/pull/514)) [@AjayThorve](https://github.com/AjayThorve)
- Fix nightly wheel testing workflow. ([#507](https://github.com/rapidsai/cuxfilter/pull/507)) [@bdice](https://github.com/bdice)
- Fix broken symlink ([#502](https://github.com/rapidsai/cuxfilter/pull/502)) [@raydouglass](https://github.com/raydouglass)
- Fix GHA: external dependencies tests ([#498](https://github.com/rapidsai/cuxfilter/pull/498)) [@AjayThorve](https://github.com/AjayThorve)
- Fix scheduled GHA workflow issue ([#493](https://github.com/rapidsai/cuxfilter/pull/493)) [@AjayThorve](https://github.com/AjayThorve)
- fix incorrect xy-coordinate issue in graphs ([#487](https://github.com/rapidsai/cuxfilter/pull/487)) [@AjayThorve](https://github.com/AjayThorve)
## 📖 Documentation
- Doc updates ([#516](https://github.com/rapidsai/cuxfilter/pull/516)) [@AjayThorve](https://github.com/AjayThorve)
- Switch Docs to PyData Theme ([#500](https://github.com/rapidsai/cuxfilter/pull/500)) [@exactlyallan](https://github.com/exactlyallan)
- Added visualization guide notebook ([#496](https://github.com/rapidsai/cuxfilter/pull/496)) [@exactlyallan](https://github.com/exactlyallan)
- fix Auto-merge: Branch 23.08 merge 23.06 ([#482](https://github.com/rapidsai/cuxfilter/pull/482)) [@AjayThorve](https://github.com/AjayThorve)
## 🛠️ Improvements
- Switch to new wheels pipeline ([#506](https://github.com/rapidsai/cuxfilter/pull/506)) [@divyegala](https://github.com/divyegala)
- Followup: Revert CUDA 12.0 CI workflows to branch-23.08 ([#504](https://github.com/rapidsai/cuxfilter/pull/504)) [@AjayThorve](https://github.com/AjayThorve)
- Revert CUDA 12.0 CI workflows to branch-23.08. ([#503](https://github.com/rapidsai/cuxfilter/pull/503)) [@bdice](https://github.com/bdice)
- cuxfilter: Build CUDA 12 packages ([#499](https://github.com/rapidsai/cuxfilter/pull/499)) [@AjayThorve](https://github.com/AjayThorve)
- Add wheel builds to cuxfilter ([#497](https://github.com/rapidsai/cuxfilter/pull/497)) [@AjayThorve](https://github.com/AjayThorve)
- Refactor to use holoviews powered bar charts ([#494](https://github.com/rapidsai/cuxfilter/pull/494)) [@AjayThorve](https://github.com/AjayThorve)
- Improvement/add panel 1.0+, holoviews 1.16+, bokeh 3.1+ support ([#492](https://github.com/rapidsai/cuxfilter/pull/492)) [@AjayThorve](https://github.com/AjayThorve)
- use rapids-upload-docs script ([#489](https://github.com/rapidsai/cuxfilter/pull/489)) [@AyodeAwe](https://github.com/AyodeAwe)
- [Review] Remove Datatiles support ([#488](https://github.com/rapidsai/cuxfilter/pull/488)) [@AjayThorve](https://github.com/AjayThorve)
- Remove documentation build scripts for Jenkins ([#483](https://github.com/rapidsai/cuxfilter/pull/483)) [@ajschmidt8](https://github.com/ajschmidt8)
# cuXfilter 23.06.00 (7 Jun 2023)
## 🚨 Breaking Changes
- Dropping Python 3.8 ([#469](https://github.com/rapidsai/cuxfilter/pull/469)) [@divyegala](https://github.com/divyegala)
## 🐛 Bug Fixes
- fix tests failing due to unsorted results ([#479](https://github.com/rapidsai/cuxfilter/pull/479)) [@AjayThorve](https://github.com/AjayThorve)
## 🚀 New Features
- GHA - external dependency testing workflow: add a schedule to run once every week ([#478](https://github.com/rapidsai/cuxfilter/pull/478)) [@AjayThorve](https://github.com/AjayThorve)
## 🛠️ Improvements
- Require Numba 0.57.0+ & NumPy 1.21.0+ ([#480](https://github.com/rapidsai/cuxfilter/pull/480)) [@jakirkham](https://github.com/jakirkham)
- run docs nightly too ([#477](https://github.com/rapidsai/cuxfilter/pull/477)) [@AyodeAwe](https://github.com/AyodeAwe)
- Update cupy to >=12 ([#475](https://github.com/rapidsai/cuxfilter/pull/475)) [@raydouglass](https://github.com/raydouglass)
- Revert shared-action-workflows pin ([#472](https://github.com/rapidsai/cuxfilter/pull/472)) [@divyegala](https://github.com/divyegala)
- Dropping Python 3.8 ([#469](https://github.com/rapidsai/cuxfilter/pull/469)) [@divyegala](https://github.com/divyegala)
- Remove usage of rapids-get-rapids-version-from-git ([#468](https://github.com/rapidsai/cuxfilter/pull/468)) [@jjacobelli](https://github.com/jjacobelli)
- Use ARC V2 self-hosted runners for GPU jobs ([#467](https://github.com/rapidsai/cuxfilter/pull/467)) [@jjacobelli](https://github.com/jjacobelli)
# cuXfilter 23.04.00 (6 Apr 2023)
## 🐛 Bug Fixes
- Updates and fixes ([#463](https://github.com/rapidsai/cuxfilter/pull/463)) [@AjayThorve](https://github.com/AjayThorve)
## 📖 Documentation
- Add Viz catalogue code to default branch ([#455](https://github.com/rapidsai/cuxfilter/pull/455)) [@AjayThorve](https://github.com/AjayThorve)
- Update datashader version ([#451](https://github.com/rapidsai/cuxfilter/pull/451)) [@AjayThorve](https://github.com/AjayThorve)
- Forward-merge branch-23.02 to branch-23.04 ([#440](https://github.com/rapidsai/cuxfilter/pull/440)) [@GPUtester](https://github.com/GPUtester)
## 🚀 New Features
- Fea/dependency gpu testing ([#456](https://github.com/rapidsai/cuxfilter/pull/456)) [@AjayThorve](https://github.com/AjayThorve)
## 🛠️ Improvements
- fix input paramter to workflow ([#457](https://github.com/rapidsai/cuxfilter/pull/457)) [@AjayThorve](https://github.com/AjayThorve)
- Update datasets download URL ([#454](https://github.com/rapidsai/cuxfilter/pull/454)) [@jjacobelli](https://github.com/jjacobelli)
- Fix GHA build workflow ([#453](https://github.com/rapidsai/cuxfilter/pull/453)) [@AjayThorve](https://github.com/AjayThorve)
- Reduce error handling verbosity in CI tests scripts ([#447](https://github.com/rapidsai/cuxfilter/pull/447)) [@AjayThorve](https://github.com/AjayThorve)
- Update shared workflow branches ([#446](https://github.com/rapidsai/cuxfilter/pull/446)) [@ajschmidt8](https://github.com/ajschmidt8)
- Remove gpuCI scripts. ([#445](https://github.com/rapidsai/cuxfilter/pull/445)) [@bdice](https://github.com/bdice)
- Move date to build string in `conda` recipe ([#441](https://github.com/rapidsai/cuxfilter/pull/441)) [@ajschmidt8](https://github.com/ajschmidt8)
- CVE-2007-4559 Patch ([#409](https://github.com/rapidsai/cuxfilter/pull/409)) [@TrellixVulnTeam](https://github.com/TrellixVulnTeam)
# cuXfilter 23.02.00 (9 Feb 2023)
## 🐛 Bug Fixes
- fix path for dir to uploaded ([#437](https://github.com/rapidsai/cuxfilter/pull/437)) [@AjayThorve](https://github.com/AjayThorve)
## 📖 Documentation
- Docs/update ([#439](https://github.com/rapidsai/cuxfilter/pull/439)) [@AjayThorve](https://github.com/AjayThorve)
- Update channel priority ([#415](https://github.com/rapidsai/cuxfilter/pull/415)) [@bdice](https://github.com/bdice)
## 🚀 New Features
- Fea/add save chart option to individual charts ([#429](https://github.com/rapidsai/cuxfilter/pull/429)) [@AjayThorve](https://github.com/AjayThorve)
## 🛠️ Improvements
- Update shared workflow branches ([#442](https://github.com/rapidsai/cuxfilter/pull/442)) [@ajschmidt8](https://github.com/ajschmidt8)
- Add docs build to GH actions ([#436](https://github.com/rapidsai/cuxfilter/pull/436)) [@AjayThorve](https://github.com/AjayThorve)
- Re-enable `graphs.ipynb` notebook in CI ([#428](https://github.com/rapidsai/cuxfilter/pull/428)) [@ajschmidt8](https://github.com/ajschmidt8)
- Build CUDA 11.8 and Python 3.10 Packages ([#426](https://github.com/rapidsai/cuxfilter/pull/426)) [@bdice](https://github.com/bdice)
- Update workflows for nightly tests ([#425](https://github.com/rapidsai/cuxfilter/pull/425)) [@ajschmidt8](https://github.com/ajschmidt8)
- Enable `Recently Updated` Check ([#424](https://github.com/rapidsai/cuxfilter/pull/424)) [@ajschmidt8](https://github.com/ajschmidt8)
- remove stale cudatashader build commands ([#423](https://github.com/rapidsai/cuxfilter/pull/423)) [@AjayThorve](https://github.com/AjayThorve)
- Update style checks to use pre-commit. ([#420](https://github.com/rapidsai/cuxfilter/pull/420)) [@bdice](https://github.com/bdice)
- Fix broken symlink ([#419](https://github.com/rapidsai/cuxfilter/pull/419)) [@ajschmidt8](https://github.com/ajschmidt8)
- Add GitHub Actions Workflows ([#418](https://github.com/rapidsai/cuxfilter/pull/418)) [@ajschmidt8](https://github.com/ajschmidt8)
- Add dependencies.yaml ([#416](https://github.com/rapidsai/cuxfilter/pull/416)) [@AjayThorve](https://github.com/AjayThorve)
# cuXfilter 22.12.00 (8 Dec 2022)
## 📖 Documentation
- Create symlink to 10_minutes_to_cuxfilter.ipynb into the notebooks fo… ([#413](https://github.com/rapidsai/cuxfilter/pull/413)) [@taureandyernv](https://github.com/taureandyernv)
## 🛠️ Improvements
- Update `panel` version ([#421](https://github.com/rapidsai/cuxfilter/pull/421)) [@ajschmidt8](https://github.com/ajschmidt8)
- Remove stale labeler ([#410](https://github.com/rapidsai/cuxfilter/pull/410)) [@raydouglass](https://github.com/raydouglass)
# cuXfilter 22.10.00 (12 Oct 2022)
## 🐛 Bug Fixes
- fix test failing on non-matching indices for newer dask version ([#402](https://github.com/rapidsai/cuxfilter/pull/402)) [@AjayThorve](https://github.com/AjayThorve)
- Notebook update: removed spaces in directory name ([#400](https://github.com/rapidsai/cuxfilter/pull/400)) [@mmccarty](https://github.com/mmccarty)
## 🚀 New Features
- Allow cupy 11 ([#401](https://github.com/rapidsai/cuxfilter/pull/401)) [@galipremsagar](https://github.com/galipremsagar)
# cuXfilter 22.08.00 (17 Aug 2022)
## 🐛 Bug Fixes
- fix/incorrect-bokeh-legend-attributes ([#381](https://github.com/rapidsai/cuxfilter/pull/381)) [@AjayThorve](https://github.com/AjayThorve)
## 📖 Documentation
- Use common custom `js` & `css` code ([#394](https://github.com/rapidsai/cuxfilter/pull/394)) [@galipremsagar](https://github.com/galipremsagar)
- Branch 22.08 merge 22.06 ([#377](https://github.com/rapidsai/cuxfilter/pull/377)) [@AjayThorve](https://github.com/AjayThorve)
## 🛠️ Improvements
- Update `pyproj` version specifier ([#392](https://github.com/rapidsai/cuxfilter/pull/392)) [@ajschmidt8](https://github.com/ajschmidt8)
- Update `geopandas` version specificer ([#390](https://github.com/rapidsai/cuxfilter/pull/390)) [@ajschmidt8](https://github.com/ajschmidt8)
- Revert "Allow CuPy 11" ([#388](https://github.com/rapidsai/cuxfilter/pull/388)) [@galipremsagar](https://github.com/galipremsagar)
- Update `nodejs` version specifier ([#385](https://github.com/rapidsai/cuxfilter/pull/385)) [@ajschmidt8](https://github.com/ajschmidt8)
- Allow CuPy 11 ([#383](https://github.com/rapidsai/cuxfilter/pull/383)) [@jakirkham](https://github.com/jakirkham)
# cuXfilter 22.06.00 (7 Jun 2022)
## 🔗 Links
- [Development Branch](https://github.com/rapidsai/cuxfilter/tree/branch-22.06)
- [Compare with `main` branch](https://github.com/rapidsai/cuxfilter/compare/main...branch-22.06)
## 🐛 Bug Fixes
- Fixed native support for dask_cudf dataframes. Seamless integration results in a dask_cudf.DataFrame working as a drop-in replacement for a cudf.DataFrame ([#359](https://github.com/rapidsai/cuxfilter/pull/359), [#366](https://github.com/rapidsai/cuxfilter/pull/366)) [@AjayThorve](https://github.com/AjayThorve)
## 🛠️ Improvements
- added `unselected_alpha` parameter to all datashader charts, displays unselected data as transparent (default alpha=0.2) ([#366](https://github.com/rapidsai/cuxfilter/pull/366))
- added binary data transfer support for choropleth charts, which results in a much smoother experience interacting with the choropleth charts ([#366](https://github.com/rapidsai/cuxfilter/pull/366))
- Simplify conda recipe ([#373](https://github.com/rapidsai/cuxfilter/pull/373)) [@Ethyling](https://github.com/Ethyling)
- Forward-merge branch-22.04 to branch-22.06 ([#370](https://github.com/rapidsai/cuxfilter/pull/370)) [@Ethyling](https://github.com/Ethyling)
- Use conda to build python packages during GPU tests ([#368](https://github.com/rapidsai/cuxfilter/pull/368)) [@Ethyling](https://github.com/Ethyling)
- Use conda compilers ([#351](https://github.com/rapidsai/cuxfilter/pull/351)) [@Ethyling](https://github.com/Ethyling)
- Build packages using mambabuild ([#347](https://github.com/rapidsai/cuxfilter/pull/347)) [@Ethyling](https://github.com/Ethyling)
# cuXfilter 22.04.00 (6 Apr 2022)
## 🐛 Bug Fixes
- update panel version ([#361](https://github.com/rapidsai/cuxfilter/pull/361)) [@AjayThorve](https://github.com/AjayThorve)
## 📖 Documentation
- Fix/examples ([#353](https://github.com/rapidsai/cuxfilter/pull/353)) [@AjayThorve](https://github.com/AjayThorve)
- Fix deprecated code changes of `cudf` ([#348](https://github.com/rapidsai/cuxfilter/pull/348)) [@galipremsagar](https://github.com/galipremsagar)
## 🛠️ Improvements
- Temporarily disable new `ops-bot` functionality ([#357](https://github.com/rapidsai/cuxfilter/pull/357)) [@ajschmidt8](https://github.com/ajschmidt8)
- Update `bokeh` version ([#355](https://github.com/rapidsai/cuxfilter/pull/355)) [@ajschmidt8](https://github.com/ajschmidt8)
- Add `.github/ops-bot.yaml` config file ([#352](https://github.com/rapidsai/cuxfilter/pull/352)) [@ajschmidt8](https://github.com/ajschmidt8)
# cuXfilter 22.02.00 (2 Feb 2022)
## 🐛 Bug Fixes
- fix reinit function ([#345](https://github.com/rapidsai/cuxfilter/pull/345)) [@AjayThorve](https://github.com/AjayThorve)
## 📖 Documentation
- Documentation & Notebook updates ([#341](https://github.com/rapidsai/cuxfilter/pull/341)) [@AjayThorve](https://github.com/AjayThorve)
- Merge branch-21.12 into branch-22.02 ([#340](https://github.com/rapidsai/cuxfilter/pull/340)) [@AjayThorve](https://github.com/AjayThorve)
- Fix/remove custom extensions ([#324](https://github.com/rapidsai/cuxfilter/pull/324)) [@AjayThorve](https://github.com/AjayThorve)
## 🛠️ Improvements
- adds layouts to in-notebook dashboards (via d.app()) similar to standalone web apps ([#324](https://github.com/rapidsai/cuxfilter/pull/324)) [@AjayThorve ](https://github.com/AjayThorve )
- enabled google colab and amazon sagemaker studio support for in-notebook dashboards ([#324](https://github.com/rapidsai/cuxfilter/pull/324)) [@AjayThorve ](https://github.com/AjayThorve )
- replace distutils.version class with packaging.version.Version ([#338](https://github.com/rapidsai/cuxfilter/pull/338)) [@AjayThorve](https://github.com/AjayThorve)
- Fix imports tests syntax ([#336](https://github.com/rapidsai/cuxfilter/pull/336)) [@Ethyling](https://github.com/Ethyling)
# cuXfilter 21.12.00 (9 Dec 2021)
## 📖 Documentation
- update docstrings examples to fix #328 ([#329](https://github.com/rapidsai/cuxfilter/pull/329)) [@AjayThorve](https://github.com/AjayThorve)
# cuXfilter 21.10.00 (7 Oct 2021)
## 🐛 Bug Fixes
- revert pyppeteer dependency changes ([#322](https://github.com/rapidsai/cuxfilter/pull/322)) [@AjayThorve](https://github.com/AjayThorve)
- Fix/unique names ([#317](https://github.com/rapidsai/cuxfilter/pull/317)) [@AjayThorve](https://github.com/AjayThorve)
## 📖 Documentation
- Branch 21.10 merge 21.08 ([#318](https://github.com/rapidsai/cuxfilter/pull/318)) [@AjayThorve](https://github.com/AjayThorve)
## 🛠️ Improvements
- fix chart names being saved as incorrect keys prior to initialization ([#325](https://github.com/rapidsai/cuxfilter/pull/325)) [@AjayThorve](https://github.com/AjayThorve)
- Skip imports tests on arm64 ([#320](https://github.com/rapidsai/cuxfilter/pull/320)) [@Ethyling](https://github.com/Ethyling)
- ENH Replace gpuci_conda_retry with gpuci_mamba_retry ([#305](https://github.com/rapidsai/cuxfilter/pull/305)) [@dillon-cullinan](https://github.com/dillon-cullinan)
# cuXfilter 21.08.00 (Date TBD)
## 🐛 Bug Fixes
- Fix/follow up to #303 ([#304](https://github.com/rapidsai/cuxfilter/pull/304)) [@AjayThorve](https://github.com/AjayThorve)
- update pyproj version ([#302](https://github.com/rapidsai/cuxfilter/pull/302)) [@AjayThorve](https://github.com/AjayThorve)
## 🛠️ Improvements
- Fix/update bokeh version ([#303](https://github.com/rapidsai/cuxfilter/pull/303)) [@AjayThorve](https://github.com/AjayThorve)
- Fix `21.08` forward-merge conflicts ([#301](https://github.com/rapidsai/cuxfilter/pull/301)) [@ajschmidt8](https://github.com/ajschmidt8)
- Fix merge conflicts ([#290](https://github.com/rapidsai/cuxfilter/pull/290)) [@ajschmidt8](https://github.com/ajschmidt8)
# cuXfilter 21.06.00 (9 Jun 2021)
## 🛠️ Improvements
- Update `geopandas` version spec ([#292](https://github.com/rapidsai/cuxfilter/pull/292)) [@ajschmidt8](https://github.com/ajschmidt8)
- Update environment variable used to determine `cuda_version` ([#289](https://github.com/rapidsai/cuxfilter/pull/289)) [@ajschmidt8](https://github.com/ajschmidt8)
- Update `CHANGELOG.md` links for calver ([#287](https://github.com/rapidsai/cuxfilter/pull/287)) [@ajschmidt8](https://github.com/ajschmidt8)
- Update docs build script ([#286](https://github.com/rapidsai/cuxfilter/pull/286)) [@ajschmidt8](https://github.com/ajschmidt8)
- support space in workspace ([#267](https://github.com/rapidsai/cuxfilter/pull/267)) [@jolorunyomi](https://github.com/jolorunyomi)
# cuXfilter 0.19.0 (21 Apr 2021)
## 🐛 Bug Fixes
- Bug fix ([#261](https://github.com//rapidsai/cuxfilter/pull/261)) [@AjayThorve](https://github.com/AjayThorve)
- Bug fixes ([#257](https://github.com//rapidsai/cuxfilter/pull/257)) [@AjayThorve](https://github.com/AjayThorve)
## 📖 Documentation
- Fea/sidebar api change ([#262](https://github.com//rapidsai/cuxfilter/pull/262)) [@AjayThorve](https://github.com/AjayThorve)
- Auto-merge branch-0.18 to branch-0.19 ([#237](https://github.com//rapidsai/cuxfilter/pull/237)) [@GPUtester](https://github.com/GPUtester)
## 🛠️ Improvements
- Update Changelog Link ([#258](https://github.com//rapidsai/cuxfilter/pull/258)) [@ajschmidt8](https://github.com/ajschmidt8)
- Prepare Changelog for Automation ([#253](https://github.com//rapidsai/cuxfilter/pull/253)) [@ajschmidt8](https://github.com/ajschmidt8)
- Update 0.18 changelog entry ([#252](https://github.com//rapidsai/cuxfilter/pull/252)) [@ajschmidt8](https://github.com/ajschmidt8)
- Fix merge conflicts in #233 ([#234](https://github.com//rapidsai/cuxfilter/pull/234)) [@ajschmidt8](https://github.com/ajschmidt8)
# cuXfilter 0.18.0 (24 Feb 2021)
## Bug Fixes 🐛
- Add static html (#238) @AjayThorve
## Documentation 📖
- Update docs (#236) @AjayThorve
## Improvements 🛠️
- Update stale GHA with exemptions & new labels (#247) @mike-wendt
- Add GHA to mark issues/prs as stale/rotten (#244) @Ethyling
- Pin Node version (#239) @ajschmidt8
- fix state preserving issue for lasso-select callbacks (#231) @AjayThorve
- Prepare Changelog for Automation (#229) @ajschmidt8
- New charts - Number & Card (#228) @AjayThorve
- Refactor themes (#227) @AjayThorve
- Updated templates using Panel template + React-grid-layout (#226) @AjayThorve
- Auto-label PRs based on their content (#223) @jolorunyomi
- Fix forward-merger conflicts for #218 (#221) @ajschmidt8
- Branch 0.18 merge 0.17 - fix auto merge conflicts (#219) @AjayThorve
# cuXfilter 0.17.0 (10 Dec 2020)
## New Features
- PR #208 Adds support for new dtype - datetime for all chart types except choropleths, Added new chart widget type - DateRangeSlider
## Improvements
- PR #208 refactor - merged BaseLine and BaseBar to BaseAggregate
- PR #215 cleaned up gpuCI scripts
## Bug Fixes
- PR #209 remove deprecated cudf methods- `to_gpu_matrix`, `add_column` and groupby parameter `method`
- PR #212 remove redundant docs folders and files, removed bloated notebooks
- PR #214 fix map_style in choropleths, and fix custom_binning param issue in core_aggregate charts
- PR #216 fix dashboard._get_server preventing the dashboard function error for panel>=0.10.0
- PR #217 pin open-ended dependency versions
# cuXfilter 0.16.0 (21 Oct 2020)
## New Features
- PR #177 Add support for lasso selections
- PR #192 Added drop_duplicates for view_dataframe chart type
- PR #194 Added jupyterhub support
## Improvements
- PR #191 Update doc build script for CI
- PR #192 Optimize graph querying logic
- PR #193 Update ci/local/README.md
## Bug Fixes
- PR #190 fix conflicts related to auto-merge with branch-0.15
- PR #192 fixes issues with non_aggregate charts having permanent inplace querying, and query_by_indices
- PR #196 fixes issue with static http scheme applied for dashboard url, now picking scheme from base_url
- PR #198 Fix notebook error handling in gpuCI
- PR #199, #202 fix doc build issues
# cuXfilter 0.15.0 (08 Sep 2020)
## New Features
- PR #164 Added new Graph api, supports (nodes[cuDF], edges[cuDF]) input
- PR #168 Added legends to non_aggregate charts
## Improvements
- PR #158 Add docs build script
- PR #159 Layouts Refactor
- PR #160 Install dependencies via meta packages
- PR #162 Dashboard and templates cleanup and tests
- PR #163 Updated Bokeh version to 2.1.1, added pydeck support
- PR #168 Replaced interactive datashader callback throttling to debouncing
- PR #169 Added Node-Inspect Neighbor widget to graph charts
Added edge-curving
- PR #173 Updates to installation docs
- PR #180 Added documentation for deploying as a multi-user dashboard
## Bug Fixes
- PR #161 fixed layouts bugs
- PR #171 pydeck 0.4.1 fixes and geo_mapper optimizations
- PR #180 Datashader version pin fixing issues with cuDF 0.14+
- PR #186 syntax fixes to avoid CI failure
# cuxfilter 0.14.0 (03 Jun 2020)
## New Features
- PR #136 Local gpuCI build script
- PR #148 Added dask_cudf support to all charts
## Improvements
- PR #129 optimizations to groupby query, using boolean masks
- PR #135 implemented stateless non-aggregate querying
- PR #148 made groupby pre-computations consistent, made dashboard querying stateless
- PR #151 implemented autoscaling true/false for bar, line charts
add_chart now dynamically updates a running dashboard in real-time(page-refresh required)
- PR #155 Add git commit to conda package
## Bug Fixes
- PR #127 fixed logic for calculating datatiles for 2d and 3d choropleth charts
- PR #128, #130 Bug fixes and test updates
- PR #131 Filter fix for non aggregate charts(scatter, scattter-geo, line, stacked-lines, heatmap)
- PR #132 Aggregate filter accuracy fix
- PR #133 Added Nodejs dependency in build files
- PR #148 logic fixes to datatile compute and using vectorized operations instead of numba kernels for datatile compute
- PR #151 docs and minor bug fixes, also fixed dashboard server notebook issues
- PR #165 Fix issue with incorrect docker image being used in local build script
# cuxfilter 0.13.0 (31 March 2020)
## New Features
- PR #111 Add notebooks testing to CI
## Improvements
- PR #95 Faster import time, segregated in-notebook asset loading to save import costs, updated tests
- PR #114 Major refactor - added choropleth(2d and 3d) deckgl chart, updated chart import to skip library names. Major bug fixes
## Bug Fixes
- PR #100 Bug Fixes - Added NaN value handling for custom bin computations in numba kernels
- PR #104 Bug Fixes - fixed naming issue for geo column for choropleth3d charts, which did not allow all-small-caps names
- PR #112 - updated bokeh dependency to be 1.* instead of >1
- PR #122 Critical bug fix - resolves rendering issue related to deckgl charts
# cuxfilter 0.12.0 (4 Feb 2020)
## New Features
- PR #111 Add notebooks testing to CI
## Improvements
- PR #84 Updated Docs and Readme with conda nightly install instructions for cuxfilter version 0.12
- PR #86 Implemented #79 - cudatashader replaced by datashader(>=0.9) with cudf & dask_cudf support
- PR #90 Implemented deck-gl_bokeh plugin and integrated with cuxfilter with layout and theme options
- PR #93 Added typescript bindings in conda build package and added tests
- PR #89 Fixed headless chrome sandbox for dashboard preview feature issue mentioned in #88
and added full support for deck.gl/polygon layer
- PR #87 Implemented jupyter-server-proxy as discussed in #73
## Bug Fixes
- PR #78 Fix gpuCI GPU build script
- PR #83 Fix conda upload
# cuxfilter 0.2.0 (19 Sep 2019)
## New Features
- Initial release of cuxfilter python package
## Improvements
- Massive refactor and architecture change compared to the js (client-server) architecture
## Bug Fixes
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/cuxfilter/build.sh
|
#!/bin/bash
# Copyright (c) 2019, NVIDIA CORPORATION.

# cuxfilter build script
#
# This script is used to build the component(s) in this repo from
# source, and can be called with various options to customize the
# build as needed (see the help output for details)

# Abort script on first error
set -e

NUMARGS=$#
ARGS=$*

# NOTE: ensure all dir changes are relative to the location of this
# script, and that this script resides in the repo dir!
# Quoted so the script also works from a checkout path containing spaces.
REPODIR=$(cd "$(dirname "$0")"; pwd)

VALIDARGS="clean cuxfilter -v -g -n --allgpuarch -h"
HELP="$0 [clean] [cuxfilter] [-v] [-g] [-n] [-h]
clean - remove all existing build artifacts and configuration (start
over)
cuxfilter - build the cuxfilter library only
-v - verbose build mode
-g - build for debug
-n - no install step
--allgpuarch - build for all supported GPU architectures
-h - print this text
"
CUXFILTER_BUILD_DIR=${REPODIR}/python/cuxfilter/build
BUILD_DIRS="${CUXFILTER_BUILD_DIR}"

# Set defaults for vars modified by flags to this script
VERBOSE=""
BUILD_TYPE=Release
INSTALL_TARGET=install
BENCHMARKS=OFF
BUILD_ALL_GPU_ARCH=0

# Set defaults for vars that may not have been defined externally
# FIXME: if INSTALL_PREFIX is not set, check PREFIX, then check
# CONDA_PREFIX, but there is no fallback from there!
INSTALL_PREFIX=${INSTALL_PREFIX:=${PREFIX:=${CONDA_PREFIX}}}
PARALLEL_LEVEL=${PARALLEL_LEVEL:=""}

# Returns success (0) iff "$1" was passed on the command line.
function hasArg {
    (( ${NUMARGS} != 0 )) && (echo " ${ARGS} " | grep -q " $1 ")
}

if hasArg -h; then
    echo "${HELP}"
    exit 0
fi

# Check for valid usage
if (( ${NUMARGS} != 0 )); then
    for a in ${ARGS}; do
        if ! (echo " ${VALIDARGS} " | grep -q " ${a} "); then
            echo "Invalid option: ${a}"
            exit 1
        fi
    done
fi

# Process flags
if hasArg -v; then
    VERBOSE=1
fi
if hasArg -g; then
    BUILD_TYPE=Debug
fi
if hasArg -n; then
    INSTALL_TARGET=""
fi
if hasArg --allgpuarch; then
    BUILD_ALL_GPU_ARCH=1
fi
# NOTE(review): "benchmarks" is not in VALIDARGS, so this branch is
# unreachable (the validation loop above exits first) and BENCHMARKS is
# never consumed below. Kept for parity with sibling RAPIDS build scripts;
# add "benchmarks" to VALIDARGS if it should become usable.
if hasArg benchmarks; then
    BENCHMARKS="ON"
fi

# If clean given, run it prior to any other steps
if hasArg clean; then
    # If the dirs to clean are mounted dirs in a container, the
    # contents should be removed but the mounted dirs will remain.
    # The find removes all contents but leaves the dirs, the rmdir
    # attempts to remove the dirs but can fail safely.
    for bd in ${BUILD_DIRS}; do
        if [ -d ${bd} ]; then
            find ${bd} -mindepth 1 -delete
            rmdir ${bd} || true
        fi
    done
fi

# NOTE(review): GPU_ARCH is computed here but never passed to the python
# build below -- TODO confirm whether it is still needed.
if (( ${BUILD_ALL_GPU_ARCH} == 0 )); then
    GPU_ARCH="-DGPU_ARCHS="
    echo "Building for the architecture of the GPU in the system..."
else
    GPU_ARCH="-DGPU_ARCHS=ALL"
    echo "Building for *ALL* supported GPU architectures..."
fi

################################################################################
# Build and install the cuxfilter Python package
if (( ${NUMARGS} == 0 )) || hasArg cuxfilter; then
    cd "${REPODIR}/python"
    if [[ ${INSTALL_TARGET} != "" ]]; then
        python setup.py build_ext --inplace
        python setup.py install --single-version-externally-managed --record=record.txt
    else
        # NOTE(review): LIBCUXFILTER_BUILD_DIR is never defined in this
        # script, so the flag expands empty -- TODO confirm intent.
        python setup.py build_ext --inplace --library-dir=${LIBCUXFILTER_BUILD_DIR}
    fi
fi
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/cuxfilter/dependencies.yaml
|
# Dependency list for https://github.com/rapidsai/dependency-file-generator
files:
all:
output: conda
matrix:
cuda: ["11.8", "12.0"]
arch: [x86_64]
includes:
- build_wheels
- cudatoolkit
- checks
- docs
- notebook
- py_version
- run
- test_python
test_python:
output: none
includes:
- cudatoolkit
- py_version
- test_python
test_notebooks:
output: none
includes:
- cudatoolkit
- notebook
- py_version
checks:
output: none
includes:
- checks
- py_version
docs:
output: none
includes:
- cudatoolkit
- docs
- py_version
py_build:
output: pyproject
pyproject_dir: python
extras:
table: build-system
includes:
- build_wheels
py_run:
output: pyproject
pyproject_dir: python
extras:
table: project
includes:
- run
py_test:
output: pyproject
pyproject_dir: python
extras:
table: project.optional-dependencies
key: test
includes:
- test_python
channels:
- rapidsai
- rapidsai-nightly
- conda-forge
- nvidia
dependencies:
build_wheels:
common:
- output_types: pyproject
packages:
- wheel
- setuptools
cudatoolkit:
specific:
- output_types: conda
matrices:
- matrix:
cuda: "12.0"
packages:
- cuda-version=12.0
- matrix:
cuda: "11.8"
packages:
- cuda-version=11.8
- cudatoolkit
- matrix:
cuda: "11.5"
packages:
- cuda-version=11.5
- cudatoolkit
- matrix:
cuda: "11.4"
packages:
- cuda-version=11.4
- cudatoolkit
- matrix:
cuda: "11.2"
packages:
- cuda-version=11.2
- cudatoolkit
checks:
common:
- output_types: [conda, requirements]
packages:
- pre-commit
docs:
common:
- output_types: [conda, requirements]
packages:
- ipykernel
- ipython
- jupyter_sphinx
- nbsphinx
- numpydoc
- pandoc<=2.0.0 # We should check and fix all "<=" pinnings
- pydata-sphinx-theme
- recommonmark
- sphinx>=7.2.5
- sphinx_rtd_theme
- sphinx-markdown-tables
- sphinxcontrib-websupport
notebook:
common:
- output_types: [conda, requirements]
packages:
- ipython
- notebook>=0.5.0
- output_types: [conda]
packages:
- cugraph==23.12.*
- dask-cuda==23.12.*
py_version:
specific:
- output_types: conda
matrices:
- matrix:
py: "3.9"
packages:
- python=3.9
- matrix:
py: "3.10"
packages:
- python=3.10
- matrix:
packages:
- python>=3.9,<3.11
run:
common:
- output_types: [conda, requirements, pyproject]
packages:
- bokeh>=3.1
- cudf==23.12.*
- cuspatial==23.12.*
- dask-cudf==23.12.*
- datashader>=0.15
- geopandas>=0.11.0
- holoviews>=1.16.0
- jupyter-server-proxy
- numba>=0.57
- numpy>=1.21
- packaging
- panel>=1.0
- output_types: conda
packages:
- cupy>=12.0.0
- nodejs>=14
- libwebp
- output_types: [requirements, pyproject]
packages:
- cupy-cuda11x>=12.0.0
test_python:
common:
- output_types: [conda, requirements, pyproject]
packages:
- ipython
- pytest
- pytest-cov
- pytest-xdist
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/cuxfilter/EXTERNAL_TESTS.md
|
# External Tests Workflow
This document provides an overview of the GitHub Actions workflow (`.github/workflows/test-external.yaml`) and associated script (`ci/test_external.sh`) for running external tests on specified Python libraries, such as Datashader and Holoviews.
## Purpose
The purpose of this workflow is to perform GPU testing on external party dependencies. It involves the following steps:
1. Create a Conda environment named `test_external`.
2. Install external dependencies specified in `ci/utils/external_dependencies.yaml`.
3. Clone specified Python libraries from their respective GitHub repositories.
4. Install test dependencies for each library.
5. Run GPU tests on the specified libraries using Pytest.
## Workflow Configuration
### Workflow Trigger
The workflow is triggered in two ways:
1. **Manual Trigger:** You can manually trigger the workflow by selecting the "GPU testing for external party dependencies" workflow and providing the following inputs:
- `external-project`: Specify the project to test (`datashader`, `holoviews`, or `all`).
- `pr_number`: (Optional) If testing a pull request, provide the PR number.
2. **Scheduled Trigger:** The workflow runs automatically every Sunday evening (Pacific Time) using a cron schedule (`0 0 * * 1`).
## Script (`test_external.sh`)
The script is responsible for setting up the Conda environment, installing dependencies, cloning specified Python libraries, and running GPU tests. Key steps in the script include:
1. **Create Conda Environment:** Creates a Conda environment named `test_external` and installs external dependencies from `external_dependencies.yaml`.
2. **Clone Repositories:** Clones GitHub repositories of specified Python libraries (`datashader`, `holoviews`, or both).
3. **Install Dependencies:** Installs test dependencies for each library using `python -m pip install -e .[tests]`.
4. **Run Tests:** Gathers GPU tests containing the keywords `cudf` and runs them using Pytest. The number of processes is set to 8 by default, but specific tests (`test_quadmesh.py`) are run separately.
## Running External Tests
To manually trigger the workflow and run external tests:
1. Navigate to the "Actions" tab in your GitHub repository.
2. Select "GPU testing for external party dependencies" workflow.
3. Click the "Run workflow" button.
4. Provide inputs for `external-project` and `pr_number` if needed.
## Contributing
Contributors can use this workflow to test changes in external libraries on the RAPIDS AI ecosystem. When contributing, follow these steps:
1. Make changes to the external library code.
2. Push the changes to your fork or branch.
3. Trigger the workflow manually by selecting the appropriate inputs.
For additional information, refer to the [GitHub Actions documentation](https://docs.github.com/en/actions).
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/cuxfilter/CONTRIBUTING.md
|
# Contributing to cuxfilter
If you are interested in contributing to cuxfilter, your contributions will fall
into three categories:
1. You want to report a bug, feature request, or documentation issue
- File an [issue](https://github.com/rapidsai/cuxfilter/issues/new/choose)
describing what you encountered or what you want to see changed.
- The RAPIDS team will evaluate the issues and triage them, scheduling
them for a release. If you believe the issue needs priority attention
comment on the issue to notify the team.
2. You want to propose a new Feature and implement it
- Post about your intended feature, and we shall discuss the design and
implementation.
- Once we agree that the plan looks good, go ahead and implement it, using
the [code contributions](#code-contributions) guide below.
3. You want to implement a feature or bug-fix for an outstanding issue
- Follow the [code contributions](#code-contributions) guide below.
- If you need more context on a particular issue, please ask and we shall
provide.
## Code contributions
### Your first issue
1. Read the project's [README.md](https://github.com/rapidsai/cuxfilter/blob/main/README.md)
to learn how to setup the development environment
2. Find an issue to work on. The best way is to look for the [good first issue](https://github.com/rapidsai/cuxfilter/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)
or [help wanted](https://github.com/rapidsai/cuxfilter/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) labels
3. Comment on the issue saying you are going to work on it
4. Code! Make sure to update unit tests!
5. When done, [create your pull request](https://github.com/rapidsai/cuxfilter/compare)
6. Verify that CI passes all [status checks](https://help.github.com/articles/about-status-checks/). Fix if needed
7. Wait for other developers to review your code and update code as needed
8. Once reviewed and approved, a RAPIDS developer will merge your pull request
Remember, if you are unsure about anything, don't hesitate to comment on issues
and ask for clarifications!
### Seasoned developers
Once you have gotten your feet wet and are more comfortable with the code, you
can look at the prioritized issues of our next release in our [project boards](https://github.com/rapidsai/cuxfilter/projects).
> **Pro Tip:** Always look at the release board with the highest number for
issues to work on. This is where RAPIDS developers also focus their efforts.
Look at the unassigned issues, and find an issue you are comfortable with
contributing to. Start with _Step 3_ from above, commenting on the issue to let
others know you are working on it. If you have any questions related to the
implementation of the issue, ask them in the issue instead of the PR.
## Attribution
Portions adopted from https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md
## Setting up your build environment
The following instructions are for developers and contributors to cuxfilter OSS development. These instructions are tested on Linux Ubuntu 16.04 & 18.04. Use these instructions to build cuxfilter from source and contribute to its development. Other operating systems may be compatible, but are not currently tested.
### Code Formatting
#### Python
cuxfilter uses [Black](https://black.readthedocs.io/en/stable/) and
[flake8](http://flake8.pycqa.org/en/latest/) to ensure a consistent code format
throughout the project. `Black` and `flake8` can be installed with
`conda` or `pip`:
```bash
conda install black flake8
```
```bash
pip install black flake8
```
These tools are used to auto-format the Python code in the repository.
Additionally, there is a CI check in place to enforce
that committed code follows our standards. You can use the tools to
automatically format your python code by running:
```bash
black python/cuxfilter
```
and then check the syntax of your Python by running:
```bash
flake8 python/cuxfilter
```
Additionally, many editors have plugins that will apply `Black` as
you edit files, as well as use `flake8` to report any style / syntax issues.
Optionally, you may wish to setup [pre-commit hooks](https://pre-commit.com/)
to automatically run `Black`, and `flake8` when you make a git commit.
This can be done by installing `pre-commit` via `conda` or `pip`:
```bash
conda install -c conda-forge pre_commit
```
```bash
pip install pre-commit
```
and then running:
```bash
pre-commit install
```
from the root of the cuxfilter repository. Now `Black` and `flake8` will be
run each time you commit changes.
## Script to build cuxfilter from source
### Build from Source
To install cuxfilter from source, ensure the dependencies are met and follow the steps below:
- Clone the repository and submodules
```bash
CUXFILTER_HOME=$(pwd)/cuxfilter
git clone https://github.com/rapidsai/cuxfilter.git $CUXFILTER_HOME
cd $CUXFILTER_HOME
```
- Create the conda development environment `cuxfilter_dev`:
```bash
# create the conda environment (assuming in base `cuxfilter` directory)
conda env create --name cuxfilter_dev --file conda/environments/all_cuda-118_arch-x86_64.yaml
# activate the environment
source activate cuxfilter_dev
```
- Build the `cuxfilter` python packages, in the `python` folder:
```bash
$ cd $CUXFILTER_HOME/python
$ python setup.py install
```
- To run tests (Optional):
```bash
$ cd $CUXFILTER_HOME/python/cuxfilter/tests
$ pytest
```
Done! You are ready to develop for the cuxfilter OSS project
### Adding support for new viz libraries
cuxfilter.py acts like a connector library and it is easy to add support for new libraries. The cuxfilter/charts/core directory has all the core chart classes which can be inherited and used to implement a few (viz related) functions and support dashboarding in cuxfilter directly.
You can see the examples to implement viz libraries in the bokeh and datashader directories.
Current plan is to add support for the following libraries apart from bokeh and datashader:
1. deckgl
Open a feature request for requesting support for libraries other than the above mentioned ones.
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/cuxfilter/LICENSE
|
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
rapidsai_public_repos
|
rapidsai_public_repos/cuxfilter/VERSION
|
23.12.00
| 0 |
rapidsai_public_repos/cuxfilter
|
rapidsai_public_repos/cuxfilter/python/pyproject.toml
|
# Copyright (c) 2023, NVIDIA CORPORATION.
[build-system]
build-backend = "setuptools.build_meta"
requires = [
"setuptools",
"wheel",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../dependencies.yaml and run `rapids-dependency-file-generator`.
[project]
name = "cuxfilter"
dynamic = ["version"]
description = "GPU accelerated cross filtering with cuDF"
readme = { file = "README.md", content-type = "text/markdown" }
authors = [
{name = "NVIDIA Corporation"},
]
license = { text = "Apache 2.0" }
requires-python = ">=3.9"
dependencies = [
"bokeh>=3.1",
"cudf==23.12.*",
"cupy-cuda11x>=12.0.0",
"cuspatial==23.12.*",
"dask-cudf==23.12.*",
"datashader>=0.15",
"geopandas>=0.11.0",
"holoviews>=1.16.0",
"jupyter-server-proxy",
"numba>=0.57",
"numpy>=1.21",
"packaging",
"panel>=1.0",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../dependencies.yaml and run `rapids-dependency-file-generator`.
classifiers = [
"Intended Audience :: Developers",
"Topic :: Database",
"Topic :: Scientific/Engineering :: Visualization",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
]
[project.optional-dependencies]
test = [
"ipython",
"pytest",
"pytest-cov",
"pytest-xdist",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../dependencies.yaml and run `rapids-dependency-file-generator`.
[project.urls]
Homepage = "https://github.com/rapidsai/cuxfilter"
Documentation = "https://docs.rapids.ai/api/cuxfilter/stable/"
[tool.setuptools]
license-files = ["LICENSE"]
[tool.setuptools.dynamic]
version = {file = "cuxfilter/VERSION"}
| 0 |
rapidsai_public_repos/cuxfilter
|
rapidsai_public_repos/cuxfilter/python/README.md
|
# <div align="left"><img src="https://rapids.ai/assets/images/rapids_logo.png" width="90px"/> cuxfilter
cuxfilter ( ku-cross-filter ) is a [RAPIDS](https://github.com/rapidsai) framework to connect web visualizations to GPU accelerated crossfiltering. Inspired by the javascript version of the [original](https://github.com/crossfilter/crossfilter), it enables interactive and super fast multi-dimensional filtering of 100 million+ row tabular datasets via [cuDF](https://github.com/rapidsai/cudf).
## RAPIDS Viz
cuxfilter is one of the core projects of the “RAPIDS viz” team. Taking the axiom that “a slider is worth a thousand queries” from @lmeyerov to heart, we want to enable fast exploratory data analytics through an easier-to-use pythonic notebook interface.
As there are many fantastic visualization libraries available for the web, our general principle is not to create our own viz library, but to enhance others with faster acceleration, larger datasets, and better dev UX. **Basically, we want to take the headache out of interconnecting multiple charts to a GPU backend, so you can get to visually exploring data faster.**
By the way, cuxfilter is best used to interact with large (1 million+) tabular datasets. GPU’s are fast, but accessing that speedup requires some architecture overhead that isn’t worthwhile for small datasets.
For more detailed requirements, see below.
## cuxfilter Architecture
The current version of cuxfilter leverages jupyter notebook and bokeh server to reduce architecture and installation complexity.

### Open Source Projects
cuxfilter wouldn’t be possible without using these great open source projects:
- [Bokeh](https://docs.bokeh.org/en/latest/)
- [DataShader](http://datashader.org/)
- [Panel](https://panel.pyviz.org/)
- [Falcon](https://github.com/uwdata/falcon)
- [Jupyter](https://jupyter.org/about)
### Where is the original cuxfilter and Mortgage Viz Demo?
The original version (0.2) of cuxfilter, most known for the backend powering the Mortgage Viz Demo, has been moved into the [`GTC-2018-mortgage-visualization`](https://github.com/rapidsai/cuxfilter/tree/GTC-2018-mortgage-visualization) branch. As it has a much more complicated backend and javascript API, we’ve decided to focus more on the streamlined notebook focused version here.
## Usage
### Example 1
[](https://studiolab.sagemaker.aws/import/github/rapidsai/cuxfilter/blob/branch-22.02/notebooks/auto_accidents_example.ipynb) [<img src="https://img.shields.io/badge/-Setup Studio Lab Environment-gray.svg">](./notebooks/README.md#amazon-sagemaker-studio-lab)
[](https://colab.research.google.com/github/rapidsai/cuxfilter/blob/branch-22.02/notebooks/auto_accidents_example.ipynb) [<img src="https://img.shields.io/badge/-Setup Colab Environment-gray.svg">](./notebooks/README.md#google-colab)
```python
import cuxfilter
#update data_dir if you have downloaded datasets elsewhere
DATA_DIR = './data'
from cuxfilter.sampledata import datasets_check
datasets_check('auto_accidents', base_dir=DATA_DIR)
cux_df = cuxfilter.DataFrame.from_arrow(DATA_DIR+'/auto_accidents.arrow')
cux_df.data['ST_CASE'] = cux_df.data['ST_CASE'].astype('float64')
label_map = {1: 'Sunday', 2: 'Monday', 3: 'Tuesday', 4: 'Wednesday', 5: 'Thursday', 6: 'Friday', 7: 'Saturday', 9: 'Unknown'}
cux_df.data['DAY_WEEK_STR'] = cux_df.data.DAY_WEEK.map(label_map)
gtc_demo_red_blue_palette = [ "#3182bd", "#6baed6", "#7b8ed8", "#e26798", "#ff0068" , "#323232" ]
#declare charts
chart1 = cuxfilter.charts.scatter(x='dropoff_x', y='dropoff_y', aggregate_col='DAY_WEEK', aggregate_fn='mean',
color_palette=gtc_demo_red_blue_palette, tile_provider='CartoLight', unselected_alpha=0.2,
pixel_shade_type='linear')
chart2 = cuxfilter.charts.multi_select('YEAR')
chart3 = cuxfilter.charts.bar('DAY_WEEK_STR')
chart4 = cuxfilter.charts.bar('MONTH')
#declare dashboard
d = cux_df.dashboard([chart1, chart3, chart4], sidebar=[chart2], layout=cuxfilter.layouts.feature_and_double_base, title='Auto Accident Dataset')
# run the dashboard as a webapp:
# Bokeh and Datashader based charts also have a `save` tool on the side toolbar, which can download and save the individual chart when interacting with the dashboard.
# d.show('jupyter-notebook/lab-url')
#run the dashboard within the notebook cell
d.app()
```

### Example 2
[](https://studiolab.sagemaker.aws/import/github/rapidsai/cuxfilter/blob/branch-22.02/notebooks/Mortgage_example.ipynb) [<img src="https://img.shields.io/badge/-Setup Studio Lab Environment-gray.svg">](./notebooks/README.md#amazon-sagemaker-studio-lab)
[](https://colab.research.google.com/github/rapidsai/cuxfilter/blob/branch-22.02/notebooks/Mortgage_example.ipynb) [<img src="https://img.shields.io/badge/-Setup Colab Environment-gray.svg">](./notebooks/README.md#google-colab)
```python
import cuxfilter
#update data_dir if you have downloaded datasets elsewhere
DATA_DIR = './data'
from cuxfilter.sampledata import datasets_check
datasets_check('mortgage', base_dir=DATA_DIR)
cux_df = cuxfilter.DataFrame.from_arrow(DATA_DIR + '/146M_predictions_v2.arrow')
geoJSONSource='https://raw.githubusercontent.com/rapidsai/cuxfilter/GTC-2018-mortgage-visualization/javascript/demos/GTC%20demo/src/data/zip3-ms-rhs-lessprops.json'
chart0 = cuxfilter.charts.choropleth( x='zip', color_column='delinquency_12_prediction', color_aggregate_fn='mean',
elevation_column='current_actual_upb', elevation_factor=0.00001, elevation_aggregate_fn='sum',
geoJSONSource=geoJSONSource
)
chart2 = cuxfilter.charts.bar('delinquency_12_prediction',data_points=50)
chart3 = cuxfilter.charts.range_slider('borrower_credit_score',data_points=50)
chart1 = cuxfilter.charts.drop_down('dti')
#declare dashboard
d = cux_df.dashboard([chart0, chart2],sidebar=[chart3, chart1], layout=cuxfilter.layouts.feature_and_double_base,theme = cuxfilter.themes.dark, title='Mortgage Dashboard')
# run the dashboard within the notebook cell
# Bokeh and Datashader based charts also have a `save` tool on the side toolbar, which can download and save the individual chart when interacting with the dashboard.
# d.app()
#run the dashboard as a webapp:
# if running on a port other than localhost:8888, run d.show(jupyter-notebook-lab-url:port)
d.show()
```

## Documentation
Full documentation can be found [on the RAPIDS docs page](https://docs.rapids.ai/api/cuxfilter/stable/).
Troubleshooting help can be found [on our troubleshooting page](https://docs.rapids.ai/api/cuxfilter/stable/installation.html#troubleshooting).
## General Dependencies
- python
- cudf
- datashader
- cupy
- panel
- bokeh
- pyproj
- geopandas
- pyppeteer
- jupyter-server-proxy
## Quick Start
Please see the [Demo Docker Repository](https://hub.docker.com/r/rapidsai/rapidsai/), choosing a tag based on the NVIDIA CUDA version you’re running. This provides a ready to run Docker container with example notebooks and data, showcasing how you can utilize cuxfilter, cuDF and other RAPIDS libraries.
## Installation
### CUDA/GPU requirements
- CUDA 11.2+
- NVIDIA driver 450.80.02+
- Pascal architecture or better (Compute Capability >=6.0)
### Conda
cuxfilter can be installed with conda ([miniconda](https://conda.io/miniconda.html), or the full [Anaconda distribution](https://www.anaconda.com/download)) from the `rapidsai` channel:
For nightly version `cuxfilter version == 23.12` :
```bash
# for CUDA 12.0
conda install -c rapidsai-nightly -c conda-forge -c nvidia \
cuxfilter=23.12 python=3.10 cuda-version=12.0
# for CUDA 11.8
conda install -c rapidsai-nightly -c conda-forge -c nvidia \
cuxfilter=23.12 python=3.10 cuda-version=11.8
```
For the stable version of `cuxfilter` :
```bash
# for CUDA 12.0
conda install -c rapidsai -c conda-forge -c nvidia \
cuxfilter python=3.10 cuda-version=12.0
# for CUDA 11.8
conda install -c rapidsai -c conda-forge -c nvidia \
cuxfilter python=3.10 cuda-version=11.8
```
Note: cuxfilter is supported only on Linux, and with Python versions 3.8 and later.
### PyPI
Install cuxfilter from PyPI using pip:
```bash
# for CUDA 12.0
pip install cuxfilter-cu12 --extra-index-url=https://pypi.nvidia.com
# for CUDA 11.8
pip install cuxfilter-cu11 --extra-index-url=https://pypi.nvidia.com
```
See the [Get RAPIDS version picker](https://rapids.ai/start.html) for more OS and version info.
### Build/Install from Source
See [build instructions](CONTRIBUTING.md#setting-up-your-build-environment).
## Troubleshooting
**bokeh server in jupyter lab**
To run the bokeh server in a jupyter lab, install jupyterlab dependencies
```bash
conda install -c conda-forge jupyterlab
jupyter labextension install @pyviz/jupyterlab_pyviz
jupyter labextension install jupyterlab_bokeh
```
## Download Datasets
1. Auto download datasets
The notebooks inside `python/notebooks` already have a check function which verifies whether the example dataset is downloaded, and downloads it if it's not.
2. Download manually
While in the directory you want the datasets to be saved, execute the following
> Note: Auto Accidents dataset has corrupted coordinate data from the years 2012-2014
```bash
#go to the environment where cuxfilter is installed. Skip if in a docker container
source activate test_env
#download and extract the datasets
curl https://s3.amazonaws.com/nyc-tlc/trip+data/yellow_tripdata_2015-01.csv --create-dirs -o ./nyc_taxi.csv
curl https://data.rapids.ai/viz-data/146M_predictions_v2.arrow.gz --create-dirs -o ./146M_predictions_v2.arrow.gz
curl https://data.rapids.ai/viz-data/auto_accidents.arrow.gz --create-dirs -o ./auto_accidents.arrow.gz
python -c "from cuxfilter.sampledata import datasets_check; datasets_check(base_dir='./')"
```
## Guides and Layout Templates
Currently supported layout templates and example code can be found on the [layouts page](https://rapidsai.github.io/cuxfilter/layouts/Layouts.html).
### Currently Supported Charts
| Library | Chart type |
| ------------- | ------------------------------------------------------------------------------------------------ |
| bokeh | bar |
| datashader | scatter, scatter_geo, line, stacked_lines, heatmap, graph |
| panel_widgets | range_slider, date_range_slider, float_slider, int_slider, drop_down, multi_select, card, number |
| custom | view_dataframe |
| deckgl | choropleth(3d and 2d) |
## Contributing Developers Guide
cuxfilter acts like a connector library and it is easy to add support for new libraries. The `python/cuxfilter/charts/core` directory has all the core chart classes which can be inherited and used to implement a few (viz related) functions and support dashboarding in cuxfilter directly.
You can see the examples to implement viz libraries in the bokeh and cudatashader directories. Let us know if you would like to add a chart by opening a feature request issue or submitting a PR.
For more details, check out the [contributing guide](./CONTRIBUTING.md).
## Future Work
cuxfilter development is in early stages and on going. See what we are planning next on the [projects page](https://github.com/rapidsai/cuxfilter/projects).
| 0 |
rapidsai_public_repos/cuxfilter
|
rapidsai_public_repos/cuxfilter/python/setup.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
from setuptools import find_packages, setup
# Discover the cuxfilter package and all of its subpackages; tests, docs and
# notebooks are excluded from the distributed wheel/sdist.
packages = find_packages(
    include=["cuxfilter", "cuxfilter.*"],
    exclude=("tests", "docs", "notebooks"),
)

setup(
    packages=packages,
    # Ship the VERSION file with every package so cuxfilter._version can
    # read it at import time via importlib.resources.
    package_data={key: ["VERSION"] for key in packages},
    zip_safe=False,
)
| 0 |
rapidsai_public_repos/cuxfilter
|
rapidsai_public_repos/cuxfilter/python/LICENSE
|
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
rapidsai_public_repos/cuxfilter
|
rapidsai_public_repos/cuxfilter/python/.coveragerc
|
# Configuration file for Python coverage tests
[run]
source = cuxfilter
| 0 |
rapidsai_public_repos/cuxfilter/python
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/dataframe.py
|
import cudf
import dask_cudf
import pyarrow as pa
from typing import Type
from cuxfilter.dashboard import DashBoard
from cuxfilter.layouts import single_feature
from cuxfilter.themes import default
from cuxfilter.assets import notebook_assets
def read_arrow(source):
    """Read an Arrow IPC stream (file path or readable source) and return
    the full contents as a single pyarrow Table."""
    return pa.RecordBatchStreamReader(source).read_all()
# class DataFrame:
class DataFrame:
    """
    A cuxfilter GPU DataFrame object.

    Wraps a cudf.DataFrame or dask_cudf.DataFrame (zero-copy reference) and
    is the entry point for building crossfiltered dashboards.
    """

    # wrapped cudf/dask_cudf DataFrame (zero-copy reference)
    data: Type[cudf.DataFrame] = None
    # True when created via load_graph(); `data` then holds the node table
    is_graph = False
    # edge table, only populated when is_graph is True
    edges: Type[cudf.DataFrame] = None

    @classmethod
    def from_arrow(cls, dataframe_location):
        """
        read an arrow file from disk as cuxfilter.DataFrame

        Parameters
        ----------
        dataframe_location: str or arrow in-memory table

        Returns
        -------
        cuxfilter.DataFrame object

        Examples
        --------
        Read dataframe as an arrow file from disk

        >>> import cuxfilter
        >>> import pyarrow as pa
        >>> # create a temporary arrow table
        >>> arrowTable = pa.Table.from_arrays([['foo', 'bar']], names=['name'])
        >>> # read arrow table, can also read .arrow file paths directly
        >>> cux_df = cuxfilter.DataFrame.from_arrow(arrowTable)
        """
        if isinstance(dataframe_location, str):
            # a string is treated as a path to an .arrow file on disk
            df = cudf.DataFrame.from_arrow(read_arrow(dataframe_location))
        else:
            # otherwise assume an in-memory arrow table
            df = cudf.DataFrame.from_arrow(dataframe_location)
        return cls(df)

    @classmethod
    def from_dataframe(cls, dataframe):
        """
        create a cuxfilter.DataFrame from cudf.DataFrame/dask_cudf.DataFrame
        (zero-copy reference)

        Parameters
        ----------
        dataframe: cudf.DataFrame or dask_cudf.DataFrame

        Returns
        -------
        cuxfilter.DataFrame object

        Examples
        --------
        Read dataframe from a cudf.DataFrame/dask_cudf.DataFrame

        >>> import cuxfilter
        >>> import cudf
        >>> cudf_df = cudf.DataFrame(
        >>>     {
        >>>         'key': [0, 1, 2, 3, 4],
        >>>         'val':[float(i + 10) for i in range(5)]
        >>>     }
        >>> )
        >>> cux_df = cuxfilter.DataFrame.from_dataframe(cudf_df)
        """
        return cls(dataframe)

    @classmethod
    def load_graph(cls, graph):
        """
        create a cuxfilter.DataFrame from cudf.DataFrame/dask_cudf.DataFrame
        (zero-copy reference) from a graph object

        Parameters
        ----------
        graph: tuple object (nodes, edges) where nodes and edges are
            cudf DataFrames

        Returns
        -------
        cuxfilter.DataFrame object

        Examples
        --------
        load graph from cugraph object

        >>> import cuxfilter
        >>> import cudf, cugraph
        >>> edges = cudf.DataFrame(
        >>>     {
        >>>         'source': [0, 1, 2, 3, 4],
        >>>         'target':[0,1,2,3,4],
        >>>         'weight':[4,4,2,6,7],
        >>>     }
        >>> )
        >>> G = cugraph.Graph()
        >>> G.from_cudf_edgelist(edges, destination='target')
        >>> cux_df = cuxfilter.DataFrame.load_graph((G.nodes(), G.edges()))

        load graph from (nodes, edges)

        >>> import cuxfilter
        >>> import cudf
        >>> nodes = cudf.DataFrame(
        >>>     {
        >>>         'vertex': [0, 1, 2, 3, 4],
        >>>         'x':[0,1,2,3,4],
        >>>         'y':[4,4,2,6,7],
        >>>         'attr': [0,1,1,1,1]
        >>>     }
        >>> )
        >>> edges = cudf.DataFrame(
        >>>     {
        >>>         'source': [0, 1, 2, 3, 4],
        >>>         'target':[0,1,2,3,4],
        >>>         'weight':[4,4,2,6,7],
        >>>     }
        >>> )
        >>> cux_df = cuxfilter.DataFrame.load_graph((nodes,edges))
        """
        if isinstance(graph, tuple):
            nodes, edges = graph
            df = cls(nodes)
            df.is_graph = True
            df.edges = edges
            return df
        raise ValueError(
            "Expected value for graph - (nodes[cuDF], edges[cuDF])"
        )

    def __init__(self, data):
        self.data = data

    def validate_dask_index(self, data):
        # dask_cudf frames need known partition divisions for correct
        # filtering; recompute the index when divisions are unknown
        if isinstance(data, dask_cudf.DataFrame) and not (
            data.known_divisions
        ):
            return data.set_index(data.index.to_series(), npartitions=2)
        return data

    def preprocess_data(self):
        # normalize indices on the main frame and, for graphs, the edges too
        self.data = self.validate_dask_index(self.data)
        if self.is_graph:
            self.edges = self.validate_dask_index(self.edges)

    def dashboard(
        self,
        charts: list,
        sidebar: list = None,
        layout=single_feature,
        theme=default,
        title="Dashboard",
        data_size_widget=True,
        warnings=False,
        layout_array=None,
    ):
        """
        Creates a cuxfilter.DashBoard object

        Parameters
        ----------
        charts: list
            list of cuxfilter.charts
        sidebar: list, optional
            list of cuxfilter widgets placed in the sidebar, default None
        layout: cuxfilter.layouts
        theme: cuxfilter.themes, default cuxfilter.themes.default.
        title: str
            title of the dashboard, default "Dashboard"
        data_size_widget: boolean
            flag to determine whether to display the current datapoints
            selected in the dashboard, default True
        warnings: boolean
            flag to disable or enable runtime warnings related to layouts,
            default False

        Examples
        --------
        >>> import cudf
        >>> import cuxfilter
        >>> from cuxfilter.charts import bokeh
        >>> df = cudf.DataFrame(
        >>>     {
        >>>         'key': [0, 1, 2, 3, 4],
        >>>         'val':[float(i + 10) for i in range(5)]
        >>>     }
        >>> )
        >>> cux_df = cuxfilter.DataFrame.from_dataframe(df)
        >>> line_chart_1 = bokeh.line(
        >>>     'key', 'val', data_points=5, add_interaction=False
        >>> )
        >>> # create a dashboard object
        >>> d = cux_df.dashboard([line_chart_1])

        Returns
        -------
        cuxfilter.DashBoard object
        """
        # Copy the sidebar list: DashBoard inserts the data-size indicator
        # into it, and the previous `sidebar: list = []` mutable default
        # accumulated widgets across successive dashboard() calls.
        sidebar = list(sidebar) if sidebar is not None else []

        if notebook_assets.pn.config.js_files == {}:
            notebook_assets.load_notebook_assets()

        return DashBoard(
            charts=charts,
            sidebar=sidebar,
            dataframe=self,
            layout=layout,
            theme=theme,
            title=title,
            data_size_widget=data_size_widget,
            show_warnings=warnings,
            layout_array=layout_array,
        )
| 0 |
rapidsai_public_repos/cuxfilter/python
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/default_charts.json
|
{
"bar":"bokeh",
"line":"altair",
"scatter": "datashader",
"choropleth": "bokeh"
}
| 0 |
rapidsai_public_repos/cuxfilter/python
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/_version.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.resources
# Read the version string from the VERSION file shipped inside the package
# (declared as package_data in setup.py), so the version lives in one place.
__version__ = (
    importlib.resources.files("cuxfilter")
    .joinpath("VERSION")
    .read_text()
    .strip()
)
# Populated by the build system with the git commit hash; empty in-source.
__git_commit__ = ""
| 0 |
rapidsai_public_repos/cuxfilter/python
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/__init__.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
from cuxfilter.dataframe import DataFrame
from cuxfilter.dashboard import DashBoard
from cuxfilter._version import __git_commit__, __version__
| 0 |
rapidsai_public_repos/cuxfilter/python
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/dashboard.py
|
from typing import Dict, Union
import bokeh.embed.util as u
import cudf
import dask_cudf
import panel as pn
from panel.io.server import get_server
from bokeh.embed import server_document
import os
import urllib
import warnings
from collections import Counter
from cuxfilter.charts.core import BaseChart, BaseWidget, ViewDataFrame
from cuxfilter.layouts import single_feature
from cuxfilter.charts.panel_widgets import data_size_indicator
from cuxfilter.assets import get_open_port, cudf_utils
from cuxfilter.themes import default
DEFAULT_NOTEBOOK_URL = "http://localhost:8888"
def _get_host(url):
parsed_url = urllib.parse.urlparse(url)
if parsed_url.scheme not in ["http", "https"]:
raise ValueError("url should contain protocol(http or https)")
return parsed_url
def _create_dashboard_url(notebook_url: str, port: int, service_proxy=None):
service_url_path = ""
notebook_url = f"{notebook_url.scheme}://{notebook_url.netloc}"
if service_proxy == "jupyterhub":
if "JUPYTERHUB_SERVICE_PREFIX" not in os.environ:
raise EnvironmentError(
"JUPYTERHUB_SERVICE_PREFIX environment variable "
+ "not set, service_proxy=jupyterhub will only work "
+ "in a jupyterhub environment"
)
service_url_path = os.environ["JUPYTERHUB_SERVICE_PREFIX"]
proxy_url_path = "proxy/%d/" % port
user_url = urllib.parse.urljoin(notebook_url, service_url_path)
full_url = urllib.parse.urljoin(user_url, proxy_url_path)
return full_url
class DuplicateChartsWarning(Warning):
    """Warning category emitted when multiple charts share the same name,
    so users can filter these warnings specifically."""

    ...
def _check_if_duplicates(charts):
    """Emit a DuplicateChartsWarning if any two charts share a name."""
    name_counts = Counter(chart.name for chart in charts)
    dups = [name for name, count in name_counts.items() if count > 1]
    if dups:
        warnings.warn(
            (
                f"{dups} \n Only unique chart names "
                "are supported, please provide a unique title parameter to "
                "each chart"
            ),
            DuplicateChartsWarning,
        )
class DashBoard:
"""
A cuxfilter GPU DashBoard object.
Examples
--------
Create a dashboard
>>> import cudf
>>> import cuxfilter
>>> from cuxfilter.charts import bokeh, panel_widgets
>>> df = cudf.DataFrame(
>>> {'key': [0, 1, 2, 3, 4], 'val':[float(i + 10) for i in range(5)]}
>>> )
>>> cux_df = cuxfilter.DataFrame.from_dataframe(df)
>>> line_chart_1 = bokeh.line(
>>> 'key', 'val', data_points=5, add_interaction=False
>>> )
>>> line_chart_2 = bokeh.bar(
>>> 'val', 'key', data_points=5, add_interaction=False
>>> )
>>> sidebar_widget = panel_widgets.card("test")
>>> d = cux_df.dashboard(charts=[line_chart_1, line_chart_2],
>>> sidebar=[sidebar_widget])
>>> d
`cuxfilter DashBoard
[title] Markdown(str)
[chart0] Markdown(str, sizing_mode='stretch_both'), ['nav'])
[chart1] Column(sizing_mode='scale_both', width=1600)
[0] Bokeh(Figure)
[chart2] Column(sizing_mode='scale_both', width=1600)
[0] Bokeh(Figure)`
>>> # d.app() for serving within notebook cell,
>>> # d.show() for serving as a separate web-app
>>> d.app() #or d.show()
displays interactive dashboard
do some visual querying/ crossfiltering
"""
_charts: Dict[str, Union[BaseChart, BaseWidget, ViewDataFrame]]
_query_str_dict: Dict[str, str]
_query_local_variables_dict = {}
_dashboard = None
_theme = None
_notebook_url = DEFAULT_NOTEBOOK_URL
_current_server_type = "show"
_layout_array = None
server = None
    @property
    def queried_indices(self):
        """
        Read-only property queried_indices returns a merged index
        of all queried index columns present in self._query_str_dict
        as a cudf.Series.

        Returns None if no index columns are present.

        :meta private:
        """
        result = None
        # pick the module matching the wrapped data type so concat works for
        # both single-GPU (cudf) and multi-GPU (dask_cudf) frames
        df_module = (
            cudf
            if isinstance(self._cuxfilter_df.data, cudf.DataFrame)
            else dask_cudf
        )
        # DataFrame-valued entries hold boolean selection masks (e.g. from
        # lasso selections); string entries are query expressions handled in
        # _generate_query_str
        selected_indices = {
            key: value
            for (key, value) in self._query_str_dict.items()
            if type(value) in [cudf.DataFrame, dask_cudf.DataFrame]
        }
        if len(selected_indices) > 0:
            # a row is selected only if every chart's mask selects it;
            # missing values count as not-selected (False)
            result = (
                df_module.concat(list(selected_indices.values()), axis=1)
                .fillna(False)
                .all(axis=1)
            )
        return result
def __init__(
self,
charts=[],
sidebar=[],
dataframe=None,
layout=single_feature,
theme=default,
title="Dashboard",
data_size_widget=True,
show_warnings=False,
layout_array=None,
):
self._cuxfilter_df = dataframe
self._charts = dict()
self._sidebar = dict()
self._query_str_dict = dict()
# check if charts and sidebar lists contain cuxfilter.charts with
# duplicate names
_check_if_duplicates(charts)
_check_if_duplicates(sidebar)
# widgets can be places both in sidebar area AND chart area
# but charts cannot be placed in the sidebar area due to size
# and resolution constraints
# process all main dashboard charts
for chart in charts:
chart.initiate_chart(self)
chart._initialized = True
self._charts[chart.name] = chart
# add data_size_indicator to sidebar if data_size_widget=True
if data_size_widget:
sidebar.insert(0, data_size_indicator(title_size="14pt"))
# process all sidebar widgets
for chart in sidebar:
if chart.is_widget:
chart.initiate_chart(self)
chart._initialized = True
self._sidebar[chart.name] = chart
self.title = title
self._dashboard = layout()
self._theme = theme
self._layout_array = layout_array
# handle dashboard warnings
if not show_warnings:
u.log.disabled = True
warnings.filterwarnings("ignore")
else:
u.log.disabled = False
warnings.filterwarnings("default")
@property
def charts(self):
"""
Charts in the dashboard as a dictionary.
"""
return {**self._charts, **self._sidebar}
def add_charts(self, charts=[], sidebar=[]):
"""
Adding more charts to the dashboard, after it has been initialized.
Parameters
----------
charts: list
list of cuxfilter.charts objects
sidebar: list
list of cuxfilter.charts.panel_widget objects
Notes
-----
After adding the charts, refresh the dashboard app
tab to see the updated charts. Charts of type widget cannot be added
to sidebar but widgets can be added to charts(main layout)
Examples
--------
>>> import cudf
>>> import cuxfilter
>>> from cuxfilter.charts import bokeh, panel_widgets
>>> df = cudf.DataFrame(
>>> {
>>> 'key': [0, 1, 2, 3, 4],
>>> 'val':[float(i + 10) for i in range(5)]
>>> }
>>> )
>>> cux_df = cuxfilter.DataFrame.from_dataframe(df)
>>> line_chart_1 = bokeh.line(
>>> 'key', 'val', data_points=5, add_interaction=False
>>> )
>>> d = cux_df.dashboard([line_chart_1])
>>> line_chart_2 = bokeh.bar(
>>> 'val', 'key', data_points=5, add_interaction=False
>>> )
>>> d.add_charts(charts=[line_chart_2])
>>> # or
>>> d.add_charts(charts=[], sidebar=[panel_widgets.card("test")])
"""
if len(charts) > 0 or len(sidebar) > 0:
for chart in charts:
if chart not in self._charts:
self._charts[chart.name] = chart
for chart in sidebar:
if chart not in self._sidebar and chart.is_widget:
self._sidebar[chart.name] = chart
self._reinit_all_charts()
self._restart_current_server()
def _restart_current_server(self):
if self.server is not None:
self.stop()
getattr(self, self._current_server_type)(
notebook_url=self._notebook_url, port=self.server.port
)
def _reinit_all_charts(self):
self._query_str_dict = dict()
for chart in self.charts.values():
chart.initiate_chart(self)
chart._initialized = True
    def _query(self, query_str):
        """
        Query the cudf.DataFrame

        Applies both the combined string query expression and any
        index masks from lasso/box selections to the source data.
        """
        # filter the source data with current queries: indices and query strs
        return cudf_utils.query_df(
            self._cuxfilter_df.data,
            query_str,
            self._query_local_variables_dict,
            self.queried_indices,
        )
def _generate_query_str(self, query_dict=None, ignore_chart=""):
"""
Generate query string based on current crossfiltered state of
the dashboard.
"""
popped_value = None
query_dict = query_dict or self._query_str_dict
if (
isinstance(ignore_chart, (BaseChart, BaseWidget, ViewDataFrame))
and len(ignore_chart.name) > 0
and ignore_chart.name in query_dict
):
popped_value = query_dict.pop(ignore_chart.name, None)
# extract string queries from query_dict,
# as self.query_dict also contains cudf.Series indices
str_queries_list = [x for x in query_dict.values() if type(x) == str]
return_query_str = " and ".join(str_queries_list)
# adding the popped value to the query_str_dict again
if popped_value is not None:
query_dict[ignore_chart.name] = popped_value
return return_query_str
    def export(self):
        """
        Export the cudf.DataFrame based on the current filtered state of
        the dashboard.

        Also prints the query string of the current state of the dashboard.

        Returns
        -------
        cudf.DataFrame based on the current filtered state of the dashboard.

        Examples
        --------
        >>> import cudf
        >>> import cuxfilter
        >>> from cuxfilter.charts import bokeh
        >>> df = cudf.DataFrame(
        >>>     {
        >>>         'key': [0, 1, 2, 3, 4],
        >>>         'val':[float(i + 10) for i in range(5)]
        >>>     }
        >>> )
        >>> cux_df = cuxfilter.DataFrame.from_dataframe(df)
        >>> line_chart_1 = bokeh.line(
        >>>     'key', 'val', data_points=5, add_interaction=False
        >>> )
        >>> line_chart_2 = bokeh.bar(
        >>>     'val', 'key', data_points=5, add_interaction=False
        >>> )
        >>> d = cux_df.dashboard(
        >>>     [line_chart_1, line_chart_2],
        >>>     layout=cuxfilter.layouts.double_feature
        >>> )
        >>> # d.app() for serving within notebook cell,
        >>> # d.show() for serving as a separate web-app
        >>> d.app() #or d.show()
        displays interactive dashboard
        >>> queried_df = d.export()
        final query 2<=key<=4
        """
        # only run the query when there is an active filter: either a string
        # query expression or index masks from lasso/box selections
        if (
            len(self._generate_query_str()) > 0
            or self.queried_indices is not None
        ):
            print("final query", self._generate_query_str())
            if self.queried_indices is not None:
                print("polygon selected using lasso selection tool")
            return self._query(self._generate_query_str())
        else:
            # no filter active: hand back the original (unfiltered) data
            print("no querying done, returning original dataframe")
            return self._cuxfilter_df.data
def __str__(self):
return self.__repr__()
    def __repr__(self):
        """Multi-line summary listing the dashboard's template children."""
        # build the panel template for the current charts/sidebar so the
        # repr reflects the actual dashboard composition
        template_obj = self._dashboard.generate_dashboard(
            title=self.title,
            charts=self._charts,
            sidebar=self._sidebar,
            theme=self._theme,
            layout_array=self._layout_array,
        )
        cls = "#### cuxfilter " + type(self).__name__
        spacer = "\n    "
        # one "[i] <child repr>" line per template child; the argument to
        # obj.__repr__(1) is presumably panel's repr nesting depth — TODO
        # confirm against the panel API
        objs = [
            "[%d] %s" % (i, obj.__repr__(1))
            for i, obj in enumerate(template_obj)
        ]
        template = "{cls}{spacer}{spacer}{objs}"
        return template.format(
            cls=cls, objs=("%s" % spacer).join(objs), spacer=spacer
        )
def _repr_mimebundle_(self, include=None, exclude=None):
str_repr = self.__repr__()
server_info = pn.pane.HTML("")
return pn.Column(str_repr, server_info, width=800)._repr_mimebundle_(
include, exclude
)
    def _get_server(
        self,
        panel=None,
        port=0,
        websocket_origin=None,
        loop=None,
        show=False,
        start=False,
        **kwargs,
    ):
        """
        Create a bokeh server hosting ``panel``, forwarding all arguments
        plus the dashboard title to ``get_server``.
        """
        server = get_server(
            panel=panel,
            port=port,
            websocket_origin=websocket_origin,
            loop=loop,
            show=show,
            start=start,
            title=self.title,
            **kwargs,
        )
        # NOTE(review): the return value of server_document is discarded —
        # presumably called only for a side effect; confirm it is required.
        server_document(websocket_origin, resources=None)
        return server
    def app(self, sidebar_width=280, width=1200, height=800):
        """
        Run the dashboard with a bokeh backend server within the notebook.
        Parameters
        ----------
        sidebar_width: int, optional, default 280
            width of the sidebar in pixels
        width: int, optional, default 1200
            width of the dashboard in pixels
        height: int, optional, default 800
            height of the dashboard in pixels
        Examples
        --------
        >>> import cudf
        >>> import cuxfilter
        >>> from cuxfilter.charts import bokeh
        >>> df = cudf.DataFrame(
        >>>     {
        >>>         'key': [0, 1, 2, 3, 4],
        >>>         'val':[float(i + 10) for i in range(5)]
        >>>     }
        >>> )
        >>> cux_df = cuxfilter.DataFrame.from_dataframe(df)
        >>> line_chart_1 = bokeh.line(
        >>>     'key', 'val', data_points=5, add_interaction=False
        >>> )
        >>> d = cux_df.dashboard([line_chart_1])
        >>> d.app(sidebar_width=200, width=1000, height=450)
        """
        # rebuild all charts so stale state from a previous render is cleared
        self._reinit_all_charts()
        self._current_server_type = "app"
        return self._dashboard.generate_dashboard(
            title=self.title,
            charts=self._charts,
            sidebar=self._sidebar,
            # NOTE(review): when a theme IS set, the stock `default` theme is
            # passed instead of self._theme — looks inverted; confirm this is
            # intentional for notebook rendering.
            theme=default if self._theme is not None else None,
            layout_array=self._layout_array,
            render_location="notebook",
            sidebar_width=sidebar_width,
            width=width,
            height=height,
        )
    def show(
        self,
        notebook_url=DEFAULT_NOTEBOOK_URL,
        port=0,
        threaded=False,
        service_proxy=None,
        sidebar_width=280,
        height=800,
        **kwargs,
    ):
        """
        Run the dashboard with a bokeh backend server within the notebook.
        Parameters
        ----------
        notebook_url: str, optional, default localhost:8888
            - URL where you want to run the dashboard as a web-app,
            including the port number.
            - Can use localhost instead of ip if running locally.
        port: int, optional
            Has to be an open port
        service_proxy: str, optional, default None,
            available options: jupyterhub
        threaded: boolean, optional, default False
            whether to run the server in threaded mode
        sidebar_width: int, optional, default 280
            width of the sidebar in pixels
        height: int, optional, default 800
            height of the dashboard in pixels
        **kwargs: dict, optional
            additional keyword arguments to pass to the server
        Examples
        --------
        >>> import cudf
        >>> import cuxfilter
        >>> from cuxfilter.charts import bokeh
        >>> df = cudf.DataFrame(
        >>>     {
        >>>         'key': [0, 1, 2, 3, 4],
        >>>         'val':[float(i + 10) for i in range(5)]
        >>>     }
        >>> )
        >>> cux_df = cuxfilter.DataFrame.from_dataframe(df)
        >>> line_chart_1 = bokeh.line(
        >>>     'key', 'val', data_points=5, add_interaction=False
        >>> )
        >>> d = cux_df.dashboard([line_chart_1])
        >>> d.show('localhost:8889')
        """
        # tear down any server left running from a previous show() call
        if self.server is not None:
            if self.server._started:
                self.stop()
        self._reinit_all_charts()
        self._notebook_url = _get_host(notebook_url)
        if port == 0:
            # pick any free port when none was requested
            port = get_open_port()
        dashboard_url = _create_dashboard_url(
            self._notebook_url, port, service_proxy
        )
        print("Dashboard running at port " + str(port))
        panel = self._dashboard.generate_dashboard(
            title=self.title,
            charts=self._charts,
            sidebar=self._sidebar,
            theme=self._theme,
            layout_array=self._layout_array,
            render_location="web-app",
            sidebar_width=sidebar_width,
            height=height,
        )
        try:
            self.server = self._get_server(
                panel=panel,
                port=port,
                websocket_origin=self._notebook_url.netloc,
                show=False,
                start=True,
                threaded=threaded,
                sidebar_width=sidebar_width,
                height=height,
                **kwargs,
            )
        except OSError:
            # NOTE(review): if _get_server raised, self.server still refers to
            # a previous server (or None, which would raise AttributeError
            # here) — confirm a prior instance is guaranteed on this path.
            self.server.stop()
            # retry without the sidebar_width/height kwargs
            self.server = self._get_server(
                panel=panel,
                port=port,
                websocket_origin=self._notebook_url.netloc,
                show=False,
                start=True,
                threaded=threaded,
                **kwargs,
            )
        self._current_server_type = "show"
        # button that opens the served dashboard in a new browser tab
        b = pn.widgets.Button(
            name="open cuxfilter dashboard", button_type="success"
        )
        b.js_on_click(
            args={"target": dashboard_url}, code="window.open(target)"
        )
        return pn.Row(b)
    def stop(self):
        """
        stop the bokeh server
        """
        # idempotent: only tear down when the server has not been stopped yet
        if self.server._stopped is False:
            self.server.stop()
            self.server._started = False
            self.server._stopped = True
            # also halt the underlying tornado server explicitly
            self.server._tornado.stop()
def _reload_charts(self, data=None, include_cols=[], ignore_cols=[]):
"""
Reload charts with current self._cuxfilter_df.data state.
"""
if data is None:
# get current data as per the active queries
data = self._query(self._generate_query_str())
if len(include_cols) == 0:
include_cols = self.charts.keys()
# reloading charts as per current data state
for chart in self.charts.values():
if (
chart.name not in ignore_cols
and chart.name in include_cols
and hasattr(chart, "reload_chart")
):
chart.reload_chart(data)
| 0 |
rapidsai_public_repos/cuxfilter/python
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/VERSION
|
23.12.00
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/themes/rapids.py
|
from bokeh import palettes
from panel.theme.fast import FastDarkTheme, FastDefaultTheme, FastStyle
class RapidsDefaultTheme(FastDefaultTheme):
    """Light RAPIDS-branded theme (purple accent) for cuxfilter dashboards."""

    style = FastStyle(
        header_background="#8735fb",
    )
    # basemap used when a mapbox API token is configured
    map_style = "mapbox://styles/mapbox/light-v9"
    # token-free fallback basemap
    map_style_without_token = (
        "https://basemaps.cartocdn.com/gl/positron-gl-style/style.json"
    )
    color_palette = list(palettes.Purples[9])
    chart_color = "#8735fb"
    datasize_indicator_class = "#8735fb"
class RapidsDarkTheme(FastDarkTheme):
    """Dark RAPIDS-branded theme (purple accent) for cuxfilter dashboards."""

    style = FastStyle(
        background_color="#181818",
        color="#ffffff",
        header_background="#1c1c1c",
        luminance=0.1,
        neutral_fill_card_rest="#212121",
        neutral_focus="#717171",
        neutral_foreground_rest="#e5e5e5",
        shadow=False,
    )
    # basemap used when a mapbox API token is configured
    map_style = "mapbox://styles/mapbox/dark-v9"
    # token-free fallback basemap
    map_style_without_token = (
        "https://basemaps.cartocdn.com/gl/dark-matter-gl-style/style.json"
    )
    color_palette = list(palettes.Purples[9])
    chart_color = "#8735fb"
    datasize_indicator_class = "#8735fb"
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/themes/default.py
|
from bokeh import palettes
from panel.theme.fast import FastDefaultTheme, FastDarkTheme, FastStyle
class LightTheme(FastDefaultTheme):
    """Default light theme (blue accent) for cuxfilter dashboards."""

    # basemap used when a mapbox API token is configured
    map_style = "mapbox://styles/mapbox/light-v9"
    # token-free fallback basemap
    map_style_without_token = (
        "https://basemaps.cartocdn.com/gl/positron-gl-style/style.json"
    )
    color_palette = list(palettes.Blues[9])
    chart_color = "#4292c6"
    datasize_indicator_class = "#4292c6"
class CustomDarkTheme(FastDarkTheme):
    """Dark theme (blue accent) for cuxfilter dashboards."""

    style = FastStyle(
        background_color="#181818",
        color="#ffffff",
        header_background="#1c1c1c",
        luminance=0.1,
        neutral_fill_card_rest="#212121",
        neutral_focus="#717171",
        neutral_foreground_rest="#e5e5e5",
    )
    # basemap used when a mapbox API token is configured
    map_style = "mapbox://styles/mapbox/dark-v9"
    # token-free fallback basemap
    map_style_without_token = (
        "https://basemaps.cartocdn.com/gl/dark-matter-gl-style/style.json"
    )
    color_palette = list(palettes.Blues[9])
    chart_color = "#4292c6"
    datasize_indicator_class = "#4292c6"
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/themes/__init__.py
|
from .default import LightTheme as default, CustomDarkTheme as dark
from .rapids import (
RapidsDefaultTheme as rapids,
RapidsDarkTheme as rapids_dark,
)
__all__ = ["default", "dark", "rapids", "rapids_dark"]
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/constants.py
|
import numpy as np
from bokeh.palettes import Viridis10
# display labels for boolean columns encoded as 0/1
BOOL_MAP = {0: "False", 1: "True"}
# light grey used to render NaN / missing values
CUXF_NAN_COLOR = "#d3d3d3"
CUXF_DEFAULT_COLOR_PALETTE = list(Viridis10)
# cudf datetime dtype strings for every supported resolution, plus the
# numpy scalar type, for dtype-membership checks
CUDF_DATETIME_TYPES = tuple(
    f"datetime64[{i}]" for i in ["s", "ms", "us", "ns"]
) + (np.datetime64,)
CUDF_TIMEDELTA_TYPE = np.timedelta64
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/__init__.py
|
from .bokeh import bar
from .core import view_dataframe
from .constants import *
from .datashader import line, scatter, stacked_lines, heatmap, graph
from .deckgl import choropleth
from .panel_widgets import (
card,
data_size_indicator,
drop_down,
date_range_slider,
float_slider,
int_slider,
multi_select,
number,
range_slider,
)
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core/core_view_dataframe.py
|
import panel as pn
import holoviews as hv
import logging
import dask_cudf
from panel.config import panel_extension
# custom CSS removing default table chrome for dataframe views; appended to
# panel's global raw_css so it applies to every rendered table
css = """
.dataframe table{
border: none;
}
.panel-df table{
width: 100%;
border-collapse: collapse;
border: none;
}
.panel-df td{
white-space: nowrap;
overflow: auto;
text-overflow: ellipsis;
}
"""
pn.config.raw_css += [css]
class ViewDataFrame:
    """
    Tabular view (holoviews Table) of the dashboard's dataframe state.

    For dask_cudf inputs only the head (1000 rows) is displayed unless
    ``force_computation`` is True.
    """

    columns = None
    chart = None
    source = None
    use_data_tiles = False
    drop_duplicates = False
    _initialized = False
    # widget=False can only be rendered the main layout
    is_widget = False
    title = "Dataset View"

    def __init__(
        self,
        columns=None,
        drop_duplicates=False,
        force_computation=False,
    ):
        # columns: subset of columns to display (all columns when None)
        # drop_duplicates: show only unique rows when True
        # force_computation: materialize full dask_cudf frames when True
        self.columns = columns
        self.drop_duplicates = drop_duplicates
        self.force_computation = force_computation

    @property
    def name(self):
        # unique key for this chart within the dashboard registry
        return f"{self.chart_type}_{self.columns}"

    def initiate_chart(self, dashboard_cls):
        """Build the table from the dashboard's current dataframe."""
        data = dashboard_cls._cuxfilter_df.data
        if isinstance(data, dask_cudf.core.DataFrame):
            if self.force_computation:
                # materialize the full distributed frame (can be expensive)
                self.generate_chart(data.compute())
            else:
                print(
                    "displaying only 1st partitions top 1000 rows for ",
                    "view_dataframe - dask_cudf to avoid partition based ",
                    "computation use force_computation=True for viewing ",
                    "top-level view of entire DataFrame. ",
                    "Warning - would slow the dashboard down significantly",
                )
                self.generate_chart(
                    data.head(
                        1000,
                        npartitions=data.npartitions,
                        compute=True,
                    )
                )
        else:
            self.generate_chart(data)

    def _format_data(self, data):
        # cap the preview size unless full computation was requested
        if not self.force_computation:
            data = data.head(1000)
        if self.drop_duplicates:
            data = data.drop_duplicates()
        return data

    def generate_chart(self, data):
        # default to all columns when none were specified
        if self.columns is None:
            self.columns = list(data.columns)
        self.chart = hv.Table(self._format_data(data[self.columns]))

    def _repr_mimebundle_(self, include=None, exclude=None):
        # render via panel only when the dashboard is initialized and the
        # notebook assets are loaded; otherwise warn and fall back
        view = self.view()
        if self._initialized and panel_extension._loaded:
            return view._repr_mimebundle_(include, exclude)
        if self._initialized is False:
            logging.warning(
                "dashboard has not been initialized."
                "Please run cuxfilter.dashboard.Dashboard([...charts])"
                " to view this object in notebook"
            )
        if panel_extension._loaded is False:
            logging.warning(
                "notebooks assets not loaded."
                "Please run cuxfilter.load_notebooks_assets()"
                " to view this object in notebook"
            )
        if isinstance(view, pn.Column):
            return view.pprint()
        return None

    def view(self, width=600, height=400):
        return pn.panel(self.chart, width=width, height=height)

    def get_dashboard_view(self):
        return pn.panel(self.chart, sizing_mode="stretch_both")

    def reload_chart(self, data):
        """Refresh the table contents from the (possibly filtered) data."""
        if isinstance(data, dask_cudf.core.DataFrame):
            if self.force_computation:
                self.chart.data = self._format_data(
                    data[self.columns].compute()
                )
            else:
                # preview mode: only the head across partitions
                self.chart.data = self._format_data(
                    data[self.columns].head(
                        1000, npartitions=data.npartitions, compute=True
                    )
                )
        else:
            self.chart.data = self._format_data(data[self.columns])
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core/core_chart.py
|
import cudf
import dask_cudf
import logging
import panel as pn
from bokeh.models import ColumnDataSource
from panel.config import panel_extension
from typing import Dict, Literal
from ...assets import datetime as dt
class BaseChart:
    """
    Base class for all cuxfilter charts: holds shared state (axes,
    aggregation, data source, binning) and defines the interface that
    library-specific chart classes override.
    """

    chart_type: str = None
    x: str = None
    y: str = None
    aggregate_fn: str = "count"
    color: str = None
    add_interaction: bool = True
    chart = None
    source = None
    source_backup = None
    data_points: int = 0
    filter_widget = None
    # NOTE(review): mutable class-level default — shared by all instances
    # until the library_specific_params setter replaces it; confirm no
    # instance mutates it before assignment.
    _library_specific_params: Dict[str, str] = {}
    stride = None
    stride_type = int
    min_value: float = 0.0
    max_value: float = 0.0
    x_label_map = None
    y_label_map = None
    _initialized = False
    # widget=False can only be rendered the main layout
    is_widget = False
    title = ""
    _renderer_mode: Literal["web-app", "notebook"] = "web-app"

    @property
    def renderer_mode(self):
        return self._renderer_mode

    @renderer_mode.setter
    def renderer_mode(self, value):
        # restrict to the two supported render targets
        valid_values = ["web-app", "notebook"]
        if value not in valid_values:
            raise ValueError(
                f"""Invalid value '{value}'. Value must be one of
                {valid_values}."""
            )
        self._renderer_mode = value

    @property
    def name(self):
        # unique key for this chart within the dashboard registry
        chart_type = self.chart_type if self.chart_type else "chart"
        return f"{self.x}_{chart_type}_{self.title}"

    @property
    def library_specific_params(self):
        return self._library_specific_params

    @property
    def x_dtype(self):
        # dtype of the x column, resolved from whichever source type is set
        if isinstance(self.source, ColumnDataSource):
            return self.source.data[self.data_x_axis].dtype
        elif isinstance(self.source, (cudf.DataFrame, dask_cudf.DataFrame)):
            return self.source[self.x].dtype
        return None

    @property
    def y_dtype(self):
        if isinstance(self.source, ColumnDataSource):
            # NOTE(review): reads data_x_axis, not data_y_axis — looks like a
            # copy-paste from x_dtype; confirm whether this is intentional.
            return self.source.data[self.data_x_axis].dtype
        elif isinstance(self.source, (cudf.DataFrame, dask_cudf.DataFrame)):
            return self.source[self.y].dtype
        return None

    @library_specific_params.setter
    def library_specific_params(self, value):
        # store params, then pull out label maps and color into attributes
        self._library_specific_params = value
        self.extract_mappers()
        self.set_color()

    def set_color(self):
        if "color" in self.library_specific_params:
            self.color = self.library_specific_params["color"]

    def extract_mappers(self):
        # move x/y label maps out of the params dict into attributes
        if "x_label_map" in self.library_specific_params:
            self.x_label_map = self.library_specific_params["x_label_map"]
            self.library_specific_params.pop("x_label_map")
        if "y_label_map" in self.library_specific_params:
            self.y_label_map = self.library_specific_params["y_label_map"]
            self.library_specific_params.pop("y_label_map")

    def _repr_mimebundle_(self, include=None, exclude=None):
        # render via panel only when initialized and notebook assets loaded
        view = self.view()
        if self._initialized and panel_extension._loaded:
            return view._repr_mimebundle_(include, exclude)
        if self._initialized is False:
            logging.warning(
                "dashboard has not been initialized."
                "Please run cuxfilter.dashboard.Dashboard([...charts])"
                " to view this object in notebook"
            )
        if panel_extension._loaded is False:
            logging.warning(
                "notebooks assets not loaded."
                "Please run cuxfilter.load_notebooks_assets()"
                " to view this object in notebook"
            )
        if isinstance(view, pn.Column):
            return view.pprint()
        return None

    def _to_xaxis_type(self, dates):
        """
        Description: convert to int64 if self.x_dtype is of type datetime
        -----------------------------------------------------------------
        Input:
            dates: cudf.Series | list | tuple
        """
        return dt.to_int64_if_datetime(dates, self.x_dtype)

    def _to_yaxis_type(self, dates):
        """
        Description: convert to int64 if self.y_dtype is of type datetime
        -----------------------------------------------------------------
        Input:
            dates: cudf.Series | list | tuple
        """
        return dt.to_int64_if_datetime(dates, self.y_dtype)

    def _xaxis_dt_transform(self, dates):
        """
        Description: convert to datetime64 if self.x_dtype is of type datetime
        -----------------------------------------------------------------
        Input:
            dates: list | tuple of integer timestamps objects
        """
        return dt.to_dt_if_datetime(dates, self.x_dtype)

    def _yaxis_dt_transform(self, dates):
        """
        Description: convert to datetime64 if self.y_dtype is of type datetime
        -----------------------------------------------------------------
        Input:
            dates: list | tuple of integer timestamps objects
        """
        return dt.to_dt_if_datetime(dates, self.y_dtype)

    def _xaxis_np_dt64_transform(self, dates):
        """
        Description: convert to datetime64 if self.x_dtype is of type datetime
        -----------------------------------------------------------------
        Input:
            dates: list | tuple of datetime.datetime objects
        """
        return dt.to_np_dt64_if_datetime(dates, self.x_dtype)

    def _yaxis_np_dt64_transform(self, dates):
        """
        Description: convert to datetime64 if self.y_dtype is of type datetime
        -----------------------------------------------------------------
        Input:
            dates: list | tuple of datetime.datetime objects
        """
        return dt.to_np_dt64_if_datetime(dates, self.y_dtype)

    def _xaxis_stride_type_transform(self, stride_type):
        """
        Description: return stride_type=CUDF_TIMEDELTA_TYPE if self.x_dtype is
        of type datetime, else return stride_type
        """
        return dt.transform_stride_type(stride_type, self.x_dtype)

    def _yaxis_stride_type_transform(self, stride_type):
        """
        Description: return stride_type=CUDF_TIMEDELTA_TYPE if self.y_dtype is
        of type datetime else return stride_type
        """
        return dt.transform_stride_type(stride_type, self.y_dtype)

    def view(self, width=600, height=400):
        return pn.panel(self.chart, width=width, height=height)

    def get_dashboard_view(self):
        return self.view()

    # The methods below are interface stubs, overridden by the
    # library-specific chart implementations.
    def calculate_source(self, data):
        print("base calc source function, to over-ridden by delegated classes")
        return -1

    def generate_chart(self, **kwargs):
        print("base calc source function, to over-ridden by delegated classes")
        return -1

    def add_reset_event(self, callback=None):
        print("base calc source function, to over-ridden by delegated classes")
        return -1

    def compute_query_dict(self, query_dict):
        print("base calc source function, to over-ridden by delegated classes")
        return -1

    def reset_chart(self, data: list = []):
        print("base calc source function, to over-ridden by delegated classes")
        return -1

    def reload_chart(self, data):
        print("base calc source function, to over-ridden by delegated classes")
        return -1

    def format_source_data(self, source_dict):
        """"""
        # print('function to be overridden by library specific extensions')
        return -1

    def apply_mappers(self):
        """"""
        # print('function to be overridden by library specific extensions')
        return -1
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core/__init__.py
|
from .core_chart import BaseChart
from .core_widget import BaseWidget
from .core_view_dataframe import ViewDataFrame
def view_dataframe(
    columns=None,
    drop_duplicates=False,
    force_computation=False,
):
    """
    Create a dataframe-view chart for a cuxfilter dashboard.

    Parameters
    ----------
    columns: list, default None
        display subset of columns, and all columns if None
    drop_duplicates: bool, default False
        display only unique rows if True
    force_computation: bool, default False
        - force_computation=False returns df.head(1000)
        - force_computation=True returns entire df, but it can be
        computationally intensive
    Returns
    -------
    A view dataframe object.
    Type cuxfilter.charts.core_view_dataframe.ViewDataFrame
    """
    chart = ViewDataFrame(
        columns=columns,
        drop_duplicates=drop_duplicates,
        force_computation=force_computation,
    )
    chart.chart_type = "view_dataframe"
    return chart
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core/core_widget.py
|
import logging
import panel as pn
from panel.config import panel_extension
from typing import Dict
from cuxfilter.assets import datetime as dt
class BaseWidget:
    """
    Base class for sidebar filter widgets (sliders, dropdowns, etc.):
    holds shared widget state and the common notebook/theming plumbing.
    """

    chart_type: str = None
    x: str = None
    color: str = None
    chart = None
    data_points = None
    start: float = None
    end: float = None
    _stride = None
    stride_type = int
    params = None
    min_value: float = 0.0
    max_value: float = 0.0
    label_map: Dict[str, str] = None
    use_data_tiles = False
    _initialized = False
    # widget is a chart type that can be rendered in a sidebar or main layout
    is_widget = True

    @property
    def name(self):
        # unique key for this widget within the dashboard registry
        chart_type = self.chart_type if self.chart_type else "widget"
        return f"{self.x}_{chart_type}"

    @property
    def stride(self):
        return self._stride

    @stride.setter
    def stride(self, value):
        # keep stride_type in sync with the stride's actual python type
        if value is not None:
            self.stride_type = type(value)
        self._stride = value

    @property
    def x_dtype(self):
        # default x_dtype
        return float

    def _xaxis_np_dt64_transform(self, dates):
        """
        Description: convert to datetime64 if self.x_dtype is of type datetime
        -----------------------------------------------------------------
        Input:
            dates: list | tuple of datetime.datetime objects
        """
        # self.x_dtype is a computed read-only property
        return dt.to_np_dt64_if_datetime(dates, self.x_dtype)

    def __init__(
        self,
        x,
        data_points=None,
        step_size=None,
        step_size_type=int,
        **params,
    ):
        """
        Description:
        -------------------------------------------
        Input:
            x: column the widget filters on
            data_points: number of discrete points for the widget
            step_size: stride between values
            step_size_type: python type of the stride (int/float/...)
            **params: extra widget params; 'value' and 'label_map' are
                extracted into attributes, the rest are forwarded as-is
        -------------------------------------------
        Ouput:
        """
        self.x = x
        self.params = params
        self.data_points = data_points
        self.stride_type = step_size_type
        self.stride = step_size
        if "value" in params:
            self.value = params["value"]
            params.pop("value")
        if "label_map" in params:
            # invert mapping to value -> label for widget display
            self.label_map = params["label_map"]
            self.label_map = {v: k for k, v in self.label_map.items()}
            params.pop("label_map")

    def _repr_mimebundle_(self, include=None, exclude=None):
        # render via panel only when initialized and notebook assets loaded
        view = self.view()
        if self._initialized and panel_extension._loaded:
            return view._repr_mimebundle_(include, exclude)
        if self._initialized is False:
            logging.warning(
                "dashboard has not been initialized."
                "Please run cuxfilter.dashboard.Dashboard([...charts])"
                " to view this object in notebook"
            )
        if panel_extension._loaded is False:
            logging.warning(
                "notebooks assets not loaded."
                "Please run cuxfilter.load_notebooks_assets()"
                " to view this object in notebook"
            )
        if isinstance(view, pn.Column):
            return view.pprint()
        return None

    def view(self, width=400, height=10):
        return pn.Column(self.chart, width=width, height=height)

    def get_dashboard_view(self):
        return pn.panel(self.chart, sizing_mode="stretch_width")

    def add_event(self, event, callback):
        self.chart.on_event(event, callback)

    def compute_query_dict(self, query_dict):
        # interface stub, overridden by concrete widget classes
        print("base calc source function, to over-ridden by delegated classes")

    def reload_chart(self, *args, **kwargs):
        # No reload functionality, added function for consistency
        # with other charts
        return -1

    def apply_theme(self, theme):
        """
        apply thematic changes to the chart based on the theme
        """
        if hasattr(self.chart, "styles"):
            self.chart.styles = {
                "color": theme.style.color,
            }
        if hasattr(self.chart, "stylesheets"):
            # recolor noUiSlider handles/track to match the theme accent
            self.chart.stylesheets = [
                f"""
            .noUi-handle {{
                background-color: {theme.chart_color};
                border-color: {theme.chart_color};
            }}
            .noUi-connect {{
                background-color: {theme.chart_color} !important;
            }}
            """
            ]
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core/aggregate/core_aggregate.py
|
import numpy as np
from typing import Union
from bokeh.models import DatetimeTickFormatter
import holoviews as hv
from ..core_chart import BaseChart
from ...constants import (
CUDF_DATETIME_TYPES,
)
from ....assets.cudf_utils import get_min_max
class BaseAggregateChart(BaseChart):
    """
    Base class for aggregate charts (bar, line, etc.): manages binning
    (stride/data_points), box-select and reset interactions, and the
    query-dict plumbing back to the dashboard.
    """

    reset_event = None
    x_axis_tick_formatter = None
    y_axis_tick_formatter = None
    use_data_tiles = True
    stride = None
    data_points: Union[int, None] = None
    _x_dtype = float
    # NOTE(review): class-level holoviews streams are shared by every
    # instance of this class — confirm instances are not expected to have
    # independent selection/reset streams.
    box_stream = hv.streams.SelectionXY()
    reset_stream = hv.streams.PlotReset()

    @property
    def name(self):
        # overwrite BaseChart name function to allow unique choropleths on
        # value x
        if self.chart_type is not None:
            return (
                f"{self.x}_{self.aggregate_fn}_{self.chart_type}_{self.title}"
            )
        else:
            return f"{self.x}_{self.aggregate_fn}_chart_{self.title}"

    @property
    def x_dtype(self):
        """
        override core_chart x_dtype and make it constant, as panel 0.11 seems
        to update the datetime x_axis type to float during runtime
        """
        return self._x_dtype

    @x_dtype.setter
    def x_dtype(self, value):
        self._x_dtype = value

    @property
    def custom_binning(self):
        # True when the user supplied an explicit stride or data_points
        return self._stride is not None or self._data_points is not None

    def _transformed_source_data(self, property):
        """
        this fixes a bug introduced with panel 0.11, where bokeh CDS
        x-axis datetime is converted to float, and the only way to
        convert it back to datetime is using datetime64[ms]
        """
        if self.x_dtype in CUDF_DATETIME_TYPES:
            return self.source.data[property].astype("datetime64[ms]")
        return self.source.data[property]

    def __init__(
        self,
        x,
        y=None,
        data_points=None,
        add_interaction=True,
        aggregate_fn="count",
        step_size=None,
        step_size_type=int,
        title="",
        autoscaling=True,
        x_axis_tick_formatter=None,
        y_axis_tick_formatter=None,
        unselected_alpha=0.1,
        **library_specific_params,
    ):
        """
        Description:
        -------------------------------------------
        Input:
            x
            y
            data_points
            add_interaction
            aggregate_fn
            step_size
            step_size_type
            title
            autoscaling
            x_label_map
            y_label_map
            x_axis_tick_formatter
            y_axis_tick_formatter
            **library_specific_params
        -------------------------------------------
        Ouput:
        """
        self.x = x
        self.y = y
        # user-supplied binning inputs are kept in _stride/_data_points so
        # initiate_chart can reset the working values from them
        self._stride = step_size
        self._data_points = data_points
        self.stride_type = step_size_type
        self.add_interaction = add_interaction
        self.aggregate_fn = aggregate_fn
        self.title = title if title else self.x
        self.autoscaling = autoscaling
        self.x_axis_tick_formatter = x_axis_tick_formatter
        self.y_axis_tick_formatter = y_axis_tick_formatter
        self.unselected_alpha = unselected_alpha
        self.library_specific_params = library_specific_params

    def _compute_array_all_bins(self, source_x, update_data_x, update_data_y):
        """
        source_x: current_source_x, np.array()
        update_data_x: updated_data_x, np.array()
        update_data_y: updated_data_x, np.array()
        """
        if self.x_dtype in CUDF_DATETIME_TYPES:
            source_x = source_x.astype("datetime64[ms]")
        # scatter updated y values into a zero-filled array aligned with
        # the full set of bins in source_x
        result_array = np.zeros(shape=source_x.shape)
        indices = [np.where(x_ == source_x)[0][0] for x_ in update_data_x]
        np.put(result_array, indices, update_data_y)
        return result_array

    def compute_min_max(self, dashboard_cls):
        self.min_value, self.max_value = get_min_max(
            dashboard_cls._cuxfilter_df.data, self.x
        )

    def compute_stride(self):
        # datetime axes use a timedelta stride type
        self.stride_type = self._xaxis_stride_type_transform(self.stride_type)
        # sub-unit ranges cannot use an integer stride
        if self.stride_type == int and self.max_value < 1:
            self.stride_type = float
        if self.stride is None and self.data_points is not None:
            # derive stride from the requested number of data points
            raw_stride = (self.max_value - self.min_value) / self.data_points
            stride = (
                round(raw_stride) if self.stride_type == int else raw_stride
            )
            self.stride = stride

    def initiate_chart(self, dashboard_cls):
        """
        Description:
        -------------------------------------------
        Input:
        -------------------------------------------
        Ouput:
        """
        self.x_dtype = dashboard_cls._cuxfilter_df.data[self.x].dtype
        # reset data_point to input _data_points
        self.data_points = self._data_points
        # reset stride to input _stride
        self.stride = self._stride
        if self.x_dtype == "bool":
            # booleans map to exactly two bins
            self.min_value = 0
            self.max_value = 1
            self.stride = 1
        else:
            self.compute_min_max(dashboard_cls)
            if self.x_dtype in CUDF_DATETIME_TYPES:
                self.x_axis_tick_formatter = DatetimeTickFormatter()
            if self.x_dtype != "object":
                self.compute_stride()
        self.source = dashboard_cls._cuxfilter_df.data
        self.generate_chart()
        self.add_events(dashboard_cls)

    def get_reset_callback(self, dashboard_cls):
        def reset_callback(resetting):
            # clear this chart's selection and re-query all charts
            self.box_selected_range = None
            self.selected_indices = None
            dashboard_cls._query_str_dict.pop(self.name, None)
            dashboard_cls._reload_charts()

        return reset_callback

    def get_box_select_callback(self, dashboard_cls):
        def cb(bounds, x_selection, y_selection):
            self.box_selected_range, self.selected_indices = None, None
            # tuple => continuous range selection; list => discrete values
            # (NOTE(review): isinstance would be the more idiomatic check)
            if type(x_selection) == tuple:
                self.box_selected_range = {
                    self.x + "_min": x_selection[0],
                    self.x + "_max": x_selection[1],
                }
            elif type(x_selection) == list:
                self.selected_indices = (
                    dashboard_cls._cuxfilter_df.data[self.x]
                    .isin(x_selection)
                    .reset_index()
                )[[self.x]]
            if self.box_selected_range or self.selected_indices is not None:
                self.compute_query_dict(
                    dashboard_cls._query_str_dict,
                    dashboard_cls._query_local_variables_dict,
                )
                # reload all charts with new queried data (cudf.DataFrame only)
                dashboard_cls._reload_charts()

        return cb

    def get_dashboard_view(self):
        return self.chart.view()

    def compute_query_dict(self, query_str_dict, query_local_variables_dict):
        """
        Description:
        -------------------------------------------
        Input:
        query_dict = reference to dashboard.__cls__.query_dict
        -------------------------------------------
        Ouput:
        """
        if self.box_selected_range:
            # range selection: register a pandas-query string plus the
            # local min/max variables it references
            query_str_dict[
                self.name
            ] = f"@{self.x}_min<={self.x}<=@{self.x}_max"
            query_local_variables_dict.update(self.box_selected_range)
        else:
            if self.selected_indices is not None:
                # discrete selection: register the selected-index frame
                query_str_dict[self.name] = self.selected_indices
            else:
                query_str_dict.pop(self.name, None)
            query_local_variables_dict.pop(self.x + "_min", None)
            query_local_variables_dict.pop(self.x + "_max", None)

    def add_events(self, dashboard_cls):
        """
        Description: add events to the chart, for the filter function to
        facilitate interaction behavior,
        that updates the rest of the charts on the page
        -------------------------------------------
        Input:
        - dashboard_cls = current dashboard class reference
        """
        self.chart.add_box_select_callback(
            self.get_box_select_callback(dashboard_cls)
        )
        self.chart.add_reset_callback(self.get_reset_callback(dashboard_cls))
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core/aggregate/core_number_chart.py
|
from cuxfilter.charts.core import BaseChart
from cuxfilter.assets import cudf_utils
class BaseNumberChart(BaseChart):
    """
    Base class for single-number indicator charts; concrete subclasses
    implement rendering, theming and reloading.
    """

    stride = 1
    # widget is a chart type that can be rendered in a sidebar or main layout
    is_widget = True

    @property
    def is_datasize_indicator(self):
        return False

    @property
    def name(self):
        return f"{self.chart_type}_{self.title}"

    def __init__(
        self,
        expression=None,
        aggregate_fn="count",
        title="",
        format="{value}",
        default_color="black",
        colors=[],
        font_size="18pt",
        title_size="9.75pt",
        **library_specific_params,
    ):
        """
        Description:
        -------------------------------------------
        Input:
            expression: column expression the indicator aggregates
            aggregate_fn: aggregation applied to the expression
            title: display title (defaults to the expression)
            format: display format string for the value
            default_color / colors / font_size / title_size: styling
            **library_specific_params
        -------------------------------------------
        Ouput:
        """
        self.expression = expression
        self.title = title if title else expression
        self.aggregate_fn = aggregate_fn
        self.format = format
        self.default_color = default_color
        self.colors = colors
        self.font_size = font_size
        self.title_size = title_size
        self.library_specific_params = library_specific_params
        self.chart_type = "base_number_chart"

    def initiate_chart(self, dashboard_cls):
        """
        Description:
        -------------------------------------------
        Input:
        data: cudf DataFrame
        -------------------------------------------
        Ouput:
        """
        self.generate_chart(dashboard_cls._cuxfilter_df.data)

    def generate_chart(self, data):
        # interface stub, overridden by concrete indicator classes
        pass

    def view(self):
        return self.chart

    def apply_theme(self, theme):
        """
        apply thematic changes to the chart based on the theme
        """
        pass

    def reload_chart(self, data):
        """
        reload chart
        """
        pass

    def _compute_source(self, query, local_dict, indices):
        """
        Compute source dataframe based on the values query and indices.
        If both are not provided, return the original dataframe.
        """
        return cudf_utils.query_df(self.source, query, local_dict, indices)
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core/aggregate/core_aggregate_choropleth.py
|
from typing import Dict
import os
import numpy as np
import panel as pn
from ..core_chart import BaseChart
from ....assets.numba_kernels import calc_groupby
from ....assets import geo_json_mapper
from ....assets.cudf_utils import get_min_max
from ...constants import CUXF_NAN_COLOR
# suppress numpy divide-by-zero / invalid-value warnings raised during
# choropleth aggregation arithmetic
np.seterr(divide="ignore", invalid="ignore")
class BaseChoropleth(BaseChart):
reset_event = None
geo_mapper: Dict[str, str] = {}
use_data_tiles = True
source = None
@property
def name(self):
# overwrite BaseChart name function to allow unique choropleths on
# value x
if self.chart_type is not None:
return (
f"{self.x}_{self.aggregate_fn}_{self.chart_type}_{self.title}"
)
else:
return f"{self.x}_{self.aggregate_fn}_chart_{self.title}"
def __init__(
self,
x,
color_column,
elevation_column=None,
color_aggregate_fn="count",
color_factor=1,
elevation_aggregate_fn="sum",
elevation_factor=1,
add_interaction=True,
geoJSONSource=None,
geoJSONProperty=None,
geo_color_palette=None,
mapbox_api_key=os.getenv("MAPBOX_API_KEY"),
map_style=None,
tooltip=True,
tooltip_include_cols=[],
nan_color=CUXF_NAN_COLOR,
title="",
x_range=None,
y_range=None,
opacity=None,
layer_spec={}, # deck.gl layer spec
):
"""
Description:
-------------------------------------------
Input:
x
color_column,
elevation_column,
color_aggregate_fn,
color_factor,
elevation_aggregate_fn,
elevation_factor,
geoJSONSource
geoJSONProperty
add_interaction
geo_color_palette
nan_color
mapbox_api_key
map_style
**library_specific_params
-------------------------------------------
Ouput:
"""
self.x = x
self.color_column = color_column
self.color_aggregate_fn = color_aggregate_fn
self.color_factor = color_factor
self.elevation_column = elevation_column
self.aggregate_dict = {
self.color_column: self.color_aggregate_fn,
}
if self.elevation_column is not None:
self.elevation_aggregate_fn = elevation_aggregate_fn
self.elevation_factor = elevation_factor
self.aggregate_dict[
self.elevation_column
] = self.elevation_aggregate_fn
self.add_interaction = add_interaction
if geoJSONSource is None:
print("geoJSONSource is required for the choropleth map")
else:
self.geoJSONSource = geoJSONSource
self.geo_color_palette = geo_color_palette
self.geoJSONProperty = geoJSONProperty
if not (x_range and y_range):
# get default x_range and y_range from geoJSONSource
default_x_range, default_y_range = geo_json_mapper(
self.geoJSONSource, self.geoJSONProperty, projection=4326
)[1:]
x_range = x_range or default_x_range
y_range = y_range or default_y_range
self.x_range = x_range
self.y_range = y_range
self.stride = 1
self.mapbox_api_key = mapbox_api_key
self.map_style = map_style
self.tooltip = tooltip
self.tooltip_include_cols = tooltip_include_cols
self.nan_color = nan_color
self.title = title or f"{self.x}"
self.opacity = opacity
self.input_layer_spec = layer_spec
def initiate_chart(self, dashboard_cls):
"""
Description:
-------------------------------------------
Input:
data: cudf DataFrame
-------------------------------------------
Ouput:
"""
self.min_value, self.max_value = get_min_max(
dashboard_cls._cuxfilter_df.data, self.x
)
self.geo_mapper, x_range, y_range = geo_json_mapper(
self.geoJSONSource,
self.geoJSONProperty,
4326,
self.x,
dashboard_cls._cuxfilter_df.data[self.x].dtype,
)
self.calculate_source(dashboard_cls._cuxfilter_df.data)
self.generate_chart()
self.apply_mappers()
self.add_events(dashboard_cls)
def view(self, width=800, height=400):
return pn.WidgetBox(self.chart.pane, width=width, height=height)
def get_dashboard_view(self):
return pn.panel(self.chart.view(), sizing_mode="stretch_both")
def calculate_source(self, data):
"""
Description:
-------------------------------------------
Input:
-------------------------------------------
Ouput:
"""
self.format_source_data(
calc_groupby(self, data, agg=self.aggregate_dict)
)
def get_selection_callback(self, dashboard_cls):
"""
Description: generate callback for choropleth selection event
-------------------------------------------
Input:
-------------------------------------------
Ouput:
"""
def selection_callback(old, new):
self.compute_query_dict(dashboard_cls._query_str_dict)
if old != new and not new:
dashboard_cls._reload_charts()
else:
dashboard_cls._reload_charts(ignore_cols=[self.name])
return selection_callback
def compute_query_dict(self, query_str_dict):
"""
Description:
-------------------------------------------
Input:
query_str_dict = reference to dashboard.__cls__.query_str_dict
-------------------------------------------
Ouput:
"""
list_of_indices = self.get_selected_indices()
if len(list_of_indices) == 0 or list_of_indices == [""]:
query_str_dict.pop(self.name, None)
elif len(list_of_indices) == 1:
query_str_dict[self.name] = f"{self.x}=={list_of_indices[0]}"
else:
indices_string = ",".join(map(str, list_of_indices))
query_str_dict[self.name] = f"{self.x} in ({indices_string})"
def add_events(self, dashboard_cls):
"""
Description:
-------------------------------------------
Input:
-------------------------------------------
Ouput:
"""
if self.add_interaction:
self.add_selection_event(
self.get_selection_callback(dashboard_cls)
)
if self.reset_event is not None:
self.add_reset_event(dashboard_cls)
def add_reset_event(self, dashboard_cls):
"""
Description:
-------------------------------------------
Input:
-------------------------------------------
Ouput:
"""
def reset_callback(event):
dashboard_cls._query_str_dict.pop(self.name, None)
dashboard_cls._reload_charts()
# add callback to reset chart button
self.chart.on_event(self.reset_event, reset_callback)
def get_selected_indices(self):
"""
Description:
-------------------------------------------
Input:
-------------------------------------------
Ouput:
"""
print("function to be overridden by library specific extensions")
return []
def add_selection_event(self, callback):
"""
Description:
-------------------------------------------
Input:
-------------------------------------------
Ouput:
"""
print("function to be overridden by library specific extensions")
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core/aggregate/__init__.py
|
from .core_aggregate import BaseAggregateChart
from .core_aggregate_choropleth import BaseChoropleth
from .core_number_chart import BaseNumberChart
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core/non_aggregate/core_non_aggregate.py
|
from typing import Tuple
import cudf
import dask_cudf
import dask.dataframe as dd
import panel as pn
from .utils import point_in_polygon
from ..core_chart import BaseChart
class BaseNonAggregate(BaseChart):
    """
    Base class for non-aggregate charts (e.g. scatter, line) that render
    the raw dataframe and filter the dashboard through box/lasso
    spatial selections.
    .. note::
        If dataset size is greater than a few thousand points,
        scatter geos can crash the browser tabs, and is only recommended
        with datashader plugin, in which case an image is
        rendered instead of points on canvas
    """
    # library-specific reset event; set by subclasses when supported
    reset_event = None
    # (min, max) axis bounds; resolved from the data when left as None
    x_range: Tuple = None
    y_range: Tuple = None
    # result of the latest lasso selection (None when no lasso is active)
    selected_indices: cudf.Series = None
    # dict of {x/y}_min/_max values for the latest box selection
    box_selected_range = None
    aggregate_col = None
    use_data_tiles = False
    @property
    def name(self):
        # overwrite BaseChart name function to allow unique chart on value x
        chart_type = self.chart_type if self.chart_type else "chart"
        return (
            f"{self.x}_{self.y}"
            f"{'_'+self.aggregate_col if self.aggregate_col else ''}"
            f"{'_'+self.aggregate_fn if self.aggregate_fn else ''}"
            f"_{chart_type}_{self.title}"
        )
    def initiate_chart(self, dashboard_cls):
        """
        Resolve default x_range/y_range from the data when not supplied
        (materializing the lazy min/max for dask_cudf), then build the
        chart and register its interaction events.
        Input:
        dashboard_cls: dashboard holding the cuxfilter DataFrame
        """
        if self.x_range is None:
            self.x_range = (
                dashboard_cls._cuxfilter_df.data[self.x].min(),
                dashboard_cls._cuxfilter_df.data[self.x].max(),
            )
        if self.y_range is None:
            self.y_range = (
                dashboard_cls._cuxfilter_df.data[self.y].min(),
                dashboard_cls._cuxfilter_df.data[self.y].max(),
            )
        if isinstance(dashboard_cls._cuxfilter_df.data, dask_cudf.DataFrame):
            # dask min/max are lazy; compute the actual bounds eagerly
            self.x_range = dd.compute(*self.x_range)
            self.y_range = dd.compute(*self.y_range)
        self.calculate_source(dashboard_cls._cuxfilter_df.data)
        self.generate_chart()
        self.add_events(dashboard_cls)
    def view(self, width=800, height=400):
        """Return the chart as a fixed-size panel object."""
        return pn.panel(
            self.chart.view().opts(
                width=width, height=height, responsive=False
            )
        )
    def get_dashboard_view(self):
        """Return the chart as a panel that stretches to fill the layout."""
        return pn.panel(self.chart.view(), sizing_mode="stretch_both")
    def calculate_source(self, data):
        """
        Forward ``data`` (cudf.DataFrame) to ``format_source_data``.
        """
        self.format_source_data(data)
    def get_box_select_callback(self, dashboard_cls):
        """Build the callback invoked when a box selection is drawn."""
        def cb(bounds, x_selection, y_selection):
            self.x_range = self._xaxis_dt_transform(x_selection)
            self.y_range = self._yaxis_dt_transform(y_selection)
            # set lasso selected indices to None
            self.selected_indices = None
            self.box_selected_range = {
                self.x + "_min": self.x_range[0],
                self.x + "_max": self.x_range[1],
                self.y + "_min": self.y_range[0],
                self.y + "_max": self.y_range[1],
            }
            self.compute_query_dict(
                dashboard_cls._query_str_dict,
                dashboard_cls._query_local_variables_dict,
            )
            # reload all charts with new queried data (cudf.DataFrame only)
            dashboard_cls._reload_charts()
        return cb
    def get_lasso_select_callback(self, dashboard_cls):
        """Build the callback invoked when a lasso polygon is drawn."""
        def cb(geometry):
            self.source = dashboard_cls._cuxfilter_df.data
            # set box selected ranges to None
            self.x_range, self.y_range, self.box_selected_range = (
                None,
                None,
                None,
            )
            args = (self.x, self.y, geometry)
            if isinstance(self.source, dask_cudf.DataFrame):
                # run point_in_polygon per partition and persist the result
                self.selected_indices = (
                    self.source.assign(
                        **{
                            self.x: self._to_xaxis_type(self.source[self.x]),
                            self.y: self._to_yaxis_type(self.source[self.y]),
                        }
                    )
                    .map_partitions(
                        point_in_polygon,
                        *args,
                    )
                    .persist()
                )
            else:
                self.selected_indices = point_in_polygon(self.source, *args)
            self.compute_query_dict(
                dashboard_cls._query_str_dict,
                dashboard_cls._query_local_variables_dict,
            )
            # reload all charts with new queried data (cudf.DataFrame only)
            dashboard_cls._reload_charts()
        return cb
    def compute_query_dict(self, query_str_dict, query_local_variables_dict):
        """
        Record this chart's active filter in the shared query dicts.
        Input:
        query_str_dict = reference to dashboard.__cls__.query_str_dict
        query_local_variables_dict = values referenced by the query string
        """
        if self.box_selected_range:
            query_str_dict[self.name] = (
                f"@{self.x}_min<={self.x}<=@{self.x}_max"
                + f" and @{self.y}_min<={self.y}<=@{self.y}_max"
            )
            query_local_variables_dict.update(self.box_selected_range)
        else:
            if self.selected_indices is not None:
                # lasso selection stores the selection mask itself
                query_str_dict[self.name] = self.selected_indices
            else:
                query_str_dict.pop(self.name, None)
            for key in [
                self.x + "_min",
                self.x + "_max",
                self.y + "_min",
                self.y + "_max",
            ]:
                query_local_variables_dict.pop(key, None)
    def add_events(self, dashboard_cls):
        """Register lasso/box selection callbacks and the reset event."""
        if self.add_interaction:
            self.chart.add_lasso_select_callback(
                self.get_lasso_select_callback(dashboard_cls)
            )
            self.chart.add_box_select_callback(
                self.get_box_select_callback(dashboard_cls)
            )
        if self.reset_event is not None:
            self.add_reset_event(dashboard_cls)
    def add_reset_event(self, dashboard_cls):
        """Attach a callback that clears selections and reloads charts."""
        # def reset_callback():
        def reset_callback(resetting):
            self.selected_indices = None
            self.box_selected_range = None
            self.chart.reset_all_selections()
            dashboard_cls._query_str_dict.pop(self.name, None)
            dashboard_cls._reload_charts()
        # add callback to reset chart button
        self.chart.add_reset_event(reset_callback)
    def add_selection_geometry_event(self, callback):
        """Stub; overridden by library specific extensions."""
        # ('function to be overridden by library specific extensions')
    def reset_chart_geometry_ranges(self):
        """Stub; overridden by library specific extensions."""
        # ('function to be overridden by library specific extensions')
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core/non_aggregate/core_line.py
|
from .core_non_aggregate import BaseNonAggregate
class BaseLine(BaseNonAggregate):
    """Base implementation for line charts rendered from raw data."""

    stride = 0.0
    reset_event = None
    filter_widget = None
    x_axis_tick_formatter = None
    default_color = "#8735fb"

    @property
    def color_set(self):
        """True when a color was explicitly supplied by the user."""
        return self._color_input is not None

    @property
    def color(self):
        """User-supplied color if set, otherwise the default color."""
        return self._color_input if self.color_set else self.default_color

    @property
    def name(self):
        # unique identifier so multiple charts on the same columns coexist
        kind = self.chart_type if self.chart_type else "chart"
        return f"{self.x}_{self.y}_{kind}_{self.title}"

    def __init__(
        self,
        x,
        y,
        data_points=100,
        add_interaction=True,
        pixel_shade_type="linear",
        color=None,
        step_size=None,
        step_size_type=int,
        title="",
        timeout=100,
        x_axis_tick_formatter=None,
        y_axis_tick_formatter=None,
        unselected_alpha=0.2,
        **library_specific_params,
    ):
        """
        Store chart configuration; no data is touched until
        ``initiate_chart`` is called by the dashboard.

        Input:
        x, y = column names plotted on the respective axes
        data_points = number of data points for the chart
        add_interaction = enable selection interactions
        pixel_shade_type = datashader shade "how" parameter
        color = explicit line color; falls back to default_color
        step_size, step_size_type = stride configuration
        title = chart title
        timeout = event-throttle timeout in milliseconds
        x_axis_tick_formatter, y_axis_tick_formatter = tick formatters
        unselected_alpha = opacity of unselected points
        **library_specific_params = passed through to the plot library
        """
        # axis configuration
        self.x = x
        self.y = y
        self.data_points = data_points
        self.x_axis_tick_formatter = x_axis_tick_formatter
        self.y_axis_tick_formatter = y_axis_tick_formatter
        # interaction & styling
        self.add_interaction = add_interaction
        self._color_input = color
        self.pixel_shade_type = pixel_shade_type
        self.unselected_alpha = unselected_alpha
        # stride configuration
        self.stride = step_size
        self.stride_type = step_size_type
        # misc chart settings
        self.title = title
        self.timeout = timeout
        self.library_specific_params = library_specific_params
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core/non_aggregate/core_graph.py
|
from typing import Tuple
import cudf
import dask.dataframe as dd
import dask_cudf
import panel as pn
from .utils import point_in_polygon
from ..core_chart import BaseChart
from ...constants import CUXF_DEFAULT_COLOR_PALETTE
class BaseGraph(BaseChart):
    """
    Base class for node/edge graph charts: resolves axis ranges, supports
    box/lasso node selection, and can expand a selection to neighboring
    nodes and edges via ``query_graph``.
    """
    # library-specific reset event; set by subclasses when supported
    reset_event = None
    # (min, max) node_x/node_y bounds; resolved from data when left as None
    x_range: Tuple = None
    y_range: Tuple = None
    # result of the latest lasso selection (None when no lasso is active)
    selected_indices: cudf.Series = None
    # dict of node_x/node_y min/max values for the latest box selection
    box_selected_range = None
    use_data_tiles = False
    default_palette = CUXF_DEFAULT_COLOR_PALETTE
    @property
    def colors_set(self):
        """True when a node color palette was explicitly supplied."""
        return self._node_color_palette_input is not None
    @property
    def name(self):
        # overwrite BaseChart name function to allow unique chart on value x
        chart_type = self.chart_type if self.chart_type else "chart"
        return (
            f"{self.node_x}_{self.node_y}_{self.node_id}_"
            f"{self.node_aggregate_fn}_{chart_type}_{self.title}"
        )
    @property
    def node_color_palette(self):
        """User palette (as a list) if set, else the default palette."""
        if self.colors_set:
            return list(self._node_color_palette_input)
        return self.default_palette
    @property
    def edge_columns(self):
        """Edge dataframe columns, including the aggregate column if set."""
        if self.edge_aggregate_col:
            return [
                self.edge_source,
                self.edge_target,
                self.edge_aggregate_col,
            ]
        return [self.edge_source, self.edge_target]
    @property
    def node_columns(self):
        """Node dataframe columns, including the aggregate column if set."""
        if self.node_aggregate_col:
            return [
                self.node_id,
                self.node_x,
                self.node_y,
                self.node_aggregate_col,
            ]
        return [self.node_id, self.node_x, self.node_y]
    def __init__(
        self,
        node_x="x",
        node_y="y",
        node_id="vertex",
        edge_source="source",
        edge_target="target",
        x_range=None,
        y_range=None,
        add_interaction=True,
        node_aggregate_col=None,
        edge_aggregate_col=None,
        node_aggregate_fn="count",
        edge_aggregate_fn="count",
        node_color_palette=None,
        edge_color_palette=["#000000"],
        node_point_size=1,
        node_point_shape="circle",
        node_pixel_shade_type="eq_hist",
        node_pixel_density=0.5,
        node_pixel_spread="dynspread",
        edge_render_type="direct",
        edge_transparency=0,
        curve_params=dict(strokeWidth=1, curve_total_steps=100),
        tile_provider="CARTODBPOSITRON",
        title="",
        timeout=100,
        legend=True,
        legend_position="center",
        x_axis_tick_formatter=None,
        y_axis_tick_formatter=None,
        unselected_alpha=0.2,
        **library_specific_params,
    ):
        """
        Store graph-chart configuration (node/edge column names, palettes,
        datashader rendering options); no data is touched until
        ``initiate_chart`` is called by the dashboard.
        Output: None — configuration is stored on self.
        """
        # self.x aliases node_x so BaseChart machinery keyed on x works
        self.x = node_x
        self.node_x = node_x
        self.node_y = node_y
        self.node_id = node_id
        self.edge_source = edge_source
        self.edge_target = edge_target
        self.x_range = x_range
        self.y_range = y_range
        self.add_interaction = add_interaction
        # default node aggregate column is the node id column
        self.node_aggregate_col = node_aggregate_col or node_id
        self.edge_aggregate_col = edge_aggregate_col
        self.node_aggregate_fn = node_aggregate_fn
        self.edge_aggregate_fn = edge_aggregate_fn
        self._node_color_palette_input = node_color_palette
        self.edge_color_palette = list(edge_color_palette)
        self.node_point_size = node_point_size
        self.node_point_shape = node_point_shape
        self.node_pixel_shade_type = node_pixel_shade_type
        self.node_pixel_density = node_pixel_density
        self.node_pixel_spread = node_pixel_spread
        self.edge_render_type = edge_render_type
        self.edge_transparency = edge_transparency
        self.curve_params = curve_params
        self.tile_provider = tile_provider
        self.title = title
        self.timeout = timeout
        self.legend = legend
        self.legend_position = legend_position
        self.x_axis_tick_formatter = x_axis_tick_formatter
        self.y_axis_tick_formatter = y_axis_tick_formatter
        self.unselected_alpha = unselected_alpha
        self.library_specific_params = library_specific_params
    @property
    def x_dtype(self):
        """dtype of the node_x column, or None before data is attached."""
        if isinstance(self.nodes, (cudf.DataFrame, dask_cudf.DataFrame)):
            return self.nodes[self.node_x].dtype
        return None
    @property
    def y_dtype(self):
        """dtype of the node_y column, or None before data is attached."""
        if isinstance(self.nodes, (cudf.DataFrame, dask_cudf.DataFrame)):
            return self.nodes[self.node_y].dtype
        return None
    @property
    def df_type(self):
        """Common dataframe type of nodes/edges; raises on a mismatch."""
        if type(self.nodes) == type(self.edges): # noqa: E721
            return type(self.nodes)
        raise TypeError("nodes and edges must be of the same type")
    def initiate_chart(self, dashboard_cls):
        """
        Attach node data, resolve default x/y ranges (computing eagerly
        for dask_cudf), then build the chart and register events.
        Input:
        data: cudf DataFrame
        """
        self.nodes = dashboard_cls._cuxfilter_df.data
        if dashboard_cls._cuxfilter_df.edges is None:
            raise ValueError("Edges dataframe not provided")
        if self.x_range is None:
            self.x_range = (
                dashboard_cls._cuxfilter_df.data[self.node_x].min(),
                dashboard_cls._cuxfilter_df.data[self.node_x].max(),
            )
        if self.y_range is None:
            self.y_range = (
                dashboard_cls._cuxfilter_df.data[self.node_y].min(),
                dashboard_cls._cuxfilter_df.data[self.node_y].max(),
            )
        if isinstance(
            dashboard_cls._cuxfilter_df.data, dask_cudf.core.DataFrame
        ):
            # dask min/max are lazy; compute the actual bounds eagerly
            self.x_range = dd.compute(*self.x_range)
            self.y_range = dd.compute(*self.y_range)
        self.calculate_source(dashboard_cls._cuxfilter_df)
        self.generate_chart()
        self.add_events(dashboard_cls)
    def view(self, width=800, height=400):
        """Return the chart as a fixed-size panel object."""
        return pn.panel(
            self.chart.view().opts(
                width=width, height=height, responsive=False
            )
        )
    def get_dashboard_view(self):
        """Return the chart as a panel that stretches to fill the layout."""
        return pn.panel(self.chart.view(), sizing_mode="stretch_both")
    def calculate_source(self, cuxfilter_df):
        """
        Forward the (nodes, edges) cuxfilter.DataFrame to
        ``format_source_data``.
        Input:
        _cuxfilter_df = cuxfilter.DataFrame (nodes,edges)
        """
        self.format_source_data(cuxfilter_df)
    @property
    def concat(self):
        """Concat function matching the dataframe type (dask or cudf)."""
        if self.df_type == dask_cudf.DataFrame:
            return dask_cudf.concat
        return cudf.concat
    def query_graph(self, node_ids, nodes, edges):
        """
        Expand selected ``node_ids`` to the edges touching them, and to
        the nodes on either end of those edges (neighbor inspection).
        """
        # edges whose source OR target is one of the selected nodes
        edges_ = self.concat(
            [
                node_ids.merge(
                    edges, left_on=self.node_id, right_on=self.edge_source
                ),
                node_ids.merge(
                    edges, left_on=self.node_id, right_on=self.edge_target
                ),
            ]
        )[self.edge_columns]
        # nodes appearing at either end of the matched edges
        nodes_ = self.concat(
            [
                nodes.merge(
                    edges_,
                    left_on=self.node_id,
                    right_on=self.edge_source,
                ),
                nodes.merge(
                    edges_,
                    left_on=self.node_id,
                    right_on=self.edge_target,
                ),
            ]
        )[self.node_columns].drop_duplicates()
        return nodes_, edges_
    def get_box_select_callback(self, dashboard_cls):
        """Build the callback invoked when a box selection is drawn."""
        def cb(bounds, x_selection, y_selection):
            self.nodes = dashboard_cls._cuxfilter_df.data
            self.x_range = self._xaxis_dt_transform(x_selection)
            self.y_range = self._yaxis_dt_transform(y_selection)
            # set lasso selected indices to None
            self.selected_indices = None
            self.box_selected_range = {
                self.node_x + "_min": self.x_range[0],
                self.node_x + "_max": self.x_range[1],
                self.node_y + "_min": self.y_range[0],
                self.node_y + "_max": self.y_range[1],
            }
            edges = None
            self.compute_query_dict(
                dashboard_cls._query_str_dict,
                dashboard_cls._query_local_variables_dict,
            )
            nodes = dashboard_cls._query(dashboard_cls._generate_query_str())
            if self.inspect_neighbors._active:
                nodes, edges = self.query_graph(nodes, self.nodes, self.edges)
            # reload all charts with new queried data (cudf.DataFrame only)
            dashboard_cls._reload_charts(data=nodes, ignore_cols=[self.name])
            # reload graph chart separately as it has an extra edges argument
            self.reload_chart(data=nodes, edges=edges)
            del nodes, edges
        return cb
    def get_lasso_select_callback(self, dashboard_cls):
        """Build the callback invoked when a lasso polygon is drawn."""
        def cb(geometry):
            self.nodes = dashboard_cls._cuxfilter_df.data
            # set box selected ranges to None
            self.x_range, self.y_range, self.box_selected_range = (
                None,
                None,
                None,
            )
            args = (self.node_x, self.node_y, geometry)
            if isinstance(self.nodes, dask_cudf.DataFrame):
                # run point_in_polygon per partition and persist the result
                self.selected_indices = (
                    self.nodes.assign(
                        **{
                            self.node_x: self._to_xaxis_type(
                                self.nodes[self.node_x]
                            ),
                            self.node_y: self._to_yaxis_type(
                                self.nodes[self.node_y]
                            ),
                        }
                    )
                    .map_partitions(point_in_polygon, *args)
                    .persist()
                )
            else:
                self.selected_indices = point_in_polygon(self.nodes, *args)
            self.compute_query_dict(
                dashboard_cls._query_str_dict,
                dashboard_cls._query_local_variables_dict,
            )
            nodes = dashboard_cls._query(dashboard_cls._generate_query_str())
            edges = None
            if self.inspect_neighbors._active:
                # node_ids = nodes[self.node_id]
                nodes, edges = self.query_graph(nodes, self.nodes, self.edges)
            # reload all charts with new queried data (cudf.DataFrame only)
            dashboard_cls._reload_charts(data=nodes, ignore_cols=[self.name])
            # reload graph chart separately as it has an extra edges argument
            self.reload_chart(data=nodes, edges=edges)
            del nodes, edges
        return cb
    def compute_query_dict(self, query_str_dict, query_local_variables_dict):
        """
        Record this chart's active filter in the shared query dicts.
        Input:
        query_str_dict = reference to dashboard.__cls__.query_str_dict
        query_local_variables_dict = values referenced by the query string
        """
        if self.box_selected_range:
            query_str_dict[self.name] = (
                f"@{self.node_x}_min<={self.node_x}<=@{self.node_x}_max"
                + f" and @{self.node_y}_min<={self.node_y}<=@{self.node_y}_max"
            )
            temp_local_dict = {
                self.node_x + "_min": self.x_range[0],
                self.node_x + "_max": self.x_range[1],
                self.node_y + "_min": self.y_range[0],
                self.node_y + "_max": self.y_range[1],
            }
            query_local_variables_dict.update(temp_local_dict)
        else:
            if self.selected_indices is not None:
                # lasso selection stores the selection mask itself
                query_str_dict[self.name] = self.selected_indices
            else:
                query_str_dict.pop(self.name, None)
            for key in [
                self.node_x + "_min",
                self.node_x + "_max",
                self.node_y + "_min",
                self.node_y + "_max",
            ]:
                query_local_variables_dict.pop(key, None)
    def add_events(self, dashboard_cls):
        """Register lasso/box selection callbacks and the reset event."""
        if self.add_interaction:
            self.chart.add_lasso_select_callback(
                self.get_lasso_select_callback(dashboard_cls)
            )
            self.chart.add_box_select_callback(
                self.get_box_select_callback(dashboard_cls)
            )
        if self.reset_event is not None:
            self.add_reset_event(dashboard_cls)
    def add_reset_event(self, dashboard_cls):
        """Attach a callback that clears selections and reloads charts."""
        def reset_callback(resetting):
            self.selected_indices = None
            self.box_selected_range = None
            self.chart.reset_all_selections()
            dashboard_cls._query_str_dict.pop(self.name, None)
            dashboard_cls._reload_charts()
        # add callback to reset chart button
        self.chart.add_reset_event(reset_callback)
    def add_selection_geometry_event(self, callback):
        """Stub; overridden by library specific extensions."""
        # ('function to be overridden by library specific extensions')
    def reset_chart_geometry_ranges(self):
        """Stub; overridden by library specific extensions."""
        # ('function to be overridden by library specific extensions')
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core/non_aggregate/core_scatter.py
|
from typing import Tuple
from .core_non_aggregate import BaseNonAggregate
from ...constants import CUXF_DEFAULT_COLOR_PALETTE
class BaseScatter(BaseNonAggregate):
    """Base implementation for scatter charts rendered from raw data."""

    reset_event = None
    x_range: Tuple = None
    y_range: Tuple = None
    aggregate_col = None
    default_palette = CUXF_DEFAULT_COLOR_PALETTE

    @property
    def colors_set(self):
        """True when a color palette was explicitly supplied."""
        return self._color_palette_input is not None

    @property
    def color_palette(self):
        """User palette (as a list) if set, else the default palette."""
        if not self.colors_set:
            return self.default_palette
        return list(self._color_palette_input)

    def __init__(
        self,
        x,
        y,
        x_range=None,
        y_range=None,
        add_interaction=True,
        color_palette=None,
        aggregate_col=None,
        aggregate_fn="count",
        point_size=1,
        point_shape="circle",
        pixel_shade_type="eq_hist",
        pixel_density=0.5,
        pixel_spread="dynspread",
        tile_provider=None,
        title="",
        timeout=100,
        legend=True,
        legend_position="center",
        unselected_alpha=0.2,
        **library_specific_params,
    ):
        """
        Store scatter-chart configuration; no data is touched until
        ``initiate_chart`` is called by the dashboard.

        Input:
        x, y = column names plotted on the respective axes
        x_range, y_range = optional (min, max) axis bounds
        add_interaction = enable selection interactions
        color_palette = explicit palette; falls back to default_palette
        aggregate_col = column aggregated per pixel (defaults to y)
        aggregate_fn = datashader aggregation function name
        point_size, point_shape = point rendering options
        pixel_shade_type, pixel_density, pixel_spread = shading options
        tile_provider = optional background map tiles
        title, timeout, legend, legend_position = chart settings
        unselected_alpha = opacity of unselected points
        **library_specific_params = passed through to the plot library
        """
        # axis configuration
        self.x = x
        self.y = y
        self.x_range = x_range
        self.y_range = y_range
        # aggregation: default aggregate column is the y column
        self.aggregate_col = aggregate_col or y
        self.aggregate_fn = aggregate_fn
        # interaction & rendering
        self.add_interaction = add_interaction
        self._color_palette_input = color_palette
        self.point_size = point_size
        self.point_shape = point_shape
        self.pixel_shade_type = pixel_shade_type
        self.pixel_density = pixel_density
        self.pixel_spread = pixel_spread
        self.tile_provider = tile_provider
        self.unselected_alpha = unselected_alpha
        # legend & misc settings
        self.legend = legend
        self.legend_position = legend_position
        self.title = title
        self.timeout = timeout
        self.library_specific_params = library_specific_params
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core/non_aggregate/__init__.py
|
from .core_scatter import BaseScatter
from .core_line import BaseLine
from .core_stacked_line import BaseStackedLine
from .core_graph import BaseGraph
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core/non_aggregate/utils.py
|
import cuspatial
from shapely.geometry import Polygon
import geopandas as gpd
def point_in_polygon(df, x, y, polygons):
    """
    Test which rows of ``df`` fall inside the lasso-selection polygon.

    Parameters
    ----------
    df: cudf DataFrame (or dask_cudf partition) with columns ``x``, ``y``
    x, y: column names holding the point coordinates
    polygons: sequence of (x, y) vertices describing the selection polygon

    Returns
    -------
    The result of ``cuspatial.point_in_polygon`` — a boolean column named
    "selection" marking points inside the polygon.
    """
    # cuspatial expects interleaved x/y coordinates as float64
    interleaved = df[[x, y]].interleave_columns().astype("float64")
    point_series = cuspatial.GeoSeries.from_points_xy(interleaved)
    selection_poly = cuspatial.GeoSeries(
        gpd.GeoSeries(Polygon(polygons)), index=["selection"]
    )
    return cuspatial.point_in_polygon(point_series, selection_poly)
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/core/non_aggregate/core_stacked_line.py
|
import cudf
import dask_cudf
from typing import Tuple
import panel as pn
from ..core_chart import BaseChart
class BaseStackedLine(BaseChart):
    """
    Base class for stacked-line charts drawn over multiple y columns
    against a single x column, with box selection on the x axis.
    .. note::
        If dataset size is greater than a few thousand points,
        scatter geos can crash the browser tabs, and is only recommended
        with datashader plugin, in which case an image is
        rendered instead of points on canvas
    """
    reset_event = None
    # (min, max) axis bounds; resolved from the data when left as None
    x_range: Tuple = None
    y_range: Tuple = None
    use_data_tiles = False
    # list of y column names; validated and set in __init__
    y: list = []
    # class-level placeholder; rebound by the ``colors`` property below
    colors: list = []
    default_colors = ["#8735fb"]
    # dict of x_min/x_max values for the latest box selection
    box_selected_range = None
    @property
    def y_dtype(self):
        """
        overwriting the y_dtype property from BaseChart for stackedLines where
        self.y is a list of columns; returns the dtype of the first column
        """
        if isinstance(self.source, (cudf.DataFrame, dask_cudf.DataFrame)):
            return self.source[self.y[0]].dtype
        return None
    @property
    def name(self):
        # overwrite BaseChart name function to allow unique chart on value x
        chart_type = self.chart_type if self.chart_type else "chart"
        return (
            f"{self.x}_{'_'.join([str(_i) for _i in self.y])}_{chart_type}"
            f"_{self.title}"
        )
    @property
    def colors_set(self):
        """True when line colors were explicitly supplied."""
        return self._colors_input != []
    @property
    def colors(self):
        """User colors (as a list) if set, else defaults for every line."""
        if self.colors_set:
            return list(self._colors_input)
        return self.default_colors * len(self.y)
    def __init__(
        self,
        x,
        y=[],
        data_points=100,
        add_interaction=True,
        colors=[],
        step_size=None,
        step_size_type=int,
        title="",
        timeout=100,
        legend=True,
        legend_position="center",
        x_axis_tick_formatter=None,
        y_axis_tick_formatter=None,
        unselected_alpha=0.2,
        **library_specific_params,
    ):
        """
        Validate and store chart configuration; no data is touched until
        ``initiate_chart`` is called by the dashboard.
        Input:
        x = column name for the x axis
        y = non-empty list of y column names (one line per column)
        colors = list of colors or dict of column->color mappings
        (remaining parameters are stored as-is for chart generation)
        Raises TypeError/ValueError for invalid ``y`` or ``colors``.
        """
        self.x = x
        if not isinstance(y, list):
            raise TypeError("y must be a list of column names")
        if len(y) == 0:
            raise ValueError("y must not be empty")
        self.y = y
        self.data_points = data_points
        self.add_interaction = add_interaction
        self.stride = step_size
        if not isinstance(colors, (list, dict)):
            raise TypeError(
                "colors must be either list of colors or"
                + "dictionary of column to color mappings"
            )
        self._colors_input = colors
        self.stride_type = step_size_type
        self.title = title
        self.timeout = timeout
        self.legend = legend
        self.legend_position = legend_position
        self.x_axis_tick_formatter = x_axis_tick_formatter
        self.y_axis_tick_formatter = y_axis_tick_formatter
        self.unselected_alpha = unselected_alpha
        self.library_specific_params = library_specific_params
    def initiate_chart(self, dashboard_cls):
        """
        Validate that all y columns share a dtype, resolve default x/y
        ranges, then build the chart and register events.
        Input:
        data: cudf DataFrame
        """
        for _y in self.y:
            if self.y_dtype != dashboard_cls._cuxfilter_df.data[_y].dtype:
                raise TypeError("All y columns should be of same type")
        if self.x_range is None:
            self.x_range = (
                dashboard_cls._cuxfilter_df.data[self.x].min(),
                dashboard_cls._cuxfilter_df.data[self.x].max(),
            )
        if self.y_range is None:
            # cudf_df[['a','b','c']].min().min() gives min value
            # between all values in columns a,b and c
            self.y_range = (
                dashboard_cls._cuxfilter_df.data[self.y].min().min(),
                dashboard_cls._cuxfilter_df.data[self.y].max().max(),
            )
        self.calculate_source(dashboard_cls._cuxfilter_df.data)
        self.generate_chart()
        self.add_events(dashboard_cls)
    def view(self, width=800, height=400):
        """Return the chart as a fixed-size panel object."""
        return pn.panel(
            self.chart.view().opts(
                width=width, height=height, responsive=False
            )
        )
    def get_dashboard_view(self):
        """Return the chart as a panel that stretches to fill the layout."""
        return pn.panel(self.chart.view(), sizing_mode="stretch_both")
    def calculate_source(self, data):
        """
        Forward ``data`` (cudf.DataFrame) to ``format_source_data``.
        """
        self.format_source_data(data)
    def get_box_select_callback(self, dashboard_cls):
        """Build the callback invoked when an x-axis box selection is drawn."""
        def cb(bounds, x_selection, y_selection):
            self.x_range = self._xaxis_dt_transform(x_selection)
            self.box_selected_range = {
                self.x + "_min": self.x_range[0],
                self.x + "_max": self.x_range[1],
            }
            self.compute_query_dict(
                dashboard_cls._query_str_dict,
                dashboard_cls._query_local_variables_dict,
            )
            # reload all charts with new queried data (cudf.DataFrame only)
            dashboard_cls._reload_charts()
        return cb
    def compute_query_dict(self, query_str_dict, query_local_variables_dict):
        """
        Record this chart's active x-range filter in the shared query dicts.
        Input:
        query_str_dict = reference to dashboard.__cls__.query_str_dict
        query_local_variables_dict = values referenced by the query string
        """
        if self.box_selected_range:
            query_str_dict[
                self.name
            ] = f"@{self.x}_min<={self.x}<=@{self.x}_max"
            temp_local_dict = {
                self.x + "_min": self.x_range[0],
                self.x + "_max": self.x_range[1],
            }
            query_local_variables_dict.update(temp_local_dict)
        else:
            query_str_dict.pop(self.name, None)
            for key in [self.x + "_min", self.x + "_max"]:
                query_local_variables_dict.pop(key, None)
    def add_events(self, dashboard_cls):
        """Register the box-selection callback and the reset event."""
        if self.add_interaction:
            self.chart.add_box_select_callback(
                self.get_box_select_callback(dashboard_cls)
            )
        if self.reset_event is not None:
            self.add_reset_event(dashboard_cls)
    def add_reset_event(self, dashboard_cls):
        """Attach a callback that clears selections and reloads charts."""
        def reset_callback(resetting):
            self.box_selected_range = None
            self.chart.reset_all_selections()
            dashboard_cls._query_str_dict.pop(self.name, None)
            dashboard_cls._reload_charts()
        # add callback to reset chart button
        self.chart.add_reset_event(reset_callback)
    def add_selection_geometry_event(self, callback):
        """Stub; overridden by library specific extensions."""
        # ('function to be overridden by library specific extensions')
    def reset_chart_geometry_ranges(self):
        """Stub; overridden by library specific extensions."""
        # ('function to be overridden by library specific extensions')
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/datashader/__init__.py
|
from .datashader import (
scatter,
line,
heatmap,
stacked_lines,
graph,
)
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/datashader/datashader.py
|
from . import plots
def scatter(
    x,
    y,
    x_range=None,
    y_range=None,
    add_interaction=True,
    color_palette=None,
    aggregate_col=None,
    aggregate_fn="count",
    point_size=15,
    point_shape="circle",
    pixel_shade_type="eq_hist",
    pixel_density=0.5,
    pixel_spread="dynspread",
    tile_provider=None,
    title="",
    timeout=100,
    legend=True,
    legend_position="top_right",
    unselected_alpha=0.2,
):
    """
    Parameters
    ----------
    x: str
        x-axis column name from the gpu dataframe
    y: str, default None
        y-axis column name from the gpu dataframe
    x_range: tuple, default(gpu_dataframe[x].min(), gpu_dataframe[x].max())
        (min, max) x-dimensions of the geo-scatter plot to be displayed
    y_range: tuple, default(gpu_dataframe[y].min(), gpu_dataframe[y].max())
        (min, max) y-dimensions of the geo-scatter plot to be displayed
    add_interaction: {True, False}, default True
    color_palette: bokeh.palettes or list/tuple of hex_color_codes,
        or list/tuple of color names, default bokeh.palettes.Viridis10
    aggregate_col: str, default None
        column from the gpu dataframe on which the aggregate_fn will be run on,
        if None, aggregate_fn is run on y-column
    aggregate_fn: {'count', 'mean', 'max', 'min'}, default 'count'
    point_size: int, default 15
        Point size in the scatter plot.
    point_shape: str, default 'circle'
        Available options: circle, square, rect_vertical, rect_horizontal.
    pixel_shade_type: str, default 'eq_hist'
        The "how" parameter in datashader.transfer_functions.shade()
        function.
        Available options: eq_hist, linear, log, cbrt
    pixel_density: float, default 0.5
        A tuning parameter in [0, 1], with higher values giving more dense
        scatter plot.
    pixel_spread: str, default 'dynspread'
        dynspread: Spread pixels in an image dynamically based on the image
        density.
        spread: Spread pixels in an image.
    tile_provider: str, default None
        Underlying map type.See
        https://holoviews.org/reference/elements/bokeh/Tiles.html
    title: str,
        chart title
    timeout: int (milliseconds), default 100
        Determines the timeout after which the callback will
        process new events without the previous one having
        reported completion. Increase for very long running
        callbacks and if zooming feels laggy.
    legend: bool, default True
        Adds Bokeh.models.LinearColorMapper based legend if True,
        Note: legend currently only works with pixel_shade_type='linear'/'log'
    legend_position: str, default top_right
        position of legend on the chart.
        Valid places are: right, left, bottom, top, top_right, top_left,
        bottom_left, bottom_right
    unselected_alpha: float [0, 1], default 0.2
        if True, displays unselected data in the same color_palette
        but transparent(alpha=0.2)

    Returns
    -------
    A cudashader scatter plot of type:
    cuxfilter.charts.datashader.custom_extensions.InteractiveDatashaderPoints
    """
    # thin factory: all heavy lifting happens in plots.Scatter
    plot = plots.Scatter(
        x,
        y,
        x_range,
        y_range,
        add_interaction,
        color_palette,
        aggregate_col,
        aggregate_fn,
        point_size,
        point_shape,
        pixel_shade_type,
        pixel_density,
        pixel_spread,
        tile_provider=tile_provider,
        title=title,
        timeout=timeout,
        legend=legend,
        legend_position=legend_position,
        unselected_alpha=unselected_alpha,
    )
    plot.chart_type = "scatter"
    return plot
def graph(
    node_x="x",
    node_y="y",
    node_id="vertex",
    edge_source="source",
    edge_target="target",
    x_range=None,
    y_range=None,
    add_interaction=True,
    node_aggregate_col=None,
    edge_aggregate_col=None,
    node_aggregate_fn="count",
    edge_aggregate_fn="count",
    node_color_palette=None,
    edge_color_palette=["#000000"],
    node_point_size=15,
    node_point_shape="circle",
    node_pixel_shade_type="eq_hist",
    node_pixel_density=0.8,
    node_pixel_spread="dynspread",
    edge_render_type="direct",
    edge_transparency=0,
    curve_params=dict(strokeWidth=1, curve_total_steps=100),
    tile_provider=None,
    title="",
    timeout=100,
    legend=True,
    legend_position="top_right",
    unselected_alpha=0.2,
):
    """
    Parameters
    ----------
    node_x: str, default "x"
        x-coordinate column name for the nodes cuDF dataframe
    node_y: str, default "y"
        y-coordinate column name for the nodes cuDF dataframe
    node_id: str, default "vertex"
        node_id/label column name for the nodes cuDF dataframe
    edge_source: str, default "source"
        edge_source column name for the edges cuDF dataframe
    edge_target: str, default "target"
        edge_target column name for the edges cuDF dataframe
    x_range: tuple, default(nodes_gpu_dataframe[x].min(),
        nodes_gpu_dataframe[x].max())
        (min, max) x-dimensions of the geo-scatter plot to be displayed
    y_range: tuple, default(nodes_gpu_dataframe[y].min(),
        nodes_gpu_dataframe[y].max())
        (min, max) y-dimensions of the geo-scatter plot to be displayed
    add_interaction: {True, False}, default True
    node_aggregate_col: str, default None
        column from the nodes gpu dataframe on which the node_aggregate_fn
        will be run on
    edge_aggregate_col: str, default None
        column from the edges gpu dataframe on which the edge_aggregate_fn
        will be run on
    node_aggregate_fn: {'count', 'mean', 'max', 'min'}, default 'count'
    edge_aggregate_fn: {'count', 'mean', 'max', 'min'}, default 'count'
    node_color_palette: bokeh.palettes or list/tuple of hex_color_codes,
        or list/tuple of color names, default bokeh.palettes.Viridis10
    edge_color_palette: bokeh.palettes or list/tuple of hex_color_codes,
        or list/tuple of color names, default ["#000000"]
    node_point_size: int, default 15
        Point size in the scatter plot.
    node_point_shape: str, default 'circle'
        Available options: circle, square, rect_vertical, rect_horizontal.
    node_pixel_shade_type: str, default 'eq_hist'
        The "how" parameter in datashader.transfer_functions.shade()
        function.
        Available options: eq_hist, linear, log, cbrt
    node_pixel_density: float, default 0.8
        A tuning parameter in [0, 1], with higher values giving more dense
        scatter plot.
    node_pixel_spread: str, default 'dynspread'
        dynspread: Spread pixels in an image dynamically based on the image
        density.
        spread: Spread pixels in an image.
    edge_render_type: str, default 'direct'
        type of edge render. Available options are 'direct'/'curved'
        *Note: Curved edge rendering is an experimental feature and may throw
        out of memory errors
    edge_transparency: float, default 0
        value in range [0,1] to specify transparency level of edges, with
        1 being completely transparent
    curve_params: dict, default dict(strokeWidth=1, curve_total_steps=100)
        control curvature and max_bundle_size if edge_render_type='curved'
    tile_provider: str, default None
        Underlying map type.See
        https://holoviews.org/reference/elements/bokeh/Tiles.html
    title: str,
        chart title
    timeout: int (milliseconds), default 100
        Determines the timeout after which the callback will
        process new events without the previous one having
        reported completion. Increase for very long running
        callbacks and if zooming feels laggy.
    legend: bool, default True
        Adds Bokeh.models.LinearColorMapper based legend if True,
        Note: legend currently only works with pixel_shade_type='linear'/'log'
    legend_position: str, default top_right
        position of legend on the chart.
        Valid places are: right, left, bottom, top, top_right, top_left,
        bottom_left, bottom_right
    unselected_alpha: float [0, 1], default 0.2
        if True, displays unselected data in the same color_palette
        but transparent(alpha=0.2) (nodes only)

    Returns
    -------
    A cudashader graph plot of type:
    cuxfilter.charts.datashader.custom_extensions.InteractiveDatashaderGraph
    """
    # thin factory: all heavy lifting happens in plots.Graph
    plot = plots.Graph(
        node_x,
        node_y,
        node_id,
        edge_source,
        edge_target,
        x_range,
        y_range,
        add_interaction,
        node_aggregate_col,
        edge_aggregate_col,
        node_aggregate_fn,
        edge_aggregate_fn,
        node_color_palette,
        edge_color_palette,
        node_point_size,
        node_point_shape,
        node_pixel_shade_type,
        node_pixel_density,
        node_pixel_spread,
        edge_render_type,
        edge_transparency,
        curve_params,
        tile_provider,
        title,
        timeout,
        legend=legend,
        legend_position=legend_position,
        unselected_alpha=unselected_alpha,
    )
    plot.chart_type = "graph"
    return plot
def heatmap(
    x,
    y,
    x_range=None,
    y_range=None,
    add_interaction=True,
    color_palette=None,
    aggregate_col=None,
    aggregate_fn="mean",
    point_size=15,
    point_shape="rect_vertical",
    title="",
    timeout=100,
    legend=True,
    legend_position="top_right",
    unselected_alpha=0.2,
):
    """
    Heatmap using default datashader.scatter plot with slight modifications.
    Added for better defaults. In theory, scatter directly can be used
    to generate the same.

    Parameters
    ----------
    x: str
        x-axis column name from the gpu dataframe
    y: str, default None
        y-axis column name from the gpu dataframe
    x_range: tuple, default(gpu_dataframe[x].min(), gpu_dataframe[x].max())
        (min, max) x-dimensions of the geo-scatter plot to be displayed
    y_range: tuple, default(gpu_dataframe[y].min(), gpu_dataframe[y].max())
        (min, max) y-dimensions of the geo-scatter plot to be displayed
    add_interaction: {True, False}, default True
    color_palette: bokeh.palettes or list/tuple of hex_color_codes,
        or list/tuple of color names, default bokeh.palettes.Viridis10
    aggregate_col: str, default None
        column from the gpu dataframe on which the aggregate_fn will be run on,
        if None, aggregate_fn is run on y-column
    aggregate_fn: {'count', 'mean', 'max', 'min'}, default 'mean'
    point_size: int, default 15
        Point size in the scatter plot.
    point_shape: str, default 'rect_vertical'
        Available options: circle, square, rect_vertical, rect_horizontal.
    title: str,
        chart title
    timeout: int (milliseconds), default 100
        Determines the timeout after which the callback will
        process new events without the previous one having
        reported completion. Increase for very long running
        callbacks and if zooming feels laggy.
    legend: bool, default True
        Adds Bokeh.models.LinearColorMapper based legend if True,
    legend_position: str, default top_right
        position of legend on the chart.
        Valid places are: right, left, bottom, top, top_right, top_left,
        bottom_left, bottom_right
    unselected_alpha: float [0, 1], default 0.2
        if True, displays unselected data in the same color_palette
        but transparent(alpha=0.2)

    Notes
    -----
    pixel_shade_type, pixel_density and pixel_spread are fixed to
    'linear', 1 and 'spread' respectively (the heatmap-friendly values);
    use scatter() directly if they need to be customized.

    Returns
    -------
    A cudashader heatmap (scatter object) of type:
    cuxfilter.charts.datashader.custom_extensions.InteractiveDatashaderPoints
    """
    # heatmap is a scatter with fixed shading/density/spread settings
    plot = plots.Scatter(
        x,
        y,
        x_range,
        y_range,
        add_interaction,
        color_palette,
        aggregate_col,
        aggregate_fn,
        point_size,
        point_shape,
        "linear",
        1,
        "spread",
        tile_provider=None,
        title=title,
        timeout=timeout,
        legend=legend,
        legend_position=legend_position,
        unselected_alpha=unselected_alpha,
    )
    plot.chart_type = "heatmap"
    return plot
def line(
    x,
    y,
    data_points=100,
    add_interaction=True,
    pixel_shade_type="linear",
    color=None,
    step_size=None,
    step_size_type=int,
    title="",
    timeout=100,
    unselected_alpha=0.2,
):
    """
    Parameters
    ----------
    x: str
        x-axis column name from the gpu dataframe
    y: str
        y-axis column name from the gpu dataframe
    data_points: int, default 100
        number of points to render
    add_interaction: {True, False}, default True
    pixel_shade_type: str, default 'linear'
        The "how" parameter in datashader.transfer_functions.shade()
        function.
        Available options: eq_hist, linear, log, cbrt
    color: str, default #8735fb
    step_size: int, default None
        for the range_slider below the chart
    step_size_type: type, default int
        for the range_slider below the chart
    title: str,
        chart title
    timeout: int (milliseconds), default 100
        Determines the timeout after which the callback will
        process new events without the previous one having
        reported completion. Increase for very long running
        callbacks and if zooming feels laggy.
    unselected_alpha: float [0, 1], default 0.2
        if True, displays unselected data in the same color_palette
        but transparent(alpha=0.2)

    Returns
    -------
    A cudashader scatter plot of type:
    cuxfilter.charts.datashader.custom_extensions.InteractiveDatashaderLine
    """
    # thin factory: all heavy lifting happens in plots.Line
    plot = plots.Line(
        x,
        y,
        data_points,
        add_interaction,
        pixel_shade_type,
        color,
        step_size,
        step_size_type,
        title,
        timeout,
        unselected_alpha=unselected_alpha,
    )
    plot.chart_type = "non_aggregate_line"
    return plot
def stacked_lines(
    x,
    y,
    data_points=100,
    add_interaction=True,
    colors=[],
    step_size=None,
    step_size_type=int,
    title="",
    timeout=100,
    legend=True,
    legend_position="top_right",
    unselected_alpha=0.2,
):
    """
    stacked lines chart

    Parameters
    ----------
    x: str
        x-axis column name from the gpu dataframe
    y: list
        y-axis column names from the gpu dataframe for the stacked lines
    data_points: int, default 100
        number of points to render
    add_interaction: {True, False}, default True
    colors: list, default [#8735fb, #8735fb, ....]
    step_size: int, default None
        for the range_slider below the chart
    step_size_type: type, default int
        for the range_slider below the chart
    title: str,
        chart title
    timeout: int (milliseconds), default 100
        Determines the timeout after which the callback will
        process new events without the previous one having
        reported completion. Increase for very long running
        callbacks and if zooming feels laggy.
    legend: bool, default True
        Adds Bokeh.models.LinearColorMapper based legend if True,
        Note: legend currently only works with pixel_shade_type='linear'/'log'
    legend_position: str, default top_right
        position of legend on the chart.
        Valid places are: right, left, bottom, top, top_right, top_left,
        bottom_left, bottom_right
    unselected_alpha: float [0, 1], default 0.2
        if True, displays unselected data in the same color_palette
        but transparent(alpha=0.2)

    Returns
    -------
    A cudashader stacked_lines plot of type:
    cuxfilter.charts.datashader.custom_extensions.InteractiveDatashaderMultiLine

    Raises
    ------
    ValueError
        If ``y`` is not a list containing at least one column name.
    """
    # validate y before touching any plotting machinery
    if not isinstance(y, list) or len(y) == 0:
        raise ValueError("y must be a list of at least one column name")
    # NOTE(review): the mutable default `colors=[]` is kept for interface
    # compatibility; this function never mutates it.
    plot = plots.StackedLines(
        x,
        y,
        data_points,
        add_interaction,
        colors,
        step_size,
        step_size_type,
        title,
        timeout,
        legend=legend,
        legend_position=legend_position,
        unselected_alpha=unselected_alpha,
    )
    plot.chart_type = "stacked_lines"
    return plot
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/datashader/plots.py
|
from ..core.non_aggregate import (
BaseScatter,
BaseLine,
BaseStackedLine,
BaseGraph,
)
from .custom_extensions import (
CustomInspectTool,
calc_connected_edges,
InteractiveDatashaderPoints,
InteractiveDatashaderLine,
InteractiveDatashaderGraph,
InteractiveDatashaderMultiLine,
)
from packaging.version import Version
import datashader as ds
import dask_cudf
import dask.dataframe as dd
import cupy as cp
import cudf
import holoviews as hv
from bokeh import events
from PIL import Image
import requests
from io import BytesIO
ds_version = Version(ds.__version__)
def load_image(url):
    """
    Download an image over HTTP and decode it with PIL.

    Parameters
    ----------
    url: str
        Location of the image to fetch.

    Returns
    -------
    PIL.Image.Image

    Raises
    ------
    requests.HTTPError
        If the server responds with an error status code.
    requests.Timeout
        If the server does not respond within the timeout.
    """
    # timeout prevents an unresponsive host from hanging chart creation
    response = requests.get(url, timeout=10)
    # fail fast with a clear HTTP error instead of letting PIL choke on
    # an HTML error page returned with a non-2xx status
    response.raise_for_status()
    return Image.open(BytesIO(response.content))
class Scatter(BaseScatter):
    """
    Datashader-backed scatter chart; renders points via
    InteractiveDatashaderPoints.
    """

    # bokeh event used to trigger a chart reset
    reset_event = events.Reset
    # axis names used when exchanging selection geometry with the dashboard
    data_y_axis = "y"
    data_x_axis = "x"

    def format_source_data(self, data):
        """
        Store the incoming (dask_)cudf DataFrame as this chart's source.
        """
        self.source = data

    def generate_chart(self):
        """
        Build the InteractiveDatashaderPoints chart from the configured
        columns, palette and shading options.
        """
        # default title describes the aggregation being plotted
        if not self.title:
            self.title = (
                "Scatter plot for "
                + (self.aggregate_col or "")
                + " "
                + self.aggregate_fn
            )
        self.chart = InteractiveDatashaderPoints(
            source_df=self.source,
            x=self.x,
            y=self.y,
            aggregate_col=self.aggregate_col,
            aggregate_fn=self.aggregate_fn,
            color_palette=self.color_palette,
            pixel_shade_type=self.pixel_shade_type,
            tile_provider=self.tile_provider,
            legend=self.legend,
            legend_position=self.legend_position,
            spread_threshold=self.pixel_density,
            point_shape=self.point_shape,
            max_px=self.point_size,
            unselected_alpha=self.unselected_alpha,
            title=self.title,
        )

    def reload_chart(self, data=None):
        """
        Re-render the chart with new (filtered) data; an empty frame is
        replaced with an all-NaN frame so the canvas clears cleanly.
        """
        if data is not None:
            if len(data) == 0:
                data = cudf.DataFrame({k: cp.nan for k in data.columns})
            self.chart.update_data(data)

    def apply_theme(self, theme):
        """
        apply thematic changes to the chart based on the theme
        """
        # only override the palette if the user did not set colors explicitly
        if not self.colors_set:
            self.chart.color_palette = theme.color_palette
            self.chart._compute_datashader_assets()
class Graph(BaseGraph):
    """
    Datashader-backed graph chart; renders nodes and (optionally) edges
    via InteractiveDatashaderGraph.
    """

    # bokeh event used to trigger a chart reset
    reset_event = events.Reset
    # axis names used when exchanging selection geometry with the dashboard
    data_y_axis = "node_y"
    data_x_axis = "node_x"

    def format_source_data(self, dataframe):
        """
        Split the incoming data into node and edge frames and precompute
        the datashader-ready connected-edge geometry.

        Accepts either a bare cudf DataFrame (nodes only) or a graph-like
        object exposing ``.data`` (nodes) and ``.edges``.
        """
        if isinstance(dataframe, cudf.DataFrame):
            self.nodes = dataframe
        else:
            self.nodes = dataframe.data
            self.edges = dataframe.edges
        if self.edges is not None:
            # update connected_edges value for datashaded edges
            self.connected_edges = calc_connected_edges(
                self.nodes,
                self.edges,
                self.node_x,
                self.node_y,
                self.node_id,
                self.edge_source,
                self.edge_target,
                self.edge_aggregate_col,
                self.x_dtype,
                self.y_dtype,
                self.edge_render_type,
                self.curve_params,
            )

    def generate_chart(self):
        """
        Build the InteractiveDatashaderGraph chart plus its two custom
        toolbar toggles (inspect neighboring edges / display edges).
        """
        if not self.title:
            self.title = "Graph"
        # NOTE(review): toolbar icons are fetched from raw GitHub URLs
        # pinned to branch-0.15 — chart creation requires network access.
        impath = (
            "https://raw.githubusercontent.com/rapidsai/cuxfilter/"
            + "branch-0.15/python/cuxfilter/charts/datashader/icons/graph.png"
        )
        self.inspect_neighbors = CustomInspectTool(
            icon=load_image(impath),
            _active=True,
            description="Inspect Neighboring Edges",
        )
        # loading icon from a url
        impath = (
            "https://raw.githubusercontent.com/rapidsai/cuxfilter/"
            + "branch-0.15/python/cuxfilter/charts/datashader/icons/XPan.png"
        )
        self.display_edges = CustomInspectTool(
            icon=load_image(impath), _active=True, description="Display Edges"
        )

        def cb(attr, old, new):
            # toggling off swaps in an empty edge frame instead of
            # recomputing, so toggling back on is cheap
            if not new:
                self.chart.update_data(edges=self.connected_edges.head(0))
            else:
                self.chart.update_data(edges=self.connected_edges)

        self.display_edges.on_change("_active", cb)
        self.chart = InteractiveDatashaderGraph(
            nodes_df=self.nodes,
            edges_df=self.connected_edges,
            node_x=self.node_x,
            node_y=self.node_y,
            node_aggregate_col=self.node_aggregate_col,
            node_aggregate_fn=self.node_aggregate_fn,
            node_color_palette=self.node_color_palette,
            node_pixel_shade_type=self.node_pixel_shade_type,
            tile_provider=self.tile_provider,
            legend=self.legend,
            legend_position=self.legend_position,
            node_spread_threshold=self.node_pixel_density,
            node_point_shape=self.node_point_shape,
            node_max_px=self.node_point_size,
            edge_source=self.node_x,
            edge_target=self.node_y,
            edge_color=self.edge_color_palette[0],
            edge_transparency=self.edge_transparency,
            inspect_neighbors=self.inspect_neighbors,
            display_edges=self.display_edges,
            unselected_alpha=self.unselected_alpha,
            title=self.title,
        )

    def reload_chart(self, data, edges=None):
        """
        Re-render nodes (and, when the display-edges toggle is active,
        recomputed edges) with new (filtered) data; an empty frame is
        replaced with an all-NaN frame so the canvas clears cleanly.
        """
        if data is not None:
            if len(data) == 0:
                data = cudf.DataFrame({k: cp.nan for k in self.nodes.columns})
            # update connected_edges value for datashaded edges
            # if display edge toggle is active
            if self.display_edges._active:
                self.connected_edges = calc_connected_edges(
                    data,
                    self.edges if edges is None else edges,
                    self.node_x,
                    self.node_y,
                    self.node_id,
                    self.edge_source,
                    self.edge_target,
                    self.edge_aggregate_col,
                    self.x_dtype,
                    self.y_dtype,
                    self.edge_render_type,
                    self.curve_params,
                )
                self.chart.update_data(data, self.connected_edges)
            else:
                self.chart.update_data(data)

    def apply_theme(self, theme):
        """
        apply thematic changes to the chart based on the theme
        """
        # only override the palette if the user did not set colors explicitly
        if not self.colors_set:
            self.default_palette = theme.color_palette
            self.chart.update_color_palette(theme.color_palette)
class Line(BaseLine):
    """
    Datashader-backed line chart; renders a single line via
    InteractiveDatashaderLine.
    """

    # bokeh event used to trigger a chart reset
    reset_event = events.Reset
    # axis names used when exchanging selection geometry with the dashboard
    data_y_axis = "y"
    data_x_axis = "x"
    # line charts do not use precomputed data tiles
    use_data_tiles = False

    def calculate_source(self, data):
        """
        Forward the incoming cudf DataFrame to format_source_data.
        """
        self.format_source_data(data)

    def format_source_data(self, data):
        """
        Store the incoming DataFrame as the source and derive the x/y
        ranges from its column extents.
        """
        self.source = data
        self.x_range = (self.source[self.x].min(), self.source[self.x].max())
        self.y_range = (self.source[self.y].min(), self.source[self.y].max())

        # for dask-backed frames the min/max are lazy — materialize them
        if isinstance(data, dask_cudf.core.DataFrame):
            self.x_range = dd.compute(*self.x_range)
            self.y_range = dd.compute(*self.y_range)

    def generate_chart(self):
        """
        Build the InteractiveDatashaderLine chart from the configured
        columns, color and shading options.
        """
        if not self.title:
            if self.x == self.y:
                self.title = "Line plot for " + self.x
            else:
                self.title = "Line plot for (" + self.x + "," + self.y + ")"
        self.chart = InteractiveDatashaderLine(
            source_df=self.source,
            x=self.x,
            y=self.y,
            color=self.color,
            pixel_shade_type=self.pixel_shade_type,
            unselected_alpha=self.unselected_alpha,
            title=self.title,
        )

    def reload_chart(self, data):
        """
        Re-render the chart with new (filtered) data; an empty frame is
        replaced with an all-NaN frame so the canvas clears cleanly.
        """
        if data is not None:
            if len(data) == 0:
                data = cudf.DataFrame({k: cp.nan for k in data.columns})
            self.chart.update_data(data)

    def apply_theme(self, theme):
        """
        apply thematic changes to the chart based on the theme
        """
        # only override the color if the user did not set one explicitly
        if not self.color_set:
            self.default_color = theme.chart_color
            self.chart.color = theme.chart_color
class StackedLines(BaseStackedLine):
    """
    Datashader-backed stacked-lines chart; renders multiple y-columns
    against one x-column via InteractiveDatashaderMultiLine.
    """

    # bokeh event used to trigger a chart reset
    reset_event = events.Reset
    # axis names used when exchanging selection geometry with the dashboard
    data_y_axis = "y"
    data_x_axis = "x"

    def compute_legend(self, colors=None):
        """
        Build a holoviews NdOverlay legend mapping each y-column to its
        color, or return None when the legend is disabled.
        """
        colors = colors or self.colors
        if self.legend:
            res = []
            for i, val in enumerate(colors):
                res.append((self.y[i], val))
            # one single-point dummy Curve per line, used only so the
            # overlay produces a colored legend entry per y-column
            return hv.NdOverlay(
                {
                    k: hv.Curve(
                        self.source.head(1),
                        label=str(k),
                        kdims=[self.x],
                        vdims=[self.y[0]],
                    ).opts(color=v)
                    for k, v in res
                }
            ).opts(legend_position=self.legend_position)
        return None

    def calculate_source(self, data):
        """
        Forward the incoming cudf DataFrame to format_source_data.
        """
        self.format_source_data(data)

    def format_source_data(self, data):
        """
        Store the incoming DataFrame as the source and derive default
        x/y ranges from its column extents when not supplied.
        """
        self.source = data
        if self.x_range is None:
            self.x_range = (
                self.source[self.x].min(),
                self.source[self.x].max(),
            )
        if self.y_range is None:
            # cudf_df[['a','b','c']].min().min() gives min value
            # between all values in columns a,b and c
            self.y_range = (
                self.source[self.y].min().min(),
                self.source[self.y].max().max(),
            )
        # for dask-backed frames the min/max are lazy — materialize them
        if isinstance(data, dask_cudf.core.DataFrame):
            self.x_range = dd.compute(*self.x_range)
            self.y_range = dd.compute(*self.y_range)

    def generate_chart(self):
        """
        Build the InteractiveDatashaderMultiLine chart from the configured
        columns, colors and legend.
        """
        if not self.title:
            self.title = "Stacked Line plots on x-axis: " + self.x
        self.chart = InteractiveDatashaderMultiLine(
            source_df=self.source,
            x=self.x,
            line_dims=self.y,
            colors=self.colors,
            legend=self.compute_legend(),
            unselected_alpha=self.unselected_alpha,
            title=self.title,
        )

    def reload_chart(self, data):
        """
        Re-render the chart with new (filtered) data; an empty frame is
        replaced with an all-NaN frame so the canvas clears cleanly.
        """
        if data is not None:
            if len(data) == 0:
                data = cudf.DataFrame({k: cp.nan for k in data.columns})
            self.chart.update_data(data)

    def apply_theme(self, theme):
        """
        apply thematic changes to the chart based on the theme
        """
        # only override the colors if the user did not set them explicitly;
        # the legend must be rebuilt to reflect the new colors
        if not self.colors_set:
            self.default_colors = [theme.chart_color]
            self.chart.legend = self.compute_legend(self.default_colors)
            self.chart.colors = self.default_colors
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/datashader
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/datashader/custom_extensions/graph_inspect_widget.py
|
from bokeh.core.properties import Bool, Nullable
from bokeh.models import Tool
from bokeh.util.compiler import TypeScript
TS_CODE = """
import {InspectTool, InspectToolView} from
"models/tools/inspectors/inspect_tool"
import * as p from "core/properties"
export class CustomInspectToolView extends InspectToolView {
declare model: CustomInspectTool
connect_signals(): void {
super.connect_signals()
this.on_change([this.model.properties.active], () => {
this.model._active = this.model.active
})
}
}
export namespace CustomInspectTool {
export type Attrs = p.AttrsOf<Props>
export type Props = InspectTool.Props & {
_active: p.Property<boolean>
}
}
export interface CustomInspectTool extends CustomInspectTool.Attrs {}
export class CustomInspectTool extends InspectTool {
declare properties: CustomInspectTool.Props
declare __view_type__: CustomInspectToolView
constructor(attrs?: Partial<CustomInspectTool.Attrs>) {
super(attrs)
}
static {
this.prototype.default_view = CustomInspectToolView
this.define<CustomInspectTool.Props>(({Boolean}) => ({
_active: [ Boolean, true ]
}))
this.register_alias("customInspect", () => new CustomInspectTool())
}
}
"""
class CustomInspectTool(Tool):
    """
    Bokeh toolbar toggle tool whose active state is exposed to Python as
    the ``_active`` property (kept in sync by the TypeScript side).
    """

    __implementation__ = TypeScript(TS_CODE)
    # mirrors the tool's toolbar "active" state; observable via
    # on_change("_active", callback)
    _active = Nullable(Bool)
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/datashader
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/datashader/custom_extensions/graph_assets.py
|
import cupy as cp
import cudf
from cuxfilter.assets import cudf_utils
import dask_cudf
from numba import cuda
from math import sqrt, ceil
from ....assets import datetime as dt
def cuda_args(shape):
    """
    Compute the blocks-per-grid and threads-per-block parameters for use when
    invoking cuda kernels

    Parameters
    ----------
    shape: int or tuple of ints
        The shape of the input array that the kernel will parallelize over

    Returns
    -------
    tuple
        Tuple of (blocks_per_grid, threads_per_block)
    """
    # normalize a scalar extent to a 1-tuple
    if isinstance(shape, int):
        shape = (shape,)

    ndim = len(shape)
    max_threads = cuda.get_current_device().MAX_THREADS_PER_BLOCK
    # Note: We divide max_threads by 2.0 to leave room for the registers
    # occupied by the kernel. For some discussion, see
    # https://github.com/numba/numba/issues/3798.
    threads_per_dim = int(ceil(max_threads / 2.0) ** (1.0 / ndim))

    threads_per_block = (threads_per_dim,) * ndim
    blocks_per_grid = tuple(
        int(ceil(extent / threads_per_dim)) for extent in shape
    )
    return blocks_per_grid, threads_per_block
def bundle_edges(edges, src="src", dst="dst"):
    """
    Assign each edge a bundle id shared by all edges connecting the same
    vertex pair (in either direction), plus that bundle's size (``count``)
    and the edge's position within it (``_index``).

    Parameters
    ----------
    edges: cudf.DataFrame
        Edge list; mutated to gain an ``eid`` column.
    src, dst: str
        Source / destination column names.

    Returns
    -------
    cudf.DataFrame
        Edge list with ``id``, ``bid``, ``start``, ``count`` and ``_index``
        columns added, sorted by original edge id.
    """
    # Create a duplicate table with:
    # * all the [src, dst] in the upper half
    # * all the [dst, src] pairs as the lower half, but flipped so dst->src,
    #   src->dst
    edges["eid"] = edges.index
    edges_duplicated = cudf.DataFrame(
        {
            "eid": cudf.concat([edges["eid"], edges["eid"]]),
            # concat [src, dst] into the 'src' column
            src: cudf.concat([edges[src], edges[dst]]),
            # concat [dst, src] into the dst column
            dst: cudf.concat([edges[dst], edges[src]]),
        }
    )
    # Group the duplicated edgelist by [src, dst] and get the min edge id.
    # Since all the [dst, src] pairs have been flipped to [src, dst], each
    # edge with the same [src, dst] or [dst, src] vertices will be assigned
    # the same bundle id
    bundles = (
        edges_duplicated.groupby([src, dst])
        .agg({"eid": "min"})
        .reset_index()
        .rename(columns={"eid": "bid"}, copy=False)
    )
    # Join the bundle ids into the edgelist
    edges = edges.merge(bundles, on=[src, dst], how="inner")
    # Determine each bundle's size and relative offset
    bundles = edges["bid"].sort_values()
    lengths = bundles.value_counts(sort=False)
    offsets = lengths.cumsum() - lengths
    # Join the bundle segment lengths + offsets into the edgelist
    edges = edges.merge(
        cudf.DataFrame(
            {
                "start": offsets.reset_index(drop=True),
                "count": lengths.reset_index(drop=True),
                "bid": bundles.unique().reset_index(drop=True),
            }
        ),
        on="bid",
        how="left",
    )
    # Determine each edge's index relative to its bundle
    edges = edges.sort_values(by="bid").reset_index(drop=True)
    edges["_index"] = (
        cudf.core.index.RangeIndex(0, len(edges)) - edges["start"]
    ).astype("int32")
    # Re-sort the edgelist by edge id and cleanup
    edges = edges.sort_values("eid").reset_index(drop=True)
    edges = edges.rename(columns={"eid": "id"}, copy=False)
    return edges
@cuda.jit(device=True)
def bezier(start, end, control_point, steps, result):
    # Quadratic Bezier interpolation from `start` to `end` bending toward
    # `control_point`, evaluated at each parameter value in `steps`;
    # writes one coordinate per step into `result`.
    for i in range(steps.shape[0]):
        result[i] = (
            start * (1 - steps[i]) ** 2
            + 2 * (1 - steps[i]) * steps[i] * control_point
            + end * steps[i] ** 2
        )
@cuda.jit(device=True)
def add_aggregate_col(aggregate_col, steps, result):
    # Broadcast the edge's scalar aggregate value across every curve step
    # so each interpolated point carries the same aggregate.
    for i in range(steps.shape[0]):
        result[i] = aggregate_col
@cuda.jit
def compute_curves(nodes, control_points, result, steps):
    # CUDA kernel: for each edge row in `nodes`
    # (v1_x, v1_y, v2_x, v2_y[, aggregate]) evaluate the quadratic Bezier
    # x- and y-curves through the per-edge control point, writing them into
    # result[i, 0] and result[i, 1]; the trailing slot of each curve is set
    # to NaN as a datashader line-break separator. Uses a grid-stride loop.
    start = cuda.grid(1)
    stride = cuda.gridsize(1)
    for i in range(start, nodes.shape[0], stride):
        v1_x = nodes[i, 0]
        v1_y = nodes[i, 1]
        v2_x = nodes[i, 2]
        v2_y = nodes[i, 3]
        bezier(v1_x, v2_x, control_points[i, 0], steps, result[i, 0])
        result[i, 0, -1] = cp.nan
        bezier(v1_y, v2_y, control_points[i, 1], steps, result[i, 1])
        result[i, 1, -1] = cp.nan
        # a 5th input column is the optional edge aggregate — broadcast it
        if nodes.shape[1] == 5:
            add_aggregate_col(nodes[i, 4], steps, result[i, 2])
def control_point_compute_kernel(
    x_src,
    y_src,
    count,
    _index,
    x_dst,
    y_dst,
    ctrl_point_x,
    ctrl_point_y,
    strokeWidth,
):
    """
    GPU kernel to compute control points for each edge

    Consumed by cudf ``apply_rows``: for every edge it places a Bezier
    control point perpendicular to the edge's midpoint, offset by the
    edge's position (``_index``) within its bundle (``count`` edges) so
    bundled edges fan out instead of overlapping.
    """
    for i, (bcount, eindex) in enumerate(zip(count, _index)):
        # midpoint of the edge
        midp_x = (x_src[i] + x_dst[i]) * 0.5
        midp_y = (y_src[i] + y_dst[i]) * 0.5
        diff_x = x_dst[i] - x_src[i]
        diff_y = y_dst[i] - y_src[i]
        # unit vector perpendicular to the edge direction
        normalized_x = diff_x / sqrt(float(diff_x**2 + diff_y**2))
        normalized_y = diff_y / sqrt(float(diff_x**2 + diff_y**2))
        unit_x = -1 * normalized_y
        unit_y = normalized_x
        # cap the fan-out at 15% of the edge length
        maxBundleSize = sqrt(float((diff_x**2 + diff_y**2))) * 0.15
        # NOTE(review): by operator precedence this parses as
        # (1 - (bcount % 2.0)) + (((-1) * bcount) % 2.0); for odd bcount
        # the second term is 1.0 and the first 0.0, so direction evaluates
        # to 1.0 for both parities — possibly intended to alternate ±1.
        # Confirm intent before changing.
        direction = (1 - bcount % 2.0) + (-1) * bcount % 2.0
        size = (maxBundleSize / strokeWidth) * (eindex / bcount)
        if maxBundleSize < bcount * strokeWidth * 2.0:
            size = strokeWidth * 2.0 * eindex
            size += maxBundleSize
        ctrl_point_x[i] = midp_x + (unit_x * size * direction)
        ctrl_point_y[i] = midp_y + (unit_y * size * direction)
def curved_connect_edges(
    edges, x, y, edge_source, edge_target, connected_edge_columns, curve_params
):
    """
    edges: cudf DataFrame(x_src, y_src, x_dst, y_dst)
    returns a cudf DataFrame of the form (
        row1 -> x_src, y_src
        row2 -> x_dst, y_dst
        row3 -> nan, nan
        ...
    ) as the input to datashader.line

    Edges connecting the same vertex pair are bundled and rendered as
    quadratic Bezier curves fanned around a per-edge control point.
    """
    bundled_edges = bundle_edges(
        edges, src=edge_source, dst=edge_target
    ).rename(
        columns={
            f"{x}_src": "x_src",
            f"{y}_src": "y_src",
            f"{x}_dst": "x_dst",
            f"{y}_dst": "y_dst",
        }
    )

    # Work on a shallow copy so the caller's dict is not mutated: popping
    # from the original (e.g. Graph.curve_params, reused on every chart
    # reload) would make the next call fail with
    # KeyError("curve_total_steps").
    curve_params = dict(curve_params)
    curve_total_steps = curve_params.pop("curve_total_steps")

    # if aggregate column exists, ignore it for bundled edges compute
    # (remaining curve_params entries, e.g. strokeWidth, are forwarded as
    # kernel kwargs)
    fin_df_ = bundled_edges.apply_rows(
        control_point_compute_kernel,
        incols=["x_src", "y_src", "x_dst", "y_dst", "count", "_index"],
        outcols=dict(ctrl_point_x=cp.float32, ctrl_point_y=cp.float32),
        kwargs=curve_params,
    )

    # one extra slot per curve holds the NaN line-break separator
    shape = (
        fin_df_.shape[0],
        len(connected_edge_columns) - 2,
        curve_total_steps + 1,
    )
    result = cp.zeros(shape=shape, dtype=cp.float32)
    steps = cp.linspace(0, 1, curve_total_steps)

    # Make sure no control points are added for rows with source==destination
    fin_df_ = fin_df_.query(edge_source + "!=" + edge_target)

    compute_curves[cuda_args(fin_df_.shape[0])](
        fin_df_[["x_src", "y_src", "x_dst", "y_dst"]].to_cupy(),
        fin_df_[["ctrl_point_x", "ctrl_point_y"]].to_cupy(),
        result,
        steps,
    )

    # 5 columns means an edge aggregate column is present
    if len(connected_edge_columns) == 5:
        return cudf.DataFrame(
            {
                x: result[:, 0].flatten(),
                y: result[:, 1].flatten(),
                connected_edge_columns[-1]: result[:, 2].flatten(),
            }
        ).fillna(cp.nan)
    else:
        return cudf.DataFrame(
            {x: result[:, 0].flatten(), y: result[:, 1].flatten()}
        ).fillna(cp.nan)
@cuda.jit
def connect_edges(edges, result):
    # CUDA kernel: expand each edge row
    # (x_src, y_src, x_dst, y_dst[, aggregate]) into a 3-point polyline
    # column per dimension — [src, dst, NaN] — where the NaN acts as the
    # datashader line-break separator. Uses a grid-stride loop.
    start = cuda.grid(1)
    stride = cuda.gridsize(1)
    for i in range(start, edges.shape[0], stride):
        result[i, 0, 0] = edges[i, 0]
        result[i, 0, 1] = edges[i, 2]
        result[i, 0, 2] = cp.nan
        result[i, 1, 0] = edges[i, 1]
        result[i, 1, 1] = edges[i, 3]
        result[i, 1, 2] = cp.nan
        # a 5th input column is the optional edge aggregate — broadcast it
        if edges.shape[1] == 5:
            result[i, 2, 0] = edges[i, 4]
            result[i, 2, 1] = edges[i, 4]
            result[i, 2, 2] = cp.nan
def directly_connect_edges(edges, x, y, edge_aggregate_col=None):
    """
    edges: cudf DataFrame(x_src, y_src, x_dst, y_dst)
    x: str, node x-coordinate column name
    y: str, node y-coordinate column name
    edge_aggregate_col: str, edge aggregate column name, if any
    returns a cudf DataFrame of the form (
        row1 -> x_src, y_src
        row2 -> x_dst, y_dst
        row3 -> nan, nan
        ...
    ) as the input to datashader.line

    NOTE: mutates the caller's ``edges`` frame by adding/overwriting the
    ``x`` and ``y`` columns with NaN separator values.
    """
    # dask.distributed throws a not supported error when cudf.NA is used.
    # Use cp.nan (lowercase): the cp.NAN alias was removed along with
    # NumPy 2.0's removal of np.NAN.
    edges[x] = cp.nan
    edges[y] = cp.nan
    src_columns = [f"{x}_src", f"{y}_src"]
    dst_columns = [f"{x}_dst", f"{y}_dst"]
    if edge_aggregate_col:
        src_columns.append(edge_aggregate_col)
        dst_columns.append(edge_aggregate_col)
    # realign each src -> target row, as 3 rows:
    # [[x_src, y_src], [x_dst, y_dst], [nan, nan]]
    # The three frames share the original index, so sort_index()
    # interleaves them back into per-edge (src, dst, separator) order.
    return cudf.concat(
        [
            edges[src_columns].rename(columns={f"{x}_src": x, f"{y}_src": y}),
            edges[dst_columns].rename(columns={f"{x}_dst": x, f"{y}_dst": y}),
            edges[[x, y]],
        ]
    ).sort_index()
def calc_connected_edges(
    nodes,
    edges,
    node_x,
    node_y,
    node_id,
    edge_source,
    edge_target,
    edge_aggregate_col,
    node_x_dtype,
    node_y_dtype,
    edge_render_type="direct",
    curve_params=None,
):
    """
    calculate directly connected edges

    Joins edge endpoints to node coordinates and produces the
    (x, y[, aggregate]) line-segment frame consumed by datashader.line.

    nodes: cudf.DataFrame/dask_cudf.DataFrame
    edges: cudf.DataFrame/dask_cudf.DataFrame
    node_x, node_y: str, node coordinate column names
    node_id: str, node identifier column joined against edge endpoints
    edge_source, edge_target: str, edge endpoint column names
    edge_aggregate_col: str or None, optional per-edge aggregate column
    node_x_dtype, node_y_dtype: dtypes used to convert datetime axes
        to int64 before rendering
    edge_render_type: direct/curved (curved is cudf-only; raises
        NotImplementedError for dask_cudf)
    curve_params: dict, required when edge_render_type == "curved";
        passed as a copy to curved_connect_edges
    """
    edges_columns = [
        edge_source,
        edge_target,
        edge_aggregate_col,
        node_x,
        node_y,
    ]
    connected_edge_columns = [
        node_x + "_src",
        node_y + "_src",
        node_x + "_dst",
        node_y + "_dst",
        edge_aggregate_col,
    ]
    # removing edge_aggregate_col if its None
    if edge_aggregate_col is None:
        edges_columns.remove(None)
        connected_edge_columns.remove(None)
    nodes = nodes[[node_id, node_x, node_y]].drop_duplicates()
    # Datetime axes are converted to int64 so downstream GPU kernels and
    # datashader aggregation operate on numeric values.
    nodes[node_x] = dt.to_int64_if_datetime(nodes[node_x], node_x_dtype)
    nodes[node_y] = dt.to_int64_if_datetime(nodes[node_y], node_y_dtype)
    # First merge attaches source-node coordinates to each edge.
    connected_edges_df = (
        edges.merge(nodes, left_on=edge_source, right_on=node_id)
        .drop_duplicates(subset=[edge_source, edge_target])[edges_columns]
        .reset_index(drop=True)
    )
    # Second merge attaches destination-node coordinates; the suffixes
    # label the two coordinate sets as *_src / *_dst.
    connected_edges_df = (
        connected_edges_df.merge(
            nodes,
            left_on=edge_target,
            right_on=node_id,
            suffixes=("_src", "_dst"),
        )
        .drop_duplicates(subset=[edge_source, edge_target])
        .reset_index(drop=True)
    )
    result = cudf.DataFrame()
    def get_df_size(df):
        # dask_cudf shapes are lazy; compute() materializes the row count.
        if isinstance(df, dask_cudf.DataFrame):
            return df.shape[0].compute()
        return df.shape[0]
    if get_df_size(connected_edges_df) > 1:
        # shape=1 when the dataset has src == dst edges
        if edge_render_type == "direct":
            if isinstance(edges, dask_cudf.DataFrame):
                # Run the per-partition transform lazily, then persist.
                result = (
                    connected_edges_df[connected_edge_columns]
                    .map_partitions(
                        directly_connect_edges,
                        node_x,
                        node_y,
                        edge_aggregate_col,
                    )
                    .persist()
                )
                # cull any empty partitions, since dask_cudf dataframe
                # filtering may result in one
                result = cudf_utils.cull_empty_partitions(result)
            else:
                result = directly_connect_edges(
                    connected_edges_df[connected_edge_columns],
                    node_x,
                    node_y,
                    edge_aggregate_col,
                )
        elif edge_render_type == "curved":
            if isinstance(edges, dask_cudf.DataFrame):
                raise NotImplementedError(
                    "curved edges not implemented for dask_cudf Dataframes"
                )
            # copy() because curved_connect_edges pops from curve_params.
            result = curved_connect_edges(
                connected_edges_df,
                node_x,
                node_y,
                edge_source,
                edge_target,
                connected_edge_columns,
                curve_params.copy(),
            )
    # Fall back to an all-NaN single-row frame so downstream rendering
    # receives the expected columns even when no edges connected.
    if get_df_size(result) == 0:
        result = cudf.DataFrame({k: cp.nan for k in [node_x, node_y]})
        if edge_aggregate_col is not None:
            result[edge_aggregate_col] = cp.nan
    return result
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/datashader
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/datashader/custom_extensions/__init__.py
|
from .graph_inspect_widget import CustomInspectTool
from .graph_assets import calc_connected_edges
from .holoviews_datashader import (
InteractiveDatashaderPoints,
InteractiveDatashaderLine,
InteractiveDatashaderMultiLine,
InteractiveDatashaderGraph,
)
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/datashader
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/datashader/custom_extensions/holoviews_datashader.py
|
import cudf
import dask_cudf
import datashader as ds
import holoviews as hv
from holoviews.element.tiles import tile_sources
from holoviews.operation.datashader import (
SpreadingOperation,
datashade,
rasterize,
)
import numpy as np
import param
from . import CustomInspectTool
from datashader import transfer_functions as tf
from ...constants import CUXF_DEFAULT_COLOR_PALETTE
from ....assets.cudf_utils import get_min_max
import requests
from PIL import Image
from io import BytesIO
def load_image(url):
    """
    Fetch an image over HTTP(S) and return it as a ``PIL.Image.Image``.

    :param str url: URL of the image to download.
    :returns: The decoded image.
    :raises requests.HTTPError: If the server responds with an error status.
    :raises requests.Timeout: If the request exceeds the timeout.
    """
    # A bounded timeout prevents the dashboard from hanging indefinitely on
    # an unresponsive host; raise_for_status surfaces HTTP errors directly
    # instead of letting PIL fail trying to decode an error page.
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    return Image.open(BytesIO(response.content))
def _rect_vertical_mask(px):
"""
Produce a vertical rectangle mask with truth
values in ``(2 * px + 1) * ((2 * px + 1)/2)``
"""
px = int(px)
w = 2 * px + 1
zero_bool = np.zeros((w, px), dtype="bool")
x_bool = np.ones((w, w - px), dtype="bool")
return np.concatenate((x_bool, zero_bool), axis=1)
def _rect_horizontal_mask(px):
"""
Produce a horizontal rectangle mask with truth
values in ``((2 * px + 1)/2) * (2 * px + 1)``
"""
px = int(px)
w = 2 * px + 1
zero_bool = np.zeros((px, w), dtype="bool")
x_bool = np.ones((w - px, w), dtype="bool")
return np.concatenate((x_bool, zero_bool), axis=0)
def _cross_mask(px):
"""
Produce a cross symbol mask with truth
values in ``((2 * px + 1)/2) * (2 * px + 1)``
"""
px = int(px)
w = 2 * px + 1
zero_bool = np.zeros((w, w), dtype="bool")
np.fill_diagonal(zero_bool, True)
np.fill_diagonal(np.fliplr(zero_bool), True)
return zero_bool
# Register the custom point-shape masks with datashader so dynspread can
# spread pixels using these footprints (selected via the `shape` option).
# NOTE(review): _mask_lookup is a private datashader attribute; verify it
# still exists when upgrading datashader.
ds.transfer_functions._mask_lookup["rect_vertical"] = _rect_vertical_mask
ds.transfer_functions._mask_lookup["rect_horizontal"] = _rect_horizontal_mask
ds.transfer_functions._mask_lookup["cross"] = _cross_mask
class dynspread(SpreadingOperation):
    """
    datashader has a pending change to support internally converting
    cupy arrays to numpy(https://github.com/holoviz/datashader/pull/1015)
    This class is a custom implementation of
    https://github.com/holoviz/holoviews/blob/master/holoviews/operation/datashader.py#L1660
    to support the cupy array internal conversion until datashader merges the
    changes
    """
    max_px = param.Integer(
        default=3,
        doc="""
        Maximum number of pixels to spread on all sides.""",
    )
    threshold = param.Number(
        default=0.5,
        bounds=(0, 1),
        doc="""
        When spreading, determines how far to spread.
        Spreading starts at 1 pixel, and stops when the fraction
        of adjacent non-empty pixels reaches this threshold.
        Higher values give more spreading, up to the max_px
        allowed.""",
    )
    # Footprint used when spreading; the non-standard shapes are the
    # custom masks registered in ds.transfer_functions._mask_lookup above.
    shape = param.ObjectSelector(
        default="circle",
        objects=[
            "circle",
            "square",
            "rect_vertical",
            "rect_horizontal",
            "cross",
        ],
    )
    def _apply_spreading(self, array):
        """Delegate to datashader's tf.dynspread with this operation's
        parameters.  `self.p.how` is not declared here — presumably it is
        inherited from SpreadingOperation; verify against holoviews."""
        return tf.dynspread(
            array,
            max_px=self.p.max_px,
            threshold=self.p.threshold,
            how=self.p.how,
            shape=self.p.shape,
        )
class InteractiveDatashaderBase(param.Parameterized):
    """
    Common parameters and interaction streams (box select, lasso select,
    plot reset) shared by the interactive datashader chart wrappers below.
    """
    tile_provider = param.String(None)
    title = param.String("Interactive Datashader Chart")
    # Holoviews selection/reset streams; callbacks are attached via the
    # add_*_callback methods below.
    box_stream = param.ClassSelector(
        class_=hv.streams.SelectionXY, default=hv.streams.SelectionXY()
    )
    lasso_stream = param.ClassSelector(
        class_=hv.streams.Lasso, default=hv.streams.Lasso()
    )
    reset_stream = param.ClassSelector(
        class_=hv.streams.PlotReset,
        default=hv.streams.PlotReset(resetting=False),
    )
    tools = param.List(
        default=[
            "pan",
            "box_select",
            "reset",
            "lasso_select",
            "wheel_zoom",
            "save",
        ],
        doc="interactive tools to add to the chart",
    )
    unselected_alpha = param.Number(
        0.2,
        bounds=(0, 1),
        doc=("Transparency of the unselected points. "),
    )
    def __init__(self, **params):
        """
        initialize InteractiveDatashaderBase object
        """
        super(InteractiveDatashaderBase, self).__init__(**params)
        # Instantiate the tile source only when a provider name is given;
        # otherwise keep None so views skip the tile overlay.
        self.tiles = (
            tile_sources[self.tile_provider]()
            if (self.tile_provider is not None)
            else self.tile_provider
        )
    @property
    def vdims(self):
        # NOTE(review): relies on `self.aggregate_col` and `self.y`, which
        # are declared on subclasses, not on this base class — confirm all
        # users of this property define both.
        if self.aggregate_col is None:
            return [self.y]
        return [self.y, self.aggregate_col]
    def add_box_select_callback(self, callback_fn):
        # Replacing the stream (rather than subscribing) resets any
        # previously attached subscribers.
        self.box_stream = hv.streams.SelectionXY(subscribers=[callback_fn])
    def add_lasso_select_callback(self, callback_fn):
        self.lasso_stream = hv.streams.Lasso(subscribers=[callback_fn])
    def reset_all_selections(self):
        # Clear both selection streams back to their empty state.
        self.lasso_stream.reset()
        self.box_stream.reset()
    def add_reset_event(self, callback_fn):
        self.reset_stream = hv.streams.PlotReset(subscribers=[callback_fn])
class InteractiveDatashader(InteractiveDatashaderBase):
    """
    Base for single-dataframe charts: adds the source dataframe, the
    x/y column names, and datashader shading/spreading parameters.
    """
    source_df = param.ClassSelector(
        class_=(cudf.DataFrame, dask_cudf.DataFrame),
        doc="source cuDF/dask_cuDF dataframe",
    )
    x = param.String("x")
    y = param.String("y")
    # Colormap normalization passed to rasterize (e.g. "linear", "log").
    pixel_shade_type = param.String("linear")
    spread_threshold = param.Number(
        0, doc="threshold parameter passed to dynspread function"
    )
class InteractiveDatashaderPoints(InteractiveDatashader):
    """
    Interactive rasterized scatter chart.  Builds a dynspread(rasterize(
    hv.Scatter)) pipeline over the source dataframe, with an optional
    per-point aggregate column driving the colormap.
    """
    # NOTE(review): although declared allow_None, _compute_clims and
    # _compute_datashader_assets index source_df with aggregate_col
    # unconditionally — confirm callers always supply it.
    aggregate_col = param.String(allow_None=True)
    aggregate_fn = param.String("count")
    legend = param.Boolean(True, doc="whether to display legends or not")
    legend_position = param.String("right", doc="position of legend")
    cmap = param.Dict(default={"cmap": CUXF_DEFAULT_COLOR_PALETTE})
    tools = param.List(
        default=[
            "pan",
            "reset",
            "box_select",
            "lasso_select",
            "wheel_zoom",
            "save",
        ],
        doc="interactive tools to add to the chart",
    )
    color_palette = param.List()
    point_shape = param.ObjectSelector(
        default="circle",
        objects=[
            "circle",
            "square",
            "rect_vertical",
            "rect_horizontal",
            "cross",
        ],
    )
    max_px = param.Integer(10)
    # Color limits (min, max) used when the aggregate is non-categorical.
    clims = param.Tuple(default=(None, None))
    def __init__(self, **params):
        super(InteractiveDatashaderPoints, self).__init__(**params)
        self._compute_datashader_assets()
    def _compute_clims(self):
        # Only numeric aggregates get explicit color limits; categorical
        # columns are colored via a color_key instead.
        if not isinstance(
            self.source_df[self.aggregate_col].dtype,
            cudf.core.dtypes.CategoricalDtype,
        ):
            self.clims = get_min_max(self.source_df, self.aggregate_col)
    def _compute_datashader_assets(self):
        """Derive the datashader aggregator and colormap/color-key from the
        aggregate column's dtype and the configured palette."""
        self.aggregator = None
        self.cmap = {"cmap": self.color_palette}
        if isinstance(
            self.source_df[self.aggregate_col].dtype,
            cudf.core.dtypes.CategoricalDtype,
        ):
            # Categorical aggregate: map each category to a palette color.
            self.cmap = {
                "color_key": {
                    k: v
                    for k, v in zip(
                        list(
                            self.source_df[
                                self.aggregate_col
                            ].cat.categories.to_pandas()
                        ),
                        self.color_palette,
                    )
                }
            }
        if self.aggregate_fn:
            # e.g. ds.count / ds.mean / ds.max over the aggregate column.
            self.aggregator = getattr(ds, self.aggregate_fn)(
                self.aggregate_col
            )
        self._compute_clims()
    def update_data(self, data):
        # Swap in a new dataframe; param.depends("source_df") re-renders.
        self.source_df = data
        self._compute_clims()
    @param.depends("source_df")
    def points(self, **kwargs):
        return hv.Scatter(
            self.source_df, kdims=[self.x], vdims=self.vdims
        ).opts(tools=[], default_tools=[])
    def get_base_chart(self):
        """Static, dimmed rendering of ALL points, overlaid beneath the
        interactive chart so unselected data stays faintly visible."""
        return dynspread(
            rasterize(self.points()).opts(
                cnorm=self.pixel_shade_type,
                **self.cmap,
                nodata=0,
                alpha=self.unselected_alpha,
                tools=[],
                default_tools=[],
            ),
            threshold=self.spread_threshold,
            shape=self.point_shape,
            max_px=self.max_px,
        )
    def get_chart(self, streams=[]):
        """Interactive rasterized chart wired to the given streams."""
        dmap = rasterize(
            hv.DynamicMap(self.points, streams=streams),
            aggregator=self.aggregator,
        ).opts(
            cnorm=self.pixel_shade_type,
            **self.cmap,
            colorbar=self.legend,
            nodata=0,
            alpha=1,
            colorbar_position=self.legend_position,
            tools=[],
            default_tools=[],
        )
        if self.aggregate_fn != "count":
            # count has no meaningful fixed range; others use computed clims.
            dmap = dmap.opts(clim=self.clims)
        return dmap
    def view(self):
        """Compose the final displayable: interactive layer (+ dimmed base
        layer and map tiles when configured)."""
        dmap = dynspread(
            self.get_chart(
                streams=[
                    self.box_stream,
                    self.lasso_stream,
                    self.reset_stream,
                ]
            ),
            threshold=self.spread_threshold,
            shape=self.point_shape,
            max_px=self.max_px,
        ).opts(
            xaxis=None,
            yaxis=None,
            responsive=True,
            tools=self.tools,
            active_tools=["wheel_zoom", "pan"],
        )
        if self.unselected_alpha > 0:
            dmap *= self.get_base_chart()
        return (self.tiles * dmap if self.tiles is not None else dmap).relabel(
            self.title
        )
class InteractiveDatashaderLine(InteractiveDatashader):
    """
    Interactive rasterized single-line chart (hv.Curve of y vs x) with
    box/lasso selection support.
    """
    color = param.String()
    # NOTE(review): `transparency` is declared but not referenced inside
    # this class — confirm whether external code consumes it.
    transparency = param.Number(0, bounds=(0, 1))
    tools = param.List(
        default=[
            "pan",
            "reset",
            "box_select",
            "lasso_select",
            "wheel_zoom",
            "xbox_select",
            "save",
        ],
        doc="interactive tools to add to the chart",
    )
    def __init__(self, **params):
        super(InteractiveDatashaderLine, self).__init__(**params)
    def update_data(self, data):
        # Swap in a new dataframe; param.depends("source_df") re-renders.
        self.source_df = data
    @param.depends("source_df")
    def line(self, **kwargs):
        return hv.Curve(self.source_df, kdims=[self.x], vdims=[self.y]).opts(
            tools=[], default_tools=[]
        )
    def get_base_chart(self):
        """Static, dimmed rendering of the full line, overlaid beneath the
        interactive chart."""
        return dynspread(
            rasterize(self.line()).opts(
                cmap=[self.color],
                alpha=self.unselected_alpha,
                tools=[],
                default_tools=[],
            )
        ).opts(
            responsive=True,
            tools=self.tools,
            active_tools=["wheel_zoom", "pan"],
            default_tools=[],
        )
    def get_chart(self, streams=[]):
        """Interactive rasterized line chart wired to the given streams."""
        return rasterize(hv.DynamicMap(self.line, streams=streams)).opts(
            cmap=[self.color], tools=[], default_tools=[]
        )
    def view(self):
        """Compose the final displayable: interactive layer (+ dimmed base
        layer and map tiles when configured)."""
        dmap = dynspread(
            self.get_chart(
                streams=[
                    self.box_stream,
                    self.lasso_stream,
                    self.reset_stream,
                ]
            )
        ).opts(
            responsive=True,
            tools=self.tools,
            active_tools=["wheel_zoom", "pan"],
            default_tools=[],
        )
        if self.unselected_alpha > 0:
            dmap *= self.get_base_chart()
        return (self.tiles * dmap if self.tiles is not None else dmap).relabel(
            self.title
        )
class InteractiveDatashaderMultiLine(InteractiveDatashader):
    """
    Interactive datashaded multi-line chart: several y-columns
    (`line_dims`) rendered against a shared x-column, colored per line
    via ds.count_cat on the overlay key dimension.
    """
    colors = param.List(default=[])
    # NOTE(review): `transparency` is declared but not referenced inside
    # this class — confirm whether external code consumes it.
    transparency = param.Number(0, bounds=(0, 1))
    line_dims = param.List(
        default=[],
        doc=(
            "list of dimensions of lines to be rendered"
            + "against a common x-column"
        ),
    )
    tools = param.List(
        default=[
            "pan",
            "reset",
            "xbox_select",
            "wheel_zoom",
            "xwheel_zoom",
            "save",
        ],
        doc="interactive tools to add to the chart",
    )
    legend = param.ClassSelector(
        class_=hv.NdOverlay,
        doc="legend to be added on top of the multi-line chart",
        default=None,
    )
    def __init__(self, **params):
        super(InteractiveDatashaderMultiLine, self).__init__(**params)
    def update_data(self, data):
        # Swap in a new dataframe; param.depends("source_df") re-renders.
        self.source_df = data
    def add_box_select_callback(self, callback_fn):
        # NOTE(review): identical to the base-class implementation; this
        # override looks redundant and could be removed.
        self.box_stream = hv.streams.SelectionXY(subscribers=[callback_fn])
    @param.depends("source_df")
    def lines(self, **kwargs):
        # One hv.Curve per y-column, all renamed to "y" and keyed by the
        # original column name under key-dimension "k" for count_cat.
        return hv.NdOverlay(
            {
                _y: hv.Curve(
                    self.source_df[[self.x, _y]].rename(columns={_y: "y"})
                )
                for i, _y in enumerate(self.line_dims)
            },
            kdims="k",
        )
    def get_base_chart(self):
        """Static, dimmed rendering of all lines, overlaid beneath the
        interactive chart."""
        return dynspread(
            datashade(
                self.lines(),
                aggregator=ds.count_cat("k"),
                color_key=self.colors,
            ).opts(alpha=self.unselected_alpha, tools=[], default_tools=[])
        )
    def get_chart(self, streams=[]):
        """Interactive datashaded multi-line chart wired to the streams."""
        return datashade(
            hv.DynamicMap(self.lines, streams=streams),
            aggregator=ds.count_cat("k"),
            color_key=self.colors,
        ).opts(tools=[], default_tools=[])
    def view(self):
        """Compose the final displayable: interactive layer (+ optional
        legend overlay, dimmed base layer, and map tiles)."""
        dmap = dynspread(
            self.get_chart(streams=[self.box_stream, self.reset_stream])
        ).opts(
            responsive=True,
            tools=self.tools,
            active_tools=["xbox_select"],
            default_tools=[],
        )
        if self.legend:
            dmap *= self.legend
        if self.unselected_alpha > 0:
            dmap *= self.get_base_chart()
        return (self.tiles * dmap if self.tiles is not None else dmap).relabel(
            self.title
        )
class InteractiveDatashaderGraph(InteractiveDatashaderBase):
    """
    Interactive graph chart composed of an InteractiveDatashaderPoints
    layer (nodes) over an InteractiveDatashaderLine layer (edges), plus
    custom bokeh inspect tools for neighbor selection and edge display.
    """
    nodes_df = param.ClassSelector(
        class_=(cudf.DataFrame, dask_cudf.DataFrame),
        doc="nodes cuDF/dask_cuDF dataframe",
    )
    edges_df = param.ClassSelector(
        class_=(cudf.DataFrame, dask_cudf.DataFrame),
        doc="edges cuDF/dask_cuDF dataframe",
    )
    node_x = param.String("x")
    node_y = param.String("y")
    node_pixel_shade_type = param.String("linear")
    node_spread_threshold = param.Number(
        0, doc="threshold parameter passed to dynspread function"
    )
    tile_provider = param.String(None)
    node_aggregate_col = param.String(allow_None=True)
    node_aggregate_fn = param.String("count")
    legend = param.Boolean(True, doc="whether to display legends or not")
    legend_position = param.String("right", doc="position of legend")
    node_cmap = param.Dict(default={"cmap": CUXF_DEFAULT_COLOR_PALETTE})
    tools = param.List(
        default=[
            "pan",
            "reset",
            "box_select",
            "lasso_select",
            "wheel_zoom",
            "save",
        ],
        doc="interactive tools to add to the chart",
    )
    node_color_palette = param.List()
    node_point_shape = param.ObjectSelector(
        default="circle",
        objects=[
            "circle",
            "square",
            "rect_vertical",
            "rect_horizontal",
            "cross",
        ],
    )
    node_max_px = param.Integer(10)
    node_clims = param.Tuple(default=(None, None))
    edge_color = param.String()
    edge_source = param.String("src")
    edge_target = param.String("dst")
    edge_transparency = param.Number(0, bounds=(0, 1))
    inspect_neighbors = param.ClassSelector(
        class_=CustomInspectTool,
        doc="tool to assign selection mechanism(inspect neighbors or default)",
    )
    display_edges = param.ClassSelector(
        class_=CustomInspectTool,
        doc="tool to select whether to display edges or not",
    )
    @property
    def df_type(self):
        """Common dataframe type of nodes and edges (cudf or dask_cudf);
        raises if they differ."""
        if type(self.nodes_df) == type(self.edges_df):  # noqa: E721
            return type(self.nodes_df)
        raise TypeError("nodes and edges must be of the same type")
    def update_color_palette(self, value):
        # Keep this wrapper and the underlying points chart in sync.
        self.node_color_palette = value
        self.nodes_chart.color_palette = value
    def __init__(self, **params):
        super(InteractiveDatashaderGraph, self).__init__(**params)
        # Instantiate the tile source only when a provider name is given.
        self.tiles = (
            tile_sources[self.tile_provider]()
            if (self.tile_provider is not None)
            else self.tile_provider
        )
        # Delegate node rendering to the points chart...
        self.nodes_chart = InteractiveDatashaderPoints(
            source_df=self.nodes_df,
            x=self.node_x,
            y=self.node_y,
            aggregate_col=self.node_aggregate_col,
            aggregate_fn=self.node_aggregate_fn,
            color_palette=self.node_color_palette,
            pixel_shade_type=self.node_pixel_shade_type,
            tile_provider=self.tile_provider,
            legend=self.legend,
            legend_position=self.legend_position,
            spread_threshold=self.node_spread_threshold,
            point_shape=self.node_point_shape,
            max_px=self.node_max_px,
        )
        # ...and edge rendering to the line chart.
        self.edges_chart = InteractiveDatashaderLine(
            source_df=self.edges_df,
            x=self.edge_source,
            y=self.edge_target,
            color=self.edge_color,
            transparency=self.edge_transparency,
        )
    def update_data(self, nodes=None, edges=None):
        """Update either or both layers' dataframes in place."""
        if nodes is not None:
            self.nodes_chart.update_data(nodes)
        if edges is not None:
            self.edges_chart.update_data(edges)
    def view(self):
        """Compose edges under nodes (plus optional dimmed base layer and
        tiles), attaching the custom inspect tools via a render hook."""
        def set_tools(plot, element):
            # Bokeh render hook: add the custom tools once per plot
            # (guarded by checking the toolbar's last entry).
            if plot.state.toolbar.tools[-1] != self.display_edges:
                # if self.df_type != dask_cudf.DataFrame:
                #     # no interactions(yet) with dask_cudf backed graph charts
                plot.state.add_tools(self.inspect_neighbors)
                plot.state.add_tools(self.display_edges)
        dmap_nodes = dynspread(
            self.nodes_chart.get_chart(
                streams=[
                    self.box_stream,
                    self.lasso_stream,
                    self.reset_stream,
                ]
            ),
            threshold=self.node_spread_threshold,
            shape=self.node_point_shape,
            max_px=self.node_max_px,
        ).opts(
            xaxis=None,
            yaxis=None,
            responsive=True,
            default_tools=[],
            active_tools=["wheel_zoom", "pan"],
            tools=self.tools,
            hooks=[set_tools],
        )
        dmap_edges = dynspread(
            self.edges_chart.get_chart().opts(default_tools=[])
        )
        # Edges drawn first so nodes render on top.
        dmap_graph = dmap_edges * dmap_nodes
        if self.unselected_alpha > 0:
            dmap_graph *= self.nodes_chart.get_base_chart()
        return (
            self.tiles * dmap_graph if self.tiles is not None else dmap_graph
        ).relabel(self.title)
| 0 |
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts
|
rapidsai_public_repos/cuxfilter/python/cuxfilter/charts/deckgl/__init__.py
|
# from .bindings import PolygonDeckGL, TS_CODE
from .deckgl import choropleth
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.