repo_id
stringlengths
21
96
file_path
stringlengths
31
155
content
stringlengths
1
92.9M
__index_level_0__
int64
0
0
rapidsai_public_repos/cuxfilter/docs/source
rapidsai_public_repos/cuxfilter/docs/source/api_reference/dataframe.rst
API Reference ============= The two main components to cuxfilter are `DataFrame` for connecting the dashboard to a cuDF backed dataframe, and `Dashboard` for setting dashboard options. .. currentmodule:: cuxfilter.dataframe DataFrame --------- .. autoclass:: DataFrame :members: .. currentmodule:: cuxfilter.dashboard DashBoard --------- .. autoclass:: DashBoard :members:
0
rapidsai_public_repos/cuxfilter
rapidsai_public_repos/cuxfilter/ci/test_python.sh
#!/bin/bash # Copyright (c) 2022-2023, NVIDIA CORPORATION. set -euo pipefail . /opt/conda/etc/profile.d/conda.sh rapids-logger "Generate Python testing dependencies" rapids-dependency-file-generator \ --output conda \ --file_key test_python \ --matrix "cuda=${RAPIDS_CUDA_VERSION%.*};arch=$(arch);py=${RAPIDS_PY_VERSION}" | tee env.yaml rapids-mamba-retry env create --force -f env.yaml -n test # Temporarily allow unbound variables for conda activation. set +u conda activate test set -u rapids-logger "Downloading artifacts from previous jobs" PYTHON_CHANNEL=$(rapids-download-conda-from-s3 python) RAPIDS_TESTS_DIR=${RAPIDS_TESTS_DIR:-"${PWD}/test-results"} RAPIDS_COVERAGE_DIR=${RAPIDS_COVERAGE_DIR:-"${PWD}/coverage-results"} mkdir -p "${RAPIDS_TESTS_DIR}" "${RAPIDS_COVERAGE_DIR}" rapids-print-env rapids-mamba-retry install \ --channel "${PYTHON_CHANNEL}" \ cuxfilter rapids-logger "Check GPU usage" nvidia-smi EXITCODE=0 trap "EXITCODE=1" ERR set +e rapids-logger "pytest cuxfilter" pushd python/ pytest \ --cache-clear \ --junitxml="${RAPIDS_TESTS_DIR}/junit-cuxfilter.xml" \ --numprocesses=8 \ --cov-config=.coveragerc \ --cov=cuxfilter \ --cov-report=xml:"${RAPIDS_COVERAGE_DIR}/cuxfilter-coverage.xml" \ --cov-report=term popd rapids-logger "Test script exiting with value: $EXITCODE" exit ${EXITCODE}
0
rapidsai_public_repos/cuxfilter
rapidsai_public_repos/cuxfilter/ci/test_external.sh
#!/bin/bash # Copyright (c) 2023, NVIDIA CORPORATION. set -e rapids-logger "Create test_external conda environment" . /opt/conda/etc/profile.d/conda.sh # Install external dependencies into test_external conda environment rapids-mamba-retry env update -f ./ci/utils/external_dependencies.yaml conda activate test_external # Define input parameter PROJECT=$1 PR_NUMBER=$2 LIBRARIES=("datashader" "holoviews") # Change directory to /tmp pushd /tmp # Clone the specified Python libraries if [ "$PROJECT" = "all" ]; then # Loop through each library and install dependencies for LIBRARY in "${LIBRARIES[@]}" do rapids-logger "Clone $LIBRARY" # Clone the repository git clone https://github.com/holoviz/$LIBRARY.git rapids-logger "Install $LIBRARY" # Change directory to the library pushd $LIBRARY # Run setup.py with test dependencies python -m pip install -e .[tests] popd done else rapids-logger "Clone $PROJECT" git clone https://github.com/pyviz/$PROJECT.git # Check if PR_NUMBER is a non-empty, valid number if [ -n "$PR_NUMBER" ] && [ "$PR_NUMBER" -eq "$PR_NUMBER" ] 2>/dev/null; then rapids-logger "checkout PR $PR_NUMBER" # Fetch the pull request and check it out git fetch origin pull/$PR_NUMBER/head:pr/$PR_NUMBER git checkout pr/$PR_NUMBER fi rapids-logger "Install $PROJECT" # Change directory to the specified project pushd $PROJECT # Run setup.py with test dependencies python -m pip install -e .[tests] popd fi FILES="" # Install and run tests if [ "$PROJECT" = "all" ]; then # Loop through each library and install dependencies for LIBRARY in "${LIBRARIES[@]}" do rapids-logger "gathering GPU tests for $LIBRARY" TEST_DIR="$LIBRARY/$LIBRARY/tests" # Find all Python scripts containing the keywords cudf or dask_cudf except test_quadmesh.py FILES+=" $(grep -l -R -e 'cudf' --include='*.py' "$TEST_DIR" | grep -v test_quadmesh.py)" done else rapids-logger "gathering GPU tests for $PROJECT" TEST_DIR="$PROJECT/$PROJECT/tests" # Find all Python scripts containing the keywords cudf or 
dask_cudf FILES+=$(grep -l -R -e 'cudf' --include='*.py' "$TEST_DIR") fi EXITCODE=0 trap "EXITCODE=1" ERR set +e rapids-logger "running all gathered tests" DATASHADER_TEST_GPU=1 pytest --numprocesses=8 $FILES if [[ "$PROJECT" = "all" ]] || [[ "$PROJECT" = "datashader" ]]; then # run test_quadmesh.py separately as dask.array tests fail with numprocesses rapids-logger "running test_quadmesh.py" DATASHADER_TEST_GPU=1 pytest datashader/datashader/tests/test_quadmesh.py fi rapids-logger "Test script exiting with value: $EXITCODE" exit ${EXITCODE}
0
rapidsai_public_repos/cuxfilter
rapidsai_public_repos/cuxfilter/ci/build_python.sh
#!/bin/bash # Copyright (c) 2022, NVIDIA CORPORATION. set -euo pipefail source rapids-env-update rapids-print-env package_name="cuxfilter" package_dir="python" version=$(rapids-generate-version) commit=$(git rev-parse HEAD) echo "${version}" > VERSION sed -i "/^__git_commit__/ s/= .*/= \"${commit}\"/g" "${package_dir}/${package_name}/_version.py" rapids-logger "Begin py build" # TODO: Remove `--no-test` flag once importing on a CPU # node works correctly RAPIDS_PACKAGE_VERSION=${version} rapids-conda-retry mambabuild \ --no-test \ conda/recipes/cuxfilter rapids-upload-conda-to-s3 python
0
rapidsai_public_repos/cuxfilter
rapidsai_public_repos/cuxfilter/ci/build_wheel.sh
#!/bin/bash # Copyright (c) 2023, NVIDIA CORPORATION. set -euo pipefail package_name="cuxfilter" package_dir="python" source rapids-configure-sccache source rapids-date-string version=$(rapids-generate-version) commit=$(git rev-parse HEAD) RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})" # This is the version of the suffix with a preceding hyphen. It's used # everywhere except in the final wheel name. PACKAGE_CUDA_SUFFIX="-${RAPIDS_PY_CUDA_SUFFIX}" # Patch project metadata files to include the CUDA version suffix and version override. pyproject_file="${package_dir}/pyproject.toml" version_file="${package_dir}/${package_name}/_version.py" sed -i "s/name = \"${package_name}\"/name = \"${package_name}${PACKAGE_CUDA_SUFFIX}\"/g" ${pyproject_file} echo "${version}" > VERSION sed -i "/^__git_commit__/ s/= .*/= \"${commit}\"/g" ${version_file} # For nightlies we want to ensure that we're pulling in alphas as well. The # easiest way to do so is to augment the spec with a constraint containing a # min alpha version that doesn't affect the version bounds but does allow usage # of alpha versions for that dependency without --pre alpha_spec='' if ! rapids-is-release-build; then alpha_spec=',>=0.0.0a0' fi sed -r -i "s/cudf==(.*)\"/cudf${PACKAGE_CUDA_SUFFIX}==\1${alpha_spec}\"/g" ${pyproject_file} sed -r -i "s/dask_cudf==(.*)\"/dask_cudf${PACKAGE_CUDA_SUFFIX}==\1${alpha_spec}\"/g" ${pyproject_file} sed -r -i "s/cuspatial==(.*)\"/cuspatial${PACKAGE_CUDA_SUFFIX}==\1${alpha_spec}\"/g" ${pyproject_file} if [[ $PACKAGE_CUDA_SUFFIX == "-cu12" ]]; then sed -i "s/cupy-cuda11x/cupy-cuda12x/g" ${pyproject_file} fi cd "${package_dir}" python -m pip wheel . -w dist -vvv --no-deps --disable-pip-version-check RAPIDS_PY_WHEEL_NAME="${package_name}_${RAPIDS_PY_CUDA_SUFFIX}" rapids-upload-wheels-to-s3 dist
0
rapidsai_public_repos/cuxfilter
rapidsai_public_repos/cuxfilter/ci/check_style.sh
#!/bin/bash # Copyright (c) 2020-2022, NVIDIA CORPORATION. set -euo pipefail rapids-logger "Create checks conda environment" . /opt/conda/etc/profile.d/conda.sh rapids-dependency-file-generator \ --output conda \ --file_key checks \ --matrix "cuda=${RAPIDS_CUDA_VERSION%.*};arch=$(arch);py=${RAPIDS_PY_VERSION}" | tee env.yaml rapids-mamba-retry env create --force -f env.yaml -n checks conda activate checks # Run pre-commit checks pre-commit run --hook-stage manual --all-files --show-diff-on-failure
0
rapidsai_public_repos/cuxfilter
rapidsai_public_repos/cuxfilter/ci/test_wheel.sh
#!/bin/bash # Copyright (c) 2023, NVIDIA CORPORATION. set -eou pipefail RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})" RAPIDS_PY_WHEEL_NAME="cuxfilter_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./dist # echo to expand wildcard before adding `[extra]` requires for pip python -m pip install $(echo ./dist/cuxfilter*.whl)[test] python -m pytest -n 8 ./python/cuxfilter/tests
0
rapidsai_public_repos/cuxfilter
rapidsai_public_repos/cuxfilter/ci/test_notebooks.sh
#!/bin/bash # Copyright (c) 2020-2023, NVIDIA CORPORATION. set -euo pipefail . /opt/conda/etc/profile.d/conda.sh rapids-logger "Generate notebook testing dependencies" rapids-dependency-file-generator \ --output conda \ --file_key test_notebooks \ --matrix "cuda=${RAPIDS_CUDA_VERSION%.*};arch=$(arch);py=${RAPIDS_PY_VERSION}" | tee env.yaml rapids-mamba-retry env create --force -f env.yaml -n test # Temporarily allow unbound variables for conda activation. set +u conda activate test set -u rapids-print-env rapids-logger "Downloading artifacts from previous jobs" PYTHON_CHANNEL=$(rapids-download-conda-from-s3 python) rapids-mamba-retry install \ --channel "${PYTHON_CHANNEL}" \ cuxfilter NBTEST="$(realpath "$(dirname "$0")/utils/nbtest.sh")" pushd notebooks # Add notebooks that should be skipped here # (space-separated list of filenames without paths) SKIPNBS="" EXITCODE=0 trap "EXITCODE=1" ERR set +e for nb in $(find . -name "*.ipynb"); do nbBasename=$(basename ${nb}) # Skip all notebooks that use dask (in the code or even in their name) if ((echo ${nb} | grep -qi dask) || \ (grep -q dask ${nb})); then echo "--------------------------------------------------------------------------------" echo "SKIPPING: ${nb} (suspected Dask usage, not currently automatable)" echo "--------------------------------------------------------------------------------" elif (echo " ${SKIPNBS} " | grep -q " ${nbBasename} "); then echo "--------------------------------------------------------------------------------" echo "SKIPPING: ${nb} (listed in skip list)" echo "--------------------------------------------------------------------------------" else nvidia-smi ${NBTEST} ${nbBasename} fi done rapids-logger "Test script exiting with value: $EXITCODE" exit ${EXITCODE}
0
rapidsai_public_repos/cuxfilter
rapidsai_public_repos/cuxfilter/ci/build_docs.sh
#!/bin/bash set -euo pipefail rapids-logger "Create test conda environment" . /opt/conda/etc/profile.d/conda.sh rapids-dependency-file-generator \ --output conda \ --file_key docs \ --matrix "cuda=${RAPIDS_CUDA_VERSION%.*};arch=$(arch);py=${RAPIDS_PY_VERSION}" | tee env.yaml rapids-mamba-retry env create --force -f env.yaml -n docs conda activate docs rapids-print-env rapids-logger "Downloading artifacts from previous jobs" PYTHON_CHANNEL=$(rapids-download-conda-from-s3 python) rapids-mamba-retry install \ --channel "${PYTHON_CHANNEL}" \ cuxfilter export RAPIDS_VERSION_NUMBER="23.12" export RAPIDS_DOCS_DIR="$(mktemp -d)" rapids-logger "Build Python docs" pushd docs sphinx-build -b dirhtml ./source _html sphinx-build -b text ./source _text mkdir -p "${RAPIDS_DOCS_DIR}/cuxfilter/"{html,txt} mv _html/* "${RAPIDS_DOCS_DIR}/cuxfilter/html" mv _text/* "${RAPIDS_DOCS_DIR}/cuxfilter/txt" popd rapids-upload-docs
0
rapidsai_public_repos/cuxfilter/ci
rapidsai_public_repos/cuxfilter/ci/release/update-version.sh
#!/bin/bash # Copyright (c) 2020-2023, NVIDIA CORPORATION. ############################# # cuxfilter Version Updater # ############################# ## Usage # bash update-version.sh <new_version> # Format is YY.MM.PP - no leading 'v' or trailing 'a' NEXT_FULL_TAG=$1 # Get current version CURRENT_TAG=$(git tag --merged HEAD | grep -xE '^v.*' | sort --version-sort | tail -n 1 | tr -d 'v') CURRENT_MAJOR=$(echo $CURRENT_TAG | awk '{split($0, a, "."); print a[1]}') CURRENT_MINOR=$(echo $CURRENT_TAG | awk '{split($0, a, "."); print a[2]}') CURRENT_PATCH=$(echo $CURRENT_TAG | awk '{split($0, a, "."); print a[3]}') CURRENT_SHORT_TAG=${CURRENT_MAJOR}.${CURRENT_MINOR} #Get <major>.<minor> for next version NEXT_MAJOR=$(echo $NEXT_FULL_TAG | awk '{split($0, a, "."); print a[1]}') NEXT_MINOR=$(echo $NEXT_FULL_TAG | awk '{split($0, a, "."); print a[2]}') NEXT_SHORT_TAG=${NEXT_MAJOR}.${NEXT_MINOR} # Need to distutils-normalize the original version NEXT_SHORT_TAG_PEP440=$(python -c "from setuptools.extern import packaging; print(packaging.version.Version('${NEXT_SHORT_TAG}'))") echo "Preparing release $CURRENT_TAG => $NEXT_FULL_TAG" # Inplace sed replace; workaround for Linux and Mac function sed_runner() { sed -i.bak ''"$1"'' $2 && rm -f ${2}.bak } # RTD update sed_runner 's/version = .*/version = '"'${NEXT_SHORT_TAG}'"'/g' docs/source/conf.py sed_runner 's/release = .*/release = '"'${NEXT_FULL_TAG}'"'/g' docs/source/conf.py # docs update sed_runner "/cuxfilter=[0-9]\{2\}.[0-9]\{2\}/ s/=[0-9]\{2\}.[0-9]\{2\}/=${NEXT_SHORT_TAG}/g" docs/source/user_guide/installation.rst # Centralized version file update echo "${NEXT_FULL_TAG}" > VERSION DEPENDENCIES=( cudf cuxfilter dask-cuda dask-cudf cugraph cuspatial ) for DEP in "${DEPENDENCIES[@]}"; do for FILE in dependencies.yaml conda/environments/*.yaml ci/utils/external_dependencies.yaml; do sed_runner "/-.* ${DEP}==/ s/==.*/==${NEXT_SHORT_TAG_PEP440}.*/g" ${FILE} done for FILE in python/pyproject.toml; do sed_runner "/\"${DEP}==/ 
s/==.*\"/==${NEXT_SHORT_TAG_PEP440}.*\"/g" ${FILE} done done # README.md update sed_runner "/version == / s/== .*\`/== ${NEXT_SHORT_TAG}\`/g" README.md sed_runner "/cuxfilter=[0-9]\{2\}.[0-9]\{2\}/ s/=[0-9]\{2\}.[0-9]\{2\}/=${NEXT_SHORT_TAG}/g" README.md # CI files for FILE in .github/workflows/*.yaml; do sed_runner "/shared-workflows/ s/@.*/@branch-${NEXT_SHORT_TAG}/g" ${FILE}; done sed_runner "s/RAPIDS_VERSION_NUMBER=\".*/RAPIDS_VERSION_NUMBER=\"${NEXT_SHORT_TAG}\"/g" ci/build_docs.sh sed_runner "s/RAPIDS_VERSION=.*/RAPIDS_VERSION=${NEXT_SHORT_TAG}.*/g" ci/test_external.sh
0
rapidsai_public_repos/cuxfilter/ci
rapidsai_public_repos/cuxfilter/ci/utils/external_dependencies.yaml
name: test_external channels: - rapidsai-nightly - conda-forge - nvidia dependencies: - cudf==23.12.* - dask-cudf==23.12.* - cuxfilter==23.12.* - cuda-version=12.0 - python=3.10 - tensorflow - xarray-spatial - pycaret - graphistry - dash - dask-sql - pytest-benchmark
0
rapidsai_public_repos/cuxfilter/ci
rapidsai_public_repos/cuxfilter/ci/utils/nbtest.sh
#!/bin/bash MAGIC_OVERRIDE_CODE=" def my_run_line_magic(*args, **kwargs): g=globals() l={} for a in args: try: exec(str(a),g,l) except Exception as e: print('WARNING: %s\n While executing this magic function code:\n%s\n continuing...\n' % (e, a)) else: g.update(l) def my_run_cell_magic(*args, **kwargs): my_run_line_magic(*args, **kwargs) get_ipython().run_line_magic=my_run_line_magic get_ipython().run_cell_magic=my_run_cell_magic " NO_COLORS=--colors=NoColor EXITCODE=0 NBTMPDIR="$WORKSPACE/tmp" mkdir -p ${NBTMPDIR} for nb in $*; do NBFILENAME=$1 NBNAME=${NBFILENAME%.*} NBNAME=${NBNAME##*/} NBTESTSCRIPT=${NBTMPDIR}/${NBNAME}-test.py shift echo -------------------------------------------------------------------------------- echo STARTING: ${NBNAME} echo -------------------------------------------------------------------------------- jupyter nbconvert --to script ${NBFILENAME} --output ${NBTMPDIR}/${NBNAME}-test echo "${MAGIC_OVERRIDE_CODE}" > ${NBTMPDIR}/tmpfile cat ${NBTESTSCRIPT} >> ${NBTMPDIR}/tmpfile mv ${NBTMPDIR}/tmpfile ${NBTESTSCRIPT} echo "Running \"ipython ${NO_COLORS} ${NBTESTSCRIPT}\" on $(date)" echo time bash -c "ipython ${NO_COLORS} ${NBTESTSCRIPT}; EC=\$?; echo -------------------------------------------------------------------------------- ; echo DONE: ${NBNAME}; exit \$EC" NBEXITCODE=$? echo EXIT CODE: ${NBEXITCODE} echo EXITCODE=$((EXITCODE | ${NBEXITCODE})) done exit ${EXITCODE}
0
rapidsai_public_repos/cuxfilter/ci
rapidsai_public_repos/cuxfilter/ci/utils/wheel_smoke_test.py
import cuxfilter if __name__ == '__main__': assert cuxfilter.__version__ is not None
0
rapidsai_public_repos/cuxfilter/ci
rapidsai_public_repos/cuxfilter/ci/utils/nbtestlog2junitxml.py
# Generate a junit-xml file from parsing a nbtest log import re from xml.etree.ElementTree import Element, ElementTree from os import path import string from enum import Enum startingPatt = re.compile("^STARTING: ([\w\.\-]+)$") skippingPatt = re.compile("^SKIPPING: ([\w\.\-]+)\s*(\(([\w\.\-\ \,]+)\))?\s*$") exitCodePatt = re.compile("^EXIT CODE: (\d+)$") folderPatt = re.compile("^FOLDER: ([\w\.\-]+)$") timePatt = re.compile("^real\s+([\d\.ms]+)$") linePatt = re.compile("^" + ("-" * 80) + "$") def getFileBaseName(filePathName): return path.splitext(path.basename(filePathName))[0] def makeTestCaseElement(attrDict): return Element("testcase", attrib=attrDict) def makeSystemOutElement(outputLines): e = Element("system-out") e.text = "".join(filter(lambda c: c in string.printable, outputLines)) return e def makeFailureElement(outputLines): e = Element("failure", message="failed") e.text = "".join(filter(lambda c: c in string.printable, outputLines)) return e def setFileNameAttr(attrDict, fileName): attrDict.update(file=fileName, classname="", line="", name="", time="" ) def setClassNameAttr(attrDict, className): attrDict["classname"] = className def setTestNameAttr(attrDict, testName): attrDict["name"] = testName def setTimeAttr(attrDict, timeVal): (mins, seconds) = timeVal.split("m") seconds = float(seconds.strip("s")) + (60 * int(mins)) attrDict["time"] = str(seconds) def incrNumAttr(element, attr): newVal = int(element.attrib.get(attr)) + 1 element.attrib[attr] = str(newVal) def parseLog(logFile, testSuiteElement): # Example attrs: # errors="0" failures="0" hostname="a437d6835edf" name="pytest" skipped="2" tests="6" time="6.174" timestamp="2019-11-18T19:49:47.946307" with open(logFile) as lf: testSuiteElement.attrib["tests"] = "0" testSuiteElement.attrib["errors"] = "0" testSuiteElement.attrib["failures"] = "0" testSuiteElement.attrib["skipped"] = "0" testSuiteElement.attrib["time"] = "0" testSuiteElement.attrib["timestamp"] = "" attrDict = {} 
#setFileNameAttr(attrDict, logFile) setFileNameAttr(attrDict, "nbtest") parserStateEnum = Enum("parserStateEnum", "newTest startingLine finishLine exitCode") parserState = parserStateEnum.newTest testOutput = "" for line in lf.readlines(): if parserState == parserStateEnum.newTest: m = folderPatt.match(line) if m: setClassNameAttr(attrDict, m.group(1)) continue m = skippingPatt.match(line) if m: setTestNameAttr(attrDict, getFileBaseName(m.group(1))) setTimeAttr(attrDict, "0m0s") skippedElement = makeTestCaseElement(attrDict) message = m.group(3) or "" skippedElement.append(Element("skipped", message=message, type="")) testSuiteElement.append(skippedElement) incrNumAttr(testSuiteElement, "skipped") incrNumAttr(testSuiteElement, "tests") continue m = startingPatt.match(line) if m: parserState = parserStateEnum.startingLine testOutput = "" setTestNameAttr(attrDict, m.group(1)) setTimeAttr(attrDict, "0m0s") continue continue elif parserState == parserStateEnum.startingLine: if linePatt.match(line): parserState = parserStateEnum.finishLine testOutput = "" continue elif parserState == parserStateEnum.finishLine: if linePatt.match(line): parserState = parserStateEnum.exitCode else: testOutput += line continue elif parserState == parserStateEnum.exitCode: m = exitCodePatt.match(line) if m: testCaseElement = makeTestCaseElement(attrDict) if m.group(1) != "0": failureElement = makeFailureElement(testOutput) testCaseElement.append(failureElement) incrNumAttr(testSuiteElement, "failures") else: systemOutElement = makeSystemOutElement(testOutput) testCaseElement.append(systemOutElement) testSuiteElement.append(testCaseElement) parserState = parserStateEnum.newTest testOutput = "" incrNumAttr(testSuiteElement, "tests") continue m = timePatt.match(line) if m: setTimeAttr(attrDict, m.group(1)) continue continue if __name__ == "__main__": import sys testSuitesElement = Element("testsuites") testSuiteElement = Element("testsuite", name="nbtest", hostname="") parseLog(sys.argv[1], 
testSuiteElement) testSuitesElement.append(testSuiteElement) ElementTree(testSuitesElement).write(sys.argv[1]+".xml", xml_declaration=True)
0
rapidsai_public_repos
rapidsai_public_repos/shared-workflows/renovate.json
{ "$schema": "https://docs.renovatebot.com/renovate-schema.json", "extends": ["config:base"] }
0
rapidsai_public_repos
rapidsai_public_repos/shared-workflows/README.md
# shared-workflows ## Introduction This repository contains [reusable GitHub Action workflows](https://docs.github.com/en/actions/using-workflows/reusing-workflows) and [composite actions](https://docs.github.com/en/actions/creating-actions/creating-a-composite-action). See the articles below for a comparison between these two types of reusable GitHub Action components: - https://wallis.dev/blog/composite-github-actions - https://dev.to/n3wt0n/composite-actions-vs-reusable-workflows-what-is-the-difference-github-actions-11kd ## Folder Structure Reusable workflows must be placed in the `.github/workflows` directory as mentioned in the community discussions below: - https://github.com/community/community/discussions/10773 - https://github.com/community/community/discussions/9050 Composite actions can be placed in any arbitrary repository location. The convention adopted for this repository is to place composite actions in the root of this repository. For more information on any particular composite action, see the `README.md` file in its respective folder.
0
rapidsai_public_repos
rapidsai_public_repos/shared-workflows/LICENSE
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2022 NVIDIA Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
0
rapidsai_public_repos/shared-workflows/ci
rapidsai_public_repos/shared-workflows/ci/release/update-version.sh
#!/bin/bash # Copyright (c) 2019-2023, NVIDIA CORPORATION. ########################################### # shared-workflows Version Updater # ########################################### ## Usage # bash update-version.sh <new_version> # Format is YY.MM.PP - no leading 'v' or trailing 'a' NEXT_FULL_TAG=$1 #Get <major>.<minor> for next version NEXT_MAJOR=$(echo $NEXT_FULL_TAG | awk '{split($0, a, "."); print a[1]}') NEXT_MINOR=$(echo $NEXT_FULL_TAG | awk '{split($0, a, "."); print a[2]}') NEXT_SHORT_TAG=${NEXT_MAJOR}.${NEXT_MINOR} echo "Updating repository for $NEXT_FULL_TAG" # Inplace sed replace; workaround for Linux and Mac function sed_runner() { sed -i.bak ''"$1"'' $2 && rm -f ${2}.bak } for FILE in .github/workflows/*.yaml; do sed_runner "/rapidsai\/shared-workflows/ s/@.*/@branch-${NEXT_SHORT_TAG}/g" "${FILE}" done
0
rapidsai_public_repos
rapidsai_public_repos/code-share/README.md
# RAPIDS - Code Share

This repository is a way to share experimental code that is not yet integrated into RAPIDS products. Prototypes may not build or run.
0
rapidsai_public_repos/code-share
rapidsai_public_repos/code-share/maxflow/galois-preflowpush.sh
#!/bin/sh
# Wrapper for the Galois preflow-push maxflow binary.
# Converts the 1-based source/target vertex ids used elsewhere in this
# project to the 0-based ids Galois expects, then filters the output down
# to the timing and max-flow lines.

if [ "$#" -lt 3 ]
then
	# Misuse is an error: report on stderr and exit non-zero so callers
	# (e.g. test scripts) can detect it. The old script exited 0 here.
	echo "Usage : $0 <matrix gr format> <source> <target>" >&2
	exit 1
fi

s=$(($2 - 1)) #base 0 in galois
t=$(($3 - 1))

# Resolve the directory containing this script so it works from anywhere.
DIR="$( cd "$(dirname "$0")" && pwd )"

$DIR/galois/build/apps/preflowpush/preflowpush "$1" "$s" "$t" -t 12 --noverify | grep "^time\|^max"
0
rapidsai_public_repos/code-share
rapidsai_public_repos/code-share/maxflow/config.h
//
// Config for maxflow
//
// Central definition of the scalar type used for capacities and flow
// values throughout the solvers.

#pragma once

#include <cfloat> // DBL_MAX — previously relied on another header to pull this in

// Flow/capacity scalar type.
typedef double flow_t;

// Sentinel for "unbounded" capacity / infinite flow.
#define FLOW_INF DBL_MAX
0
rapidsai_public_repos/code-share
rapidsai_public_repos/code-share/maxflow/graph_tools_cpu.cpp
// Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.

// CPU reference implementations of the small helpers declared in
// graph_tools.h (the GPU counterparts live in graph_tools_gpu.cu).

// Fill data[0..size) with value.
template<typename T>
void fill(int size, T *data, T value)
{
    for(int i=0; i != size; ++i)
        data[i] = value;
}

// mask[e] = 1 iff edge e still has residual capacity (cf[e] > 0).
// Declared 'bool' in graph_tools.h and returns bool on the GPU path;
// the previous 'void' definition here mismatched that declaration
// (undefined behavior at the call site). Always returns true.
bool setup_mask_unsaturated(int num_edges, char *mask, double *cf)
{
    for(int i=0; i != num_edges; ++i)
        mask[i] = (cf[i] > 0);
    return true;
}

// For each edge e, flag its reverse edge as usable iff e has residual
// capacity left.
void setup_mask_unsaturated_backward(int num_edges, int *mask, double *cf, int *reverse_edge)
{
    for(int i=0; i != num_edges; ++i)
        mask[reverse_edge[i]] = (cf[i] > 0);
}

template void fill<int>(int,int*,int);
template void fill<double>(int,double*,double);
// Explicit char instantiation to match the GPU translation unit, which
// already provides it (masks are char arrays).
template void fill<char>(int,char*,char);
0
rapidsai_public_repos/code-share
rapidsai_public_repos/code-share/maxflow/README.md
# Maxflow This framework implements various maximum flow algorithms on GPU. ## Motivation Existing GPU graph libraries like Gunrock and nvGraph are both missing a few important graph primitives including the maximum flow, which is frequently used in network analysis, image segmentation, clustering, bipartite matching, and other problems. There is also an interesting application of maximum flow algorithm to community detection problem in social networks. There are a lot of algorithms developed to compute the maximum flow so the task will be to investigate their appropriate parallel implementations, find bottlenecks, optimize and benchmark on a set of graphs with different characteristics and explore a few real applications. If things go well, we might consider integration into nvGraph as the final step, although the work will be mostly focused on new algorithms development and analysis. ## Build instructions Update Makefile as necessary then run `make` to build everything, or use `make <version>` to build a specific version only: `cpu`, `gpu_naive`, `gpu_gunrock`. Note that if you're trying to build `gpu_gunrock` you will need to clone recursively to fetch `gunrock` submodule. Then also build Gunrock with all dependencies using cmake. ## Running examples Download data sets using a shell script in `data/get_data.sh`. ``` Usage: ./maxflow <input matrix file> <source id> <target id> [<random seed>] if random seed is not specified the weights are set as 1/degree for each vertex ``` There is also a test script `test.py` which runs various pre-defined examples and validates results. You will need to create a soft link `maxflow_gpu` to `maxflow_gpu_naive` or `maxflow_gpu_gunrock` to use the script.
0
rapidsai_public_repos/code-share
rapidsai_public_repos/code-share/maxflow/maxflow.cpp
// Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. /* LOG_LEVEL: 0: no output 1: one line result 2: print augmented path stats 3: print augmented path vertices 4: bfs level stats for each path */ #define _POSIX_C_SOURCE 199309L #include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include <time.h> #include <assert.h> #include "allocator.h" #include "IO/gr_reader.h" #include "matrix.h" #include "config.h" extern flow_t maxflowimplementation(csr_graph* g, int s, int t, float *time); flow_t max_flow(int argc, char **argv) { csr_graph g; // main graph structure flow_t fm = 0; // value of max flow read_gr(argv[1], g); // read source/target int s = atoi(argv[2])-1; int t = atoi(argv[3])-1; // setup flow network with zeros g.vals_flow = (flow_t*)my_malloc(g.nnz * sizeof(flow_t)); memset(g.vals_flow, 0, g.nnz * sizeof(flow_t)); // start timer float time; fm = maxflowimplementation(&g, s, t, &time); // stop timer int fm_i = (int)fm; printf("max flow = %i\n", fm_i); printf("time: %.3f s\n", time); // write final flow network for debug purposes //write_csr(argv[4], f.n, f.n, f.nnz, f.rows, f.cols, f.vals); // free memory my_free(g.row_offsets); my_free(g.col_indices); my_free(g.vals_cap); my_free(g.vals_flow); return fm; } void print_help() { printf("Usage: ./maxflow <input matrix file> <source id> <target id> [<random seed>]\n"); printf(" if random seed is not specified the weights are set as 1/degree for each vertex\n"); } int main(int argc, char **argv) { if (argc < 4) print_help(); else{ max_flow(argc, argv); } return 0; }
0
rapidsai_public_repos/code-share
rapidsai_public_repos/code-share/maxflow/graph_tools.h
// Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.

// Shared declarations for small graph helpers. Each has a CPU
// implementation (graph_tools_cpu.cpp) and a GPU implementation
// (graph_tools_gpu.cu); the Makefile links exactly one of them.

#pragma once

#include "allocator.h"

#include <vector>
#include <tuple>
#include <algorithm>
#include <numeric>

using std::vector;
using std::tuple;
using std::tie;
using std::get;
using std::make_tuple;

// (neighbor id, capacity, flow) triple used when sorting adjacency lists.
typedef tuple<int,double,double> idd;

// Fill data[0..size) with value.
template<typename T>
void fill(int size, T *data, T value);

// mask[e] = 1 iff cf[e] > 0 (edge e still has residual capacity).
bool setup_mask_unsaturated(int num_edges, char *mask, double *cf);

// For each edge e, flag its reverse edge iff e has residual capacity.
void setup_mask_unsaturated_backward(int num_edges, int *mask, double *cf, int *reverse_edge);

/*
// Dead code kept for reference: BFS-based renumbering of the graph from the
// sink, intended to improve memory locality.
//reorder based on bfs from s
void inline reorder_memory(csr_graph* in, csr_graph* out, int *q, int *h, int *s, int *t)
{
    //TODO lambdas in BFS
    printf("Start reordering \n");

    //alocating new graph
    //using too much memory, pre BFS ?
    out->row_offsets = (int*)my_malloc((in->n+1) * sizeof(int));
    out->col_indices = (int*)my_malloc(in->nnz * sizeof(int));
    out->vals_cap = (double*)my_malloc(in->nnz * sizeof(double));
    out->vals_flow = (double*)my_malloc(in->nnz * sizeof(double));

    //used to store old node id -> new node id
    int* eq = (int*)malloc(in->n * sizeof(int));

    #pragma omp parallel for
    for (int i = 0; i < in->n; i++)
        h[i] = -1;

    // start with source vertex
    q[0] = *t;
    eq[*t] = 0;
    h[*t] = 0;

    int start_idx = 0;
    int i_node = 1;
    int i_edge = 0;
    int end_idx = i_node;
    int found = 0;
    int bfs_level = 0;

    vector<idd> current_node_edges;

    while(!found && start_idx < end_idx)
    {
        //printf("Level %i : %i nodes \n", bfs_level, end_idx - start_idx);
        for(int idx = start_idx; idx < end_idx; idx++)
        {
            int u = q[idx];
            int new_u = idx;
            out->row_offsets[new_u] = i_edge;
            //printf("Start index %i ----- : \n", new_u);
            current_node_edges.clear();
            for (int i = in->row_offsets[u]; i < in->row_offsets[u+1]; i++)
            {
                int v = in->col_indices[i];
                if(__sync_val_compare_and_swap(&h[v], -1, bfs_level+1) == -1)
                {
                    int new_v = __sync_fetch_and_add (&i_node, 1);
                    q[new_v] = v;
                    eq[v] = new_v;
                }
                current_node_edges.push_back(make_tuple(eq[v], in->vals_cap[i], in->vals_flow[i]));
                //printf("------- to edge %i ----- : \n", eq[v]);
            }

            std::sort(current_node_edges.begin(),
                      current_node_edges.end(),
                      [] (const idd& t1, const idd& t2) { return get<0>(t1) < get<0>(t2); });

            //printf("Edges of node %i : \n", u);
            for(size_t k=0; k != current_node_edges.size(); ++k)
            {
                tie(out->col_indices[i_edge], out->vals_cap[i_edge], out->vals_flow[i_edge]) = current_node_edges[k];
                ++i_edge;
                //printf("----> %i : %f \n", out->col_indices[i_edge-1], out->vals_cap[i_edge-1]);
            }
        }
        start_idx = end_idx;
        end_idx = i_node;
        ++bfs_level;
    }

    out->row_offsets[i_node] = i_edge;
    out->n = i_node;
    out->nnz = i_edge;

    *s = eq[*s];
    *t = eq[*t];

    free(eq);
    printf("Reordering memory : from %i to %i \n", in->n, out->n);
}
*/
0
rapidsai_public_repos/code-share
rapidsai_public_repos/code-share/maxflow/Makefile
# Build configuration for the maxflow prototypes.
# Targets fall into three groups: CPU builds, naive GPU builds, and GPU
# builds that link against Gunrock.

CCXX = g++
NVCC = nvcc

GPU_TARGETS= edmonds-karp-gpu-naive push-relabel-gpu-naive mpm-gpu-naive
GPU_TARGETS_GUNROCK= edmonds-karp-gpu-gunrock push-relabel-gpu-gunrock
CPU_TARGETS= edmonds-karp-cpu push-relabel-cpu boost-push-relabel
TARGETS= $(GPU_TARGETS) $(GPU_TARGETS_GUNROCK) $(CPU_TARGETS)

CXXFLAGS = -O3 -fno-optimize-sibling-calls --std=c++11
NVCC_FLAGS = -DUSE_GPU -O3 --std=c++11 --expt-extended-lambda -lnvToolsExt

# add support for more cuda architectures below
#NVCC_ARCH += -gencode arch=compute_35,code=sm_35
#NVCC_ARCH += -gencode arch=compute_52,code=sm_52
NVCC_ARCH += -gencode arch=compute_60,code=sm_60

# update gunrock location if necessary
GUNROCK_DIR = ./gunrock
GUNROCK_OPTS = $(GUNROCK_DIR)/gunrock/util/test_utils.cu $(GUNROCK_DIR)/gunrock/util/error_utils.cu $(GUNROCK_DIR)/externals/moderngpu/src/mgpucontext.cu $(GUNROCK_DIR)/externals/moderngpu/src/mgpuutil.cpp --std=c++11 -I$(GUNROCK_DIR)/externals/moderngpu/include -I$(GUNROCK_DIR)/externals/cub -Xcompiler -DMETIS_FOUND -isystem $(GUNROCK_DIR)/gunrock -isystem $(GUNROCK_DIR)
GUNROCK_LIBS = -L$(GUNROCK_DIR)/build/lib -lgunrock -Xlinker -lboost_system -Xlinker -lboost_chrono -Xlinker -lboost_timer -Xlinker -lboost_filesystem -Xlinker -lgomp -Xlinker -lmetis

CLEAN=rm -Rf *.o core bfs/*.o edmonds-karp/*.o push-relabel/*.o MPM/*.o boost_push_relabel/*.o galois-preflowpush

ifeq ($(LOG_LEVEL),)
LOG_LEVEL = 0
endif

# mrproper added: it produces no file named "mrproper" and must always run.
.PHONY: all_cpu all_gpu clean mrproper

all_gpu: clean $(GPU_TARGETS) $(GPU_TARGETS_GUNROCK)
all_cpu: clean $(CPU_TARGETS)

# Target-specific compiler/flag selection.
$(GPU_TARGETS) $(GPU_TARGETS_GUNROCK): C = $(NVCC)
$(GPU_TARGETS) $(GPU_TARGETS_GUNROCK): CFLAGS = $(NVCC_FLAGS) $(NVCC_ARCH)
$(CPU_TARGETS): C = $(CCXX)
$(CPU_TARGETS): CFLAGS = $(CXXFLAGS)

# Per-binary object dependencies.
boost-push-relabel: boost_push_relabel/push-relabel.o
edmonds-karp-cpu: bfs/bfs_cpu.o edmonds-karp/edmonds-karp.o maxflow.o
edmonds-karp-gpu-naive: bfs/bfs_gpu_naive.o edmonds-karp/edmonds-karp.o maxflow.o
edmonds-karp-gpu-gunrock: edmonds-karp/edmonds-karp.o maxflow.o
	$(NVCC) $(NVCC_FLAGS) $(NVCC_ARCH) $(GUNROCK_OPTS) $(GUNROCK_LIBS) -DLOG_LEVEL=$(LOG_LEVEL) -o $@ $^ bfs/bfs_gpu_gunrock.cu

mpm-gpu-naive: bfs/bfs_gpu_naive.o MPM/MPM_implem.o maxflow.o MPM/MPM.o
push-relabel-cpu: push-relabel/push-relabel_operations_cpu_omp.o graph_tools_cpu.o bfs/bfs_cpu.o push-relabel/push-relabel.o maxflow.o
push-relabel-gpu-naive: push-relabel/push-relabel_operations_gpu.o graph_tools_gpu.o bfs/bfs_gpu_naive.o push-relabel/push-relabel.o maxflow.o
# graph_tools_gpu.o was listed twice here; once is enough.
push-relabel-gpu-gunrock: graph_tools_gpu.o maxflow.o push-relabel/push-relabel_operations_gpu.o push-relabel/push-relabel.o
	$(NVCC) $(NVCC_FLAGS) $(NVCC_ARCH) $(GUNROCK_OPTS) $(GUNROCK_LIBS) -DLOG_LEVEL=$(LOG_LEVEL) -o $@ $^ bfs/bfs_gpu_gunrock.cu

# Use $(CCXX) rather than a hard-coded g++ so the compiler override works.
mtx2dimacs:
	$(CCXX) --std=c++11 IO/mtx2dimacs.cpp -o mtx2dimacs

$(CPU_TARGETS):
	$(C) $(CFLAGS) -DLOG_LEVEL=$(LOG_LEVEL) -o $@ $^

$(GPU_TARGETS):
	$(NVCC) $(NVCC_FLAGS) $(NVCC_ARCH) -DLOG_LEVEL=$(LOG_LEVEL) -o $@ $^

galois-preflowpush:
	cd galois/build && cmake .. && make preflowpush && cp ../../galois-preflowpush.sh ../../galois-preflowpush

#Implicit rules
%.o: %.cpp
	$(C) $(CFLAGS) -DLOG_LEVEL=$(LOG_LEVEL) -o $@ -c $<

%.o: %.cu
	$(NVCC) $(NVCC_FLAGS) $(NVCC_ARCH) -DLOG_LEVEL=$(LOG_LEVEL) -o $@ -c $<

clean:
	$(CLEAN)

mrproper: clean
	rm -f $(TARGETS)
0
rapidsai_public_repos/code-share
rapidsai_public_repos/code-share/maxflow/utils.cu
#pragma once
// Device-side iteration helpers and thin CUB wrappers shared by the GPU
// maxflow implementations. This file is #included from other .cu sources
// (hence the #pragma once).

#include "allocator.h"
#include "matrix.h"
#include "cub/cub/cub.cuh"

#include <utility>
#include <climits> // INT_MAX (was relied on transitively)
#include <fstream> //TODO remove

#define FORWARD 1
#define BACKWARD 0

#define N_MAX_BLOCKS 65534
#define WARP_SIZE 32 // (was defined twice; single definition kept)

#include "config.h"

using cub::KeyValuePair;
typedef KeyValuePair<int,flow_t> kvpid;

//
// Custom atomic operations
//

// Software atomicAdd(double) for pre-Pascal architectures; sm_60+ has a
// hardware intrinsic of the same name.
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 600)
__device__ double atomicAdd(double* address, double val)
{
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
        // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
    } while (assumed != old);
    return __longlong_as_double(old);
}
#endif

//TODO temp solution: epsilon below which a flow value is treated as zero.
#define MIN_NONZERO_V 0.00000001

// Atomically add val to *address and report whether the result is
// (approximately) zero.
__device__ __inline__ double isZeroAfterAtomicAdd(double * address, double val)
{
    return ((atomicAdd(address, val) + val) <= MIN_NONZERO_V); //TODO temp solution
}

__device__ __inline__ bool isZero(double val)
{
    return (val <= MIN_NONZERO_V); //+0.0 and -0.0, negative value not a pb
}

__device__ __inline__ double isZeroAfterAtomicAdd(uint32_t * address, uint32_t val)
{
    return ((atomicAdd(address, val) + val) <= 0); //TODO temp solution
}

__device__ __inline__ bool isZero(uint32_t val)
{
    return (val <= 0);
}

// Call f(u, v, i_edge, args...) for every outgoing edge of u, spreading the
// edges over the x-dimension of the launch. Threads past the end of the list
// still invoke f with v = i_edge = -1 so warp-synchronous callers stay in
// lockstep.
//TODO this is the version f->void
template<typename F, typename ...Args>
__device__ void iterate_on_edges(int u, const csr_graph &g, F f, Args&&... args)
{
    int start = g.row_offsets[u];
    int end = g.row_offsets[u+1];
    int end_last_block = start + blockDim.x * (((end-start) + blockDim.x - 1) / blockDim.x);

    for(int idx = start + blockDim.x * blockIdx.x + threadIdx.x;
        idx <= end_last_block;
        idx += blockDim.x * gridDim.x)
    {
        int i_edge = (idx < end) ? idx : -1;
        int v = (idx < end) ? g.col_indices[idx] : -1;
        f(u, v, i_edge, std::forward<Args>(args)...); //we should be able to early exit
    }
}

// Same as above for a csr_subgraph: i_edge is translated back to the parent
// graph's edge index.
//TODO this is actually the version f->void
template<typename F, typename ...Args>
__device__ void iterate_on_edges(int u, const csr_subgraph &sg, F f, Args&&... args)
{
    int start = sg.edge_offsets[u];
    int end = sg.edge_offsets[u+1];
    int end_last_block = start + blockDim.x * (((end-start) + blockDim.x - 1) / blockDim.x);

    for(int idx = start + blockDim.x * blockIdx.x + threadIdx.x;
        idx <= end_last_block;
        idx += blockDim.x * gridDim.x)
    {
        int i_edge = (idx < end) ? sg.parent_edge_indices[idx] : -1;
        int v = (idx < end) ? sg.col_indices[idx] : -1;
        f(u, v, i_edge, std::forward<Args>(args)...); //we should be able to early exit
    }
}

// Apply f(idx, elements[idx]) over [start, end), one item per z-thread.
//TODO use std::function instead of template
template<typename F>
__global__ void apply_level_kernel(int *elements, int start, int end, F f)
{
    for(int idx = start + blockIdx.z * blockDim.z + threadIdx.z;
        idx < end;
        idx += blockDim.z * gridDim.z)
    {
        int u = elements[idx];
        f(idx, u);
    }
}

// Apply f(u) over the index range [start, end) directly.
template<typename F>
__global__ void apply_level_kernel(int start, int end, F f)
{
    for(int u = start + blockIdx.z * blockDim.z + threadIdx.z;
        u < end;
        u += blockDim.z * gridDim.z)
    {
        f(u);
    }
}

// Iterate through BFS levels of the graph, launching one kernel per level.
//TODO use std::function instead of template
//TODO overload sans q_bfs (just offsets)
template<int DIRECTION, typename F>
void iterate_on_levels(int start_level, int end_level,
                       int *elements, const int* levels_offset,
                       F f,
                       dim3 localGrid, dim3 localBlock,
                       int elts_per_thread=1,
                       size_t shared_memory=0, cudaStream_t stream=0)
{
    for(int level = start_level;
        ((DIRECTION == FORWARD) && (level <= end_level)) || ((DIRECTION == BACKWARD) && (level >= end_level));
        level += (DIRECTION == FORWARD) ? 1 : -1)
    {
        int start = levels_offset[level];
        int end = levels_offset[level+1];
        int num_items = end - start;
        int nthreads_z = (num_items + elts_per_thread - 1) / elts_per_thread;

        dim3 grid, block;
        block.x = localBlock.x;
        block.y = localBlock.y;
        block.z = min(512/block.x/block.y, 64);
        grid.x = localGrid.x;
        grid.y = localGrid.y;
        grid.z = min((nthreads_z + block.z - 1)/block.z, N_MAX_BLOCKS);

        //printf("level with elements : level : %i ; start : %i ; end : %i ; size : %i \n", level, start, end, num_items);
        apply_level_kernel<<<grid,block,shared_memory,stream>>>(elements, start, end, f);
    }
}

// Device-side level iteration: the whole sweep runs inside one kernel,
// synchronizing the block between levels.
template<int DIRECTION, typename F>
__device__ void d_iterate_on_levels(const int start_level, const int end_level,
                                    const int* levels_offset, F f)
{
    for(int level = start_level;
        ((DIRECTION == FORWARD) && (level <= end_level)) || ((DIRECTION == BACKWARD) && (level >= end_level));
        level += (DIRECTION == FORWARD) ? 1 : -1)
    {
        int start = levels_offset[level];
        int end = levels_offset[level+1];
        //if(threadIdx.x == 0 && threadIdx.z == 0)
        //    printf("\n on device : level : %i ; start : %i ; end : %i \n", level, start, end);
        for(int u = start + blockIdx.z * blockDim.z + threadIdx.z;
            u < end;
            u += blockDim.z * gridDim.z)
        {
            f(u);
        }
        __syncthreads();
    }
}

template<int DIRECTION, typename F>
__global__ void d_iterate_on_levels_kernel(const int start_level, const int end_level,
                                           const int* levels_offset, F f)
{
    d_iterate_on_levels<DIRECTION>(start_level, end_level, levels_offset, f);
}

// Host-side level iteration without an elements array. If every level fits
// in one block (max_level_width <= block.z), the whole sweep is done in a
// single kernel using block-level sync; otherwise one launch per level.
template<int DIRECTION, typename F>
__host__ void iterate_on_levels(int start_level, int end_level,
                                const int* levels_offset,
                                F f,
                                dim3 localGrid, dim3 localBlock,
                                int elts_per_thread=4,
                                size_t shared_memory=0, cudaStream_t stream=0,
                                int max_level_width=INT_MAX //can be used to block-level sync
                                )
{
    dim3 block;
    block.x = localBlock.x;
    block.y = localBlock.y;
    block.z = min(256/block.x/block.y,64); //only 256, shared mem pb

    if(max_level_width <= block.z)
    {
        dim3 grid(1,1,1);
        d_iterate_on_levels_kernel<DIRECTION><<<grid,block>>>(start_level, end_level, levels_offset, f);
        return;
    }

    for(int level = start_level;
        ((DIRECTION == FORWARD) && (level <= end_level)) || ((DIRECTION == BACKWARD) && (level >= end_level));
        level += (DIRECTION == FORWARD) ? 1 : -1)
    {
        int start = levels_offset[level];
        int end = levels_offset[level+1];
        int num_items = end - start;
        int nthreads_z = (num_items + elts_per_thread - 1) / elts_per_thread;

        //printf("level : %i ; start : %i ; end : %i ; size : %i \n", level, start, end, num_items);
        dim3 grid;
        grid.x = localGrid.x;
        grid.y = localGrid.y;
        grid.z = min((nthreads_z + block.z - 1)/block.z, N_MAX_BLOCKS);

        apply_level_kernel<<<grid,block,shared_memory,stream>>>(start, end, f);
    }
}

// CUB wrapper: compact d_in[i] where d_flags[i] != 0 into d_out.
void PartitionFlagged(int *d_out, int *d_in, char *d_flags, int num_items,
                      int *d_num_selected_out, void *d_temp_storage, size_t temp_storage_bytes)
{
    // Run selection
    cub::DevicePartition::Flagged(d_temp_storage, temp_storage_bytes,
                                  d_in, d_flags, d_out, d_num_selected_out, num_items);
}

// CUB wrapper: exclusive prefix sum of d_in into d_out.
void ExclusiveSum(int *d_out, int *d_in, int num_items,
                  void *d_temp_storage, size_t temp_storage_bytes)
{
    // Run exclusive prefix sum
    cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
                                  d_in, d_out, num_items, 0);
}

// CUB wrapper: per-segment sums of the char flags in d_in.
void SegmentedReduce(int *d_out, char *d_in, int *d_offsets, int num_segments)
{
    // Determine temporary device storage requirements
    void *d_temp_storage = NULL;
    size_t temp_storage_bytes = 0;
    cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes,
                                    d_in, d_out, num_segments, d_offsets, d_offsets + 1);
    // Allocate temporary storage
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    // Run sum-reduction
    cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes,
                                    d_in, d_out, num_segments, d_offsets, d_offsets + 1);
    // Release the scratch buffer (it was leaked on every call before).
    cudaFree(d_temp_storage);
}

template<typename F>
__global__ void apply_on_graph_kernel(int n, F f)
{
    for(int u = blockIdx.z*blockDim.z + threadIdx.z;
        u < n;
        u += blockDim.z*gridDim.z)
    {
        f(u);
    }
}

// Call f(u) once for every node u in [0, n).
template<typename F>
void apply_on_graph(int n, F f, dim3 localGrid, dim3 localBlock, int memory, cudaStream_t stream)
{
    dim3 block,grid;
    block.x = localBlock.x;
    block.y = localBlock.y;
    block.z = min(512/block.x/block.y, 64);
    grid.x = localGrid.x;
    grid.y = localGrid.y;
    grid.z = min((n + block.z - 1) / block.z, N_MAX_BLOCKS);

    apply_on_graph_kernel<<<grid,block,memory,stream>>>(n, f);
}

// Convenience overload with a trivial 1x1 local launch shape.
template<typename F>
void apply_on_graph(int n, F f, cudaStream_t stream)
{
    dim3 grid, block;
    grid.x = grid.y = block.x = block.y = 1;
    apply_on_graph(n, f, grid, block, 0, stream);
}
0
rapidsai_public_repos/code-share
rapidsai_public_repos/code-share/maxflow/graph_tools_gpu.cu
// Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.

// GPU implementations of the helpers declared in graph_tools.h.

#include <stdio.h>

#define N_BLOCKS_MAX 65535
#define N_THREADS 512

// data[tid] = value for tid < size (one element per thread; the launch
// below covers the whole range, so no grid-stride loop is needed).
template<typename T>
__global__ void fill_kernel(int size, T *data, T value)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < size)
        data[tid] = value;
}

// Grid-stride loop: mask[e] = 1 iff edge e has residual capacity left.
__global__ void setup_mask_unsaturated_kernel(int num_edges, char *mask, double *cf)
{
    for(int u = threadIdx.x + blockIdx.x * blockDim.x; u < num_edges; u += blockDim.x * gridDim.x)
        mask[u] = (cf[u] > 0);
}

//TODO memory issue ? reverse_edge can be kind of chaotic (scattered writes)
__global__ void setup_mask_unsaturated_backward_kernel(int num_edges, int *mask, double *cf, int *reverse_edge)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if(tid < num_edges)
        mask[reverse_edge[tid]] = (cf[tid] > 0);
}

template<typename T>
void fill(int size, T *data, T value)
{
    // Guard: a size of 0 would request a 0-block launch, which is an
    // invalid CUDA configuration.
    if (size <= 0) return;
    fill_kernel<<<(size + 255)/256, 256>>>(size, data, value);
    cudaDeviceSynchronize();
}

bool setup_mask_unsaturated(int num_edges, char *mask, double *cf)
{
    if (num_edges <= 0) return true;
    // Proper ceil division: the previous (num_edges + N_THREADS)/N_THREADS
    // launched one superfluous block whenever num_edges was a multiple of
    // N_THREADS. Launch is asynchronous (kernel is grid-stride).
    int blocks = min((num_edges + N_THREADS - 1)/N_THREADS, N_BLOCKS_MAX);
    setup_mask_unsaturated_kernel<<<blocks, N_THREADS>>>(num_edges, mask, cf);
    return true;
}

void setup_mask_unsaturated_backward(int num_edges, int *mask, double *cf, int *reverse_edge)
{
    if (num_edges <= 0) return;
    setup_mask_unsaturated_backward_kernel<<<(num_edges + 255)/256, 256>>>(num_edges, mask, cf, reverse_edge);
    cudaDeviceSynchronize();
}

template void fill<int>(int,int*,int);
template void fill<double>(int,double*,double);
template void fill<char>(int,char*,char);
0
rapidsai_public_repos/code-share
rapidsai_public_repos/code-share/maxflow/matrix.h
// Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. #pragma once #define INF 1e10 #include <algorithm> #include "allocator.h" #include <fstream> #include "config.h" using std::min; struct csr_graph { int n, nnz; int *row_offsets; int *col_indices; flow_t *vals_cap; // capacity flow_t *vals_flow; // maxflow // set edge weight = 1/degree void set_edge_weights_rcp_degree() { for (int i = 0; i < n; i++) { int degree = row_offsets[i+1] - row_offsets[i]; for (int j = row_offsets[i]; j < row_offsets[i+1]; j++) vals_cap[j] = (flow_t)1.0/degree; } } // function returns edge id for i->j using binary search int edge(int i, int j) const { // use binary search here int low = row_offsets[i]; int high = row_offsets[i+1]-1; while (high > low) { int mid = (low + high)/2; if (j == col_indices[mid]) return mid; if (j < col_indices[mid]) high = mid-1; else low = mid+1; } return (col_indices[low] == j) ? low : -1; } }; struct csr_subgraph { int n, nnz; int *edge_offsets = NULL; int *parent_edge_indices = NULL; int *col_indices = NULL; csr_subgraph() { } void resize(int _n, int _nnz) { n = _n; nnz = _nnz; clean(); edge_offsets = (int*)my_malloc(n * sizeof(int)); parent_edge_indices = (int*)my_malloc((nnz+1) * sizeof(int)); col_indices = (int*)my_malloc(nnz * sizeof(int)); } void clean() { if(edge_offsets) my_free(edge_offsets); if(parent_edge_indices) my_free(parent_edge_indices); if(col_indices) my_free(col_indices); } };
0
rapidsai_public_repos/code-share
rapidsai_public_repos/code-share/maxflow/allocator.h
// Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
#pragma once

// Thin allocation shim: plain malloc/free on the CPU build, CUDA managed
// memory when compiled with -DUSE_GPU (see Makefile).

#include <stdlib.h>
#include <stdio.h>

// Running total of bytes requested, for the optional footprint log below.
// NOTE(review): 'static' in a header gives each translation unit its own
// counter, so the logged footprint is per-TU rather than global.
static long tot_malloc_bytes = 0;

#ifndef USE_GPU

// Host allocation; size is in bytes. size_t so requests over 2 GB are not
// truncated (the GPU variant previously took a plain int).
inline void* my_malloc(size_t size)
{
#if (LOG_LEVEL > 1) && (LOG_LEVEL < 4)
    tot_malloc_bytes += size;
    printf("Memory allocation footprint %.3f MB\n", ((float) tot_malloc_bytes)/(1<<20));
#endif
    void *ptr = malloc(size);
    return ptr;
}

inline void my_free(void *ptr)
{
    free(ptr);
}

#else

#include <cuda_runtime.h>

// Unified (managed) allocation, accessible from both host and device.
inline void* my_malloc(size_t size)
{
#if (LOG_LEVEL > 1) && (LOG_LEVEL < 4)
    tot_malloc_bytes += size;
    printf("Unified memory allocation footprint %.3f MB\n", ((float) tot_malloc_bytes)/(1<<20));
#endif
    void *ptr;
    cudaMallocManaged(&ptr, size);
    return ptr;
}

inline void my_free(void *ptr)
{
    cudaFree(ptr);
}

#endif
0
rapidsai_public_repos/code-share
rapidsai_public_repos/code-share/maxflow/LICENSE
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2018 NVIDIA CORPORATION Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
0
rapidsai_public_repos/code-share
rapidsai_public_repos/code-share/maxflow/matrix.cu
#include "matrix.h" //CUB histogram is too complex __global__ count_values(int *values, int *counts, int size) { for(int u = blockIdx.x * blockDim.x + threadIdx.x; u < size; u += blockDim.x * gridDim.x) atomicAdd(&counts[values[u]], 1); } __global__ set_idx_origin(int *row_offsets, int *col_indices, int *in_edge_idx_offsets, int *in_edge_idx, int *edge_origin, int n) { for(int u = blockIdx.y * blockDim.y + threadIdx.y; u < n; u += blockDim.y * gridDim.y) { for(int i_edge = row_offsets[u]; i_edge < row_offsets[u+1]; i_edge += blockDim.x) { int v = col_indices[i_edge]; int pos = atomicAdd(&in_edge_idx_offsets[v], -1) - 1; in_edge_idx[pos] = i_edge; edge_origin[i_edge] = u; } } } #define SET_IN_THREADS_PER_VERTEX 4 #define N_THREADS 512 #define SET_IN_BLOCK_Y (N_THREADS / SET_IN_THREADS_PER_VERTEX) #define N_BLOCKS_MAX 65535 void csr_graph_reverse::set_in_edge(const csr_graph &g) { if(g.n <= 0) return; cudaMemset(in_edge_idx_offsets, 0, sizeof(int) * (g.n + 1)); dim3 grid1D(min(N_BLOCKS_MAX, g.n /N_THREADS)); dim3 block1D(N_THREADS); count_values<<<grid1D,block1D>>>(g.col_indices, in_edge_idx_offsets, g.nnz); //Copied gtom cub doc // Determine temporary device storage requirements void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, in_edge_idx_offsets, in_edge_idx_offsets, g.n + 1); // Allocate temporary storage cudaMalloc(&d_temp_storage, temp_storage_bytes); cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, in_edge_idx_offsets, in_edge_idx_offsets, g.n + 1); cudaMemcpy(&in_edge_idx_offsets[g.n], &in_edge_idx_offsets[g.n-1], sizeof(int), cudaMemcpyDeviceToDevice); dim3 grid2D(1, min(N_BLOCKS_MAX, g.n/SET_IN_BLOCK_Y)); dim3 block2D(SET_IN_THREADS_PER_VERTEX, SET_IN_BLOCK_Y); set_idx_origin<<<grid2D, block2D>>>(g.row_offsets, g.col_indices, in_edge_idx_offsets, in_edge_idx, edge_origin, g.n); }
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/edmonds-karp/edmonds-karp.cpp
// Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. #include "../matrix.h" #include "../allocator.h" #include <stdio.h> #include "../graph_tools.h" #include "../bfs/bfs.h" #include <time.h> //Edmonds-karp implementation //TODO separate CPU and GPU implem ? double maxflowimplementation(csr_graph* g, int s, int t, float *time) { struct timespec start, end; clock_gettime(CLOCK_MONOTONIC, &start); int it = 0; // number of augmented paths double fm = 0.0; int *q = (int*)my_malloc(g->n * sizeof(int)); // bfs vertices queue int *p = (int*)my_malloc(g->n * sizeof(int)); // parent vertices int *h = (int*)my_malloc(g->n * sizeof(int)); // depth of nodes - TODO remove char *mask = (char*)my_malloc(g->nnz * sizeof(int)); // edge mask (used in Gunrock only) double *cf = g->vals_cap; int* level_width = (int*)my_malloc(g->n * sizeof(int)); // depth of nodes - TODO remove //not used here // find shortest augmented paths in c-f setup_mask_unsaturated(g->nnz, mask, cf); while (bfs(g->row_offsets, g->col_indices, g->n, g->nnz, s, t, q, p, BFS_MARK_PREDECESSOR, mask, level_width)) { // backtrack to find the max flow we can push through int v = t; double mf = INF; while (v != s) { int u = p[v]; int i = g->edge(u,v); mf = min(mf, cf[i]); v = u; } // update flow value fm = fm + mf; // backtrack and update flow graph v = t; int len = 0; while (v != s) { int u = p[v]; int uv = g->edge(u,v); int vu = g->edge(v,u); cf[uv] -= mf; mask[uv] = (cf[uv] > 0); cf[g->edge(v,u)] += mf; mask[vu] = (cf[vu] > 0); v = u; len++; } // output more stats #if LOG_LEVEL > 1 printf("path length %d, aug flow %g\n", len, mf); #endif #if LOG_LEVEL > 2 printf("aug path vertices: "); v = t; while (v != s) { printf("%i ", v+1); v = p[v]; } printf("%i \n", v+1); #endif // count number of iterations it++; } #if LOG_LEVEL > 0 printf("%i augmenting paths\n", it); #endif my_free(q); my_free(p); my_free(mask); clock_gettime(CLOCK_MONOTONIC, &end); *time = (end.tv_sec - start.tv_sec) + (end.tv_nsec - 
start.tv_nsec) * 1e-9; return fm; }
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/IO/mtx2dimacs.cpp
#include "../matrix.h" #include "matrix_io.h" int main(int argc, char **argv) { if(argc < 3) { printf("Usage : %s <input> <output> \n", argv[0]); exit(1); } csr_graph g; // main graph structure // read mtx entry read_mm_matrix(argv[1], &g.n, &g.n, &g.nnz, &g.row_offsets, &g.col_indices, &g.vals_cap); //Set edges to degrees for cap g.set_edge_weights_rcp_degree(); std::ofstream out(argv[2]); out << "p max " << g.n << " " << g.nnz << "\n"; for(int u = 0; u < g.n; ++u) { for(int i_edge = g.row_offsets[u]; i_edge != g.row_offsets[u+1]; ++i_edge) { int cap = (1000000000.0 * g.vals_cap[i_edge]); int v = g.col_indices[i_edge]; out << "a " << u+1 << " " << v+1 << " " << cap << "\n"; if(g.edge(v, u) == -1) { out << "a " << v+1 << " " << u+1 << " " << "0" << "\n"; } } } return 0; }
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/IO/gr_reader.h
#include <fstream> #include <string> #include "../allocator.h" #include "../matrix.h" #include "../config.h" #include <cstdlib> #include <iostream> using std::string; using std::ifstream; using std::cout; void read_gr(const string& filename, csr_graph& g) { ifstream iff(filename); uint64_t header[4]; iff.read((char*)header, sizeof(uint64_t) * 4); uint64_t n, nnz; n = header[2]; nnz = header[3]; uint64_t *degrees = (uint64_t*)malloc(sizeof(uint64_t) * n); iff.read((char*)degrees, sizeof(uint64_t) * n); uint32_t *outs = (uint32_t*)malloc(sizeof(uint32_t) * nnz); iff.read((char*)outs, sizeof(uint32_t) * nnz); //Inc Sum -> Ex sum int last = 0; for(int i=0; i != n; ++i) { int here = degrees[i] - last; last = degrees[i]; degrees[i] -= here; } uint32_t buf; if(nnz & 1) iff.read((char*)&buf, sizeof(uint32_t)); //align on 64 bits uint32_t *w = (uint32_t*)malloc(sizeof(uint32_t) * nnz); iff.read((char*)w, sizeof(uint32_t) * nnz); //Copying into g //If the data types are coherent, we could load directly in those g.row_offsets = (int*)my_malloc(sizeof(int) * n); g.col_indices = (int*)my_malloc(sizeof(int) * nnz); g.vals_cap = (flow_t*)my_malloc(sizeof(flow_t) * nnz); g.n = n; g.nnz = nnz; for(int i=0; i != n; ++i) g.row_offsets[i] = (int)degrees[i]; for(int i=0; i != nnz; ++i) g.col_indices[i] = (int)outs[i]; for(int i=0; i != nnz; ++i) g.vals_cap[i] = (flow_t)w[i]; free(degrees); free(outs); free(w); /* for(int u=0; u != 20; ++u) { cout << "Node " << u << " : "; for(int i = g.row_offsets[u]; i != g.row_offsets[u+1]; ++i) { cout << g.col_indices[i] << "(" << g.vals_cap[i] << ")\t"; } cout << "\n"; } */ }
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/IO/matrix_io.h
#include <string.h> #include <map> using std::map; using std::pair; // Routines to read/write matrix. // Modified from http://crd-legacy.lbl.gov/~yunhe/cs267/final/source/utils/convert/matrix_io.c // Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. #define MM_MAX_LINE_LENGTH 1025 #define MatrixMarketBanner "%%MatrixMarket" #define MM_MAX_TOKEN_LENGTH 64 typedef char MM_typecode[4]; char *mm_typecode_to_str(MM_typecode matcode); int mm_read_banner(FILE *f, MM_typecode *matcode); int mm_read_mtx_crd_size(FILE *f, int *M, int *N, int *nz); int mm_read_mtx_array_size(FILE *f, int *M, int *N); int mm_write_banner(FILE *f, MM_typecode matcode); int mm_write_mtx_crd_size(FILE *f, int M, int N, int nz); int mm_write_mtx_array_size(FILE *f, int M, int N); /********************* MM_typecode query fucntions ***************************/ #define mm_is_matrix(typecode) ((typecode)[0]=='M') #define mm_is_sparse(typecode) ( ((typecode)[1]=='C') || ((typecode)[1]=='S') ) #define mm_is_sparserow(typecode) ((typecode)[1]=='S') #define mm_is_coordinate(typecode)((typecode)[1]=='C') #define mm_is_dense(typecode) ((typecode)[1]=='A') #define mm_is_array(typecode) ((typecode)[1]=='A') #define mm_is_complex(typecode) ((typecode)[2]=='C') #define mm_is_real(typecode) ((typecode)[2]=='R') #define mm_is_pattern(typecode) ((typecode)[2]=='P') #define mm_is_integer(typecode) ((typecode)[2]=='I') #define mm_is_symmetric(typecode)((typecode)[3]=='S') #define mm_is_general(typecode) ((typecode)[3]=='G') #define mm_is_skew(typecode) ((typecode)[3]=='K') #define mm_is_hermitian(typecode)((typecode)[3]=='H') int mm_is_valid(MM_typecode matcode); /* too complex for a macro */ /********************* MM_typecode modify fucntions ***************************/ #define mm_set_matrix(typecode) ((*typecode)[0]='M') #define mm_set_coordinate(typecode) ((*typecode)[1]='C') #define mm_set_sparserow(typecode) ((*typecode)[1]='S') #define mm_set_array(typecode) ((*typecode)[1]='A') #define 
mm_set_dense(typecode) mm_set_array(typecode) #define mm_set_complex(typecode)((*typecode)[2]='C') #define mm_set_real(typecode) ((*typecode)[2]='R') #define mm_set_pattern(typecode)((*typecode)[2]='P') #define mm_set_integer(typecode)((*typecode)[2]='I') #define mm_set_symmetric(typecode)((*typecode)[3]='S') #define mm_set_general(typecode)((*typecode)[3]='G') #define mm_set_skew(typecode) ((*typecode)[3]='K') #define mm_set_hermitian(typecode)((*typecode)[3]='H') #define mm_clear_typecode(typecode) ((*typecode)[0]=(*typecode)[1]= \ (*typecode)[2]=' ',(*typecode)[3]='G') #define mm_initialize_typecode(typecode) mm_clear_typecode(typecode) /********************* Matrix Market error codes ***************************/ #define MM_COULD_NOT_READ_FILE 11 #define MM_PREMATURE_EOF 12 #define MM_NOT_MTX 13 #define MM_NO_HEADER 14 #define MM_UNSUPPORTED_TYPE 15 #define MM_LINE_TOO_LONG 16 #define MM_COULD_NOT_WRITE_FILE 17 /******************** Matrix Market internal definitions ******************** MM_matrix_typecode: 4-character sequence ojbect sparse/ data storage dense type scheme string position: [0] [1] [2] [3] Matrix typecode: M(atrix) C(oord) R(eal) G(eneral) A(array) C(omplex) H(ermitian) P(attern) S(ymmetric) I(nteger) K(kew) ***********************************************************************/ #define MM_MTX_STR "matrix" #define MM_ARRAY_STR "array" #define MM_DENSE_STR "array" #define MM_COORDINATE_STR "coordinate" #define MM_SPARSEROW_STR "sparserow" #define MM_COMPLEX_STR "complex" #define MM_REAL_STR "real" #define MM_INT_STR "integer" #define MM_GENERAL_STR "general" #define MM_SYMM_STR "symmetric" #define MM_HERM_STR "hermitian" #define MM_SKEW_STR "skew-symmetric" #define MM_PATTERN_STR "pattern" int mm_read_banner(FILE *f, MM_typecode *matcode) { char line[MM_MAX_LINE_LENGTH]; char banner[MM_MAX_TOKEN_LENGTH]; char mtx[MM_MAX_TOKEN_LENGTH]; char crd[MM_MAX_TOKEN_LENGTH]; char data_type[MM_MAX_TOKEN_LENGTH]; char storage_scheme[MM_MAX_TOKEN_LENGTH]; 
char *p; mm_clear_typecode(matcode); if (fgets(line, MM_MAX_LINE_LENGTH, f) == NULL) return MM_PREMATURE_EOF; if (sscanf(line, "%s %s %s %s %s", banner, mtx, crd, data_type, storage_scheme) != 5) return MM_PREMATURE_EOF; for (p=mtx; *p!='\0'; *p=tolower(*p),p++); /* convert to lower case */ for (p=crd; *p!='\0'; *p=tolower(*p),p++); for (p=data_type; *p!='\0'; *p=tolower(*p),p++); for (p=storage_scheme; *p!='\0'; *p=tolower(*p),p++); /* check for banner */ if (strncmp(banner, MatrixMarketBanner, strlen(MatrixMarketBanner)) != 0) return MM_NO_HEADER; /* first field should be "mtx" */ if (strcmp(mtx, MM_MTX_STR) != 0) return MM_UNSUPPORTED_TYPE; mm_set_matrix(matcode); /* second field describes whether this is a sparse matrix (in coordinate storgae) or a dense array */ if (strcmp(crd, MM_SPARSEROW_STR) == 0) mm_set_sparserow(matcode); else if (strcmp(crd, MM_COORDINATE_STR) == 0) mm_set_coordinate(matcode); else if (strcmp(crd, MM_DENSE_STR) == 0) mm_set_dense(matcode); else return MM_UNSUPPORTED_TYPE; /* third field */ if (strcmp(data_type, MM_REAL_STR) == 0) mm_set_real(matcode); else if (strcmp(data_type, MM_COMPLEX_STR) == 0) mm_set_complex(matcode); else if (strcmp(data_type, MM_PATTERN_STR) == 0) mm_set_pattern(matcode); else if (strcmp(data_type, MM_INT_STR) == 0) mm_set_integer(matcode); else return MM_UNSUPPORTED_TYPE; /* fourth field */ if (strcmp(storage_scheme, MM_GENERAL_STR) == 0) mm_set_general(matcode); else if (strcmp(storage_scheme, MM_SYMM_STR) == 0) mm_set_symmetric(matcode); else if (strcmp(storage_scheme, MM_HERM_STR) == 0) mm_set_hermitian(matcode); else if (strcmp(storage_scheme, MM_SKEW_STR) == 0) mm_set_skew(matcode); else return MM_UNSUPPORTED_TYPE; return 0; } #ifndef __NVCC__ char *strdup (const char *s) { char *d = (char*)my_malloc (strlen (s) + 1); // Allocate memory if (d != NULL) strcpy (d,s); // Copy string if okay return d; // Return new memory } #endif char *mm_typecode_to_str(MM_typecode matcode) { char buffer[MM_MAX_LINE_LENGTH]; 
const char *types[4]; /* check for MTX type */ if (mm_is_matrix(matcode)) types[0] = MM_MTX_STR; /* check for CRD or ARR matrix */ if (mm_is_sparserow(matcode)) types[1] = MM_SPARSEROW_STR; else if (mm_is_coordinate(matcode)) types[1] = MM_COORDINATE_STR; else if (mm_is_dense(matcode)) types[1] = MM_DENSE_STR; else return NULL; /* check for element data type */ if (mm_is_real(matcode)) types[2] = MM_REAL_STR; else if (mm_is_complex(matcode)) types[2] = MM_COMPLEX_STR; else if (mm_is_pattern(matcode)) types[2] = MM_PATTERN_STR; else if (mm_is_integer(matcode)) types[2] = MM_INT_STR; else return NULL; /* check for symmetry type */ if (mm_is_general(matcode)) types[3] = MM_GENERAL_STR; else if (mm_is_symmetric(matcode)) types[3] = MM_SYMM_STR; else if (mm_is_hermitian(matcode)) types[3] = MM_HERM_STR; else if (mm_is_skew(matcode)) types[3] = MM_SKEW_STR; else return NULL; sprintf(buffer,"%s %s %s %s", types[0], types[1], types[2], types[3]); return strdup(buffer); } /* generates random double in [low, high) */ double random_double (double low, double high) { //return ((high-low)*drand48()) + low; return ((high-low)*rand()/RAND_MAX) + low; } void coo2csr_in(int n, int nz, double *a, int **i_idx, int *j_idx); // in-place conversion, also replaces i_idx with new array of size (n+1) to save memory /* write CSR format */ /* 1st line : % number_of_rows number_of_columns number_of_nonzeros 2nd line : % base of index 3rd line : row_number nz_r(=number_of_nonzeros_in_the_row) next nz_r lines : column_index value(when a != NULL) next line : row_number nz_r(=number_of_nonzeros_in_the_row) next nz_r lines : column_index value(when a != NULL) ... 
*/ void write_csr (char *fn, int m, int n, int nz, int *row_start, int *col_idx, double *a) { FILE *f; int i, j; if ((f = fopen(fn, "w")) == NULL){ printf ("can't open file <%s> \n", fn); exit(1); } fprintf (f, "%s %d %d %d\n", "%", m, n, nz); for (i=0; i<m; i++){ fprintf(f, "%d %d\n", i, row_start[i+1]-row_start[i]); for (j=row_start[i]; j<row_start[i+1]; j++){ if (a) fprintf(f, "%d %g\n", col_idx[j], a[j]); else fprintf(f, "%d\n", col_idx[j]); } } fclose (f); } /* reads matrix market format (coordinate) and returns csr format */ void read_mm_matrix (char *fn, int *m, int *n, int *nz, int **i_idx, int **j_idx, double **a) { MM_typecode matcode; FILE *f; int i,k; int base=1; if ((f = fopen(fn, "r")) == NULL) { printf ("can't open file <%s> \n", fn); exit(1); } if (mm_read_banner(f, &matcode) != 0){ printf("Could not process Matrix Market banner.\n"); exit(1); } /* This is how one can screen matrix types if their application */ /* only supports a subset of the Matrix Market data types. */ if (! (mm_is_matrix(matcode) && mm_is_sparse(matcode)) ){ printf("Sorry, this application does not support "); printf("Market Market type: [%s]\n", mm_typecode_to_str(matcode)); exit(1); } /* skip comments */ unsigned long pos; char *line = NULL; size_t len = 0; size_t read; do { pos = ftell(f); read = getline(&line, &len, f); } while (read != -1 && line[0] == '%'); fseek(f, pos, SEEK_SET); /* find out size of sparse matrix .... 
*/ if (fscanf(f, "%d %d %d", m, n, nz) != 3) { printf("Error reading matrix header: m n nz\n"); exit(1); } //We always create back edges if doesnt exist /* reserve memory for matrices */ //if (mm_is_symmetric(matcode)){ *i_idx = (int *) my_malloc(*nz *2 * sizeof(int)); *j_idx = (int *) my_malloc(*nz *2 * sizeof(int)); *a = (double *) my_malloc(*nz *2 * sizeof(double)); /* } else { *i_idx = (int *) my_malloc(*nz * sizeof(int)); *j_idx = (int *) my_malloc(*nz * sizeof(int)); *a = (double *) my_malloc(*nz * sizeof(double)); } if (!(*i_idx) || !(*j_idx) || !(*a)){ printf ("cannot allocate memory for %d, %d, %d sparse matrix\n", *m, *n, *nz); exit(1); } */ map<pair<int,int>,double> raw_edges; // map edge(u,v) - > indice of edges in *a k=0; for (i=0; i<*nz; i++) { int u,v; double d; if (mm_is_pattern(matcode)){ if (fscanf(f, "%d %d", &u, &v) != 2) { printf("Error reading matrix entry %i\n", i); exit(1); } d = random_double(0.5, 1.0); } else if (mm_is_real(matcode)){ if (fscanf(f, "%d %d %lg", &u, &v, &d) != 3) { printf("Error reading matrix entry %i\n", i); exit(1); } } u -= base; /* adjust from 1-based to 0-based */ v -= base; raw_edges.insert({{u,v}, d}); } i=0; for(auto& e : raw_edges) { int u = e.first.first, v = e.first.second; (*i_idx)[i] = u; (*j_idx)[i] = v; (*a)[i] = e.second; ++i; if(u != v && raw_edges.count({v,u}) == 0) { (*i_idx)[*nz+k] = v; (*j_idx)[*nz+k] = u; (*a)[*nz+k] = (mm_is_symmetric(matcode)) ? e.second : 0.0; k++; } } *nz += k; fclose(f); coo2csr_in (*m, *nz, *a, i_idx, *j_idx); } void sort(int *col_idx, double *a, int start, int end) { int i, j, it; double dt; for (i=end-1; i>start; i--) for(j=start; j<i; j++) if (col_idx[j] > col_idx[j+1]){ if (a){ dt=a[j]; a[j]=a[j+1]; a[j+1]=dt; } it=col_idx[j]; col_idx[j]=col_idx[j+1]; col_idx[j+1]=it; } } /* converts COO format to CSR format, in-place, if SORT_IN_ROW is defined, each row is sorted in column index. 
On return, i_idx contains row_start position */ void coo2csr_in(int n, int nz, double *a, int **i_idx, int *j_idx) { int *row_start; int i, j; int init, i_next, j_next, i_pos; double dt, a_next; row_start = (int *)my_malloc((n+1)*sizeof(int)); if (!row_start){ printf ("coo2csr_in: cannot allocate temporary memory\n"); exit (1); } for (i=0; i<=n; i++) row_start[i] = 0; /* determine row lengths */ for (i=0; i<nz; i++) row_start[(*i_idx)[i]+1]++; for (i=0; i<n; i++) row_start[i+1] += row_start[i]; for (init=0; init<nz; ){ dt = a[init]; i = (*i_idx)[init]; j = j_idx[init]; (*i_idx)[init] = -1; while (1){ i_pos = row_start[i]; a_next = a[i_pos]; i_next = (*i_idx)[i_pos]; j_next = j_idx[i_pos]; a[i_pos] = dt; j_idx[i_pos] = j; (*i_idx)[i_pos] = -1; row_start[i]++; if (i_next < 0) break; dt = a_next; i = i_next; j = j_next; } init++; while ((init < nz) && ((*i_idx)[init] < 0)) init++; } /* shift back row_start */ for (i=0; i<n; i++) (*i_idx)[i+1] = row_start[i]; (*i_idx)[0] = 0; for (i=0; i<n; i++){ sort (j_idx, a, (*i_idx)[i], (*i_idx)[i+1]); } /* copy i_idx back to row_start, free old data, switch pointers */ for (i=0; i<n+1; i++) row_start[i] = (*i_idx)[i]; my_free(*i_idx); *i_idx = row_start; }
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/MPM/MPM.cu
#include "MPM.h" #include "../bfs/bfs.h" //Implementations of MPM functions members #include "get_subgraph.cu" #include "push_pull.cu" #include "prune.cu" #include <time.h> #include <cuda_profiler_api.h> #include "nvToolsExt.h" #include "../config.h" #define GPUID 0 #define N_BLOCKS_MAX 65535 #define N_THREADS 512 MPM::MPM(csr_graph& _g) : g(_g) { //TODO reduce number of mallocs q_bfs = (int*)my_malloc((g.n+1) * sizeof(int)); h = (int*)my_malloc((g.n) * sizeof(int)); node_mask = (char*)my_malloc(g.n * sizeof(char)); queue_mask = (char*)my_malloc(g.n * sizeof(char)); prune_mask = (char*)my_malloc(g.n * sizeof(char)); have_been_pruned = (char*)my_malloc(g.n * sizeof(char)); node_g_to_sg = (int*)my_malloc(g.n * sizeof(int)); //TODO reuse Bfs node_sg_to_g = (int*)my_malloc(g.n * sizeof(int)); edge_mask = (char*)my_malloc(g.nnz * sizeof(char)); edge_mask_orig = (char*)my_malloc(g.nnz * sizeof(char)); reverse_edge_map = (int*)my_malloc(g.nnz * sizeof(int)); cudaMalloc(&d_total_flow, sizeof(flow_t)); e = (flow_t*)my_malloc(g.n * sizeof(flow_t)); //buffer for degree_in and degree_out degree = (flow_t*)my_malloc((2 * g.n) * sizeof(flow_t)); bfs_offsets = (int*)my_malloc((g.n+1) * sizeof(int)); sg_level_offsets = (int*)my_malloc((g.n+1) * sizeof(int)); cudaMalloc(&d_nsg, sizeof(int)); cudaMallocHost(&d_node_to_push, sizeof(int)); cudaMallocHost(&d_flow_to_push, sizeof(flow_t)); cudaStreamCreate(&st1); cudaStreamCreate(&st2); cudaMemset(d_total_flow, 0, sizeof(flow_t)); cudaMemset(e, 0, sizeof(flow_t) * g.n); cudaMemset(prune_mask, 0, sizeof(char) * g.n); buf1 = (int*)my_malloc((g.n+1) * sizeof(int)); buf2 = (int*)my_malloc((g.n+1) * sizeof(int)); sg_in.resize(g.n, g.nnz); sg_out.resize(g.n, g.nnz); cf = g.vals_cap; //TODO alloc and copy //CUB memory //Device Reduce cudaMalloc(&d_ppd, sizeof(post_prune_data)); cub::DeviceReduce::ArgMin(d_min_reduce, min_reduce_size, degree, &d_ppd->d_min, 2*g.n); cudaMalloc(&d_min_reduce, min_reduce_size); //Partition (get subgraph) 
cub::DevicePartition::Flagged(d_storage_partition, size_storage_partition, buf1, queue_mask, buf2, d_nsg, g.n); cudaMalloc(&d_storage_partition, size_storage_partition); //Exclusive sum (get subgraph) cub::DeviceScan::ExclusiveSum(d_storage_exclusive_sum, size_storage_exclusive_sum, buf1, buf2, g.n); cudaMalloc(&d_storage_exclusive_sum, size_storage_exclusive_sum); //Building reverse edge map for(int u=0; u != g.n; ++u) { for (int i = g.row_offsets[u]; i < g.row_offsets[u+1]; ++i) { int v = g.col_indices[i]; int uv = i; int vu = g.edge(v,u); reverse_edge_map[uv] = vu; } } memFetch(); cudaDeviceSynchronize(); } __global__ void setup_mask_unsaturated_kernel(int num_edges, char *mask, flow_t *cf) { for(int u= threadIdx.x + blockIdx.x * blockDim.x; u < num_edges; u += blockDim.x * gridDim.x) mask[u] = (cf[u] > 0); } bool setup_mask_unsaturated(int num_edges, char *mask, flow_t *cf) { setup_mask_unsaturated_kernel<<<min((num_edges + N_THREADS)/N_THREADS, N_BLOCKS_MAX), N_THREADS>>>(num_edges, mask, cf); return true; } //Main algorithm loop flow_t MPM::maxflow(int _s, int _t, float *elapsed_time) { s = _s; t = _t; //TODO create cf setup_mask_unsaturated(g.nnz, edge_mask_orig, cf); int nsg; //number of nodes in subgraphh cudaDeviceSynchronize(); struct timespec start, end; clock_gettime(CLOCK_MONOTONIC, &start); cudaProfilerStart(); while(bfs(g.row_offsets, g.col_indices, g.n, g.nnz, s, t, q_bfs, h, BFS_MARK_DEPTH, edge_mask_orig, bfs_offsets)) { cudaDeviceSynchronize(); cudaMemcpy(&ht, &h[t], sizeof(int), cudaMemcpyDeviceToHost); init_level_graph(nsg); cudaDeviceSynchronize(); nvtxRangePushA("saturate_subgraph"); //Find node to push - usually done end of prune, but the first need to be done here cub::DeviceReduce::ArgMin(d_min_reduce, min_reduce_size, degree_in+1, &(d_ppd->d_min), 2*(sg_in.n-1), st1); cudaMemcpy(&h_ppd, d_ppd, sizeof(post_prune_data), cudaMemcpyDeviceToHost); do { push_and_pull(); prune(); } while(!h_ppd.s_t_pruned); nvtxRangePop(); } flow_t 
h_total_flow; cudaMemcpy(&h_total_flow, d_total_flow, sizeof(flow_t), cudaMemcpyDeviceToHost); cudaProfilerStop(); clock_gettime(CLOCK_MONOTONIC, &end); *elapsed_time = (end.tv_sec - start.tv_sec) + (end.tv_nsec - start.tv_nsec) * 1e-9; return h_total_flow; } void MPM::memFetch() { cudaMemPrefetchAsync(q_bfs, g.n * sizeof(int), 0, st1); cudaMemPrefetchAsync(h, (g.n) * sizeof(int), 0, st1); cudaMemPrefetchAsync(node_mask, g.n * sizeof(char), 0, st1); cudaMemPrefetchAsync(queue_mask, g.n * sizeof(char), 0, st1); cudaMemPrefetchAsync(prune_mask, g.n * sizeof(char), 0, st1); cudaMemPrefetchAsync(have_been_pruned, g.n * sizeof(char), 0, st1); cudaMemPrefetchAsync(node_g_to_sg, g.n * sizeof(int), 0, st1); //TODO reuse Bfs cudaMemPrefetchAsync(node_sg_to_g, g.n * sizeof(int), 0, st1); cudaMemPrefetchAsync(edge_mask, g.nnz * sizeof(char), 0, st1); cudaMemPrefetchAsync(edge_mask_orig, g.nnz * sizeof(char), 0, st1); cudaMemPrefetchAsync(reverse_edge_map, g.nnz * sizeof(int), 0, st1); cudaMemPrefetchAsync(e, g.n * sizeof(flow_t), 0, st1); cudaMemPrefetchAsync(bfs_offsets, (g.n+1) * sizeof(int), 0, st1); cudaMemPrefetchAsync(sg_level_offsets, (g.n+1) * sizeof(int), 0, st1); cudaMemPrefetchAsync(buf1, (g.n+1) * sizeof(int), 0, st1); cudaMemPrefetchAsync(buf2, (g.n+1) * sizeof(int), 0, st1); cudaMemPrefetchAsync(g.row_offsets, g.n * sizeof(int), 0, st1); cudaMemPrefetchAsync(g.col_indices, g.nnz * sizeof(int), 0, st1); cudaMemPrefetchAsync(cf, g.nnz * sizeof(flow_t), 0, st1); } MPM::~MPM() { //TODO free on host }
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/MPM/MPM_implem.cu
#include "MPM.h" #include "../matrix.h" double maxflowimplementation(csr_graph* g, int s, int t, float *elapsed_time) { MPM mpm(*g); return mpm.maxflow(s,t,elapsed_time); }
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/MPM/prune.cu
#include "MPM.h" #include "nvToolsExt.h" #include "../config.h" #define PRUNE_DIM_X 8 // //Prune : removing nodes will null throughput //First level is detected while executing push/pull // __device__ bool edge_op_prune(const int node_pruned, const int node_to_update, const int i_edge, char *prune_mask, flow_t *degree_to_update, flow_t *cf, char *edge_mask, const csr_subgraph &sg_in, const csr_subgraph &sg_out, post_prune_data *d_ppd, const int cur_flag) { if(i_edge == -1 || !edge_mask[i_edge]) return false; //if this is not a real edge, quit now flow_t cf_edge = cf[i_edge]; if(!isZero(cf_edge)) { edge_mask[i_edge] = 0; flow_t new_degree = atomicAdd(&degree_to_update[node_to_update], -cf_edge) - cf_edge; //TODO shouldnt have atomics if(isZero(new_degree)) { prune_mask[node_to_update] = 1; d_ppd->prune_flag = cur_flag; } } return false; //we have to iterate through all edges, we're not done } template<typename F1, typename F2> __device__ void node_op_prune(const int u, const csr_subgraph &sg_in, const csr_subgraph &sg_out, F1 f_forward_edge, F2 f_backward_edge, char *prune_mask, flow_t *in_degree, flow_t *out_degree, post_prune_data *d_ppd, const int flag) { if(!prune_mask[u]) return; prune_mask[u] = 0; //have_been_pruned[u] = 1; //if(threadIdx.x == 0) // printf("pruning %i \n", u); if(u == 0 || u == (sg_in.n-1)) { //s is 0, t is sg_in-1 d_ppd->s_t_pruned = 1; return; } //those two operations dont need to be serial //Deleting edges and updating neighbor's d_out //TODO warp div iterate_on_edges(u, sg_out, f_forward_edge, flag); iterate_on_edges(u, sg_in, f_backward_edge, flag); in_degree[u] = FLOW_INF; out_degree[u] = FLOW_INF; } void MPM::prune() { auto f_forward_edge = [*this] __device__ (const int node_pruned, const int node_to_update, const int i_edge, const int flag) { return edge_op_prune(node_pruned, node_to_update, i_edge, prune_mask, degree_in, cf, edge_mask, sg_in, sg_out, d_ppd, flag); }; auto f_backward_edge = [*this] __device__ (const int node_pruned, 
const int node_to_update, const int i_edge, const int flag) { return edge_op_prune(node_pruned, node_to_update, i_edge, prune_mask, degree_out, cf, edge_mask, sg_out, sg_in, d_ppd, flag); //TODO remove sg_in, sg_out }; auto f_node_flag = [*this, f_backward_edge, f_forward_edge] __device__ (const int node, const int flag) { node_op_prune(node, sg_in, sg_out, f_forward_edge, f_backward_edge, prune_mask, degree_in, degree_out, d_ppd, flag); }; nvtxRangePushA("prune"); // // End reduce // dim3 localBlock, localGrid; localBlock.x = PRUNE_DIM_X; localBlock.y = 1; localGrid.x = 1; localGrid.y = 1; bool done = false; int niters = 1; //do 3 iters at first int last_flag = 0; while(!done) { for(int it=0; it != niters; ++it) { ++last_flag; auto f_node = [last_flag, f_node_flag] __device__ (const int node) { f_node_flag(node, last_flag); }; apply_on_graph(sg_in.n, f_node, localGrid, localBlock, 0, st1); } //bug on cub //Find node to push cub::DeviceReduce::ArgMin(d_min_reduce, min_reduce_size, degree_in+1, &d_ppd->d_min, 2*(sg_in.n-1), st1); cudaMemcpyAsync(&h_ppd, d_ppd, sizeof(post_prune_data), cudaMemcpyDeviceToHost, st1); cudaStreamSynchronize(st1); done = (h_ppd.prune_flag != last_flag) || h_ppd.s_t_pruned; niters *= 2; //if(!done) // printf("lets go again - iter=%i \n", niters); } //printf("s_t_pruned : %i \n", h_ppd.s_t_pruned); //TODO could be a simple memset on have_been_pruned if *s_t_pruned nvtxRangePop(); }
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/MPM/MPM_gpu_kernels.cu
#include <limits> #include <cfloat> //for cuda #include "../cub/cub/cub.cuh" #include "../allocator.h" #include <stdlib.h> //TODO just for debug (exit) #define THREADS_PER_VERTEX 32 #define N_BLOCK_THREADS 512 //number of threads in a block #define BLOCK_Y_SIZE (N_BLOCK_THREADS / THREADS_PER_VERTEX) //init edge events cudaEvent_t memset_1, memset_2, start_init; //argmin events cudaEvent_t argmin_out, start_get_min; //push and pull events cudaEvent_t start_move_flow, end_move_flow; void createEvents() { cudaEventCreate(&memset_1); cudaEventCreate(&memset_2); cudaEventCreate(&start_init); cudaEventCreate(&start_get_min); cudaEventCreate(&argmin_out); cudaEventCreate(&start_move_flow); cudaEventCreate(&end_move_flow); } #include "../utils.cu" #include "get_subgraph.cu" #include "find_node_to_push.cu" #include "prune.cu" #include "push_pull.cu" #include "device_gppp.cu" #define N_THREADS 512 #define N_MAX_BLOCKS 65534 dim3 getBlock2D() { return dim3(THREADS_PER_VERTEX, BLOCK_Y_SIZE); } dim3 getGrid2D(int n) { return dim3(1, min((n + BLOCK_Y_SIZE -1) / BLOCK_Y_SIZE, N_MAX_BLOCKS)); } dim3 getBlock1D() { return dim3(N_THREADS); } dim3 getGrid1D(int n) { return dim3 (min((n + N_THREADS - 1) / N_THREADS, N_MAX_BLOCKS)); } // // Push/Pull function // wiq_prune, ll be called by iterate_(in|out)_neighbors // returns true if the caller should stop iterate // // DIRECTION : // FORWARD : push from u to v // BACKWARD : pull from u to v #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } // // Reduce-push-pull-prune kernel // Used if layered graph is small enough // // // // //
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/MPM/push_pull.cu
#include "../config.h"
#include "nvToolsExt.h"

//
// Pull/Push flow from/to source
//

#define MOVE_FLOW_DIM_X 32
#define MOVE_FLOW_DIM_Y 1

// Decodes the argmin result of the concatenated [degree_in(1..), degree_out(0..)]
// arrays back into a layered-graph node index. `imin` is the raw argmin key,
// `nsg` the number of nodes in the layered graph.
__host__ __device__ int imin_to_node_to_push(int imin, int nsg) {
    imin += 1;;
    return (imin < nsg) ? imin : (imin - nsg);
}

// Single-block kernel: moves the min-degree node's reference flow out of
// (threadIdx.y == 0, push toward sink) and into (threadIdx.y == 1, pull from
// source) the chosen node, and accumulates it into *d_total_flow.
// Also resets d_ppd->prune_flag for the next prune pass.
template<typename F1, typename F2>
__global__ void first_push_pull(post_prune_data *d_ppd, flow_t *d_total_flow, csr_graph g, int nsg, int *h, flow_t *e, int *node_sg_to_g, F1 f_node_push, F2 f_node_pull) {
    int node_to_push = imin_to_node_to_push(d_ppd->d_min.key, nsg);
    flow_t flow_to_push = d_ppd->d_min.value;

    //Flag need to be reset before next prune
    d_ppd->prune_flag = 0;

    int ithread = threadIdx.y * blockDim.x + threadIdx.x;

    if(ithread == 0) {
        *d_total_flow += flow_to_push;
        //printf("---------- pushing %i with %f - gn=%i \n", node_to_push, flow_to_push, nsg);
    }

    switch(threadIdx.y) {
        case 0:
            //push
            f_node_push(node_to_push, flow_to_push);
            break;
        case 1:
            //pull
            f_node_pull(node_to_push, flow_to_push);
            break;
    }
}

// Cooperative edge operation used by both push and pull. THREADS_VERTEX
// threads scan one node's edges; each thread owns one edge (i_edge == -1
// for padding lanes). A warp-level exclusive prefix sum over residual
// capacities decides how much flow each edge carries; saturated edges are
// removed from both masks and their reverse edge is re-enabled.
// Returns true when `to_push` has been fully distributed (caller stops).
template<int THREADS_VERTEX>
__device__ bool edge_op_move_flow(const int from, const int to, const int i_edge, flow_t &to_push, flow_t *degree_to, char *edge_mask, char *edge_mask_orig, char *prune_mask, flow_t *cf, flow_t *e, const int *reverse_edge_map, const int sg_t) {
    flow_t cf_edge = (i_edge != -1 && edge_mask[i_edge]) ? cf[i_edge] : 0;

    //
    // Exclusive sum of edges available capacities (cf = cap - flow)
    // If exclusive sum is >= to_push -> nothing to do for this thread
    // Else if exclusive + cf_edge <= to_push do a full push
    // Else do a partial push of to_push - exclusive
    //

    typedef cub::WarpScan<flow_t,THREADS_VERTEX> WarpScan;
    __shared__ typename WarpScan::TempStorage temp_storage[512/THREADS_VERTEX]; //TODO size, multiple thread
    int ithread = threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * (blockDim.x * blockDim.y);
    int i_logical_warp = ithread / THREADS_VERTEX;
    flow_t aggregate, exclusive_sum;
    exclusive_sum = cf_edge;
    WarpScan(temp_storage[i_logical_warp]).ExclusiveSum(exclusive_sum, exclusive_sum, aggregate);

    //Do not kill the threads with cf_edge = 0
    //We need to update to_push

    if(!isZero(cf_edge)) {
        flow_t local_push = 0;
        int rev_i_edge;
        if(exclusive_sum < to_push) {
            local_push = min(cf_edge, to_push - exclusive_sum);
            rev_i_edge = reverse_edge_map[i_edge];
        }

        // NOTE(review): rev_i_edge is only initialized when local_push > 0;
        // both branches below that read it are only reachable in that case
        // (cf_edge != 0 and cf_edge == local_push implies local_push > 0).
        if(isZero(cf_edge - local_push)) {
            //i_edge is now saturated
            cf[i_edge] = 0;
            edge_mask[i_edge] = 0;
            edge_mask_orig[i_edge] = 0;

            //rev_i_edge cant be in layer graph (only edges to next level)
            cf[rev_i_edge] += cf_edge;
            edge_mask_orig[rev_i_edge] = 1;
        } else if(local_push > 0) {
            //partial push on i_edge
            cf[i_edge] -= local_push;
            cf[rev_i_edge] += local_push;
            edge_mask_orig[rev_i_edge] = 1;
        }

        if(local_push > 0) {
            //printf("moving %f from %i to %i - exlu sum : %f \n", local_push, from, to, exclusive_sum);

            //Assign local_push flow to to
            //Multiple nodes on the same level can have an edge going to to
            //We need atomics here
            //We don't push to s or t
            //Avoiding useless atomics + avoiding memset (to set e[t'] = 0 at the end)
            if(to != 0 && to != sg_t)
                atomicAdd(&e[to], local_push);

            //Multiple nodes on the same level can have an edge going to to
            //We need atomics here
            flow_t new_degree_to = atomicAdd(&degree_to[to], -local_push) - local_push; //atomicAdd is postfix

            if(isZero(new_degree_to))
                prune_mask[to] = 1;
        }
    }

    to_push -= aggregate;

    return (to_push <= 0); //we're done if nothing left to push
}

// Propagates the reference flow chosen by the argmin step through the whole
// layered graph: a first kernel pushes/pulls out of the min node itself, then
// the remaining levels are swept level-by-level (forward toward the sink for
// pushes, backward toward the source for pulls) on stream st1.
void MPM::push_and_pull() {
    // Per-edge operations: push charges degree_in of the receiving node,
    // pull charges degree_out. sg_in.n - 1 is the sink's layered index.
    auto f_edge_push = [*this] __device__ (const int from, const int to, const int i_edge, flow_t &to_push) {
        edge_op_move_flow<MOVE_FLOW_DIM_X>(from, to, i_edge, to_push, degree_in, edge_mask, edge_mask_orig, prune_mask, cf, e, reverse_edge_map, sg_in.n-1);
    };

    auto f_edge_pull = [*this] __device__ (const int from, const int to, const int i_edge, flow_t &to_pull) {
        edge_op_move_flow<MOVE_FLOW_DIM_X>(from, to, i_edge, to_pull, degree_out, edge_mask, edge_mask_orig, prune_mask, cf, e, reverse_edge_map, sg_in.n-1);
    };

    // Per-node operations: distribute the node's excess over its out- (push)
    // or in- (pull) edges, then update its own degree and clear its excess.
    auto f_node_push = [*this, f_edge_push] __device__ (const int u, flow_t to_push) {
        if(isZero(to_push)) return; //it is an exact 0
        //printf("will push %f from %i \n", to_push, u);
        flow_t pushed = to_push;
        iterate_on_edges(u, sg_out, f_edge_push, to_push);
        if(threadIdx.x == 0) {
            flow_t new_degree = (degree_out[u] -= pushed);
            if(isZero(new_degree)) {
                prune_mask[u] = 1;
            }
            e[u] = 0;
        }
    };

    auto f_node_pull = [*this, f_edge_pull] __device__ (const int u, flow_t to_pull) {
        if(isZero(to_pull)) return; //it is an exact 0
        //printf("will pull %f from %i \n", to_pull, u);
        flow_t pulled = to_pull;
        iterate_on_edges(u, sg_in, f_edge_pull, to_pull);
        if(threadIdx.x == 0) {
            flow_t new_degree = (degree_in[u] -= pulled);
            if(isZero(new_degree)) {
                prune_mask[u] = 1;
            }
            e[u] = 0;
        }
    };

    // Level-sweep variants: the amount to move is the node's accumulated excess.
    auto f_node_push_e = [*this, f_node_push] __device__ (const int u) {
        f_node_push(u, e[u]);
    };

    auto f_node_pull_e = [*this, f_node_pull] __device__ (const int u) {
        f_node_pull(u, e[u]);
    };

    nvtxRangePushA("push_pull");

    //Launching first push pull
    // 32x2 block: y == 0 pushes, y == 1 pulls (see first_push_pull).
    dim3 sgrid, sblock;
    sgrid.x =1, sgrid.y = 1, sgrid.z = 1;
    sblock.x = 32, sblock.y = 2, sblock.z = 1;
    first_push_pull<<<sgrid,sblock,0,st1>>>(d_ppd, d_total_flow, g, sg_in.n, h, e, node_sg_to_g, f_node_push, f_node_pull);

    //Computing h[node_to_push] in the meantime (host-side copy of the argmin)
    kvpid h_min = h_ppd.d_min;
    int node_to_push = imin_to_node_to_push(h_min.key, sg_in.n);
    //printf("on host : %i (%f) \n", node_to_push, h_min.value);

    int level_node_to_push = 0; //could do a binary search
    while(node_to_push >= sg_level_offsets[level_node_to_push+1])
        level_node_to_push++;

    dim3 grid, block;
    block.x = MOVE_FLOW_DIM_X;
    block.y = MOVE_FLOW_DIM_Y;
    grid.x = 1;
    grid.y = 1;

    //cudaEvent_t pull_done;
    //cudaEventCreate(&pull_done);

    // Sweep excess forward (levels after the pushed node) then backward
    // (levels before it), one level at a time on the same stream.
    iterate_on_levels<FORWARD>(level_node_to_push+1, ht-1, sg_level_offsets, f_node_push_e, grid, block, 1, 0, st1, max_level_width);

    iterate_on_levels<BACKWARD>(level_node_to_push-1, 1, sg_level_offsets, f_node_pull_e, grid, block, 1, 0, st1, max_level_width);

    //cudaStreamWaitEvent(st1, pull_done, 0);
    //cudaEventRecord(pull_done, st2);

    nvtxRangePop();
}
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/MPM/csr_graph_mpm.h
// Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. #pragma once #include "../matrix.h" //TODO inheritance nvgraph csr DS //csr graph using same edge idx than its parent struct csr_graph_mpm : public csr_graph_reverse { //nodes degree double *degree_in; double *degree_out; //Mask to know if an edge is active char *edge_mask; int *edge_mask_orig; //active in the orig graph TODO refactoring //Distance source-node int *h; int *buf1; int *buf2; int *sg_level_offsets; int ht; //depth of sink //buffer must be of size at least g->n csr_graph_mpm(const csr_graph& g, int *buffer) : csr_graph_reverse(g, buffer), cf(g.vals_cap) //TODO copy if we want the details { buf1 = (int*)my_malloc(g.n * sizeof(int)); //TODO use bitset buf2 = (int*)my_malloc(g.n * sizeof(int)); //TODO use bitset sg_level_offsets = (int*)my_malloc((g.n+1) * sizeof(int)); //TODO use bitset h = (int*)my_malloc(g.n * sizeof(int)); } virtual void memFetch(int deviceID) { cudaMemPrefetchAsync(cf, nnz * sizeof(double), deviceID, 0); cudaMemPrefetchAsync(edge_mask, nnz * sizeof(int), deviceID, 0); cudaMemPrefetchAsync(buf2, n * sizeof(int), deviceID, 0); cudaMemPrefetchAsync(buf1, n * sizeof(int), deviceID, 0); cudaMemPrefetchAsync(node_g_to_sg, n * sizeof(int), deviceID, 0); cudaMemPrefetchAsync(node_sg_to_g, n * sizeof(int), deviceID, 0); cudaMemPrefetchAsync(sg_level_offsets, (n+1) * sizeof(int), deviceID, 0); csr_graph_reverse::memFetch(deviceID); } //mess with the copy of struct to gpu /* virtual ~csr_graph_mpm() { my_free(degree_in); my_free(degree_out); my_free(edge_mask); my_free(h); } */ };
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/MPM/device_gppp.cu
//Meta operation //loop find node/push pull/prune in device mem #define RPPP_X 32 #define RPPP_Y 16 #define RPPP_NTHREADS (RPPP_X * RPPP_Y) __global__ void reduce_push_pull_prune(csr_graph g, csr_subgraph sg_in, csr_subgraph sg_out, int ht, int *s_t_pruned, double *degree_in, double *degree_out, int *h, int *node_sg_to_g, char *edge_mask, char *edge_mask_orig, int *reverse_edge_map, double *cf, double *e, char *prune_mask, char *have_been_pruned, int *sg_level_offsets, double *d_total_flow) { int n = sg_in.n; /* __shared__ char s_t_pruned; __shared__ double degree_in[25]; __shared__ double degree_out[25]; //use __ballot and bitset __shared__ char edge_mask[12]; // bank conflits - use ballot for writing //Dot not use mask for nodes for now - too complicated //Not useful for 32-bits vals //Load offsets ? //only one thread can set a bit to 1, and random access : bitset with atomicOr //bitset __shared__ char prune_mask[12]; __shared__ char have_been_pruned[12]; __shared__ double e[12]; */ //Init int ithread = threadIdx.x + (blockDim.x) * threadIdx.y; if(ithread == 0) { *s_t_pruned = 0; } __syncthreads(); //End init do { // // Step 1 : Find node to push // __shared__ int node_to_push; __shared__ double flow_to_push; device_get_node_to_push<RPPP_X,RPPP_Y>(degree_in, degree_out, n, node_to_push, flow_to_push, ithread, d_total_flow); int level_node_to_push = h[node_sg_to_g[node_to_push]]; __syncthreads(); // // Step 2 : Push/Pull // device_push_pull<RPPP_X,RPPP_Y>(degree_in, degree_out, edge_mask, edge_mask_orig, reverse_edge_map, cf, e, prune_mask, node_to_push, flow_to_push, level_node_to_push, sg_in, sg_out, sg_level_offsets, ht, ithread); __syncthreads(); // // Step 3 : Prune // device_prune<RPPP_X,RPPP_Y>(ithread, RPPP_NTHREADS, degree_in, degree_out, cf, prune_mask, have_been_pruned, edge_mask, sg_in, sg_out, s_t_pruned); __syncthreads(); break; } while(!*s_t_pruned); }
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/MPM/get_node_to_push.cu
#include "MPM.h"
#include "../utils.cu"

// Host-side hook for the find-min phase; currently a no-op (the work is
// done on the device, see device_get_node_to_push below).
void MPM::get_node_to_push() {
    //printf("height winner : %i \n", *h_h_node_to_push);
}

// Block-cooperative argmin over the concatenated in/out degree arrays:
// the node with the smallest throughput capacity is the reference node.
// in_degree is scanned from index 1 (skipping the source); out_degree over
// its first n-1 entries. Results are written by thread 0 into the
// by-reference outputs, and the min flow is accumulated into *d_total_flow.
template<int BLOCK_DIM_X, int BLOCK_DIM_Y=1, int BLOCK_DIM_Z=1>
__device__ void device_get_node_to_push(double *in_degree, double *out_degree, int n, int &node_to_push, double &flow_to_push, const int ithread, double *d_total_flow) {
    cub::ArgMin argmin;
    kvpid argmin_in = blockArgMin<BLOCK_DIM_X,BLOCK_DIM_Y,BLOCK_DIM_Z>(in_degree+1, n-1); //avoiding source
    argmin_in.key += 1; //shift key back to the full-array index
    kvpid argmin_out = blockArgMin<BLOCK_DIM_X,BLOCK_DIM_Y,BLOCK_DIM_Z>(out_degree, n-1);

    if(ithread == 0) {
        kvpid m = argmin(argmin_in, argmin_out);
        node_to_push = m.key;
        flow_to_push = m.value;
        *d_total_flow += flow_to_push;
        printf("-> pushing %i with %f (in d) \n", node_to_push, flow_to_push);
    }
}
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/MPM/MPM.h
#pragma once #include "../matrix.h" #include <vector> #include "../cub/cub/cub.cuh" #include "../config.h" using std::vector; using cub::KeyValuePair; typedef KeyValuePair<int,flow_t> kvpid; struct post_prune_data { kvpid d_min; int prune_flag; //used to know if we're done with prune int s_t_pruned; }; class MPM { public: MPM(csr_graph &g); flow_t maxflow(int s, int t, float *time); __host__ __device__ virtual ~MPM(); //Should be private function members void init_level_graph(int& nsg); void get_node_to_push(); void push_and_pull(); void prune(); void write_edges(); void memFetch(); const csr_graph g; //Current query int s, t; //BFS int *q_bfs; int *h; int *bfs_offsets; int ht; int *d_nsg; //size subgraph, on device int *d_node_to_push; flow_t *d_flow_to_push; char *node_mask; // nodes active in layer graph char *queue_mask; // nodes active in layer graph (using their bfs queue idx) char *prune_mask; // nodes to prune - layer graph indexes char *have_been_pruned; char *edge_mask; // nodes to prune - layer graph indexes char *edge_mask_orig; // nodes to prune - layer graph indexes flow_t *d_total_flow; //total flow pushed so far - on device flow_t *e; //local excess - used in push/pull flow_t *cf; //edge cap - edge flow //Degree flow_t *degree; flow_t *degree_in; flow_t *degree_out; //Layer graph csr_subgraph sg_in, sg_out; int *node_sg_to_g; int *node_g_to_sg; int *sg_level_offsets; int max_level_width; cudaStream_t st1, st2; //streams used by kernels //CUB void *d_storage_partition = NULL; size_t size_storage_partition = 0; void *d_storage_exclusive_sum = NULL; size_t size_storage_exclusive_sum = 0; //Buffers - we may not need them int *buf1, *buf2; //Can be removed if memory becomes a pb int *reverse_edge_map; //Used by cub funcs in get_node_min post_prune_data h_ppd, *d_ppd; void *d_min_reduce = NULL; size_t min_reduce_size = 0; };
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/MPM/get_subgraph.cu
#include "../cub/cub/cub.cuh"
#include "../utils.cu"
#include "../config.h"
#include "nvToolsExt.h"

#define INIT_G_BLOCK_X 32
#define WRITE_EDGES_DIM_X 32
#define NTHREADS 512

// Grid-stride inversion of a permutation: reversed[hash[u]] = u.
// Used to build node_g_to_sg from node_sg_to_g.
__global__ void reverse_hash(int *reversed, int *hash, int num_items) {
    for(int u = blockDim.x * blockIdx.x + threadIdx.x;
        u < num_items;
        u += blockDim.x * gridDim.x) {
        reversed[hash[u]] = u;
    }
}

// Fills the layered subgraphs' edge arrays (sg_in / sg_out) and the per-node
// degrees. One warp of WRITE_EDGES_DIM_X threads scans each node's parent
// edges; warp prefix sums assign compact write slots, warp reductions
// accumulate degrees.
void MPM::write_edges() {
    auto f_edge = [*this] __device__ (const int from, const int to, const int i_edge, flow_t &degree_in_thread, flow_t &degree_out_thread, int &in_edge_offset, int &out_edge_offset) {
        // i_edge == -1 marks a padding lane; it still participates in scans.
        int rev_i_edge = (i_edge != -1) ? reverse_edge_map[i_edge] : -1;
        int is_valid_out_edge = (i_edge != -1) && edge_mask[i_edge];
        int is_valid_in_edge = (rev_i_edge != -1) && edge_mask[rev_i_edge];

        typedef cub::WarpScan<int,WRITE_EDGES_DIM_X> WarpScan;
        __shared__ typename WarpScan::TempStorage temp_storage_scan[NTHREADS/WRITE_EDGES_DIM_X];

        // Compute exclusive warp-wide prefix sums
        int ithread = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z;
        int warpid = ithread / WRITE_EDGES_DIM_X;
        int idx_in_edge = is_valid_in_edge, idx_out_edge = is_valid_out_edge;
        int n_in_edge_in_warp, n_out_edge_in_warp;
        WarpScan(temp_storage_scan[warpid]).ExclusiveSum(idx_in_edge,idx_in_edge, n_in_edge_in_warp);
        __syncthreads();
        WarpScan(temp_storage_scan[warpid]).ExclusiveSum(idx_out_edge,idx_out_edge, n_out_edge_in_warp);

        //scan is done, lets return inactive edges
        if(is_valid_out_edge) {
            //Computing degree
            degree_out_thread += cf[i_edge];

            //Writing edges
            int write_idx = out_edge_offset + idx_out_edge;
            sg_out.parent_edge_indices[write_idx] = i_edge;
            sg_out.col_indices[write_idx] = node_g_to_sg[to];
        } else if(is_valid_in_edge) {
            degree_in_thread += cf[rev_i_edge];

            //Writing edges
            int write_idx = in_edge_offset + idx_in_edge;
            sg_in.parent_edge_indices[write_idx] = rev_i_edge;
            sg_in.col_indices[write_idx] = node_g_to_sg[to];
        }

        // Advance write cursors past this warp-iteration's edges.
        out_edge_offset += n_out_edge_in_warp;
        in_edge_offset += n_in_edge_in_warp;
    };

    auto f_node = [*this, f_edge] __device__ (const int u) {
        typedef cub::WarpReduce<flow_t> WarpReduce;
        __shared__ typename WarpReduce::TempStorage temp_storage_reduce[NTHREADS/WRITE_EDGES_DIM_X];

        flow_t in_degree_thread = 0, out_degree_thread = 0;
        int out_edge_offset = sg_out.edge_offsets[u], in_edge_offset = sg_in.edge_offsets[u];
        int u_g = node_sg_to_g[u];//u in g
        iterate_on_edges(u_g, g, f_edge, in_degree_thread, out_degree_thread, in_edge_offset, out_edge_offset);

        int ithread = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z;
        int warpid = ithread / WRITE_EDGES_DIM_X;

        flow_t total_in_degree = WarpReduce(temp_storage_reduce[warpid]).Sum(in_degree_thread);
        __syncthreads();
        flow_t total_out_degree = WarpReduce(temp_storage_reduce[warpid]).Sum(out_degree_thread);
        __syncthreads();

        if(threadIdx.x == 0) {
            degree_in[u] = total_in_degree;
            degree_out[u] = total_out_degree;
        }
    };

    dim3 grid,block;
    block.x = WRITE_EDGES_DIM_X;
    block.y = 1;
    grid.x = 1;
    grid.y = 1;

    apply_on_graph(sg_in.n, f_node, grid, block, 0, st1);
}

// Builds the BFS layered graph for the current (s, t) query:
// 1) walks the BFS levels backward from the sink, marking edges/nodes that
//    lie on shortest s-t paths and counting per-node in/out edges;
// 2) compacts the surviving nodes (CUB PartitionFlagged) and turns edge
//    counts into CSR offsets (ExclusiveSum);
// 3) writes the subgraph edges/degrees (write_edges) and computes level
//    offsets and the maximum level width.
// On return, `nsg` holds the number of nodes in the layered graph.
void MPM::init_level_graph(int &nsg) {
    nvtxRangePushA("get_subgraph");

    cudaMemset(edge_mask, 0, sizeof(char) * g.nnz);
    cudaMemset(node_mask, 0, sizeof(char) * g.n);
    cudaMemset(queue_mask, 0, sizeof(char) * g.n);
    cudaMemset(d_ppd, 0, sizeof(post_prune_data)); //resetting s_t_pruned and prune_flag

    auto f_edge_init = [*this] __device__ (const int from, const int to, const int i_edge, int *n_in_edges, int *n_out_edges) {
        if(i_edge != -1) {
            int hto = h[to];
            int hfrom = h[from];

            if((hto + 1) == hfrom) {
                // going backward: `to` is one BFS level closer to the source
                int rev_i_edge = reverse_edge_map[i_edge];
                if(edge_mask_orig[rev_i_edge]) {
                    edge_mask[rev_i_edge] = 1; //the edge is part of subgraph
                    node_mask[to] = 1;         //to is part of subgraph
                    atomicAdd(n_in_edges, 1);
                }
            } else if(edge_mask[i_edge] && (hfrom + 1) == hto) {
                //going forward: edge_mask was set when the next (higher)
                //level was processed, since levels are swept backward.
                atomicAdd(n_out_edges, 1);
            }
        }
    };

    auto f_node_init = [*this, f_edge_init] __device__ (int idx, int u) {
        if(!node_mask[u]) return;

        if(threadIdx.x == 0) {
            sg_in.edge_offsets[idx] = 0;
            sg_out.edge_offsets[idx] = 0;
        }
        __syncthreads(); //this code shouldnt diverge

        // edge_offsets temporarily hold per-node edge COUNTS; the
        // ExclusiveSum below converts them to CSR offsets.
        iterate_on_edges(u, g, f_edge_init, &sg_in.edge_offsets[idx], &sg_out.edge_offsets[idx]);

        if(threadIdx.x == 0) {
            queue_mask[idx] = 1;
        }
    };

    //localgrid and block : what we actually need in our lambdas, the z-axis
    //will be use and defined internaly in iterate_on_levels
    dim3 localGrid(1, 1);
    dim3 localBlock(INIT_G_BLOCK_X, 1);

    // Seed the backward sweep: the sink is always part of the layered graph.
    char one = 1;
    cudaMemcpy(&node_mask[t], &one, sizeof(char), cudaMemcpyHostToDevice);

    iterate_on_levels<BACKWARD>(ht, 0, q_bfs, bfs_offsets, f_node_init, localGrid, localBlock,
                                1, // nodes per thread
                                0, 0);

    //Creating new list containing only vertices in layered graph
    //TODO we dont need g.n, we just need bfs_offsets[ht...]
    cudaEvent_t nsg_on_host;

    //List nodes in layer
    PartitionFlagged(node_sg_to_g, q_bfs, queue_mask, g.n, d_nsg, d_storage_partition, size_storage_partition);
    cudaMemcpy(&nsg, d_nsg, sizeof(int), cudaMemcpyDeviceToHost);

    //in edge count in layer
    PartitionFlagged(buf1, sg_in.edge_offsets, queue_mask, g.n, d_nsg, d_storage_partition, size_storage_partition);
    //out edge count in layer
    PartitionFlagged(buf2, sg_out.edge_offsets, queue_mask, g.n, d_nsg, d_storage_partition, size_storage_partition);

    sg_in.n = nsg;
    sg_out.n = nsg;

    // degree_in / degree_out share one backing array of size 2*nsg, laid out
    // back to back — this is what lets the argmin run over both at once.
    degree_in = degree;
    degree_out = degree + nsg;

    //TODO we could resize sg here (n)

    //
    // Compute offsets
    //
    ExclusiveSum(sg_in.edge_offsets, buf1, nsg+1, d_storage_exclusive_sum, size_storage_exclusive_sum); //we dont care about whats in g.buf[nsg] (exclusive sum), but we need the nsg+1 first elements of the sum
    ExclusiveSum(sg_out.edge_offsets, buf2, nsg+1, d_storage_exclusive_sum, size_storage_exclusive_sum);

    //TODO we could resize sg here (nnz)

    dim3 block, grid;
    block.x = 256;
    grid.x = min((nsg + block.x - 1)/block.x, N_MAX_BLOCKS);
    reverse_hash<<<grid,block>>>(node_g_to_sg, node_sg_to_g, nsg);

    //TODO cudaLaunch returns 0x7 in write_edges
    write_edges();

    // Count surviving nodes per BFS level, then scan into level offsets.
    SegmentedReduce(buf1, queue_mask, bfs_offsets, ht+2);
    ExclusiveSum(sg_level_offsets, buf1, ht+2, d_storage_exclusive_sum, size_storage_exclusive_sum);

    cudaDeviceSynchronize(); //CPU pagefaults
    max_level_width = 0;
    for(int i=0; i != (ht+1); ++i)
        max_level_width = max(max_level_width, sg_level_offsets[i+1] - sg_level_offsets[i]);

    //TODO we use it in prune
    cudaMemset(queue_mask, 0, sizeof(char) * g.n);

    cudaDeviceSynchronize();
    nvtxRangePop();
}
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/benchmarks/benchmark.py
#!/usr/bin/python
# Benchmark driver (Python 2): runs every maxflow implementation on a set of
# (matrix, source, sink) queries, checks each result against the first
# implementation (the reference), and reports timings and a ranking.

import os
import sys
import subprocess
import random
import time
import argparse

#config
#maxflow implementations; the FIRST entry is the correctness reference
implementations = [ "mpm-gpu-naive", "galois-preflowpush"]
log_filename = "bench_log.csv"
#end config

#parsing args
parser = argparse.ArgumentParser(description="Compute benchmarks of maxflow implementations")
parser.add_argument("--log", dest='log', action='store_const', const=1, default=0, help="Save individual benchmark results to logfile")
parser.add_argument("--make", dest='make', action='store_const', const=1, default=0, help="Make maxflow implementations")
args = parser.parse_args()
log = args.log
make = args.make

#make executables (or verify they already exist when --make is absent)
for implementation in implementations:
    if make:
        subprocess.call(["make", "-C", "..", "clean"])
        subprocess.call(["make", "-C", "..", implementation, "LOG_LEVEL=1"])
    else:
        if not os.path.isfile("../" + implementation):
            print("../" + implementation + " does not exist. Please use --make to compile it.")
            sys.exit(1)

# Current commit, recorded in the log so results can be tied to a revision.
commit_hash = subprocess.Popen(["git", "log", "-n", "1", "--pretty=format:\"%h\""], stdout=subprocess.PIPE).communicate()[0]
commit_title = subprocess.Popen(["git", "log", "-n", "1", "--pretty=format:\"%s\""], stdout=subprocess.PIPE).communicate()[0]

time_bench = time.time()

if log:
    logfile = open(log_filename, "a")

#text coloring (ANSI escapes: blue = reference, green = pass, red = fail)
def colorRef(val):
    return "\033[94m" + str(val) + "\033[0m"

def colorPassed(val):
    return "\033[92m" + str(val) + "\033[0m"

def colorFailed(val):
    return "\033[91m" + str(val) + "\033[0m"

def argmin(lst):
    # Index of the smallest element.
    return lst.index(min(lst))

def argsort(seq):
    # Indices that would sort `seq` ascending.
    return [x for x,y in sorted(enumerate(seq), key = lambda x: x[1])]

#extract runtime and flow from program output
# Expected output layout: "... <flow> <...> <...> <time> <...>" — the flow is
# the 4th-from-last whitespace token and the time the 2nd-from-last.
def flow_time_extract(res):
    time = res.rsplit(None, 2)[-2].rstrip()
    flow = res.rsplit(None, 4)[-4].rstrip()
    return flow,time

# Runs one (matrix, s, t) query through every implementation, compares each
# flow against the reference implementation's, updates pass/fail counters and
# the per-implementation win count, and prints one colored result row.
def test(matrix, s, t, w=None):
    global failed,passed,winners
    filename, file_extension = os.path.splitext(matrix)
    matrix = "../" + matrix
    if file_extension != '.gr': #we need to convert the graph first
        if not os.path.isfile(matrix + '.gr'):
            print "Converting " + matrix + " to gr format..."
            subprocess.call(['../data/convert_graph.sh', matrix, matrix + '.gr'])
        matrix += '.gr'
    times = []
    out_line = [matrix, str(s), str(t)]
    for i in range(len(implementations)):
        implementation = implementations[i]
        res = subprocess.Popen(["../" + implementation, matrix, str(s), str(t)], stdout=subprocess.PIPE).communicate()[0]
        flow,time = flow_time_extract(res)
        if i==0: #reference
            ref_flow = flow
            out_line.append(ref_flow)
            times.append(float(time))
            out_line.append(colorRef(time))
        else:
            if flow == ref_flow:
                out_line.append(colorPassed(time))
                times.append(float(time))
                passed += 1
            else:
                # Wrong answer: record a sentinel so it can never "win".
                out_line.append(colorFailed(time))
                times.append(sys.maxint)
                failed += 1
        if log:
            logfile_line = [str(time_bench), commit_hash, commit_title, implementation, matrix, str(s), str(t), time, flow]
            logfile.write(', '.join(logfile_line) + "\n")
            logfile.flush()
    best = argmin(times)
    winners[best] += 1
    out_line.append(implementations[best])
    print ', '.join(out_line)

passed = 0
failed = 0
#winners[i] : number of times implementations[i] was the best one
winners = [0] * len(implementations)

print '=== BENCHMARKS ==='

random.seed(1234321)

# save header
header = ['matrix', 'source', 'sink', 'flow']
header.extend(implementations)
header.append("best")
print ', '.join(header)

test('data/wiki2003.mtx', 3, 12563)
test('data/wiki2003.mtx', 54, 1432)
test('data/wiki2003.mtx', 65, 7889)
test('data/wiki2003.mtx', 43242, 5634)
test('data/wiki2003.mtx', 78125, 327941)
test('data/wiki2003.mtx', 2314, 76204)

test('data/roadNet-CA.mtx', 2314, 76204)
test('data/roadNet-CA.mtx', 9, 1247)
test('data/roadNet-CA.mtx', 1548, 365940)
test('data/roadNet-CA.mtx', 1548785, 654123)
test('data/roadNet-CA.mtx', 8, 284672)

# USA road network (23.9M vertices, 28.8M edges)
test('data/road_usa.mtx', 125, 7846232)
test('data/road_usa.mtx', 458743, 321975)
test('data/road_usa.mtx', 96, 4105465)
test('data/road_usa.mtx', 5478, 658413)
test('data/road_usa.mtx', 364782, 32)
#test('data/road_usa.mtx', 21257849, 2502578)
#test('data/road_usa.mtx', 12345678, 23000000)
#test('data/road_usa.mtx', 16807742, 17453608)

# wikipedia (3.7M vertices, 66.4M edges)
test('data/wiki2011.mtx', 254, 87452)
test('data/wiki2011.mtx', 315547, 874528)
test('data/wiki2011.mtx', 8796, 673214)

if log:
    logfile.close()

print '=== SUMMARY ==='
print str(failed) + ' tests failed out of ' + str(passed + failed)
print "Implementations ranking : "
w_indexes = reversed(argsort(winners))
for w_idx in w_indexes:
    print implementations[w_idx] + " : " + str(winners[w_idx]) + " win(s)"
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/benchmarks/benchmarks_log.csv
1472508703.3, "264e4d2", "working commit : limit nblocks 65000, untested", edmonds-karp-cpu, data/roadNet-CA.mtx, 1, 10000, 0.670, 1
1472508703.3, "264e4d2", "working commit : limit nblocks 65000, untested", mpm-gpu-naive, data/roadNet-CA.mtx, 1, 10000, 0.503, 1
1472508703.3, "264e4d2", "working commit : limit nblocks 65000, untested", edmonds-karp-cpu, data/roadNet-CA.mtx, 1, 1000, 0.168, 0.833333
1472508819.28, "264e4d2", "working commit : limit nblocks 65000, untested", edmonds-karp-cpu, data/roadNet-CA.mtx, 1, 10000, 0.671, 1
1472508819.28, "264e4d2", "working commit : limit nblocks 65000, untested", mpm-gpu-naive, data/roadNet-CA.mtx, 1, 10000, 0.500, 1
1472508819.28, "264e4d2", "working commit : limit nblocks 65000, untested", edmonds-karp-cpu, data/roadNet-CA.mtx, 1, 1000, 0.169, 0.833333
1472508819.28, "264e4d2", "working commit : limit nblocks 65000, untested", mpm-gpu-naive, data/roadNet-CA.mtx, 1, 1000, 0.415, 0.833333
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/benchmarks/benchmarks.cpp
#define _POSIX_C_SOURCE 199309L #include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include <time.h> #include <assert.h> #include "../allocator.h" #include "../matrix_io.h" #include "../matrix.h" #include "../MPM/MPM.h" #include <sstream> #include "../boost-push-relabel/push-relabel.h" using std::stringstream; void do_benchmarks() { vector<pair<int,int>> st_roadCA = {{1,1000}}; map<string, vector<pair<int,int>>> todo; todo.insert({"data/roadNet-CA.mtx", st_roadCA); for(auto& g_sts : todo) { string g_path = g_sts.first; vector<pair<int,int>> ls_s_t = g_sts.second; csr_graph g; // read capacity graph, generate symmetric entries read_mm_matrix(argv[1], &g.n, &g.n, &g.nnz, &g.row_offsets, &g.col_indices, &g.vals_cap); if (argc == 4) g.set_edge_weights_rcp_degree(); //Using MPM MPM mpm(*g); for(auto st : ls_s_t) { int s = st.first; int t = st.second; stringstream dimacs; export_to_dimacs(dimacs, g, s, t); float pr_time, mpm_time; boost_push_relabel(dimacs, &pr_time); mpm.maxflow(s, t, &mpm_time); } my_free(g.row_offsets); my_free(g.col_indices); my_free(g.vals_cap); mpm.clean(); } } int main(int argc, char **argv) { do_benchmarks(); }
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/bfs/bfs_gpu_naive.cu
// Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
#include <stdio.h>
#include "../matrix.h"
#include "bfs.h"
#include "nvToolsExt.h"

//Generic tools handling masks and fill
#define THREADS_PER_VERTEX_BFS 4
#define N_BLOCKS_MAX 65535

// Device kernel: data[tid] = value for every tid < size.
template<typename T>
__global__ void fill_kernel(int size, T *data, T value)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid < size)
    data[tid] = value;
}

// Host helper: fill `size` elements of device array `data` with `value`,
// then synchronize so the write is visible before the caller proceeds.
template<typename T>
void fill(int size, T *data, T value)
{
  fill_kernel<<<(size + 255)/256, 256>>>(size, data, value);
  cudaDeviceSynchronize();
}

// main bfs kernel: finds next frontier using blockDim.x threads per vertex
//level_width[i] contain the max width of the ith level
//
// q[start..end) holds the current frontier.  Each (y) lane takes one frontier
// vertex u; the blockDim.x (x) lanes cooperatively scan u's CSR adjacency.
// A neighbor v is claimed via atomicCAS on output[v] (sentinel -5 means
// undiscovered); the winning thread appends v to q through atomicAdd(d_next).
// Edges whose mask byte is 0 are skipped.
// On reaching target t, *found (zero-copy host memory) is set and this thread
// returns early; other threads finish their current work normally.
__global__ void next_frontier(int start, int end, int *d_next, int *row_offsets, int *col_indices, char *mask, int t, int *q, int *output, bool mark_pred, int bfs_level, int *found)
{
  for(int idx = start + blockIdx.y * blockDim.y + threadIdx.y; idx < end; idx += blockDim.y * gridDim.y)
  {
    // current frontier
    int u = q[idx];
    // writing node level TODO optional
    // loop over neighbor vertices
    for (int i = row_offsets[u] + threadIdx.x; i < row_offsets[u+1]; i += blockDim.x)
    {
      // next frontier
      int v = col_indices[i];
      // only advance if we haven't visited & mask allows it
      if (output[v] == -5 && mask[i])
      {
        // critical section below to avoid collisions from multiple threads
        if (atomicCAS(&output[v], -5, mark_pred ? u : (bfs_level+1)) == -5)
        {
          // add new vertex to our queue
          int pos = atomicAdd(d_next, 1);
          q[pos] = v;
          if (v == t)
          {
            // early exit if we found the path
            *found = 1;
            return;
          }
        }
      }
    }
  }
}

//BFS GPU naive implementation
//
// Level-synchronous BFS over the CSR graph (row_offsets/col_indices),
// restricted to edges with a nonzero mask byte.
//   output_type == BFS_MARK_PREDECESSOR: output[v] = BFS parent of v
//   otherwise:                           output[v] = BFS level of v
// q is the (device) vertex work queue; bfs_offsets[i] receives the queue
// offset where level i starts, so callers can replay the levels.
// Returns nonzero iff t was reached from s.
// NOTE(review): q and output appear to be accessed by both host and device
// here (q[0] = s, then passed to kernels) — presumably managed/zero-copy
// allocations owned by the caller; confirm against the call site.
int bfs(int *row_offsets, int *col_indices, int num_nodes, int num_edges, int s, int t, int *q, int *output, int output_type, char *mask, int *bfs_offsets)
{
  nvtxRangePushA("BFS");
  // set all vertices as undiscovered (-5)
  fill(num_nodes, output, -5);
  // start with source vertex
  q[0] = s;
  bool mark_pred = (output_type == BFS_MARK_PREDECESSOR);
  output[s] = mark_pred ? s : 0;
  // found flag (zero-copy memory)
  // Lazily allocated once and reused across calls (static).
  static int *found = NULL;
  if (!found) cudaMallocHost(&found, sizeof(int));
  *found = 0;
  // device counter: next free slot in q (doubles as total enqueued count)
  static int *d_next = NULL;
  if (!d_next) cudaMalloc(&d_next, sizeof(int));
  int h_start = 0, h_end = 1;
  cudaMemcpy(d_next, &h_end, sizeof(int), cudaMemcpyHostToDevice);
  int bfs_level = 0;
  int off_idx = 0;
  bfs_offsets[off_idx++] = 0;
  // 128 threads per block: x = lanes per vertex, y = vertices per block
  dim3 block(THREADS_PER_VERTEX_BFS, 128 / THREADS_PER_VERTEX_BFS);
  do {
    // calculate grid size
    int nitems;
    nitems = h_end - h_start;
#if LOG_LEVEL > 4
    printf(" bfs level %i: %i vertices :\n", bfs_level, nitems);
    for(int i=h_start; i!=h_end; ++i)
      printf("%i\t", q[i]);
    printf("\n");
#endif
    // grid-stride in y over the frontier, capped at N_BLOCKS_MAX blocks
    dim3 grid(1, min((nitems + block.y-1) / block.y, N_BLOCKS_MAX));
    next_frontier<<<grid, block>>>(h_start, h_end, d_next, row_offsets, col_indices, mask, t, q, output, mark_pred, bfs_level, found);
    bfs_offsets[off_idx++] = h_end;
    h_start = h_end;
    // this default-stream copy also serializes with the kernel above,
    // so *found is stable once it completes
    cudaMemcpy(&h_end, d_next, sizeof(int), cudaMemcpyDeviceToHost);
    ++bfs_level;
  } while(h_start < h_end && *found == 0);
  bfs_offsets[off_idx++] = h_end;
#if LOG_LEVEL > 1
  if (*found) printf("bfs: traversed vertices %d (%.0f%%), ", h_end, (double)100.0*h_end/num_nodes);
#endif
  nvtxRangePop();
  return (*found);
}
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/bfs/bfs_gpu_gunrock.cu
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
// Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.

/**
 * @file
 * test_bfs.cu
 *
 * @brief Simple test driver program for breadth-first search.
 */

#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <algorithm>
#include <iostream>
#include <fstream>

// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
#include <gunrock/util/track_utils.cuh>

// BFS includes
#include <gunrock/app/bfs/bfs_enactor.cuh>
#include <gunrock/app/bfs/bfs_problem.cuh>
#include <gunrock/app/bfs/bfs_functor.cuh>

// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>

#include <moderngpu.cuh>

// graph structure
#include "../matrix.h"

//Generic tools handling fill
#include "../graph_tools.h"

#include "bfs.h"

using namespace gunrock;
using namespace gunrock::app;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::bfs;

// Host-side reference BFS used as a fallback / sanity check for the
// Gunrock path.  Walks the CSR graph from src_node, writing each discovered
// vertex's BFS parent into `parents`; edges with a zero col_mask entry are
// skipped, and the walk stops early once dst_node is claimed.
// NOTE(review): assumes the caller pre-filled `parents` with -1 — confirm.
void ref_bfs_mask(const int src_node, const int dst_node, const int num_nodes,
                  const int num_edges, const int *row_offsets,
                  const int *col_indices, const int *col_mask, int *parents)
{
  int *q = (int*)malloc(num_nodes * sizeof(int));
  q[0] = src_node;
  parents[src_node] = src_node;
  int idx = -1;
  int size = 1;
  int found = 0;
  while (idx+1 < size && !found) {
    idx++;
    int u = q[idx];
    for (int i = row_offsets[u]; i < row_offsets[u+1]; i++) {
      int v = col_indices[i];
      if (parents[v] == -1 && col_mask[i]) {
        parents[v] = u;
        if (v == dst_node) {
          found = 1;
          break;
        } else {
          q[size] = v;
          size++;
        }
      }
    }
  }
  free(q);  // FIX: the work queue used to leak on every call
}

// BFS over the CSR graph via Gunrock, restricted to edges whose col_mask
// entry is nonzero.  output[v] receives the BFS label (IDEMPOTENCE on,
// MARK_PREDECESSORS off here; -1 = unreached after the initial fill).
// q and output_type are accepted for signature compatibility with the other
// bfs backends; the Gunrock path manages its own frontier queues.
// Returns nonzero iff dst_node is in range and was reached.
int bfs(int *row_offsets, int *col_indices, int num_nodes, int num_edges,
        int src_node, int dst_node, int *q, int *output, int output_type,
        int *col_mask)
{
  // set all vertices as undiscovered (-1)
  fill(num_nodes, output, -1);
  cudaDeviceSynchronize();
  bool mark_pred = (output_type == BFS_MARK_PREDECESSOR);

#if 0
  // Reference CPU fallback (disabled).
  // FIX: referenced an undeclared `parents`; route results into `output`.
  ref_bfs_mask(src_node, dst_node, num_nodes, num_edges, row_offsets,
               col_indices, col_mask, output);
  return cudaSuccess;
#else
  typedef int VertexId;
  typedef int SizeT;
  typedef int Value;
  typedef BFSProblem <VertexId,SizeT,Value,
                      false, // MARK_PREDECESSORS
                      true>  // IDEMPOTENCE
    Problem;
  typedef BFSEnactor <Problem> Enactor;

  cudaError_t retval = cudaSuccess;

  Info<VertexId, SizeT, Value> *info = new Info<VertexId, SizeT, Value>;
  info->InitBase2("BFS");
  ContextPtr *context = (ContextPtr*)info->context;
  cudaStream_t *streams = (cudaStream_t*)info->streams;

  int *gpu_idx = new int[1];
  gpu_idx[0] = 0;

  Problem *problem = new Problem(false, false); //no direction optimized, no undirected
  if (retval = util::GRError(problem->Init(
        false, //stream_from_host (depricated)
        row_offsets,
        col_indices,
        col_mask,
        output,
        num_nodes,
        num_edges,
        1,
        NULL,
        "random",
        streams),
      "BFS Problem Init failed", __FILE__, __LINE__)) return retval;

  Enactor *enactor = new Enactor(1, gpu_idx);

  if (retval = util::GRError(enactor->Init(context, problem),
      "BFS Enactor Init failed.", __FILE__, __LINE__)) return retval;
  if (retval = util::GRError(problem->Reset(
        src_node, enactor->GetFrontierType()),
      "BFS Problem Reset failed", __FILE__, __LINE__)) return retval;
  if (retval = util::GRError(enactor->Reset(),
      "BFS Enactor Reset failed", __FILE__, __LINE__)) return retval;
  if (retval = util::GRError(enactor->Enact(src_node),
      "BFS Enact failed", __FILE__, __LINE__)) return retval;
  if (retval = util::GRError(problem->Extract(output, NULL),
      "BFS Extract failed", __FILE__, __LINE__)) return retval;

  // free memory
  delete info;
  delete problem;
  delete enactor;

  //check if path exists
  //MAX_INT default value for src dis TODO
  return (dst_node >= 0 && dst_node < num_nodes) && (output[dst_node] != -1);
#endif
}

// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/bfs/bfs_gunrock.cu
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
// Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.

/**
 * @file
 * test_bfs.cu
 *
 * @brief Simple test driver program for breadth-first search.
 */

#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <algorithm>
#include <iostream>
#include <fstream>

// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
#include <gunrock/util/track_utils.cuh>

// BFS includes
#include <gunrock/app/bfs/bfs_enactor.cuh>
#include <gunrock/app/bfs/bfs_problem.cuh>
#include <gunrock/app/bfs/bfs_functor.cuh>

// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>

#include <moderngpu.cuh>

// graph structure
#include "../matrix.h"

//Generic tools handling masks and fill
#include "bfs_tools.cu"

using namespace gunrock;
using namespace gunrock::app;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::bfs;

// Host-side reference BFS used as a fallback / sanity check for the
// Gunrock path.  Walks the CSR graph from src_node, writing each discovered
// vertex's BFS parent into `parents`; edges with a zero col_mask entry are
// skipped, and the walk stops early once dst_node is claimed.
// NOTE(review): assumes the caller pre-filled `parents` with -1 — confirm.
void ref_bfs_mask(const int src_node, const int dst_node, const int num_nodes,
                  const int num_edges, const int *row_offsets,
                  const int *col_indices, const int *col_mask, int *parents)
{
  int *q = (int*)malloc(num_nodes * sizeof(int));
  q[0] = src_node;
  parents[src_node] = src_node;
  int idx = -1;
  int size = 1;
  int found = 0;
  while (idx+1 < size && !found) {
    idx++;
    int u = q[idx];
    for (int i = row_offsets[u]; i < row_offsets[u+1]; i++) {
      int v = col_indices[i];
      if (parents[v] == -1 && col_mask[i]) {
        parents[v] = u;
        if (v == dst_node) {
          found = 1;
          break;
        } else {
          q[size] = v;
          size++;
        }
      }
    }
  }
  free(q);  // FIX: the work queue used to leak on every call
}

// Masked BFS through Gunrock: marks each reached vertex's predecessor in
// `parents` (MARK_PREDECESSORS on, IDEMPOTENCE on), traversing only edges
// whose col_mask entry is nonzero.  Returns the CUDA/Gunrock status.
cudaError_t bfs_mask(int src_node, int dst_node, int num_nodes, int num_edges,
                     int *row_offsets, int *col_indices, int *col_mask,
                     int *parents)
{
#if 0
  // Reference CPU fallback (disabled).
  ref_bfs_mask(src_node, dst_node, num_nodes, num_edges, row_offsets,
               col_indices, col_mask, parents);
  return cudaSuccess;
#else
  typedef int VertexId;
  typedef int SizeT;
  typedef int Value;
  typedef BFSProblem <VertexId,SizeT,Value,
                      true, // MARK_PREDECESSORS
                      true> // IDEMPOTENCE
    Problem;
  typedef BFSEnactor <Problem> Enactor;

  cudaError_t retval = cudaSuccess;

  Info<VertexId, SizeT, Value> *info = new Info<VertexId, SizeT, Value>;
  info->InitBase2("BFS");
  ContextPtr *context = (ContextPtr*)info->context;
  cudaStream_t *streams = (cudaStream_t*)info->streams;

  int *gpu_idx = new int[1];
  gpu_idx[0] = 0;

  Problem *problem = new Problem(false, false); //no direction optimized, no undirected
  if (retval = util::GRError(problem->Init(
        false, //stream_from_host (depricated)
        row_offsets,
        col_indices,
        col_mask,
        parents,
        num_nodes,
        num_edges,
        1,
        NULL,
        "random",
        streams),
      "BFS Problem Init failed", __FILE__, __LINE__)) return retval;

  Enactor *enactor = new Enactor(1, gpu_idx);

  if (retval = util::GRError(enactor->Init(context, problem),
      "BFS Enactor Init failed.", __FILE__, __LINE__)) return retval;
  if (retval = util::GRError(problem->Reset(
        src_node, enactor->GetFrontierType()),
      "BFS Problem Reset failed", __FILE__, __LINE__)) return retval;
  if (retval = util::GRError(enactor->Reset(),
      "BFS Enactor Reset failed", __FILE__, __LINE__)) return retval;
  if (retval = util::GRError(enactor->Enact(src_node),
      "BFS Enact failed", __FILE__, __LINE__)) return retval;

  // free memory
  delete info;
  delete problem;
  delete enactor;

  return retval;
#endif
}

//BFS gunrock implementation
// Entry point matching the other bfs backends: p[v] receives v's BFS
// predecessor (-1 = unreached); edges are masked out by setup_mask using the
// graph's capacity/flow arrays before traversal.  `q` is unused here —
// Gunrock manages its own frontier — and is kept for signature compatibility.
// Returns nonzero iff t was reached from s.
int bfs(csr_graph *g, int s, int t, int *q, int *p, int *mask)
{
  // set all vertices as undiscovered (-1)
  fill<-1><<<(g->n + 255)/256, 256>>>(g->n, p);
  cudaDeviceSynchronize();

  // setup mask, TODO: move this step inside Gunrock to reduce BW
  setup_mask<<<(g->nnz + 255)/256, 256>>>(g->nnz, mask, g->vals_cap, g->vals_flow);

  // run bfs (with mask)
  bfs_mask(s, t, g->n, g->nnz, g->row_offsets, g->col_indices, mask, p);

  // check if path exists
  return (p[t] != -1);
}

// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/bfs/bfs_cpu_omp.cpp
// Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. #include <stdio.h> #include <stdlib.h> #include <string.h> #include "../matrix.h" #include "bfs.h" //BFS CPU OMP implementation int bfs(int *row_offsets, int *col_indices, int num_nodes, int num_edges, int s, int t, int *q, int *output, int output_type, char *mask, int* bfs_offsets) { // set all vertices as undiscovered (-1) #pragma omp parallel for for (int i = 0; i < num_nodes; i++) output[i] = -1; // start with source vertex q[0] = s; bool mark_pred = (output_type == BFS_MARK_PREDECESSOR); output[s] = mark_pred ? s : 0; int size = 1; int start_idx = 0; int end_idx = size; int found = 0; int bfs_level = 0; while(!found && start_idx < end_idx) { #pragma omp parallel for for(int idx = start_idx; idx < end_idx; idx++) { int u = q[idx]; for (int i = row_offsets[u]; i < row_offsets[u+1]; i++) { int v = col_indices[i]; if(output[v] == -1 && mask[i]) { if(mask[i] && __sync_val_compare_and_swap(&output[v], -1, mark_pred ? u : (bfs_level+1)) == -1) { if (v == t) { found = 1; break; } int pos = __sync_fetch_and_add (&size, 1); q[pos] = v; } } } } start_idx = end_idx; end_idx = size; ++bfs_level; } return found; }
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/bfs/bfs_cpu.cpp
// Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "../allocator.h"
#include "../matrix.h"
#include "bfs.h"

//BFS CPU implementation
//
// Sequential reference BFS over the CSR graph (row_offsets/col_indices),
// traversing only edges whose mask byte is nonzero.
//   output_type == BFS_MARK_PREDECESSOR: output[v] = BFS parent of v
//   otherwise:                           output[v] = output[u]+1, i.e. v's
//                                        BFS level (-1 = unreached)
// q is a caller-provided work queue (must hold up to num_nodes ints);
// bfs_offsets is unused by this backend and kept for signature
// compatibility with the GPU implementation.
// Returns nonzero iff t was reached from s.
// NOTE(review): the `#pragma omp parallel num_threads(1)` wrapper forces a
// single thread — presumably kept so timing matches the OMP build; confirm.
int bfs(int *row_offsets, int *col_indices, int num_nodes, int num_edges, int s, int t, int *q, int *output, int output_type, char *mask, int *bfs_offsets)
{
  int found = 0;
  #pragma omp parallel num_threads(1)
  {
    int edges = 0; // count number of visited edges
    // set all vertices as undiscovered (-1)
    for (int i = 0; i < num_nodes; i++)
      output[i] = -1;
    // start with source vertex
    q[0] = s;
    output[s] = (output_type == BFS_MARK_PREDECESSOR) ? s : 0;
#if LOG_LEVEL > 3
    // per-level statistics, only compiled in at high log levels
    int *bfs_level = (int*)my_malloc(num_nodes * sizeof(int));
    int *bfs_vertices = (int*)my_malloc(num_nodes * sizeof(int));
    int *bfs_edges = (int*)my_malloc(num_nodes * sizeof(int));
    memset(bfs_level, 0, num_nodes * sizeof(int));
    memset(bfs_vertices, 0, num_nodes * sizeof(int));
    memset(bfs_edges, 0, num_nodes * sizeof(int));
    bfs_vertices[0] = 1;
    bfs_edges[0] = row_offsets[s+1] - row_offsets[s];
#endif
    // q[0..size) holds discovered vertices; idx scans it FIFO-style
    int idx = -1;
    int size = 1;
    while (idx+1 < size && !found) {
      idx = idx+1;
      int u = q[idx];
      for (int i = row_offsets[u]; i < row_offsets[u+1]; i++) {
        int v = col_indices[i];
        edges++;
        if (output[v] == -1 && mask[i]) {
          // claim v: record its parent or its depth
          output[v] = (output_type == BFS_MARK_PREDECESSOR) ? u : output[u]+1;
#if LOG_LEVEL > 3
          bfs_level[v] = bfs_level[u] + 1;
          bfs_vertices[bfs_level[v]]++;
          bfs_edges[bfs_level[v]] += row_offsets[v+1] - row_offsets[v];
#endif
          if (v == t) {
            // early exit: sink reached, skip enqueueing it
            found = 1;
#if LOG_LEVEL > 1
            printf("bfs: traversed vertices %d (%.0f%%), traversed edges %d (%.0f%%), ", size, (double)100.0*size/num_nodes, edges, (double)100*edges/num_edges);
#endif
#if LOG_LEVEL > 3
            printf("\n");
            for (int i = 0; i < bfs_level[v]; i++)
              printf(" bfs level %i: %i vertices, %i edges\n", i, bfs_vertices[i], bfs_edges[i]);
#endif
            break;
          }
          q[size] = v;
          size++;
        }
      }
    }
#if LOG_LEVEL > 3
    my_free(bfs_level);
    my_free(bfs_vertices);
    my_free(bfs_edges);
#endif
  }
  return found;
}
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/bfs/bfs.h
#pragma once #include <vector> using std::vector; //Output types #define BFS_MARK_PREDECESSOR 0 #define BFS_MARK_DEPTH 1 int bfs(int *row_offsets, int *col_indices, int num_nodes, int num_edges, int src_node, int dst_node, int *q, int *output, int output_type, char *col_mask, int *bfs_offsets);
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/galois/README
See online documentation at: http://iss.ices.utexas.edu/?p=projects/galois/doc/current/getting_started See build/readme.txt for build instructions
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/galois/CMakeLists.txt
# Top-level build configuration for the Galois library and its apps.
# Sections: project/version -> user options -> compiler detection/flags ->
# feature detection (threads, NUMA, hugepages, Boost, C++11) -> helper
# functions for building apps -> subdirectories -> docs/packaging/install.
cmake_minimum_required(VERSION 2.8.8)
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/Modules/")
project(Galois)
set(GALOIS_VERSION_MAJOR "2")
set(GALOIS_VERSION_MINOR "2")
set(GALOIS_VERSION_PATCH "1")
set(GALOIS_VERSION ${GALOIS_VERSION_MAJOR}.${GALOIS_VERSION_MINOR}.${GALOIS_VERSION_PATCH})
set(GALOIS_COPYRIGHT_YEAR "2014") # Also in COPYRIGHT

if(NOT CMAKE_BUILD_TYPE)
  message(STATUS "No build type selected, default to Release")
  set(CMAKE_BUILD_TYPE "Release")
endif()

###### Options (alternatively pass as options to cmake -DName=Value) ######
set(USE_GPROF OFF CACHE BOOL "Enable GCC profiling")
set(USE_VTUNE OFF CACHE BOOL "Use VTune for profiling")
set(USE_PAPI OFF CACHE BOOL "Use PAPI counters for profiling")
set(USE_HPCTOOLKIT OFF CACHE BOOL "Use HPCToolKit for profiling")
set(USE_STRICT_CONFIG OFF CACHE BOOL "Instead of falling back gracefully, fail")
set(USE_LONGJMP ON CACHE BOOL "Use longjmp instead of exceptions to signal aborts")
set(INSTALL_APPS OFF CACHE BOOL "Install apps as well as library")
set(SKIP_COMPILE_APPS OFF CACHE BOOL "Skip compilation of applications using Galois library")
set(INSTALL_LIB_DIR lib CACHE PATH "Installation directory for libraries")
set(INSTALL_BIN_DIR bin CACHE PATH "Installation directory for executables")
set(INSTALL_INCLUDE_DIR include CACHE PATH "Installation directory for header files")
set(INSTALL_CMAKE_DIR lib/cmake/Galois CACHE PATH "Installation directory for CMake files")

# Make relative paths absolute
foreach(p LIB BIN INCLUDE CMAKE)
  set(var INSTALL_${p}_DIR)
  if(NOT IS_ABSOLUTE "${${var}}")
    set(${var} "${CMAKE_INSTALL_PREFIX}/${${var}}")
  endif()
endforeach()

# Enable iss specific options; should be OFF in the general release; all guarded by USE_EXP
set(USE_EXP OFF CACHE BOOL "Use experimental features")
set(USE_HTM OFF CACHE BOOL "Use HTM")
set(EXP_DOALL "PTHREAD" CACHE STRING "Which type of implementation of parallel_doall")
set(USE_PROF OFF CACHE BOOL "Use profiling specific features")
set(USE_SUBVERSION_REVISION ON CACHE BOOL "Embed subversion numbers")

###### Configure (users don't need to go beyond here) ######
enable_testing()

###### Configure compiler ######

# ICC
if(CMAKE_CXX_COMPILER_ID MATCHES "Intel")
  execute_process(COMMAND ${CMAKE_CXX_COMPILER} -dumpversion OUTPUT_VARIABLE ICC_VERSION)
  if(ICC_VERSION VERSION_GREATER 13.0 OR ICC_VERSION VERSION_EQUAL 13.0)
    #message(STATUS "ICC Version >= 13.0")
  else()
    message(FATAL_ERROR "ICC must be 13.0 or higher found: ${ICC_VERSION}")
  endif()
  # suppress known-noisy ICC diagnostics
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -wd68 -wd981 -wd383 -wd869 -wd2196 -wd279 -wd2504 -wd2943 -wd32013")
  if("$ENV{GCC_BIN}" STREQUAL "")
    message(STATUS "Using default GCC toolchain; set environment variable GCC_BIN to override")
  else()
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -gxx-name=$ENV{GCC_BIN}/g++")
  endif()
endif()

# Clang
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
  if("$ENV{GCC_BIN}" STREQUAL "")
    message(STATUS "Using default GCC toolchain; set environment variable GCC_BIN to override")
  else()
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -gcc-toolchain $ENV{GCC_BIN}/..")
  endif()
endif()

# XL
if(CMAKE_CXX_COMPILER_ID MATCHES "XL")
  execute_process(COMMAND ${CMAKE_CXX_COMPILER} -qversion COMMAND sed 1d COMMAND sed s/Version:// OUTPUT_VARIABLE XLC_VERSION)
  if(XLC_VERSION VERSION_GREATER 12.0)
    #message(STATUS "XLC Version > 12.0")
  else()
    message(FATAL_ERROR "XLC must be higher than 12.0")
  endif()
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -qsuppress=1540-0724 -qsuppress=1500-029 -qmaxmem=-1 -qalias=noansi -qsmp=omp")
endif()

# check for incompatible GCC
if(CMAKE_COMPILER_IS_GNUCC)
  execute_process(COMMAND ${CMAKE_CXX_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
  if(GCC_VERSION VERSION_GREATER 4.6 OR GCC_VERSION VERSION_EQUAL 4.6)
    #message(STATUS "GCC Version >= 4.6")
  else()
    message(FATAL_ERROR "GCC must be 4.6 or higher")
  endif()
endif()

# solaris
if(CMAKE_SYSTEM MATCHES "SunOS.*")
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -m64 -mcpu=niagara2 -lposix4")
  set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -m64 -lposix4")
endif()

# Always include debug symbols
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g")

# Enable architecture-specific optimizations
# NOTE(review): -march=native ties Release binaries to the build machine's CPU
if(CMAKE_BUILD_TYPE MATCHES "Release" AND CMAKE_CXX_COMPILER_ID MATCHES "GNU")
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native")
endif()

# More warnings
if(CMAKE_BUILD_TYPE MATCHES "Debug")
  if(NOT CMAKE_CXX_COMPILER_ID MATCHES "XL")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall")
    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall")
  endif()
  if(CMAKE_COMPILER_IS_GNUCC)
    if(GCC_VERSION VERSION_GREATER 4.8 OR GCC_VERSION VERSION_EQUAL 4.8)
      set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-local-typedefs")
    endif()
  endif()
endif()

# GNU profiling
if(USE_GPROF)
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pg")
  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pg")
  set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pg")
endif(USE_GPROF)

###### Configure features ######

# Experimental features
if(USE_EXP)
  set(USE_VTUNE ON)
  add_definitions(-DGALOIS_USE_EXP)
  include_directories("exp/include")
  find_package(OpenMP)
  if (OPENMP_FOUND)
    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
    set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
  endif ()
  if(USE_PROF)
    add_definitions(-DGALOIS_USE_PROF)
  endif()
  if(USE_SUBVERSION_REVISION)
    include(GetSVNVersion)
    set(GALOIS_USE_SVNVERSION on)
  endif()
  if(USE_HTM)
    # hardware transactional memory is only wired up for the XL toolchain
    if(CMAKE_CXX_COMPILER_ID MATCHES "XL")
      set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -qtm -qsmp=speculative")
      set(GALOIS_USE_HTM on)
      set(GALOIS_USE_SEQ_ONLY on)
      set(GALOIS_USE_LONGJMP on)
    else()
      message(FATAL_ERROR "Hardware transactional memory not supported")
    endif()
  endif()
  # Experimental Deterministic features
  if(USE_DET_INORDER)
    add_definitions(-DGALOIS_USE_DET_INORDER)
  endif()
  if(USE_DET_FIXED_WINDOW)
    add_definitions(-DGALOIS_USE_DET_FIXED_WINDOW)
  endif()
endif()

# PThreads
find_package(Threads REQUIRED)

# NUMA (linux)
find_package(NUMA)
if(NUMA_FOUND)
  set(GALOIS_USE_NUMA on)
elseif(USE_STRICT_CONFIG)
  message(FATAL_ERROR "Need libnuma")
endif()

# CILK
include(CheckCilk)

# HugePages
include(CheckHugePages)
if(NOT HAVE_HUGEPAGES AND USE_STRICT_CONFIG)
  message(FATAL_ERROR "Need huge pages")
endif()

# Longjmp
if(USE_LONGJMP)
  set(GALOIS_USE_LONGJMP on)
endif()

# Boost
set(Boost_ADDITIONAL_VERSIONS "1.40" "1.40.0" "1.47" "1.47.0" "1.49" "1.49.0" "1.51.0")
if(NOT "$ENV{BOOST_DIR}" STREQUAL "")
  set(BOOST_ROOT $ENV{BOOST_DIR})
endif()
find_package(Boost 1.38.0 REQUIRED)
include_directories(${Boost_INCLUDE_DIR})

# C++11 features
find_package(CXX11)
if ("${CXX11_FLAGS}" STREQUAL "")
  message( FATAL_ERROR "Needs C++11")
endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CXX11_FLAGS}")

include(CheckEndian)
include(llvm-extras) #always import c99 stdint functions into c++
#include(UseStdMacro) # HandleLLVMOptions.cmake (via llvm-extras) already does this for us
#include_directories("${PROJECT_BINARY_DIR}/include") # llvm-extra already does this for us

###### Build Hacks ######

# XXX(ddn): Hack for lonestar machines
if(NUMA_FOUND)
  if(NUMA_OLD)
    set(GALOIS_USE_NUMA_OLD on)
  endif()
endif()

###### Global Functions ######
include(ParseArguments)

# compileApp(name [sources...]): add an executable from the given sources,
# or from every *.cpp in the current directory when none are listed.
function(compileApp name)
  if(ARGN)
    set(Sources ${ARGN})
  else()
    file(GLOB Sources *.cpp)
  endif()
  add_executable(${name} ${Sources})
endfunction (compileApp)

# app(name [REQUIRES vars...] [EXTLIBS libs...]): build an app linked against
# galois, skipping it (with a message) when a REQUIRES variable is not TRUE.
function(app name)
  PARSE_ARGUMENTS(APP "REQUIRES;EXTLIBS" "" ${ARGN})
  foreach(required ${APP_REQUIRES})
    if(${${required}} MATCHES "TRUE")
    else()
      message("-- NOT compiling ${name} (missing: ${required})")
      return()
    endif()
  endforeach()
  compileApp(${name} ${APP_DEFAULT_ARGS})
  target_link_libraries(${name} ${APP_EXTLIBS})
  target_link_libraries(${name} galois)
  if(INSTALL_APPS)
    install(TARGETS ${name} DESTINATION bin)
  endif()
endfunction(app)

###### Source finding ######
include_directories(include)

add_subdirectory(lib)
add_subdirectory(src)
add_subdirectory(tools)
add_subdirectory(scripts)
if(NOT SKIP_COMPILE_APPS)
  add_subdirectory(apps)
  add_subdirectory(inputs)
  add_subdirectory(test)
endif()
if(USE_EXP)
  add_subdirectory(exp)
endif()

###### Documentation ######
set(DOXYFILE_SOURCE_DIR "src\" \"include")
include(UseDoxygen OPTIONAL)

###### Distribution ######
include(InstallRequiredSystemLibraries)
set(CPACK_GENERATOR "TGZ")
set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/COPYRIGHT")
set(CPACK_RESOURCE_FILE_README "${CMAKE_CURRENT_SOURCE_DIR}/README")
set(CPACK_PACKAGE_VERSION_MAJOR ${GALOIS_VERSION_MAJOR})
set(CPACK_PACKAGE_VERSION_MINOR ${GALOIS_VERSION_MINOR})
set(CPACK_PACKAGE_VERSION_PATCH ${GALOIS_VERSION_PATCH})
include(CPack)

###### Installation ######
export(TARGETS galois APPEND FILE "${PROJECT_BINARY_DIR}/GaloisTargets.cmake")
export(PACKAGE Galois)

# Galois include files
file(RELATIVE_PATH rel_include_dir "${INSTALL_CMAKE_DIR}" "${INSTALL_INCLUDE_DIR}")
set(GALOIS_INCLUDE_DIR "${PROJECT_SOURCE_DIR}/include" "${PROJECT_BINARY_DIR}/include")
if(USE_EXP)
  set(GALOIS_INCLUDE_DIR "${GALOIS_INCLUDE_DIR}" "${PROJECT_SOURCE_DIR}/exp/include")
endif()

# Galois include dependencies
# (collect include dirs that are outside the source/build trees)
set(GALOIS_INCLUDE_DIRS)
get_target_property(defs galois INCLUDE_DIRECTORIES)
foreach(d ${defs})
  string(FIND ${d} ${PROJECT_BINARY_DIR} pos1)
  string(FIND ${d} ${PROJECT_SOURCE_DIR} pos2)
  if(NOT ${pos1} EQUAL 0 AND NOT ${pos2} EQUAL 0)
    set(GALOIS_INCLUDE_DIRS ${GALOIS_INCLUDE_DIRS} ${d})
  endif()
endforeach()

# Galois compiler definitions
set(galois_defs)
get_directory_property(defs DIRECTORY "${PROJECT_SOURCE_DIR}/src" COMPILE_DEFINITIONS)
foreach(d ${defs})
  set(galois_defs "-D${d} ${galois_defs}")
endforeach()
get_directory_property(defs DIRECTORY "${PROJECT_SOURCE_DIR}/src" COMPILE_DEFINITIONS_${CMAKE_BUILD_TYPE})
foreach(d ${defs})
  set(galois_defs "-D${d} ${galois_defs}")
endforeach()
string(TOUPPER ${CMAKE_BUILD_TYPE} upper_build_type)
set(GALOIS_FLAGS "${galois_defs} ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${upper_build_type}}")
set(GALOIS_CXX_COMPILER "${CMAKE_CXX_COMPILER}")

# Generate appropriate CMake files for installation and build trees
configure_file("${PROJECT_SOURCE_DIR}/cmake/Modules/GaloisConfig.cmake.in"
  "${PROJECT_BINARY_DIR}/GaloisConfig.cmake" @ONLY)
set(GALOIS_INCLUDE_DIR "\${GALOIS_CMAKE_DIR}/${rel_include_dir}")
configure_file("${PROJECT_SOURCE_DIR}/cmake/Modules/GaloisConfig.cmake.in"
  "${PROJECT_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/GaloisConfig.cmake" @ONLY)
configure_file("${PROJECT_SOURCE_DIR}/cmake/Modules/GaloisConfigVersion.cmake.in"
  "${PROJECT_BINARY_DIR}/GaloisConfigVersion.cmake" @ONLY)
install(FILES
  "${PROJECT_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/GaloisConfig.cmake"
  "${PROJECT_BINARY_DIR}/GaloisConfigVersion.cmake"
  DESTINATION "${INSTALL_CMAKE_DIR}" COMPONENT dev)
install(EXPORT GaloisTargets
  DESTINATION "${INSTALL_CMAKE_DIR}" COMPONENT dev)
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/galois/Doxyfile.in
# Doxyfile 1.5.8 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project # # All text after a hash (#) is considered a comment and will be ignored # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" ") #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded # by quotes) that should identify the project. PROJECT_NAME = "@PROJECT_NAME@" # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = "@PROJECT_VERSION@" # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = "@DOXYFILE_OUTPUT_DIR@" # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. 
# Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Farsi, Finnish, French, German, Greek, # Hungarian, Italian, Japanese, Japanese-en (Japanese with English messages), # Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, Polish, # Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak, Slovene, # Spanish, Swedish, and Ukrainian. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. 
# If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = NO # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. 
This can be useful if your file system # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = YES # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that act # as commands in the documentation. An alias has the form "name=value". 
# For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = NO # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it parses. # With this tag you can assign which parser to use for a given extension. # Doxygen has a built-in mapping, but you can override or extend it using this tag. # The format is ext=language, where ext is a file extension, and language is one of # the parsers supported by doxygen: IDL, Java, Javascript, C#, C, C++, D, PHP, # Objective-C, Python, Fortran, VHDL, C, C++. For instance to make doxygen treat # .inc files as Fortran files (default is PHP), and .f files as C (default is Fortran), # use: inc=Fortran f=C EXTENSION_MAPPING = # If you use STL classes (i.e. std::string, std::vector, etc.) 
but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = YES # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate getter # and setter methods for a property. Setting this option to YES (the default) # will make doxygen to replace the get and set methods by a property in the # documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. 
SUBGROUPING = YES # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # The SYMBOL_CACHE_SIZE determines the size of the internal cache used to # determine which symbols to keep in memory and which to flush to disk. # When the cache is full, less often used symbols will be written to disk. # For small to medium size projects (<1000 input files) the default value is # probably good enough. For larger projects a too small cache size can cause # doxygen to be busy swapping symbols to and from disk most of the time # causing a significant performance penalty. # If the system has enough physical memory increasing the cache will improve the # performance by keeping more symbols in memory. Note that the value works on # a logarithmic scale so increasing the size by one will roughly double the # memory usage. The cache size is given by this formula: # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols SYMBOL_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. 
# Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = YES # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = YES # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = YES # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespace are hidden. EXTRACT_ANON_NSPACES = YES # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. 
HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. 
SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or define consists of for it to appear in # the documentation. 
If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and defines in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # If the sources in your project are distributed over multiple directories # then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy # in the documentation. The default is NO. SHOW_DIRECTORIES = NO # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. # This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command <command> <input-file>, where <command> is the value of # the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by # doxygen. 
The layout file controls the global structure of the generated output files # in an output format independent way. To create the layout file that represents # doxygen's defaults, run doxygen with the -l option. You can optionally specify a # file name after the option, if omitted DoxygenLayout.xml will be used as the name # of the layout file. LAYOUT_FILE = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = YES # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be enabled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. 
The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = "@DOXYFILE_SOURCE_DIR@" # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx # *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 FILE_PATTERNS = # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. 
RECURSIVE = YES # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. EXCLUDE = "_darcs" # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix filesystem feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = "*/.*" "*/.*/*" # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). #EXAMPLE_PATH = "@CMAKE_CURRENT_SOURCE_DIR@/examples" EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. 
EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = "@CMAKE_CURRENT_SOURCE_DIR@" # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command <filter> <input-file>, where <filter> # is the value of the INPUT_FILTER tag, and <input-file> is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. # If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. # Doxygen will compare the file name with each pattern and apply the # filter if there is a match. # The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER # is applied to all files. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. 
INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. # Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. 
ALPHABETICAL_INDEX = NO # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = "@DOXYFILE_HTML_DIR@" # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. 
Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. HTML_ALIGN_MEMBERS = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. For this to work a browser that supports # JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox # Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). HTML_DYNAMIC_SECTIONS = NO # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information. GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen # will append .docset to the name. 
DOCSET_BUNDLE_ID = org.doxygen.Project # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER # are set, an additional index file will be generated that can be used as input for # Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated # HTML documentation. 
GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#namespace QHP_NAMESPACE = # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#virtual-folders QHP_VIRTUAL_FOLDER = doc # If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to add. # For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters QHP_CUST_FILTER_NAME = # The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the custom filter to add. For more information please see # <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">Qt Help Project / Custom Filters</a>. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this project's # filter section matches. # <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">Qt Help Project / Filter Attributes</a>. QHP_SECT_FILTER_ATTRS = # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file. QHG_LOCATION = # The DISABLE_INDEX tag can be used to turn on/off the condensed index at # top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. DISABLE_INDEX = NO # This tag can be used to set the number of enum values (range [1..20]) # that doxygen will group on one line in the generated HTML documentation. 
ENUM_VALUES_PER_LINE = 4 # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. # If the tag value is set to FRAME, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, # Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are # probably better off using the HTML help feature. Other possible values # for this tag are: HIERARCHIES, which will generate the Groups, Directories, # and Class Hierarchy pages using a tree view instead of an ordered list; # ALL, which combines the behavior of FRAME and HIERARCHIES; and NONE, which # disables this behavior completely. For backwards compatibility with previous # releases of Doxygen, the values YES and NO are equivalent to FRAME and NONE # respectively. GENERATE_TREEVIEW = NONE # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = @DOXYFILE_LATEX@ # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. 
# If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = "@DOXYFILE_LATEX_DIR@" # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. LATEX_CMD_NAME = "@LATEX_COMPILER@" # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = "@MAKEINDEX_COMPILER@" # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, a4wide, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = amsmath # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. 
USE_PDFLATEX = @DOXYFILE_PDFLATEX@ # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = YES # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. 
RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. 
XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. # This is useful # if you want to understand what is going on. # On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. 
PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # in the INCLUDE_PATH (see below) will be search if a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). 
The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone # on a line, have an all uppercase name, and do not end with a semicolon. Such # function macros are typically used for boiler-plate code, and will confuse # the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. # Optionally an initial location of the external documentation # can be added for each tagfile. The format of a tag file without # this location is as follows: # # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths or # URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. # Note that each tag file must have a unique name # (where the name does NOT include the path) # If a tag file is not located in the directory in which doxygen # is run, you must also specify the path to the tagfile here. 
TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option is superseded by the HAVE_DOT option below. This is only a # fallback. It is recommended to install and use dot, since it yields more # powerful graphs. CLASS_DIAGRAMS = YES # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. 
This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = @DOXYFILE_DOT@ # By default doxygen will write a font called FreeSans.ttf to the output # directory and reference it in all dot files that doxygen generates. This # font does not include all possible unicode characters however, so when you need # these (or just want a differently looking font) you can specify the font name # using DOT_FONTNAME. You need need to make sure dot is able to find the font, # which can be done by putting it in a standard location or by setting the # DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory # containing the font. DOT_FONTNAME = FreeSans # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the output directory to look for the # FreeSans.ttf font (which doxygen will put there itself). If you specify a # different font using DOT_FONTNAME you can set the path where dot # can find it using this tag. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # the CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. 
COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = NO # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will show a graphical hierarchy of all classes instead of a textual one. 
GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are png, jpg, or gif # If left blank png will be used. DOT_IMAGE_FORMAT = png # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = "@DOXYGEN_DOT_PATH@" # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. Note that doxygen if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. 
MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = YES # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES #--------------------------------------------------------------------------- # Options related to the search engine #--------------------------------------------------------------------------- # The SEARCHENGINE tag specifies whether or not a search engine should be # used. If set to NO the values of all tags below this one will be ignored. SEARCHENGINE = NO
0
rapidsai_public_repos/code-share/maxflow
rapidsai_public_repos/code-share/maxflow/galois/COPYRIGHT
Galois, a framework to exploit amorphous data-parallelism in irregular programs. Copyright (C) 2014, The University of Texas at Austin. All rights reserved. UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances shall University be liable for incidental, special, indirect, direct or consequential damages or loss of profits, interruption of business, or related expenses which may arise from use of Software or Documentation, including but not limited to those resulting from defects in Software and/or Documentation, or loss or inaccuracy of data of any kind. This software is released under the terms of the University of Texas at Austin Research License available at http://www.otc.utexas.edu/Forms/ResearchLicense_SourceCode_11142005.doc , which makes this software available without charge to anyone for academic, research, experimental, or personal use. For all other uses, please contact the University of Texas at Austin's Office of Technology Commercialization http://www.otc.utexas.edu/
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/LigraExecutor.h
#ifndef GALOIS_LIGRAEXECUTOR_H #define GALOIS_LIGRAEXECUTOR_H #include "Galois/Galois.h" namespace Galois { //! Implementation of Ligra DSL in Galois namespace Ligra { namespace hidden { template<typename Graph,bool Forward> struct Transposer { typedef typename Graph::GraphNode GNode; typedef typename Graph::in_edge_iterator in_edge_iterator; typedef typename Graph::edge_iterator edge_iterator; typedef typename Graph::edge_data_reference edge_data_reference; GNode getInEdgeDst(Graph& g, in_edge_iterator ii) { return g.getInEdgeDst(ii); } in_edge_iterator in_edge_begin(Graph& g, GNode n) { return g.in_edge_begin(n, Galois::MethodFlag::NONE); } in_edge_iterator in_edge_end(Graph& g, GNode n) { return g.in_edge_end(n, Galois::MethodFlag::NONE); } edge_data_reference getInEdgeData(Graph& g, in_edge_iterator ii) { return g.getInEdgeData(ii); } GNode getEdgeDst(Graph& g, edge_iterator ii) { return g.getEdgeDst(ii); } edge_iterator edge_begin(Graph& g, GNode n) { return g.edge_begin(n, Galois::MethodFlag::NONE); } edge_iterator edge_end(Graph& g, GNode n) { return g.edge_end(n, Galois::MethodFlag::NONE); } edge_data_reference getEdgeData(Graph& g, edge_iterator ii) { return g.getEdgeData(ii); } }; template<typename Graph> struct Transposer<Graph,false> { typedef typename Graph::GraphNode GNode; typedef typename Graph::edge_iterator in_edge_iterator; typedef typename Graph::in_edge_iterator edge_iterator; typedef typename Graph::edge_data_reference edge_data_reference; GNode getInEdgeDst(Graph& g, in_edge_iterator ii) { return g.getEdgeDst(ii); } in_edge_iterator in_edge_begin(Graph& g, GNode n) { return g.edge_begin(n, Galois::MethodFlag::NONE); } in_edge_iterator in_edge_end(Graph& g, GNode n) { return g.edge_end(n, Galois::MethodFlag::NONE); } edge_data_reference getInEdgeData(Graph& g, in_edge_iterator ii) { return g.getEdgeData(ii); } GNode getEdgeDst(Graph& g, edge_iterator ii) { return g.getInEdgeDst(ii); } edge_iterator edge_begin(Graph& g, GNode n) { return 
g.in_edge_begin(n, Galois::MethodFlag::NONE); } edge_iterator edge_end(Graph& g, GNode n) { return g.in_edge_end(n, Galois::MethodFlag::NONE); } edge_data_reference getEdgeData(Graph& g, edge_iterator ii) { return g.getInEdgeData(ii); } }; template<typename Graph,typename Bag,typename EdgeOperator,bool Forward> struct DenseOperator: public Transposer<Graph,Forward> { typedef Transposer<Graph,Forward> Super; typedef typename Super::GNode GNode; typedef typename Super::in_edge_iterator in_edge_iterator; typedef typename Super::edge_iterator edge_iterator; typedef int tt_does_not_need_aborts; typedef int tt_does_not_need_push; Graph& graph; Bag& input; Bag& output; EdgeOperator op; DenseOperator(Graph& g, Bag& i, Bag& o, EdgeOperator op): graph(g), input(i), output(o), op(op) { } void operator()(GNode n, Galois::UserContext<GNode>&) { (*this)(n); } void operator()(GNode n) { if (!op.cond(graph, n)) return; for (in_edge_iterator ii = this->in_edge_begin(graph, n), ei = this->in_edge_end(graph, n); ii != ei; ++ii) { GNode src = this->getInEdgeDst(graph, ii); if (input.contains(graph.idFromNode(src)) && op(graph, src, n, this->getInEdgeData(graph, ii))) { output.push(graph.idFromNode(n), std::distance(this->edge_begin(graph, n), this->edge_end(graph, n))); } if (!op.cond(graph, n)) return; } } }; template<typename Graph,typename Bag,typename EdgeOperator,bool Forward,bool IgnoreInput> struct DenseForwardOperator: public Transposer<Graph,Forward> { typedef Transposer<Graph,Forward> Super; typedef typename Super::GNode GNode; typedef typename Super::in_edge_iterator in_edge_iterator; typedef typename Super::edge_iterator edge_iterator; typedef int tt_does_not_need_aborts; typedef int tt_does_not_need_push; Graph& graph; Bag& input; Bag& output; EdgeOperator op; DenseForwardOperator(Graph& g, Bag& i, Bag& o, EdgeOperator op): graph(g), input(i), output(o), op(op) { } void operator()(GNode n, Galois::UserContext<GNode>&) { (*this)(n); } void operator()(GNode n) { if 
(!IgnoreInput && !input.contains(graph.idFromNode(n))) return; for (edge_iterator ii = this->edge_begin(graph, n), ei = this->edge_end(graph, n); ii != ei; ++ii) { GNode dst = this->getEdgeDst(graph, ii); if (op.cond(graph, n) && op(graph, n, dst, this->getEdgeData(graph, ii))) { output.pushDense(graph.idFromNode(dst), std::distance(this->edge_begin(graph, dst), this->edge_end(graph, dst))); } } } }; template<typename Graph,typename Bag,typename EdgeOperator,bool Forward> struct SparseOperator: public Transposer<Graph,Forward> { typedef Transposer<Graph,Forward> Super; typedef typename Super::GNode GNode; typedef typename Super::in_edge_iterator in_edge_iterator; typedef typename Super::edge_iterator edge_iterator; typedef int tt_does_not_need_aborts; typedef int tt_does_not_need_push; Graph& graph; Bag& output; EdgeOperator op; GNode source; SparseOperator(Graph& g, Bag& o, EdgeOperator op, GNode s = GNode()): graph(g), output(o), op(op), source(s) { } void operator()(size_t n, Galois::UserContext<size_t>&) { (*this)(n); } void operator()(size_t id) { GNode n = graph.nodeFromId(id); for (edge_iterator ii = this->edge_begin(graph, n), ei = this->edge_end(graph, n); ii != ei; ++ii) { GNode dst = this->getEdgeDst(graph, ii); if (op.cond(graph, dst) && op(graph, n, dst, this->getEdgeData(graph, ii))) { output.push(graph.idFromNode(dst), std::distance(this->edge_begin(graph, dst), this->edge_end(graph, dst))); } } } void operator()(edge_iterator ii, Galois::UserContext<edge_iterator>&) { (*this)(ii); } void operator()(edge_iterator ii) { GNode dst = this->getEdgeDst(graph, ii); if (op.cond(graph, dst) && op(graph, source, dst, this->getEdgeData(graph, ii))) { output.push(graph.idFromNode(dst), std::distance(this->edge_begin(graph, dst), this->edge_end(graph, dst))); } } }; } // end namespace template<bool Forward,typename Graph,typename EdgeOperator,typename Bag> void edgeMap(Graph& graph, EdgeOperator op, Bag& output) { output.densify(); Galois::for_each_local(graph, 
hidden::DenseForwardOperator<Graph,Bag,EdgeOperator,Forward,true>(graph, output, output, op)); } template<bool Forward,typename Graph,typename EdgeOperator,typename Bag> void edgeMap(Graph& graph, EdgeOperator op, typename Graph::GraphNode single, Bag& output) { if (Forward) { Galois::for_each(graph.out_edges(single, Galois::MethodFlag::NONE).begin(), graph.out_edges(single, Galois::MethodFlag::NONE).end(), hidden::SparseOperator<Graph,Bag,EdgeOperator,true>(graph, output, op, single)); } else { Galois::for_each(graph.in_edges(single, Galois::MethodFlag::NONE).begin(), graph.in_edges(single, Galois::MethodFlag::NONE).end(), hidden::SparseOperator<Graph,Bag,EdgeOperator,false>(graph, output, op, single)); } } template<bool Forward,typename Graph,typename EdgeOperator,typename Bag> void edgeMap(Graph& graph, EdgeOperator op, Bag& input, Bag& output, bool denseForward) { using namespace Galois::WorkList; size_t count = input.getCount(); if (!denseForward && count > graph.sizeEdges() / 20) { //std::cout << "(D) Count " << count << "\n"; // XXX input.densify(); if (denseForward) { abort(); // Never executed output.densify(); typedef dChunkedFIFO<256*4> WL; Galois::for_each_local(graph, hidden::DenseForwardOperator<Graph,Bag,EdgeOperator,Forward,false>(graph, input, output, op), Galois::wl<WL>()); } else { typedef dChunkedFIFO<256> WL; Galois::for_each_local(graph, hidden::DenseOperator<Graph,Bag,EdgeOperator,Forward>(graph, input, output, op), Galois::wl<WL>()); } } else { //std::cout << "(S) Count " << count << "\n"; // XXX typedef dChunkedFIFO<64> WL; Galois::for_each_local(input, hidden::SparseOperator<Graph,Bag,EdgeOperator,Forward>(graph, output, op), Galois::wl<WL>()); } } template<typename... Args> void outEdgeMap(Args&&... args) { edgeMap<true>(std::forward<Args>(args)...); } template<typename... Args> void inEdgeMap(Args&&... args) { edgeMap<false>(std::forward<Args>(args)...); } } // end namespace } // end namespace #endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/TwoLevelIterator.h
/** Two Level Iterator for Per-thread workList-*- C++ -*- * @file * @section License * * Galois, a framework to exploit amorphous data-parallelism in irregular * programs. * * Copyright (C) 2013, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. * * @section Description * * Two Level Iterator for per-thread workList. * * Assumptions * <ul> * <li>Outer and Inner iterators are default- and copy-constructible</li> * <li>Inner and Outer must be at least forward_iterator_tag</li> * <li>InnerBegFn and InnerEndFn take an argument of type *Outer and return an Inner * pointing to begin or end of inner range.</li> * <li>InnerBegFn and InnerEndFn must inherit from std::unary_function so that * argument_type and result_type are available.</li> * </ul> * * Important pitfalls to handle * <ol> * <li>If Outer and Inner have different categories, which category to choose?. The * category of Inner can be chosen after (expensively) supporting moving backwards for * outer iterators of forward category. Note: Lowest category currently supported * is forward iterators.</li> * * <li>Prevent Outer from falling outside the [begin,end) range, because calling * container functions e.g. 
outer->begin () and outer->end () is not valid and may * cause a segfault.</li> * * <li>The initial position of Outer and Inner iterators must be such that calling * operator * or operator -> on at two level iterator yields a valid result (if * possible). This means advancing the inner iterator to begin of first non-empty * container (else to the end of outer). If the outer iterator is initialized to * end of the outer range i.e. [end, end), then inner iterator cannot be * initialized.</li> * * <li>When incrementing (++), the inner iterator should initially be at a valid * begin position, but after incrementing may end up at end of an Inner range. * So the next valid local begin must be found, else the end of * outer should be reached</li> * * <ol> * <li> When jumping forward, outer should not go beyond end. After jump is * completed, inner may be at local end, so a valid next begin must be found * or else end of outer must be reached</li> * </ol> * * <li>When decrementing (--), the inner iterator may initially be uninitialized * due to outer being at end (See 3 above). 
* Inner iterator must be brought to a valid location after decrementing, or, else * the begin of outer must be reached (and not exceeded).</li> * * <ol> * <li>When jumping backward, inner iterator may be uninitialized due to * outer being at end.</li> * </ol> * * <li>When jumping forward or backward, check for jump amount being negative.</li> * <ol> * <li>Jumping outside the range of outer cannot be supported.</li> * </ol> * * </ol> * * @author <[email protected]> */ #ifndef GALOIS_TWO_LEVEL_ITER_H #define GALOIS_TWO_LEVEL_ITER_H #include "Galois/config.h" #include <iterator> #include GALOIS_CXX11_STD_HEADER(functional) #include GALOIS_CXX11_STD_HEADER(type_traits) #include <cstdlib> #include <cassert> namespace Galois { namespace TwoLevelIteratorImpl { template <typename Iter> void safe_decrement (Iter& it, const Iter& beg, const Iter& end , std::forward_iterator_tag) { Iter next = beg; Iter curr (next); while (next != it) { curr = next; assert (next != end); ++next; } assert (next == it); assert (curr != it); it = curr; } template <typename Iter> void safe_decrement (Iter& it, const Iter& beg, const Iter& end , std::bidirectional_iterator_tag) { assert (it != beg); --it; } template <typename Iter> void safe_decrement (Iter& it, const Iter& beg, const Iter& end) { safe_decrement (it, beg, end , typename std::iterator_traits<Iter>::iterator_category ()); } } //! 
Common functionality of TwoLevelIterators template <typename Outer, typename Inner, typename InnerBegFn, typename InnerEndFn> class TwoLevelIterBase { protected: // TODO: make begin and end const Outer m_beg_outer; Outer m_end_outer; Outer m_outer; Inner m_inner; InnerBegFn innerBegFn; InnerEndFn innerEndFn; inline bool outerAtBegin () const { return m_outer == m_beg_outer; } inline bool outerAtEnd () const { return m_outer == m_end_outer; } inline bool outerEmpty () const { return m_beg_outer == m_end_outer; } inline Inner innerBegin () { assert (!outerEmpty ()); assert (!outerAtEnd ()); return innerBegFn (*m_outer); } inline Inner innerEnd () { assert (!outerEmpty ()); assert (!outerAtEnd ()); return innerEndFn (*m_outer); } inline bool innerAtBegin () const { return m_inner == const_cast<TwoLevelIterBase*> (this)->innerBegin (); } inline bool innerAtEnd () const { return m_inner == const_cast<TwoLevelIterBase*> (this)->innerEnd (); } TwoLevelIterBase (): m_beg_outer (), m_end_outer (), m_outer (), m_inner (), innerBegFn (), innerEndFn () {} TwoLevelIterBase ( Outer beg_outer, Outer end_outer, InnerBegFn innerBegFn, InnerEndFn innerEndFn) : m_beg_outer (beg_outer), m_end_outer (end_outer), m_outer (m_beg_outer), m_inner (), innerBegFn (innerBegFn), innerEndFn (innerEndFn) {} }; //! 
Two-Level forward iterator template <typename Outer, typename Inner, typename InnerBegFn, typename InnerEndFn> class TwoLevelFwdIter: public std::iterator_traits<Inner>, public TwoLevelIterBase<Outer, Inner, InnerBegFn, InnerEndFn> { protected: typedef std::iterator_traits<Inner> Traits; typedef TwoLevelIterBase<Outer, Inner, InnerBegFn, InnerEndFn> Base; void nextOuter () { assert (!Base::outerAtEnd ()); assert (!Base::outerEmpty ()); ++Base::m_outer; if (!Base::outerAtEnd ()) { Base::m_inner = Base::innerBegin (); } } void seekValidBegin () { while (!Base::outerAtEnd () && Base::innerAtEnd ()) { nextOuter (); } } void step_forward () { assert (!Base::innerAtEnd ()); ++Base::m_inner; if (Base::innerAtEnd ()) { seekValidBegin (); } } bool is_equal (const TwoLevelFwdIter& that) const { // the outer iterators of 'this' and 'that' have been initialized // with either (beg,end), or, (end, end) // - for two level begin, outer is initialized to (beg,end) // - for two level end, outer is initialized to (end, end) assert (this->m_end_outer == that.m_end_outer); return (this->m_outer == that.m_outer) && (Base::outerAtEnd () || (this->m_inner == that.m_inner)); } public: TwoLevelFwdIter (): Base () {} TwoLevelFwdIter ( Outer beg_outer, Outer end_outer, InnerBegFn innerBegFn, InnerEndFn innerEndFn) : Base (beg_outer, end_outer, innerBegFn, innerEndFn) { if (!Base::outerAtEnd ()) { Base::m_inner = Base::innerBegin (); seekValidBegin (); } } typename Traits::reference operator * () const { return *Base::m_inner; } typename Traits::pointer operator -> () const { return Base::m_inner->operator -> (); } TwoLevelFwdIter& operator ++ () { step_forward (); return *this; } TwoLevelFwdIter operator ++ (int) { TwoLevelFwdIter tmp (*this); step_forward (); return tmp; } friend bool operator == (const TwoLevelFwdIter& left, const TwoLevelFwdIter& right) { return left.is_equal (right); } friend bool operator != (const TwoLevelFwdIter& left, const TwoLevelFwdIter& right) { return 
!left.is_equal (right); } }; //! Two-Level bidirectional iterator template <typename Outer, typename Inner, typename InnerBegFn, typename InnerEndFn> class TwoLevelBiDirIter: public TwoLevelFwdIter<Outer, Inner, InnerBegFn, InnerEndFn> { protected: typedef TwoLevelFwdIter<Outer, Inner, InnerBegFn, InnerEndFn> FwdBase; protected: void prevOuter () { assert (!FwdBase::outerAtBegin ()); assert (!FwdBase::outerEmpty ()); TwoLevelIteratorImpl::safe_decrement (FwdBase::m_outer, FwdBase::m_beg_outer, FwdBase::m_end_outer); FwdBase::m_inner = FwdBase::innerEnd (); } void step_backward () { assert (!FwdBase::outerEmpty ()); assert (!FwdBase::outerAtBegin ()); // calling innerBegin when m_outer == m_end_outer is invalid // so call prevOuter first, and check for innerBegin afterwards if (FwdBase::outerAtEnd ()) { prevOuter (); } while (FwdBase::innerAtBegin ()) { assert (!FwdBase::outerAtBegin ()); prevOuter (); } assert (FwdBase::innerAtBegin () ? FwdBase::outerAtBegin () : true); --FwdBase::m_inner; } public: TwoLevelBiDirIter (): FwdBase () {} TwoLevelBiDirIter ( Outer beg_outer, Outer end_outer, InnerBegFn innerBegFn, InnerEndFn innerEndFn) : FwdBase (beg_outer, end_outer, innerBegFn, innerEndFn) {} TwoLevelBiDirIter& operator -- () { step_backward (); return *this; } TwoLevelBiDirIter operator -- (int) { TwoLevelBiDirIter tmp (*this); step_backward (); return tmp; } }; //! 
Two-Level random access iterator template <typename Outer, typename Inner, typename InnerBegFn, typename InnerEndFn> class TwoLevelRandIter: public TwoLevelBiDirIter<Outer, Inner, InnerBegFn, InnerEndFn> { protected: typedef TwoLevelBiDirIter<Outer, Inner, InnerBegFn, InnerEndFn> BiDirBase; typedef typename BiDirBase::Traits::difference_type Diff_ty; void jump_forward (const Diff_ty d) { assert (!BiDirBase::outerEmpty ()); if (d < 0) { jump_backward (-d); } else { Diff_ty rem (d); while (rem > 0) { assert (!BiDirBase::outerAtEnd ()); Diff_ty avail = std::distance (BiDirBase::m_inner, BiDirBase::innerEnd ()); assert (avail >= 0); if (rem > avail) { rem -= avail; assert (!BiDirBase::outerAtEnd ()); BiDirBase::nextOuter (); } else { BiDirBase::m_inner += rem; rem = 0; } BiDirBase::seekValidBegin (); } } } void jump_backward (const Diff_ty d) { assert (!BiDirBase::outerEmpty ()); if (d < 0) { jump_forward (-d); } else { Diff_ty rem (d); if ((rem > 0) && BiDirBase::outerAtEnd ()) { BiDirBase::prevOuter (); } while (rem > 0) { Diff_ty avail = std::distance (BiDirBase::innerBegin (), BiDirBase::m_inner); assert (avail >= 0); if (rem > avail) { rem -= avail; assert (!BiDirBase::outerAtBegin ()); BiDirBase::prevOuter (); } else { BiDirBase::m_inner -= rem; rem = 0; break; } } } } Diff_ty compute_dist (const TwoLevelRandIter& that) const { if (std::distance (this->m_outer, that.m_outer) < 0) { // this->m_outer > that.m_outer return -(that.compute_dist (*this)); } else if (this->m_outer == that.m_outer) { if (!BiDirBase::outerAtEnd ()) { return std::distance (this->m_inner, that.m_inner); } else { return 0; } } else { assert (std::distance (this->m_outer, that.m_outer) > 0); // this->m_outer < that.m_outer; assert (!BiDirBase::outerAtEnd ()); TwoLevelRandIter tmp (*this); Diff_ty d = tmp.m_inner - tmp.m_inner; // 0 while (tmp.m_outer != that.m_outer) { d += std::distance (tmp.m_inner, tmp.innerEnd ()); tmp.nextOuter (); } assert (tmp.m_outer == that.m_outer); if (tmp.m_outer 
!= tmp.m_end_outer) { d += std::distance (tmp.m_inner, that.m_inner); } assert (d >= 0); return d; } } public: TwoLevelRandIter (): BiDirBase () {} TwoLevelRandIter ( Outer beg_outer, Outer end_outer, InnerBegFn innerBegFn, InnerEndFn innerEndFn) : BiDirBase (beg_outer, end_outer, innerBegFn, innerEndFn) {} TwoLevelRandIter& operator += (Diff_ty d) { jump_forward (d); return *this; } TwoLevelRandIter& operator -= (Diff_ty d) { jump_backward (d); return *this; } friend TwoLevelRandIter operator + (const TwoLevelRandIter& it, Diff_ty d) { TwoLevelRandIter tmp (it); tmp += d; return tmp; } friend TwoLevelRandIter operator + (Diff_ty d, const TwoLevelRandIter& it) { return (it + d); } friend TwoLevelRandIter operator - (const TwoLevelRandIter& it, Diff_ty d) { TwoLevelRandIter tmp (it); tmp -= d; return tmp; } friend Diff_ty operator - (const TwoLevelRandIter& left, const TwoLevelRandIter& right) { return right.compute_dist (left); } typename BiDirBase::Traits::reference operator [] (Diff_ty d) const { return *((*this) + d); } friend bool operator < (const TwoLevelRandIter& left, const TwoLevelRandIter& right) { return ((left.m_outer == right.m_outer) ? 
(left.m_inner < right.m_inner) : (left.m_outer < right.m_outer)); } friend bool operator <= (const TwoLevelRandIter& left, const TwoLevelRandIter& right) { return (left < right) || (left == right); } friend bool operator > (const TwoLevelRandIter& left, const TwoLevelRandIter& right) { return !(left <= right); } friend bool operator >= (const TwoLevelRandIter& left, const TwoLevelRandIter& right) { return !(left < right); } }; namespace TwoLevelIteratorImpl { template <typename Outer, typename Inner, typename InnerBegFn, typename InnerEndFn, typename Cat> struct ByCategory {}; template <typename Outer, typename Inner, typename InnerBegFn, typename InnerEndFn> struct ByCategory<Outer, Inner, InnerBegFn, InnerEndFn, std::forward_iterator_tag> { typedef TwoLevelFwdIter<Outer, Inner, InnerBegFn, InnerEndFn> type; }; template <typename Outer, typename Inner, typename InnerBegFn, typename InnerEndFn> struct ByCategory<Outer, Inner, InnerBegFn, InnerEndFn, std::bidirectional_iterator_tag> { typedef TwoLevelBiDirIter<Outer, Inner, InnerBegFn, InnerEndFn> type; }; template <typename Outer, typename Inner, typename InnerBegFn, typename InnerEndFn> struct ByCategory<Outer, Inner, InnerBegFn, InnerEndFn, std::random_access_iterator_tag> { typedef TwoLevelRandIter<Outer, Inner, InnerBegFn, InnerEndFn> type; }; // template <typename Outer, typename Inner> // struct IsRvrsIter { // // template <typename O, typename I> // struct IsRev { // static const bool VAL = false; // }; // // template <typename O> // struct IsRev<O, typename O::value_type::reverse_iterator> { // static const bool VAL = true; // }; // // template <typename O, typename I> // struct IsConstRev { // static const bool VAL = false; // }; // // template <typename O> // struct IsConstRev<O, typename O::value_type::const_reverse_iterator> { // static const bool VAL = true; // }; // // // static const bool VAL = // IsRev<Outer, Inner>::VAL || IsConstRev<Outer, Inner>::VAL; // }; } // end namespace impl //! 
Type function to select appropriate two-level iterator template <typename Outer, typename Inner, typename InnerBegFn, typename InnerEndFn> struct ChooseTwoLevelIterator { private: // typedef typename std::iterator_traits<Outer>::iterator_category CatOuter; typedef typename std::iterator_traits<Inner>::iterator_category CatInner; public: typedef typename TwoLevelIteratorImpl::ByCategory<Outer, Inner, InnerBegFn, InnerEndFn, CatInner>::type type; }; //! Creates two level iterator template <typename Outer, typename InnerBegFn, typename InnerEndFn> typename ChooseTwoLevelIterator<Outer, typename InnerBegFn::result_type, InnerBegFn, InnerEndFn>::type make_two_level_begin (Outer beg, Outer end, InnerBegFn innerBegFn, InnerEndFn innerEndFn) { const bool V = std::is_same<typename InnerBegFn::result_type, typename InnerEndFn::result_type>::value; assert (V); typedef typename InnerBegFn::result_type Inner; typedef typename ChooseTwoLevelIterator<Outer, Inner, InnerBegFn, InnerEndFn>::type Ret_ty; return Ret_ty (beg, end, innerBegFn, innerEndFn); } //! 
Creates two level iterator template <typename Outer, typename InnerBegFn, typename InnerEndFn> typename ChooseTwoLevelIterator<Outer, typename InnerBegFn::result_type, InnerBegFn, InnerEndFn>::type make_two_level_end (Outer beg, Outer end, InnerBegFn innerBegFn, InnerEndFn innerEndFn) { const bool V = std::is_same<typename InnerBegFn::result_type, typename InnerEndFn::result_type>::value; assert (V); typedef typename InnerBegFn::result_type Inner; typedef typename ChooseTwoLevelIterator<Outer, Inner, InnerBegFn, InnerEndFn>::type Ret_ty; return Ret_ty (end, end, innerBegFn, innerEndFn); } namespace TwoLevelIteratorImpl { template <typename C> struct GetBegin: public std::unary_function<C&, typename C::iterator> { inline typename C::iterator operator () (C& c) const { return c.begin (); } }; template <typename C> struct GetEnd: public std::unary_function<C&, typename C::iterator> { inline typename C::iterator operator () (C& c) const { return c.end (); } }; // TODO: update to c++11 names template <typename C> struct GetCbegin: public std::unary_function<const C&, typename C::const_iterator> { inline typename C::const_iterator operator () (const C& c) const { return c.begin (); } }; template <typename C> struct GetCend: public std::unary_function<const C&, typename C::const_iterator> { inline typename C::const_iterator operator () (const C& c) const { return c.end (); } }; template <typename C> struct GetRbegin: public std::unary_function<C&, typename C::reverse_iterator> { inline typename C::reverse_iterator operator () (C& c) const { return c.rbegin (); } }; template <typename C> struct GetRend: public std::unary_function<C&, typename C::reverse_iterator> { inline typename C::reverse_iterator operator () (C& c) const { return c.rend (); } }; // TODO: update to c++11 names template <typename C> struct GetCRbegin: public std::unary_function<const C&, typename C::const_reverse_iterator> { inline typename C::const_reverse_iterator operator () (const C& c) const { 
return c.rbegin (); } }; template <typename C> struct GetCRend: public std::unary_function<const C&, typename C::const_reverse_iterator> { inline typename C::const_reverse_iterator operator () (const C& c) const { return c.rend (); } }; enum StlIterKind { NORMAL, CONST, REVERSE, CONST_REVERSE }; template <typename C, typename I> struct IsConstIter { static const bool value = false; }; template <typename C> struct IsConstIter<C, typename C::const_iterator> { static const bool value = true; }; template <typename C, typename I> struct IsRvrsIter { static const bool value = false; }; template <typename C> struct IsRvrsIter<C, typename C::reverse_iterator> { static const bool value = true; }; template <typename C, typename I> struct IsRvrsConstIter { static const bool value = false; }; template <typename C> struct IsRvrsConstIter<C, typename C::const_reverse_iterator> { static const bool value = true; }; template <typename C, typename I> struct GetStlIterKind { static const bool isRvrs = IsRvrsIter<C, I>::value || IsRvrsConstIter<C, I>::value; static const bool isConst = IsConstIter<C, I>::value || IsRvrsConstIter<C, I>::value; static const StlIterKind value = isRvrs ? (isConst ? CONST_REVERSE: REVERSE) : (isConst ? 
CONST : NORMAL); }; template <typename C, typename I, enum StlIterKind> struct ChooseStlIter { typedef void Inner; }; template <typename C, typename I> struct ChooseStlIter<C, I, NORMAL> { typedef typename C::iterator Inner; typedef GetBegin<C> InnerBegFn; typedef GetEnd<C> InnerEndFn; }; template <typename C, typename I> struct ChooseStlIter<C, I, CONST> { typedef typename C::const_iterator Inner; typedef GetCbegin<C> InnerBegFn; typedef GetCend<C> InnerEndFn; }; template <typename C, typename I> struct ChooseStlIter<C, I, REVERSE> { typedef typename C::reverse_iterator Inner; typedef GetRbegin<C> InnerBegFn; typedef GetRend<C> InnerEndFn; }; template <typename C, typename I> struct ChooseStlIter<C, I, CONST_REVERSE> { typedef typename C::const_reverse_iterator Inner; typedef GetCRbegin<C> InnerBegFn; typedef GetCRend<C> InnerEndFn; }; template <typename Outer, typename Inner> struct ChooseStlTwoLevelIterImpl { typedef typename std::iterator_traits<Outer>::value_type C; static const TwoLevelIteratorImpl::StlIterKind KIND = TwoLevelIteratorImpl::GetStlIterKind<C, Inner>::value; typedef TwoLevelIteratorImpl::ChooseStlIter<C, Inner, KIND> CStl; typedef typename CStl::InnerBegFn InnerBegFn; typedef typename CStl::InnerEndFn InnerEndFn; typedef typename ChooseTwoLevelIterator<Outer, Inner, InnerBegFn, InnerEndFn>::type type; static type make (Outer beg, Outer end) { return type (beg, end, InnerBegFn (), InnerEndFn ()); } }; template <typename Outer> struct StlInnerIsIterator : public ChooseStlTwoLevelIterImpl<Outer, typename std::iterator_traits<Outer>::value_type::iterator> {}; template <typename Outer> struct StlInnerIsConstIterator : public ChooseStlTwoLevelIterImpl<Outer, typename std::iterator_traits<Outer>::value_type::const_iterator> {}; template <typename Outer> struct StlInnerIsRvrsIterator : public ChooseStlTwoLevelIterImpl<Outer, typename std::iterator_traits<Outer>::value_type::reverse_iterator> {}; template <typename Outer> struct 
StlInnerIsConstRvrsIterator : public ChooseStlTwoLevelIterImpl<Outer, typename std::iterator_traits<Outer>::value_type::const_reverse_iterator> {}; } // end namespace impl //! Type function to select appropriate two-level iterator template <typename Outer, typename Inner> struct ChooseStlTwoLevelIterator { typedef typename TwoLevelIteratorImpl::ChooseStlTwoLevelIterImpl<Outer, Inner>::type type; }; template <typename Outer> typename TwoLevelIteratorImpl::StlInnerIsIterator<Outer>::type stl_two_level_begin (Outer beg, Outer end) { return TwoLevelIteratorImpl::StlInnerIsIterator<Outer>::make (beg, end); } template <typename Outer> typename TwoLevelIteratorImpl::StlInnerIsIterator<Outer>::type stl_two_level_end (Outer beg, Outer end) { return TwoLevelIteratorImpl::StlInnerIsIterator<Outer>::make (end, end); } template <typename Outer> typename TwoLevelIteratorImpl::StlInnerIsConstIterator<Outer>::type stl_two_level_cbegin (Outer beg, Outer end) { return TwoLevelIteratorImpl::StlInnerIsConstIterator<Outer>::make (beg, end); } template <typename Outer> typename TwoLevelIteratorImpl::StlInnerIsConstIterator<Outer>::type stl_two_level_cend (Outer beg, Outer end) { return TwoLevelIteratorImpl::StlInnerIsConstIterator<Outer>::make (end, end); } template <typename Outer> typename TwoLevelIteratorImpl::StlInnerIsRvrsIterator<Outer>::type stl_two_level_rbegin (Outer beg, Outer end) { return TwoLevelIteratorImpl::StlInnerIsRvrsIterator<Outer>::make (beg, end); } template <typename Outer> typename TwoLevelIteratorImpl::StlInnerIsRvrsIterator<Outer>::type stl_two_level_rend (Outer beg, Outer end) { return TwoLevelIteratorImpl::StlInnerIsRvrsIterator<Outer>::make (end, end); } template <typename Outer> typename TwoLevelIteratorImpl::StlInnerIsConstRvrsIterator<Outer>::type stl_two_level_crbegin (Outer beg, Outer end) { return TwoLevelIteratorImpl::StlInnerIsConstRvrsIterator<Outer>::make (beg, end); } template <typename Outer> typename 
TwoLevelIteratorImpl::StlInnerIsConstRvrsIterator<Outer>::type stl_two_level_crend (Outer beg, Outer end) { return TwoLevelIteratorImpl::StlInnerIsConstRvrsIterator<Outer>::make (end, end); } } // end namespace Galois #endif // GALOIS_TWO_LEVEL_ITER_H
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Accumulator.h
/** Accumulator type -*- C++ -*- * @file * @section License * * Galois, a framework to exploit amorphous data-parallelism in irregular * programs. * * Copyright (C) 2011, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. * * @author Andrew Lenharth <[email protected]> */ #ifndef GALOIS_ACCUMULATOR_H #define GALOIS_ACCUMULATOR_H #include "Galois/Runtime/PerThreadStorage.h" #include <limits> namespace Galois { /** * GReducible stores per thread values of a variable of type T * * The final value is obtained by performing a reduction on per thread values * using the provided binary functor BinFunc. BinFunc updates values in place * and conforms to: * * void operator()(T& lhs, const T& rhs) * * Assumes that the initial value yields the identity element for binary functor. 
*/ template<typename T, typename BinFunc> class GReducible { protected: BinFunc m_func; Galois::Runtime::PerThreadStorage<T> m_data; const T m_initial; explicit GReducible(const BinFunc& f, const T& initial): m_func(f), m_initial(initial) { } public: /** * @param f the binary functor acting as the reduction operator */ explicit GReducible(const BinFunc& f = BinFunc()): m_func(f), m_initial(T()) { } /** * Updates the thread local value by applying the reduction operator to * current and newly provided value */ void update(const T& rhs) { T& lhs = *m_data.getLocal(); m_func(lhs, rhs); } /** * Returns the final reduction value. Only valid outside the parallel region. */ T& reduce() { T& d0 = *m_data.getLocal(); for (unsigned int i = 1; i < m_data.size(); ++i) { T& d = *m_data.getRemote(i); m_func(d0, d); d = m_initial; } return d0; } /** * reset value */ void reset() { for (unsigned int i = 0; i < m_data.size(); ++i) { *m_data.getRemote(i) = m_initial; } } }; //! Operator form of max template<typename T> struct gmax { const T& operator()(const T& lhs, const T& rhs) const { return std::max<T>(lhs, rhs); } }; //! Operator form of min template<typename T> struct gmin { const T& operator()(const T& lhs, const T& rhs) const { return std::min<T>(lhs, rhs); } }; //! Turns binary functions over values into functions over references //! //! T operator()(const T& a, const T& b) => //! void operator()(T& a, const T& b) template<typename BinFunc> struct ReduceAssignWrap { BinFunc fn; ReduceAssignWrap(const BinFunc& f = BinFunc()): fn(f) { } template<typename T> void operator()(T& lhs, const T& rhs) const { lhs = fn(lhs, rhs); } }; //! Turns binary functions over item references into functions over vectors of items //! //! void operator()(T& a, const T& b) => //! 
void operator()(std::vector<T>& a, const std::vector<T>& b) template<typename BinFunc> struct ReduceVectorWrap { BinFunc fn; ReduceVectorWrap(const BinFunc& f = BinFunc()): fn(f) { } template<typename T> void operator()(T& lhs, const T& rhs) const { if (lhs.size() < rhs.size()) lhs.resize(rhs.size()); typename T::iterator ii = lhs.begin(); for (typename T::const_iterator jj = rhs.begin(), ej = rhs.end(); jj != ej; ++ii, ++jj) { fn(*ii, *jj); } } }; //! Turns binary functions over item (value) references into functions over maps of items //! //! void operator()(V& a, const V& b) => //! void operator()(std::map<K,V>& a, const std::map<K,V>& b) template<typename BinFunc> struct ReduceMapWrap { BinFunc fn; ReduceMapWrap(const BinFunc& f = BinFunc()): fn(f) { } template<typename T> void operator()(T& lhs, const T& rhs) const { for (typename T::const_iterator jj = rhs.begin(), ej = rhs.end(); jj != ej; ++jj) { fn(lhs[jj->first], jj->second); } } }; //! Turns functions over elements of a range into functions over collections //! //! void operator()(T a) => //! void operator()(Collection<T>& a, const Collection<T>& b) template<typename CollectionTy,template<class> class AdaptorTy> struct ReduceCollectionWrap { typedef typename CollectionTy::value_type value_type; void operator()(CollectionTy& lhs, const CollectionTy& rhs) { AdaptorTy<CollectionTy> adapt(lhs, lhs.begin()); std::copy(rhs.begin(), rhs.end(), adapt); } void operator()(CollectionTy& lhs, const value_type& rhs) { AdaptorTy<CollectionTy> adapt(lhs, lhs.begin()); *adapt = rhs; } }; /** * Simplification of GReducible where BinFunc calculates results by * value, i.e., BinFunc conforms to: * * T operator()(const T& a, const T& b); */ template<typename T, typename BinFunc> class GSimpleReducible: public GReducible<T, ReduceAssignWrap<BinFunc> > { typedef GReducible<T, ReduceAssignWrap<BinFunc> > base_type; public: explicit GSimpleReducible(const BinFunc& func = BinFunc()): base_type(func) { } }; //! 
Accumulator for T where accumulation is sum template<typename T> class GAccumulator: public GReducible<T, ReduceAssignWrap<std::plus<T> > > { typedef GReducible<T, ReduceAssignWrap<std::plus<T> > > base_type; public: GAccumulator& operator+=(const T& rhs) { base_type::update(rhs); return *this; } GAccumulator& operator-=(const T& rhs) { base_type::update(-rhs); return *this; } T unsafeRead() const { T d0 = *this->m_data.getRemote(0); for (unsigned int i = 1; i < this->m_data.size(); ++i) { const T& d = *this->m_data.getRemote(i); this->m_func(d0, d); } return d0; } }; //! General accumulator for collections following STL interface where //! accumulate means collection union. Since union/append/push_back are //! not standard among collections, the AdaptorTy template parameter //! allows users to provide an iterator adaptor along the lines of //! std::inserter or std::back_inserter. template<typename CollectionTy,template<class> class AdaptorTy> class GCollectionAccumulator: public GReducible<CollectionTy, ReduceCollectionWrap<CollectionTy, AdaptorTy> > { typedef ReduceCollectionWrap<CollectionTy, AdaptorTy> Func; typedef GReducible<CollectionTy, Func> base_type; typedef typename CollectionTy::value_type value_type; Func func; public: void update(const value_type& rhs) { CollectionTy& v = *this->m_data.getLocal(); func(v, rhs); } }; //! Accumulator for set where accumulation is union template<typename SetTy> class GSetAccumulator: public GCollectionAccumulator<SetTy, std::insert_iterator> { }; //! Accumulator for vector where accumulation is concatenation template<typename VectorTy> class GVectorAccumulator: public GCollectionAccumulator<VectorTy, std::back_insert_iterator> { }; //! Accumulator for vector where a vector is treated as a map and accumulate //! 
does element-wise addition among all entries template<typename VectorTy> class GVectorElementAccumulator: public GReducible<VectorTy, ReduceVectorWrap<ReduceAssignWrap<std::plus<typename VectorTy::value_type> > > > { typedef ReduceAssignWrap<std::plus<typename VectorTy::value_type> > ElementFunc; typedef GReducible<VectorTy, ReduceVectorWrap<ElementFunc> > base_type; typedef typename VectorTy::value_type value_type; ElementFunc func; public: void resize(size_t s) { for (int i = 0; i < this->m_data.size(); ++i) this->m_data.getRemote(i)->resize(s); } VectorTy& getLocal() { return *this->m_data.getLocal(); } void update(size_t index, const value_type& rhs) { VectorTy& v = *this->m_data.getLocal(); if (v.size() <= index) v.resize(index + 1); func(v[index], rhs); } }; //! Accumulator for map where accumulate does element-wise addition among //! all entries template<typename MapTy> class GMapElementAccumulator: public GReducible<MapTy, ReduceMapWrap<ReduceAssignWrap<std::plus<typename MapTy::mapped_type> > > > { typedef ReduceAssignWrap<std::plus<typename MapTy::mapped_type> > ElementFunc; typedef GReducible<MapTy, ReduceMapWrap<ElementFunc> > base_type; typedef typename MapTy::mapped_type mapped_type; typedef typename MapTy::key_type key_type; ElementFunc func; public: void update(const key_type& index, const mapped_type& rhs) { MapTy& v = *this->m_data.getLocal(); func(v[index], rhs); } }; //! Accumulator for T where accumulation is max template<typename T> class GReduceMax: public GReducible<T, ReduceAssignWrap<gmax<T> > > { typedef GReducible<T, ReduceAssignWrap<gmax<T> > > base_type; public: GReduceMax(): base_type(ReduceAssignWrap<gmax<T> >(), std::numeric_limits<T>::min()) { } }; //! 
Accumulator for T where accumulation is min template<typename T> class GReduceMin: public GReducible<T, ReduceAssignWrap<gmin<T> > > { typedef GReducible<T, ReduceAssignWrap<gmin<T> > > base_type; public: GReduceMin(): base_type(ReduceAssignWrap<gmin<T> >(), std::numeric_limits<T>::max()) { } }; } #endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/DomainSpecificExecutors.h
#ifndef GALOIS_DOMAINSPECIFICEXECUTORS_H #define GALOIS_DOMAINSPECIFICEXECUTORS_H #include "LigraExecutor.h" #include "GraphChiExecutor.h" #include "GraphLabExecutor.h" #include "LigraGraphChiExecutor.h" #endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/GraphChiExecutor.h
/**
 * @file
 * Implementation of the GraphChi out-of-core vertex-map DSL on top of Galois.
 *
 * The graph is processed one memory-resident segment at a time; while a
 * segment is being processed, the first worker to run atomically unloads the
 * previous segment and prefetches the next one (double buffering).
 */
#ifndef GALOIS_GRAPHCHIEXECUTOR_H
#define GALOIS_GRAPHCHIEXECUTOR_H

#include "Galois/Graph/OCGraph.h"
#include "Galois/Graph/GraphNodeBag.h"

#include <boost/iterator/filter_iterator.hpp>
#include <boost/utility.hpp>

namespace Galois {
//! Implementation of GraphChi DSL in Galois
namespace GraphChi {

namespace hidden {

//! Calls the user operator either with or without the segment-bound graph
//! wrapper, selected at compile time by PassWrappedGraph.
template<bool PassWrappedGraph>
struct DispatchOperator {
  template<typename O,typename G,typename N>
  void run(O&& o, G&& g, N&& n) {
    std::forward<O>(o)(std::forward<G>(g), std::forward<N>(n));
  }
};

//! Specialization: operator takes only the node, no graph argument.
template<>
struct DispatchOperator<false> {
  template<typename O,typename G,typename N>
  void run(O&& o, G&& g, N&& n) {
    std::forward<O>(o)(std::forward<N>(n));
  }
};

//! Functor applied per active node id when iterating a sparse input bag.
//! Nodes outside the currently loaded segment are skipped.
template<bool PassWrappedGraph,typename Graph,typename WrappedGraph,typename VertexOperator>
class SparseVertexMap: public DispatchOperator<PassWrappedGraph> {
  typedef typename Graph::segment_type segment_type;
  typedef typename Graph::GraphNode GNode;

  Graph& graph;
  WrappedGraph& wrappedGraph;
  VertexOperator op;
  int& first;          // shared CAS flag: 1 once some thread did the segment swap
  segment_type& prev;
  segment_type& cur;
  segment_type& next;
  bool updated;        // per-functor-copy: this thread already checked the flag

public:
  typedef int tt_does_not_need_push;
  typedef int tt_does_not_need_aborts;

  SparseVertexMap(Graph& g, WrappedGraph& w, VertexOperator op, int& f,
      segment_type& p, segment_type& c, segment_type& n):
    graph(g), wrappedGraph(w), op(op), first(f), prev(p), cur(c), next(n),
    updated(false) { }

  void operator()(size_t n, Galois::UserContext<size_t>&) { (*this)(n); }

  void operator()(size_t n) {
    if (!updated) {
      // Exactly one thread wins the CAS and performs the unload/prefetch;
      // everyone else just remembers the handoff has happened.
      if (first == 0 && __sync_bool_compare_and_swap(&first, 0, 1)) {
        if (prev.loaded()) {
          graph.unload(prev);
        }
        if (next) {
          graph.load(next);
        }
      }
      updated = true;
    }
    // Check if range
    if (!cur.containsNode(n)) {
      return;
    }

    this->run(op, wrappedGraph, graph.nodeFromId(n));
  }
};

//! Functor applied per node when iterating the whole segment densely.
//! When CheckInput is set, nodes absent from the input bag are skipped.
template<bool CheckInput,bool PassWrappedGraph,typename Graph,typename WrappedGraph,typename VertexOperator,typename Bag>
class DenseVertexMap: public DispatchOperator<PassWrappedGraph> {
  typedef typename Graph::segment_type segment_type;
  typedef typename Graph::GraphNode GNode;

  Graph& graph;
  WrappedGraph& wrappedGraph;
  VertexOperator op;
  Bag* bag;            // active-vertex filter; may be null when !CheckInput
  int& first;          // shared CAS flag, same protocol as SparseVertexMap
  segment_type& prev;
  segment_type& cur;
  segment_type& next;
  bool updated;

public:
  typedef int tt_does_not_need_push;
  typedef int tt_does_not_need_aborts;

  DenseVertexMap(Graph& g, WrappedGraph& w, VertexOperator op, Bag* b, int& f,
      segment_type& p, segment_type& c, segment_type& n):
    graph(g), wrappedGraph(w), op(op), bag(b), first(f), prev(p), cur(c),
    next(n), updated(false) { }

  void operator()(GNode n, Galois::UserContext<GNode>&) { (*this)(n); }

  void operator()(GNode n) {
    if (!updated) {
      if (first == 0 && __sync_bool_compare_and_swap(&first, 0, 1)) {
        if (prev.loaded()) {
          graph.unload(prev);
        }
        if (next) {
          graph.load(next);
        }
      }
      updated = true;
    }
    if (CheckInput && !bag->contains(graph.idFromNode(n)))
      return;

    this->run(op, wrappedGraph, n);
  }
};

//! Predicate: is node n present in the input bag?
template<typename Graph,typename Bag>
struct contains_node {
  Graph* graph;
  Bag* bag;
  contains_node(Graph* g, Bag* b): graph(g), bag(b) { }
  bool operator()(typename Graph::GraphNode n) {
    return bag->contains(graph->idFromNode(n));
  }
};

//! sizeof that treats void edge data as zero bytes.
template<typename EdgeTy>
struct sizeof_edge {
  static const unsigned int value = sizeof(EdgeTy);
};

template<>
struct sizeof_edge<void> {
  static const unsigned int value = 0;
};

struct logical_or {
  bool operator()(bool a, bool b) const { return a || b; }
};

//! True if any node of segment cur is active in the input bag.
template<typename Graph,typename Seg,typename Bag>
bool any_in_range(Graph& graph, const Seg& cur, Bag* input) {
  return std::find_if(graph.begin(cur), graph.end(cur), contains_node<Graph,Bag>(&graph, input)) != graph.end(cur);
  // TODO(ddn): Figure out the memory leak in ParallelSTL::find_if
  //return Galois::ParallelSTL::find_if(graph.begin(cur), graph.end(cur), contains_node<Graph>(&graph, input)) != graph.end(cur);
  //return Galois::ParallelSTL::map_reduce(graph.begin(cur), graph.end(cur), contains_node<Graph,Bag>(&graph, input), false, logical_or());
}

//! Translates a memory budget (in MB) into a per-segment edge budget,
//! after reserving room for the node index. Dies if nodes alone don't fit.
template<typename Graph>
size_t computeEdgeLimit(Graph& graph, size_t memoryLimit) {
  // Convert memoryLimit which is in MB into edges
  size_t bytes = memoryLimit;
  bytes *= 1024 * 1024;
  size_t sizeNodes = graph.size() * sizeof(uint64_t);
  if (bytes < sizeNodes) {
    GALOIS_DIE("Cannot limit graph in memory allotted");
  }
  bytes -= sizeNodes;
  // double-buffering (2), in and out edges (2)
  size_t edgeBytes = 2 * 2 * (sizeof(uint64_t) + sizeof_edge<typename Graph::edge_data_type>::value);
  size_t edges = bytes / edgeBytes;
  return edges;
}

//! Estimate: does the whole graph (nodes + in/out edges) fit in the budget?
template<typename Graph>
bool fitsInMemory(Graph& graph, size_t memoryLimit) {
  size_t bytes = memoryLimit;
  bytes *= 1024 * 1024;
  size_t nodeBytes = graph.size() * sizeof(uint64_t);
  size_t edgeBytes = graph.sizeEdges() * 2 * (sizeof(uint64_t) + sizeof_edge<typename Graph::edge_data_type>::value);
  return nodeBytes + edgeBytes < bytes;
}

//! Core driver: walks the graph segment by segment, choosing dense or sparse
//! iteration per a size heuristic, and applies op to each (active) vertex.
template<bool CheckInput, bool PassWrappedGraph, typename Graph, typename WrappedGraph, typename VertexOperator, typename Bag>
void vertexMap(Graph& graph, WrappedGraph& wgraph, VertexOperator op, Bag* input, size_t memoryLimit) {
  typedef typename Graph::segment_type segment_type;
  Galois::Statistic rounds("GraphChiRounds");

  size_t edges = computeEdgeLimit(graph, memoryLimit);
  segment_type prev;
  segment_type cur = graph.nextSegment(edges);

  bool useDense;
  if (!CheckInput) {
    useDense = true;
  } else {
    // TODO improve this heuristic
    bool useSparse = (cur.size() > graph.size() / 2) && (input->getSize() < graph.size() / 4);
    useDense = !useSparse;
  }

  if (useDense && CheckInput) {
    input->densify();
  }

  while (cur) {
    if (!CheckInput || !useDense || any_in_range(graph, cur, input)) {
      if (!cur.loaded()) {
        graph.load(cur);
      }

      segment_type next = graph.nextSegment(cur, edges);

      int first = 0;   // CAS flag reset per segment; see DenseVertexMap
      wgraph.setSegment(cur);

      if (useDense) {
        DenseVertexMap<CheckInput,PassWrappedGraph,Graph,WrappedGraph,VertexOperator,Bag> vop(graph, wgraph, op, input, first, prev, cur, next);
        Galois::for_each(graph.begin(cur), graph.end(cur), vop);
      } else {
        SparseVertexMap<PassWrappedGraph,Graph,WrappedGraph,VertexOperator> vop(graph, wgraph, op, first, prev, cur, next);
        Galois::for_each_local(*input, vop);
      }

      // XXX Shouldn't be necessary
      // Invariant check: the parallel loop's CAS winner should already have
      // unloaded prev; a still-loaded prev here indicates a protocol bug.
      if (prev.loaded()) {
        abort();
        graph.unload(prev);
      }

      rounds += 1;

      prev = cur;
      cur = next;
    } else {
      // No active vertex in this segment: skip it without running the loop.
      segment_type next = graph.nextSegment(cur, edges);
      if (prev.loaded())
        graph.unload(prev);
      if (cur.loaded())
        graph.unload(cur);
      cur = next;
    }
  }

  if (prev.loaded())
    graph.unload(prev);
}

} // end namespace

//! Applies op to every vertex of graph, keeping at most `size` MB in memory.
template<typename Graph, typename VertexOperator>
void vertexMap(Graph& graph, VertexOperator op, size_t size) {
  Galois::Graph::BindSegmentGraph<Graph> wgraph(graph);

  hidden::vertexMap<false,true>(graph, wgraph, op, static_cast<GraphNodeBag<>*>(0), size);
}

//! Applies op only to vertices present in the input bag.
template<typename Graph, typename VertexOperator, typename Bag>
void vertexMap(Graph& graph, VertexOperator op, Bag& input, size_t size) {
  Galois::Graph::BindSegmentGraph<Graph> wgraph(graph);

  hidden::vertexMap<true,true>(graph, wgraph, op, &input, size);
}

} // end namespace
} // end namespace

#endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Statistic.h
/** Statistic type -*- C++ -*-
 * @file
 * @section License
 *
 * Galois, a framework to exploit amorphous data-parallelism in irregular
 * programs.
 *
 * Copyright (C) 2011, The University of Texas at Austin. All rights reserved.
 * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
 * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
 * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
 * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
 * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
 * shall University be liable for incidental, special, indirect, direct or
 * consequential damages or loss of profits, interruption of business, or
 * related expenses which may arise from use of Software or Documentation,
 * including but not limited to those resulting from defects in Software and/or
 * Documentation, or loss or inaccuracy of data of any kind.
 *
 * @author Andrew Lenharth <[email protected]>
 */
#ifndef GALOIS_STATISTIC_H
#define GALOIS_STATISTIC_H

#include "Galois/config.h"
#include "Galois/Runtime/Support.h"
#include "Galois/Runtime/PerThreadStorage.h"
#include "Galois/Runtime/Sampling.h"
#include "Galois/Timer.h"

#include "boost/utility.hpp"

#include GALOIS_CXX11_STD_HEADER(deque)

namespace Galois {

/**
 * Basic per-thread statistics counter.
 */
class Statistic {
  std::string statname;
  std::string loopname;
  Galois::Runtime::PerThreadStorage<unsigned long> val;
  bool valid; // guards against double-reporting (dtor + explicit report())

public:
  Statistic(const std::string& _sn, std::string _ln = "(NULL)"): statname(_sn), loopname(_ln), valid(true) { }

  ~Statistic() {
    report();
  }

  //! Adds stat to stat pool; usually the destructor or StatManager calls this for you.
  //! Idempotent: the second and later calls are no-ops.
  void report() {
    if (valid)
      Galois::Runtime::reportStat(this);
    valid = false;
  }

  //! Reads the counter slot of thread tid (no synchronization).
  unsigned long getValue(unsigned tid) {
    return *val.getRemote(tid);
  }

  std::string& getLoopname() {
    return loopname;
  }

  std::string& getStatname() {
    return statname;
  }

  //! Increments the calling thread's private slot; safe inside parallel loops.
  Statistic& operator+=(unsigned long v) {
    *val.getLocal() += v;
    return *this;
  }
};

/**
 * Controls lifetime of stats. Users usually instantiate in main to print out
 * statistics at program exit.
 */
class StatManager: private boost::noncopyable {
  std::deque<Statistic*> stats;

public:
  ~StatManager() {
    // Flush every registered stat, then print the aggregate report.
    for (std::deque<Statistic*>::iterator ii = stats.begin(), ei = stats.end(); ii != ei; ++ii) {
      (*ii)->report();
    }
    Galois::Runtime::printStats();
  }

  //! Statistics that are not lexically scoped must be added explicitly.
  //! Caller retains ownership; the pointed-to Statistic must outlive this manager.
  void push(Statistic& s) {
    stats.push_back(&s);
  }
};

//! Flag type for {@link StatTimer}
struct start_now_t {};
#if defined(__IBMCPP__) && __IBMCPP__ <= 1210
static const start_now_t start_now = start_now_t();
#else
constexpr start_now_t start_now = start_now_t();
#endif

//! Provides statistic interface around timer
class StatTimer : public TimeAccumulator {
  const char* name;
  const char* loopname;
  bool main;  // "main" timers also toggle runtime sampling around the region
  bool valid; // timer currently running (stop() needed before reporting)

protected:
  void init(const char* n, const char* l, bool m, bool s) {
    name = n;
    loopname = l;
    main = m;
    valid = false;
    if (s)
      start();
  }

public:
  StatTimer(const char* n) { init(n, 0, false, false); }
  StatTimer(const char* n, start_now_t t) { init(n, 0, false, true); }

  StatTimer(const char* n, const char* l) { init(n, l, false, false); }
  StatTimer(const char* n, const char* l, start_now_t t) { init(n, l, false, true); }

  StatTimer() { init("Time", 0, true, false); }
  StatTimer(start_now_t t) { init("Time", 0, true, true); }

  ~StatTimer() {
    if (valid)
      stop();
    if (TimeAccumulator::get()) // only report non-zero stat
      Galois::Runtime::reportStat(loopname, name, get());
  }

  void start() {
    if (main)
      Galois::Runtime::beginSampling();
    TimeAccumulator::start();
    valid = true;
  }

  void stop() {
    valid = false;
    TimeAccumulator::stop();
    if (main)
      Galois::Runtime::endSampling();
  }
};

}
#endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Galois.h
/** Galois user interface -*- C++ -*-
 * @file
 * This is the only file to include for basic Galois functionality.
 *
 * @section License
 *
 * Galois, a framework to exploit amorphous data-parallelism in irregular
 * programs.
 *
 * Copyright (C) 2012, The University of Texas at Austin. All rights reserved.
 * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
 * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
 * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
 * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
 * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
 * shall University be liable for incidental, special, indirect, direct or
 * consequential damages or loss of profits, interruption of business, or
 * related expenses which may arise from use of Software or Documentation,
 * including but not limited to those resulting from defects in Software and/or
 * Documentation, or loss or inaccuracy of data of any kind.
 */
#ifndef GALOIS_GALOIS_H
#define GALOIS_GALOIS_H

#include "Galois/config.h"
#include "Galois/WorkList/WorkList.h"
#include "Galois/UserContext.h"
#include "Galois/Threads.h"
#include "Galois/Runtime/ParallelWork.h"
#include "Galois/Runtime/DoAll.h"
#include "Galois/Runtime/DeterministicWork.h"
#include "Galois/Runtime/OrderedWork.h"

#ifdef GALOIS_USE_EXP
#include "Galois/Runtime/ParallelWorkInline.h"
#include "Galois/Runtime/ParaMeter.h"
#endif

#include GALOIS_CXX11_STD_HEADER(utility)
#include GALOIS_CXX11_STD_HEADER(type_traits)
#include GALOIS_CXX11_STD_HEADER(tuple)

/**
 * Main Galois namespace. All the core Galois functionality will be found in here.
 */
namespace Galois {

/**
 * Specify name to appear in statistics. Optional argument to {@link do_all()}
 * and {@link for_each()} loops.
 */
struct loopname {
  const char* n;
  loopname(const char* n = 0) :n(n) {}
};

/**
 * Specify whether @{link do_all()} loops should perform work-stealing. Optional
 * argument to {@link do_all()} loops.
 */
struct do_all_steal {
  bool b;
  do_all_steal(bool b = false) :b(b) {}
};

//! Base tag so tuple_index can locate any wl<...> argument by inheritance.
struct wl_tag {};

/**
 * Specify worklist to use. Optional argument to {@link for_each()} loops.
 */
template<typename WLTy>
struct wl : public wl_tag {
  typedef WLTy WL;
};


namespace HIDDEN {

static constexpr unsigned GALOIS_DEFAULT_CHUNK_SIZE = 32;
typedef WorkList::dChunkedFIFO<GALOIS_DEFAULT_CHUNK_SIZE> defaultWL;

//! Compile-time search of a tuple for an element of (or derived from) type S,
//! scanning from the back; yields the index, or -1 if absent.
template <typename T, typename S, int i = std::tuple_size<T>::value - 1>
struct tuple_index {
  enum {
    value = std::is_base_of<S, typename std::tuple_element<i, T>::type>::value
    || std::is_same<S, typename std::tuple_element<i, T>::type>::value
    ? i : tuple_index<T, S, i-1>::value
  };
};

//! Recursion terminator: type not found.
template <typename T, typename S>
struct tuple_index<T, S, -1> {
  enum { value = -1 };
};

//! Extracts loopname and worklist from the argument tuple and forwards to the
//! runtime. The static_asserts reject the pre-tuple API (raw char*/bool args).
template<typename RangeTy, typename FunctionTy, typename Tuple>
void for_each_gen(RangeTy r, FunctionTy fn, Tuple tpl) {
  typedef Tuple tupleType;
  static_assert(-1 == tuple_index<tupleType, char*>::value, "old loopname");
  static_assert(-1 == tuple_index<tupleType, char const*>::value, "old loopname");
  static_assert(-1 == tuple_index<tupleType, bool>::value, "old steal");
  // std::cout << tuple_index<tupleType, char*>::value << " "
  //           << tuple_index<tupleType, char const*>::value << "\n";
  constexpr unsigned iloopname = tuple_index<tupleType, loopname>::value;
  constexpr unsigned iwl = tuple_index<tupleType, wl_tag>::value;
  const char* ln = std::get<iloopname>(tpl).n;
  typedef typename std::tuple_element<iwl,tupleType>::type::WL WLTy;
  Runtime::for_each_impl<WLTy>(r, fn, ln);
}

//! Same extraction as for_each_gen, but pulls the steal flag instead of a
//! worklist and dispatches to the do_all runtime.
template<typename RangeTy, typename FunctionTy, typename Tuple>
FunctionTy do_all_gen(RangeTy r, FunctionTy fn, Tuple tpl) {
  typedef Tuple tupleType;
  static_assert(-1 == tuple_index<tupleType, char*>::value, "old loopname");
  static_assert(-1 == tuple_index<tupleType, char const*>::value, "old loopname");
  static_assert(-1 == tuple_index<tupleType, bool>::value, "old steal");
  // std::cout << tuple_index<tupleType, char*>::value << " "
  //           << tuple_index<tupleType, char const*>::value << "\n";
  constexpr unsigned iloopname = tuple_index<tupleType, loopname>::value;
  constexpr unsigned isteal = tuple_index<tupleType, do_all_steal>::value;
  const char* ln = std::get<iloopname>(tpl).n;
  bool steal = std::get<isteal>(tpl).b;
  return Runtime::do_all_impl(r, fn, ln, steal);
}

} // namespace HIDDEN

////////////////////////////////////////////////////////////////////////////////
// Foreach
////////////////////////////////////////////////////////////////////////////////

/**
 * Galois unordered set iterator.
 * Operator should conform to <code>fn(item, UserContext<T>&)</code> where item is a value from the iteration
 * range and T is the type of item.
 *
 * @tparam WLTy Worklist policy {@see Galois::WorkList}
 * @param b begining of range of initial items
 * @param e end of range of initial items
 * @param fn operator
 * @param args optional arguments to loop, e.g., {@see loopname}, {@see wl}
 */
template<typename IterTy, typename FunctionTy, typename... Args>
void for_each(IterTy b, IterTy e, FunctionTy fn, Args... args) {
  // Defaults (loopname(), wl<defaultWL>()) come first so user-supplied args,
  // found by the back-to-front tuple_index scan, take precedence.
  HIDDEN::for_each_gen(Runtime::makeStandardRange(b,e), fn, std::make_tuple(loopname(), wl<HIDDEN::defaultWL>(), args...));
}

/**
 * Galois unordered set iterator.
 * Operator should conform to <code>fn(item, UserContext<T>&)</code> where item is i and T
 * is the type of item.
 *
 * @tparam WLTy Worklist policy {@link Galois::WorkList}
 * @param i initial item
 * @param fn operator
 * @param args optional arguments to loop
 */
template<typename ItemTy, typename FunctionTy, typename... Args>
void for_each(ItemTy i, FunctionTy fn, Args... args) {
  // Wrap the single item in a one-element range.
  ItemTy iwl[1] = {i};
  HIDDEN::for_each_gen(Runtime::makeStandardRange(&iwl[0], &iwl[1]), fn, std::make_tuple(loopname(), wl<HIDDEN::defaultWL>(), args...));
}

/**
 * Galois unordered set iterator with locality-aware container.
 * Operator should conform to <code>fn(item, UserContext<T>&)</code> where item is an element of c and T
 * is the type of item.
 *
 * @tparam WLTy Worklist policy {@link Galois::WorkList}
 * @param c locality-aware container
 * @param fn operator
 * @param args optional arguments to loop
 */
template<typename ConTy, typename FunctionTy, typename... Args>
void for_each_local(ConTy& c, FunctionTy fn, Args... args) {
  HIDDEN::for_each_gen(Runtime::makeLocalRange(c), fn, std::make_tuple(loopname(), wl<HIDDEN::defaultWL>(), args...));
}

/**
 * Standard do-all loop. All iterations should be independent.
 * Operator should conform to <code>fn(item)</code> where item is a value from the iteration range.
 *
 * @param b beginning of range of items
 * @param e end of range of items
 * @param fn operator
 * @param args optional arguments to loop
 * @returns fn
 */
template<typename IterTy,typename FunctionTy, typename... Args>
FunctionTy do_all(const IterTy& b, const IterTy& e, FunctionTy fn, Args... args) {
  return HIDDEN::do_all_gen(Runtime::makeStandardRange(b, e), fn, std::make_tuple(loopname(), do_all_steal(), args...));
}

/**
 * Standard do-all loop with locality-aware container. All iterations should be independent.
 * Operator should conform to <code>fn(item)</code> where item is an element of c.
 *
 * @param c locality-aware container
 * @param fn operator
 * @param args optional arguments to loop
 * @returns fn
 */
template<typename ConTy,typename FunctionTy, typename... Args>
FunctionTy do_all_local(ConTy& c, FunctionTy fn, Args... args) {
  return HIDDEN::do_all_gen(Runtime::makeLocalRange(c), fn, std::make_tuple(loopname(), do_all_steal(), args...));
}

/**
 * Low-level parallel loop. Operator is applied for each running thread. Operator
 * should confirm to <code>fn(tid, numThreads)</code> where tid is the id of the current thread and
 * numThreads is the total number of running threads.
 *
 * @param fn operator
 * @param loopname string to identify loop in statistics output
 */
template<typename FunctionTy>
static inline void on_each(FunctionTy fn, const char* loopname = 0) {
  Runtime::on_each_impl(fn, loopname);
}

/**
 * Preallocates pages on each thread.
 *
 * @param num number of pages to allocate of size {@link Galois::Runtime::MM::pageSize}
 */
static inline void preAlloc(int num) {
  Runtime::preAlloc_impl(num);
}

/**
 * Reports number of pages allocated by the Galois system so far. The value is printing using
 * the statistics infrastructure.
 *
 * @param label Label to associated with report at this program point
 */
static inline void reportPageAlloc(const char* label) {
  Runtime::reportPageAlloc(label);
}

/**
 * Galois ordered set iterator for stable source algorithms.
 *
 * Operator should conform to <code>fn(item, UserContext<T>&)</code> where item is a value from the iteration
 * range and T is the type of item. Comparison function should conform to <code>bool r = cmp(item1, item2)</code>
 * where r is true if item1 is less than or equal to item2. Neighborhood function should conform to
 * <code>nhFunc(item)</code> and should visit every element in the neighborhood of active element item.
 *
 * @param b begining of range of initial items
 * @param e end of range of initial items
 * @param cmp comparison function
 * @param nhFunc neighborhood function
 * @param fn operator
 * @param loopname string to identity loop in statistics output
 */
template<typename Iter, typename Cmp, typename NhFunc, typename OpFunc>
void for_each_ordered(Iter b, Iter e, const Cmp& cmp, const NhFunc& nhFunc, const OpFunc& fn, const char* loopname=0) {
  Runtime::for_each_ordered_impl(b, e, cmp, nhFunc, fn, loopname);
}

/**
 * Galois ordered set iterator for unstable source algorithms.
 *
 * Operator should conform to <code>fn(item, UserContext<T>&)</code> where item is a value from the iteration
 * range and T is the type of item. Comparison function should conform to <code>bool r = cmp(item1, item2)</code>
 * where r is true if item1 is less than or equal to item2. Neighborhood function should conform to
 * <code>nhFunc(item)</code> and should visit every element in the neighborhood of active element item.
 * The stability test should conform to <code>bool r = stabilityTest(item)</code> where r is true if
 * item is a stable source.
 *
 * @param b begining of range of initial items
 * @param e end of range of initial items
 * @param cmp comparison function
 * @param nhFunc neighborhood function
 * @param fn operator
 * @param stabilityTest stability test
 * @param loopname string to identity loop in statistics output
 */
template<typename Iter, typename Cmp, typename NhFunc, typename OpFunc, typename StableTest>
void for_each_ordered(Iter b, Iter e, const Cmp& cmp, const NhFunc& nhFunc, const OpFunc& fn, const StableTest& stabilityTest, const char* loopname=0) {
  Runtime::for_each_ordered_impl(b, e, cmp, nhFunc, fn, stabilityTest, loopname);
}

} //namespace Galois
#endif
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/SparseBitVector.h
// Galois Managed Conflict type wrapper -*- C++ -*- /* Galois, a framework to exploit amorphous data-parallelism in irregular programs. Copyright (C) 2011, The University of Texas at Austin. All rights reserved. UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances shall University be liable for incidental, special, indirect, direct or consequential damages or loss of profits, interruption of business, or related expenses which may arise from use of Software or Documentation, including but not limited to those resulting from defects in Software and/or Documentation, or loss or inaccuracy of data of any kind. @author rupesh nasre. <[email protected]> */ #ifndef GALOIS_SPARSEBITVECTOR_H #define GALOIS_SPARSEBITVECTOR_H #include "Galois/Runtime/ll/SimpleLock.h" #include <vector> #include <string> #include <ostream> namespace Galois { /** * Concurrent version of sparse bit vector. * * Stores objects as indices in sparse bit vectors. * Saves space when the data to be stored is sparsely populated. 
*/ struct SparseBitVector { typedef unsigned long WORD; typedef Galois::Runtime::LL::SimpleLock<true> LockType; static const unsigned wordsize = sizeof(WORD)*8; struct OneWord { WORD bits; unsigned base; struct OneWord *next; LockType kulup; bool set(unsigned oo) { WORD oribits = bits; bits |= ((WORD)1 << oo); return bits != oribits; } OneWord(unsigned bb, unsigned oo) { base = bb; set(oo); next = 0; } OneWord() { } unsigned unify(OneWord *second) { if (second) { WORD oribits = count(); bits |= second->bits; return count() - oribits; } return 0; } unsigned count() { unsigned numElements = 0; WORD powerof2 = 1; for (unsigned ii = 0; ii < wordsize; ++ii) { if (bits & powerof2) { ++numElements; } powerof2 <<= 1; } return numElements; } inline bool isSubsetEq(OneWord *second) { return (bits & second->bits) == bits; } OneWord *clone() { OneWord *newword = new OneWord(); newword->base = base; newword->bits = bits; newword->next = 0; return newword; } OneWord *cloneAll() { OneWord *newlist = clone(); OneWord *ptr2; for (OneWord *newlistptr = newlist, *ptr = next; ptr;) { //ptr->lock(); newlistptr->next = ptr->clone(); ptr2 = ptr->next; //ptr->unlock(); ptr = ptr2; newlistptr = newlistptr->next; } return newlist; } void getAllSetBits(std::vector<unsigned> &setbits) { WORD powerof2 = 1; unsigned bitno = 0; for (unsigned ii = 0; ii < wordsize; ++ii) { if (bits & powerof2) { setbits.push_back(base*wordsize + bitno); } powerof2 <<= 1; ++bitno; } } void lock() { kulup.lock(); } void unlock() { kulup.unlock(); } }; OneWord *head; LockType headkulup; SparseBitVector() { init(0); } void init() { init(0); } void init(unsigned nelements) { head = 0; } void lock() { headkulup.lock(); } void unlock() { headkulup.unlock(); } bool set(unsigned bit) { unsigned base, offset; getOffsets(bit, base, offset); OneWord *ptr, *prev; ptr = head; prev = 0; for (; ptr && ptr->base <= base; ptr = ptr->next) { // sorted order. 
if (ptr->base == base) { return ptr->set(offset); } prev = ptr; } OneWord *newword = new OneWord(base, offset); if (prev) { //prev->lock(); newword->next = prev->next; prev->next = newword; //prev->unlock(); } else { //lock(); newword->next = head; head = newword; //unlock(); } return true; } unsigned unify(SparseBitVector &second) { unsigned nchanged = 0; OneWord *prev = 0, *ptrone, *ptrtwo; for (ptrone = head, ptrtwo = second.head; ptrone && ptrtwo;) { if (ptrone->base == ptrtwo->base) { //ptrone->lock(); nchanged += ptrone->unify(ptrtwo); prev = ptrone; ptrone = ptrone->next; ptrtwo = ptrtwo->next; //prev->unlock(); } else if (ptrone->base < ptrtwo->base) { prev = ptrone; //prev->lock(); ptrone = ptrone->next; //prev->unlock(); } else { OneWord *newword = ptrtwo->clone(); newword->next = ptrone; if (prev) { //prev->lock(); prev->next = newword; //prev->unlock(); prev = newword; } else { //lock(); head = prev = newword; //unlock(); } ptrtwo = ptrtwo->next; } } if (ptrtwo) { OneWord *remaining = ptrtwo->cloneAll(); if (prev) { //prev->lock(); prev->next = remaining; //prev->unlock(); } else if (ptrtwo) { //lock(); head = remaining; //unlock(); } } return nchanged; } bool isSubsetEq(SparseBitVector &second) { OneWord *ptrone, *ptrtwo; for (ptrone = head, ptrtwo = second.head; ptrone && ptrtwo; ptrone = ptrone->next) { if (ptrone->base == ptrtwo->base) { if (!ptrone->isSubsetEq(ptrtwo)) { return false; } ptrtwo = ptrtwo->next; } else if (ptrone->base > ptrtwo->base) { return false; } } if (ptrone) { return false; } return true; } inline void getOffsets(unsigned bit, unsigned &ventry, unsigned &wbit) { ventry = bit / wordsize; wbit = bit % wordsize; } unsigned count() { unsigned nbits = 0; for (OneWord *ptr = head; ptr; ptr = ptr->next) { nbits += ptr->count(); } return nbits; } unsigned getAllSetBits(std::vector<unsigned> &setbits) { unsigned nnodes = 0; for (OneWord *ptr = head; ptr; ptr = ptr->next) { ptr->getAllSetBits(setbits); ++nnodes; } return nnodes; } void 
print(std::ostream& out, std::string prefix = std::string("")) { std::vector<unsigned> setbits; unsigned nnodes = getAllSetBits(setbits); out << "Elements(" << nnodes << "): "; for (std::vector<unsigned>::iterator ii = setbits.begin(); ii != setbits.end(); ++ii) { out << prefix << *ii << ", "; } out << "\n"; } }; } #endif // _GALOIS_SPARSEBITVECTOR_H
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/optional.h
/** Replacement for boost::optional -*- C++ -*- * @file * @section License * * Galois, a framework to exploit amorphous data-parallelism in irregular * programs. * * Copyright (C) 2013, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. * * Replacement for <code>boost::optional</code> using {@link Galois::LazyObject}, * which conforms to a more strict aliasing policy. * * @author Donald Nguyen <[email protected]> */ #ifndef GALOIS_OPTIONAL_H #define GALOIS_OPTIONAL_H #include "Galois/LazyObject.h" #include <cassert> namespace Galois { /** * Galois version of <code>boost::optional</code>. 
*/ template<typename T> class optional { LazyObject<T> data_; bool initialized_; void construct(const T& val) { data_.construct(val); initialized_ = true; } void assign_impl(const T& val) { get_impl() = val; } void destroy() { if (initialized_) { data_.destroy(); initialized_ = false; } } T& get_impl() { return data_.get(); } const T& get_impl() const { return data_.get(); } public: typedef bool (optional::*unspecified_bool_type)() const; optional(): initialized_(false) { } optional(const T& val): initialized_(false) { construct(val); } optional(const optional& rhs): initialized_(false) { if (rhs.is_initialized()) construct(rhs.get_impl()); } template<typename U> explicit optional(const optional<U>& rhs): initialized_(false) { assign(rhs); } ~optional() { destroy(); } void assign(const optional& rhs) { if (is_initialized()) { if (rhs.is_initialized()) assign_impl(rhs.get_impl()); else destroy(); } else { if (rhs.is_initialized()) construct(rhs.get_impl()); } } template<typename U> void assign(const optional<U>& rhs) { if (is_initialized()) { if (rhs.is_initialized()) assign_impl(rhs.get_impl()); else destroy(); } else { if (rhs.is_initialized()) construct(rhs.get_impl()); } } void assign(const T& val) { if (is_initialized()) assign_impl(val); else construct(val); } bool is_initialized() const { return initialized_; } optional& operator=(const optional& rhs) { assign(rhs); return *this; } template<typename U> optional& operator=(const optional<U>& rhs) { assign(rhs); return *this; } optional& operator=(const T& val) { assign(val); return *this; } T& get() { assert(initialized_); return get_impl(); } const T& get() const { assert(initialized_); return get_impl(); } T& operator*() { return get(); } const T& operator*() const { return get(); } T* operator->() { assert(initialized_); return &get_impl(); } const T* operator->() const { assert(initialized_); return &get_impl(); } operator unspecified_bool_type() const { return initialized_ ? 
&optional::is_initialized : 0; } }; } #endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/LazyObject.h
/** Lazy and strict object types -*- C++ -*-
 * @file
 * @section License
 *
 * Galois, a framework to exploit amorphous data-parallelism in irregular
 * programs.
 *
 * Copyright (C) 2012, The University of Texas at Austin. All rights reserved.
 * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
 * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
 * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
 * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
 * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
 * shall University be liable for incidental, special, indirect, direct or
 * consequential damages or loss of profits, interruption of business, or
 * related expenses which may arise from use of Software or Documentation,
 * including but not limited to those resulting from defects in Software and/or
 * Documentation, or loss or inaccuracy of data of any kind.
 *
 * @section Description
 *
 * @author Donald Nguyen <[email protected]>
 */
#ifndef GALOIS_LAZYOBJECT_H
#define GALOIS_LAZYOBJECT_H

#include "Galois/config.h"
#include "Galois/Runtime/ll/gio.h"
#include "Galois/TypeTraits.h"

// For consistent name, use boost rather than C++11 std::is_trivially_constuctible
#include <boost/type_traits/has_trivial_constructor.hpp>

#include GALOIS_CXX11_STD_HEADER(type_traits)
#include GALOIS_CXX11_STD_HEADER(utility)

namespace Galois {

/**
 * Single object with specialization for void type. To take advantage of empty
 * member optimization, users should subclass this class, otherwise the
 * compiler will insert non-zero padding for fields (even when empty).
 *
 * "Strict" means the object is constructed eagerly, unlike LazyObject below.
 */
template<typename T>
class StrictObject {
  T data;  // eagerly constructed value
public:
  typedef T value_type;
  typedef T& reference;
  typedef const T& const_reference;
  const static bool has_value = true;

  StrictObject() { }
  StrictObject(const_reference t): data(t) { }
  const_reference get() const { return data; }
  reference get() { return data; }
};

/** Specialization for void: carries no data; get() yields a null sentinel. */
template<>
struct StrictObject<void> {
  typedef void* value_type;
  typedef void* reference;
  typedef void* const_reference;
  const static bool has_value = false;

  StrictObject() { }
  StrictObject(const_reference) { }
  reference get() const { return 0; }
};

// Workaround block for old IBM XL compilers (<= 12.1) that cannot put
// non-trivially-constructible members inside unions.
#if defined(__IBMCPP__) && __IBMCPP__ <= 1210
namespace LazyObjectDetail {

// Primary case (bool parameter true): T may live directly in the union.
template<typename T, typename CharData, bool>
struct SafeDataBase {
  union type {
    CharData buf;
    T value_;
    T& value() { return value_; }
    const T& value() const { return value_; }
  };
};

// Fallback for non-trivially-constructible T: only the raw buffer is stored
// and the value is accessed through a cast (weaker aliasing guarantees).
template<typename T, typename CharData>
struct SafeDataBase<T, CharData, false> {
  union type {
    CharData buf;

    T& value() { return *reinterpret_cast<T*>(&buf); }
    const T& value() const { return *reinterpret_cast<const T*>(&buf); }

    type() {
      // XXX: Keep this as a runtime exception rather than a compile-time one
      //GALOIS_DIE("Unsafe construct for type '", __PRETTY_FUNCTION__, "' when expecting strict aliasing");
    }
  };
};

/**
 * Works around compilers that do not support non-trivially constructible
 * members in unions.
 */
template<typename T, typename CharData>
struct SafeData: public SafeDataBase<T, CharData,
  boost::has_trivial_constructor<T>::value || Galois::has_known_trivial_constructor<T>::value > { };

} // end detail
#endif

/**
 * Single (uninitialized) object with specialization for void type. To take
 * advantage of empty member optimization, users should subclass this class,
 * otherwise the compiler will insert non-zero padding for fields (even when
 * empty).
 *
 * The caller is responsible for pairing construct() and destroy(); get() is
 * only valid between them.
 */
template<typename T>
class LazyObject {
  // Raw storage with the size and alignment of T.
  typedef typename std::aligned_storage<sizeof(T), std::alignment_of<T>::value>::type CharData;

#if defined(__IBMCPP__) && __IBMCPP__ <= 1210
  typedef typename LazyObjectDetail::SafeData<T, CharData>::type Data;
#else
  // Union of raw bytes and T: gives union-based access to the constructed
  // value without reinterpret_cast. Empty ctor/dtor: lifetime is managed
  // manually via construct()/destroy().
  union Data {
    CharData buf;
    T value_;

    Data() { }
    ~Data() { }

    T& value() { return value_; }
    const T& value() const { return value_; }
  };
#endif

  Data data_;

  T* cast() { return &data_.value(); }
  const T* cast() const { return &data_.value(); }

public:
  typedef T value_type;
  typedef T& reference;
  typedef const T& const_reference;
  const static bool has_value = true;
  // Can't support incomplete T's but provide same interface as
  // {@link Galois::LargeArray} for consistency
  struct size_of {
    const static size_t value = sizeof(T);
  };

  /** Ends the lifetime of the stored value (precondition: constructed). */
  void destroy() { cast()->~T(); }

  /** Placement-constructs the value by copy. */
  void construct(const_reference x) { new (cast()) T(x); }

  /** Placement-constructs the value by forwarding arbitrary ctor arguments. */
  template<typename... Args>
  void construct(Args&&... args) { new (cast()) T(std::forward<Args>(args)...); }

  const_reference get() const { return *cast(); }
  reference get() { return *cast(); }
};

/** Specialization for void: no storage; all operations are no-ops. */
template<>
struct LazyObject<void> {
  typedef void* value_type;
  typedef void* reference;
  typedef void* const_reference;
  const static bool has_value = false;
  struct size_of {
    const static size_t value = 0;
  };

  void destroy() { }
  void construct(const_reference x) { }
  template<typename... Args>
  void construct(Args&&... args) { }

  const_reference get() const { return 0; }
};

}
#endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/CheckedObject.h
/** Galois Managed Conflict type wrapper -*- C++ -*-
 * @file
 * @section License
 *
 * Galois, a framework to exploit amorphous data-parallelism in irregular
 * programs.
 *
 * Copyright (C) 2011, The University of Texas at Austin. All rights reserved.
 * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
 * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
 * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
 * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
 * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
 * shall University be liable for incidental, special, indirect, direct or
 * consequential damages or loss of profits, interruption of business, or
 * related expenses which may arise from use of Software or Documentation,
 * including but not limited to those resulting from defects in Software and/or
 * Documentation, or loss or inaccuracy of data of any kind.
 *
 * @author Andrew Lenharth <[email protected]>
 */
#ifndef GALOIS_CHECKEDOBJECT_H
#define GALOIS_CHECKEDOBJECT_H

#include "Galois/Runtime/Context.h"

namespace Galois {

/**
 * Conflict-checking wrapper for any type. Performs global conflict detection
 * on the enclosed object. This enables arbitrary types to be managed by the
 * Galois runtime.
 *
 * Deriving from Runtime::Lockable lets acquire() register this object with
 * the runtime's conflict-detection machinery; every get() acquires first.
 */
template<typename T>
class GChecked : public Galois::Runtime::Lockable {
  T val;  // the wrapped value, reachable only through get()

public:
  /** Forwards arbitrary constructor arguments to the wrapped T. */
  template<typename... Args>
  GChecked(Args&&... args): val(std::forward<Args>(args)...)
  { }

  /**
   * @param m method flag controlling how/whether the runtime acquires this
   *          object before access
   * @return mutable reference to the wrapped value
   */
  T& get(Galois::MethodFlag m = MethodFlag::ALL) {
    Galois::Runtime::acquire(this, m);
    return val;
  }

  /**
   * Const access still acquires: conflict detection tracks readers too.
   * const_cast is needed because acquire() takes a non-const Lockable.
   */
  const T& get(Galois::MethodFlag m = MethodFlag::ALL) const {
    Galois::Runtime::acquire(const_cast<GChecked*>(this), m);
    return val;
  }
};

/** Specialization for void: no value; get() only performs the acquire. */
template<>
class GChecked<void>: public Galois::Runtime::Lockable {
public:
  void get(Galois::MethodFlag m = MethodFlag::ALL) const {
    Galois::Runtime::acquire(const_cast<GChecked*>(this), m);
  }
};

}

#endif // GALOIS_CHECKEDOBJECT_H
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Threads.h
/** Galois user interface -*- C++ -*-
 * @file
 * @section License
 *
 * Galois, a framework to exploit amorphous data-parallelism in irregular
 * programs.
 *
 * Copyright (C) 2012, The University of Texas at Austin. All rights reserved.
 * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
 * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
 * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
 * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
 * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
 * shall University be liable for incidental, special, indirect, direct or
 * consequential damages or loss of profits, interruption of business, or
 * related expenses which may arise from use of Software or Documentation,
 * including but not limited to those resulting from defects in Software and/or
 * Documentation, or loss or inaccuracy of data of any kind.
 */
#ifndef GALOIS_THREADS_H
#define GALOIS_THREADS_H

namespace Galois {

/**
 * Sets the number of threads to use when running any Galois iterator. Returns
 * the actual value of threads used, which could be less than the requested
 * value. System behavior is undefined if this function is called during
 * parallel execution or after the first parallel execution.
 *
 * @param num requested number of threads
 * @return number of threads actually in use (may be capped below @a num)
 */
unsigned int setActiveThreads(unsigned int num);

/**
 * Returns the number of threads in use.
 */
unsigned int getActiveThreads();

}
#endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/LigraGraphChiExecutor.h
#ifndef GALOIS_LIGRAGRAPHCHIEXECUTOR_H
#define GALOIS_LIGRAGRAPHCHIEXECUTOR_H

#include "LigraExecutor.h"
#include "GraphChiExecutor.h"

namespace Galois {
//! Implementation of combination of Ligra and GraphChi DSL in Galois
namespace LigraGraphChi {

/**
 * Dense edge-map over all vertices: applies @a op along edges and records
 * results in @a output. The graph is wrapped per-segment for GraphChi-style
 * out-of-core execution.
 *
 * @tparam Forward true to traverse out-edges, false for in-edges
 * @param size memory budget forwarded to the GraphChi vertex-map
 */
template<bool Forward,typename Graph,typename EdgeOperator,typename Bag>
void edgeMap(size_t size, Graph& graph, EdgeOperator op, Bag& output) {
  typedef Galois::Graph::BindSegmentGraph<Graph> WrappedGraph;
  WrappedGraph wgraph(graph);

  output.densify();
  Galois::GraphChi::hidden::vertexMap<false,false>(graph, wgraph,
      Galois::Ligra::hidden::DenseForwardOperator<WrappedGraph,Bag,EdgeOperator,Forward,true>(wgraph, output, output, op),
      static_cast<Bag*>(0), size);
}

/**
 * Frontier-based edge-map: applies @a op along edges leaving the vertices in
 * @a input, writing activated vertices to @a output. Chooses a dense or
 * sparse traversal based on frontier size (Ligra's direction heuristic:
 * dense when the frontier exceeds |E|/20).
 *
 * NOTE(review): the outer condition requires !denseForward, yet the first
 * inner branch tests denseForward and starts with abort() ("Never used now"),
 * so that path is unreachable dead code — confirm whether the heuristic
 * condition or the dead branch should be removed upstream.
 */
template<bool Forward,typename Graph, typename EdgeOperator,typename Bag>
void edgeMap(size_t size, Graph& graph, EdgeOperator op, Bag& input, Bag& output, bool denseForward) {
  typedef Galois::Graph::BindSegmentGraph<Graph> WrappedGraph;
  WrappedGraph wgraph(graph);
  size_t count = input.getCount();

  if (!denseForward && count > graph.sizeEdges() / 20) {
    // Dense mode: scan all vertices, consulting the densified frontier.
    input.densify();
    if (denseForward) {
      abort(); // Never used now
      output.densify();
      Galois::GraphChi::hidden::vertexMap<false,false>(graph, wgraph,
          Galois::Ligra::hidden::DenseForwardOperator<WrappedGraph,Bag,EdgeOperator,Forward,false>(wgraph, input, output, op),
          static_cast<Bag*>(0), size);
    } else {
      Galois::GraphChi::hidden::vertexMap<false,false>(graph, wgraph,
          Galois::Ligra::hidden::DenseOperator<WrappedGraph,Bag,EdgeOperator,Forward>(wgraph, input, output, op),
          static_cast<Bag*>(0), size);
    }
  } else {
    // Sparse mode: only visit vertices actually present in the frontier.
    Galois::GraphChi::hidden::vertexMap<true,false>(graph, wgraph,
        Galois::Ligra::hidden::SparseOperator<WrappedGraph,Bag,EdgeOperator,Forward>(wgraph, output, op),
        &input, size);
  }
}

/**
 * Convenience overload: edge-map starting from a single source vertex.
 * Builds a one-element frontier and delegates to the frontier overload.
 */
template<bool Forward,typename Graph, typename EdgeOperator,typename Bag>
void edgeMap(size_t size, Graph& graph, EdgeOperator op, typename Graph::GraphNode single, Bag& output) {
  Bag input(graph.size());
  input.push(graph.idFromNode(single), 1);
  edgeMap<Forward>(size, graph, op, input, output, false);
}

//! Forwards to edgeMap over out-edges.
template<typename... Args>
void outEdgeMap(Args&&... args) {
  edgeMap<true>(std::forward<Args>(args)...);
}

//! Forwards to edgeMap over in-edges.
template<typename... Args>
void inEdgeMap(Args&&... args) {
  edgeMap<false>(std::forward<Args>(args)...);
}

/**
 * Compile-time selector between the GraphChi-backed executor (true) and the
 * plain in-memory Ligra executor (false specialization below).
 */
template<bool UseGraphChi>
struct ChooseExecutor {
  template<typename... Args>
  void inEdgeMap(size_t size, Args&&... args) {
    edgeMap<false>(size, std::forward<Args>(args)...);
  }

  template<typename... Args>
  void outEdgeMap(size_t size, Args&&... args) {
    edgeMap<true>(size, std::forward<Args>(args)...);
  }

  // Pins the graph in memory when the budget allows, avoiding re-loading
  // segments from disk on every pass.
  template<typename Graph>
  void checkIfInMemoryGraph(Graph& g, size_t size) {
    if (Galois::GraphChi::hidden::fitsInMemory(g, size)) {
      g.keepInMemory();
    }
  }
};

/** In-memory variant: delegates straight to the Ligra executor; the memory
 * budget argument is accepted but unused. */
template<>
struct ChooseExecutor<false> {
  template<typename... Args>
  void inEdgeMap(size_t size, Args&&... args) {
    Galois::Ligra::edgeMap<false>(std::forward<Args>(args)...);
  }

  template<typename... Args>
  void outEdgeMap(size_t size, Args&&... args) {
    Galois::Ligra::edgeMap<true>(std::forward<Args>(args)...);
  }

  template<typename Graph>
  void checkIfInMemoryGraph(Graph& g, size_t size) { }
};

} // end namespace
} // end namespace

#endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/FlatMap.h
/** STL style map using sorted vectors -*- C++ -*- * @file * @section License * * Galois, a framework to exploit amorphous data-parallelism in irregular * programs. * * Copyright (C) 2013, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. * * @author Andrew Lenharth <[email protected]> */ #ifndef GALOIS_FLATMAP_H #define GALOIS_FLATMAP_H #include "Galois/config.h" #include GALOIS_CXX11_STD_HEADER(functional) #include GALOIS_CXX11_STD_HEADER(algorithm) #include GALOIS_CXX11_STD_HEADER(utility) #include GALOIS_CXX11_STD_HEADER(vector) #include <stdexcept> namespace Galois { //! Simple map data structure, based off a single array. 
template< class _Key, class _Tp, class _Compare = std::less<_Key>, class _Alloc = std::allocator<std::pair<_Key, _Tp> > > class flat_map { public: typedef _Key key_type; typedef _Tp mapped_type; typedef std::pair<_Key, _Tp> value_type; typedef _Compare key_compare; typedef _Alloc allocator_type; class value_compare : public std::binary_function<value_type, value_type, bool> { friend class flat_map<_Key, _Tp, _Compare, _Alloc>; protected: _Compare comp; value_compare(_Compare __c): comp(__c) { } public: bool operator()(const value_type& __x, const value_type& __y) const { return comp(__x.first, __y.first); } }; private: /// This turns... typedef typename _Alloc::template rebind<value_type>::other _Pair_alloc_type; typedef std::vector<value_type, _Pair_alloc_type> _VectTy; _VectTy _data; _Compare _comp; class value_key_compare : public std::binary_function<value_type, key_type, bool> { friend class flat_map<_Key, _Tp, _Compare, _Alloc>; protected: _Compare comp; value_key_compare(_Compare __c): comp(__c) { } public: bool operator()(const value_type& __x, const key_type& __y) const { return comp(__x.first, __y); } }; value_key_compare value_key_comp() const { return value_key_compare(key_comp()); } bool key_eq(const key_type& k1, const key_type& k2) const { return !key_comp()(k1, k2) && !key_comp()(k2, k1); } void resort() { std::sort(_data.begin(), _data.end(), value_comp()); } public: typedef typename _Pair_alloc_type::pointer pointer; typedef typename _Pair_alloc_type::const_pointer const_pointer; typedef typename _Pair_alloc_type::reference reference; typedef typename _Pair_alloc_type::const_reference const_reference; typedef typename _VectTy::iterator iterator; typedef typename _VectTy::const_iterator const_iterator; typedef typename _VectTy::size_type size_type; typedef typename _VectTy::difference_type difference_type; typedef typename _VectTy::reverse_iterator reverse_iterator; typedef typename _VectTy::const_reverse_iterator const_reverse_iterator; flat_map() 
:_data(), _comp() {} explicit flat_map(const _Compare& __comp, const allocator_type& __a = allocator_type()) :_data(_Pair_alloc_type(__a)), _comp(__comp) {} flat_map(const flat_map& __x) :_data(__x._data), _comp(__x._comp) {} flat_map(flat_map&& __x) /* noexcept(std::is_nothrow_copy_constructible<_Compare>::value) */ : _data(std::move(__x._data)), _comp(std::move(__x._comp)) {} /* flat_map(std::initializer_list<value_type> __l, const _Compare& __comp = _Compare(), const allocator_type& __a = allocator_type()) : _data(__l, _Pair_alloc_type(__a)), _comp(__comp) { resort(); } */ template<typename _InputIterator> flat_map(_InputIterator __first, _InputIterator __last) : _data(__first, __last), _comp() { resort(); } template<typename _InputIterator> flat_map(_InputIterator __first, _InputIterator __last, const _Compare& __comp, const allocator_type& __a = allocator_type()) : _data(__first, __last, _Pair_alloc_type(__a)) { resort(); } flat_map& operator=(const flat_map& __x) { _data = __x._data; _comp = __x._comp; return *this; } flat_map& operator=(flat_map&& __x) { clear(); swap(__x); return *this; } /* flat_map& operator=(std::initializer_list<value_type> __l) { clear(); insert(__l.begin(), __l.end()); return *this; } */ allocator_type get_allocator() const /* noexcept */ { return allocator_type(_data.get_allocator()); } // iterators iterator begin() /* noexcept */ { return _data.begin(); } const_iterator begin() const /* noexcept */ { return _data.begin(); } iterator end() /* noexcept */ { return _data.end(); } const_iterator end() const /* noexcept */ { return _data.end(); } reverse_iterator rbegin() /* noexcept */ { return _data.rbegin(); } const_reverse_iterator rbegin() const /* noexcept */ { return _data.rbegin(); } reverse_iterator rend() /* noexcept */ { return _data.rend(); } const_reverse_iterator rend() const /* noexcept */ { return _data.rend(); } const_iterator cbegin() const /* noexcept */ { return _data.begin(); } const_iterator cend() const /* noexcept 
*/ { return _data.end(); } const_reverse_iterator crbegin() const /* noexcept */ { return _data.rbegin(); } const_reverse_iterator crend() const /* noexcept */ { return _data.rend(); } bool empty() const /* noexcept */ { return _data.empty(); } size_type size() const /* noexcept */ { return _data.size(); } size_type max_size() const /* noexcept */ { return _data.max_size(); } mapped_type& operator[](const key_type& __k) { iterator __i = lower_bound(__k); // __i->first is greater than or equivalent to __k. if (__i == end() || key_comp()(__k, (*__i).first)) // __i = _data.emplace(__i, std::piecewise_construct, // std::tuple<const key_type&>(__k), // std::tuple<>()); #ifndef GALOIS_CXX11_VECTOR_HAS_NO_EMPLACE __i = _data.emplace(__i, __k, mapped_type()); #else __i = _data.insert(__i, value_type(__k, mapped_type())); #endif return (*__i).second; } mapped_type& operator[](key_type&& __k) { iterator __i = lower_bound(__k); // __i->first is greater than or equivalent to __k. if (__i == end() || key_comp()(__k, (*__i).first)) // __i = _data.emplace(__i, std::piecewise_construct, // std::forward_as_tuple(std::move(__k)), // std::tuple<>()); #ifndef GALOIS_CXX11_VECTOR_HAS_NO_EMPLACE __i = _data.emplace(__i, std::move(__k), mapped_type()); #else __i = _data.insert(__i, value_type(std::move(__k), mapped_type())); #endif return (*__i).second; } mapped_type& at(const key_type& __k) { iterator __i = lower_bound(__k); if (__i == end() || key_comp()(__k, (*__i).first)) throw std::out_of_range(__N("flat_map::at")); return (*__i).second; } const mapped_type& at(const key_type& __k) const { const_iterator __i = lower_bound(__k); if (__i == end() || key_comp()(__k, (*__i).first)) throw std::out_of_range(__N("flat_map::at")); return (*__i).second; } std::pair<iterator, bool> insert(const value_type& __x) { auto i = lower_bound(__x.first); if (i != end() && key_eq(i->first, __x.first)) return std::make_pair(i, false); return std::make_pair(_data.insert(i, __x), true); } 
//template<typename _Pair, typename = typename std::enable_if<std::is_constructible<value_type, _Pair&&>::value>::type>
  // Insert __x unless an element with an equivalent key already exists.
  // Returns the position of the (possibly pre-existing) element and whether
  // an insertion actually happened.  O(log n) search + O(n) vector shift.
  template<typename _Pair>
  std::pair<iterator, bool> insert(_Pair&& __x) {
    auto i = lower_bound(__x.first);
    if (i != end() && key_eq(i->first, __x.first))
      return std::make_pair(i, false);
    return std::make_pair(_data.insert(i, std::forward<_Pair>(__x)), true);
  }

  /* void insert(std::initializer_list<value_type> __list) { insert(__list.begin(), __list.end()); } */

  // Hinted insert; the hint (__position) is ignored and a full lookup is done.
  iterator insert(const_iterator __position, const value_type& __x) {
    return insert(__x).first;
  }

  //template<typename _Pair, typename = typename std::enable_if<std::is_constructible<value_type, _Pair&&>::value>::type>
  // Hinted forwarding insert; hint is likewise ignored.
  template<typename _Pair>
  iterator insert(const_iterator __position, _Pair&& __x) {
    return insert(std::forward<_Pair>(__x)).first;
  }

  // Range insert: element-by-element, so worst case O(m * n) vector shifts.
  template<typename _InputIterator>
  void insert(_InputIterator __first, _InputIterator __last) {
    while (__first != __last)
      insert(*__first++);
  }

  iterator erase(const_iterator __position) { return _data.erase(__position); }
  iterator erase(iterator __position) { return _data.erase(__position); }

  // Erase by key; returns the number of elements removed (0 or 1).
  // NOTE(review): when the key is absent, find() returns end() and the
  // i->first below dereferences the end iterator before the comparison --
  // looks like UB for missing keys; confirm callers only erase present keys.
  size_type erase(const key_type& __x) {
    auto i = find(__x);
    if (key_eq(__x, i->first)) {
      _data.erase(i);
      return 1;
    }
    return 0;
  }

  iterator erase(const_iterator __first, const_iterator __last) {
    return _data.erase(__first, __last);
  }

  // Swap contents and comparators with another map.
  void swap(flat_map& __x) {
    _data.swap(__x._data);
    std::swap(_comp, __x._comp);
  }

  void clear() /* noexcept */ { _data.clear(); }

  key_compare key_comp() const { return _comp; }
  value_compare value_comp() const { return value_compare(key_comp()); }

  // Binary-search lookup.
  // NOTE(review): lower_bound may return end(); i->first is then a
  // past-the-end dereference before the key_eq check -- same pattern as
  // erase(key) above; confirm intended.
  iterator find(const key_type& __x) {
    auto i = lower_bound(__x);
    if (key_eq(i->first, __x))
      return i;
    return end();
  }

  const_iterator find(const key_type& __x) const {
    auto i = lower_bound(__x);
    if (key_eq(i->first, __x))
      return i;
    return end();
  }

  // Keys are unique in this container, so count is 0 or 1.
  size_type count(const key_type& __x) const {
    return find(__x) == end() ? 0 : 1;
  }

  iterator lower_bound(const key_type& __x) {
    return std::lower_bound(_data.begin(), _data.end(), __x, value_key_comp());
  }
  const_iterator lower_bound(const key_type& __x) const {
    return std::lower_bound(_data.begin(), _data.end(), __x, value_key_comp());
  }
  iterator upper_bound(const key_type& __x) {
    return std::upper_bound(_data.begin(), _data.end(), __x, value_key_comp());
  }
  const_iterator upper_bound(const key_type& __x) const {
    return std::upper_bound(_data.begin(), _data.end(), __x, value_key_comp());
  }

  std::pair<iterator, iterator> equal_range(const key_type& __x) {
    return std::make_pair(lower_bound(__x), upper_bound(__x));
  }
  std::pair<const_iterator, const_iterator> equal_range(const key_type& __x) const {
    return std::make_pair(lower_bound(__x), upper_bound(__x));
  }

  // Friends give the free comparison operators access to _data.
  template<typename _K1, typename _T1, typename _C1, typename _A1>
  friend bool operator==(const flat_map<_K1, _T1, _C1, _A1>&, const flat_map<_K1, _T1, _C1, _A1>&);

  template<typename _K1, typename _T1, typename _C1, typename _A1>
  friend bool operator<(const flat_map<_K1, _T1, _C1, _A1>&, const flat_map<_K1, _T1, _C1, _A1>&);
};

// Element-wise equality of the underlying sorted vectors.
template<typename _Key, typename _Tp, typename _Compare, typename _Alloc>
inline bool operator==(const flat_map<_Key, _Tp, _Compare, _Alloc>& __x,
                       const flat_map<_Key, _Tp, _Compare, _Alloc>& __y) {
  return __x._data == __y._data;
}

// Lexicographic comparison of the underlying sorted vectors.
template<typename _Key, typename _Tp, typename _Compare, typename _Alloc>
inline bool operator<(const flat_map<_Key, _Tp, _Compare, _Alloc>& __x,
                      const flat_map<_Key, _Tp, _Compare, _Alloc>& __y) {
  return __x._data < __y._data;
}

/// Based on operator==
template<typename _Key, typename _Tp, typename _Compare, typename _Alloc>
inline bool operator!=(const flat_map<_Key, _Tp, _Compare, _Alloc>& __x,
                       const flat_map<_Key, _Tp, _Compare, _Alloc>& __y) {
  return !(__x == __y);
}

/// Based on operator<
template<typename _Key, typename _Tp, typename _Compare, typename _Alloc>
inline bool operator>(const flat_map<_Key, _Tp, _Compare, _Alloc>& __x,
                      const flat_map<_Key, _Tp, _Compare, _Alloc>& __y) {
  return __y < __x;
}

/// Based on operator<
template<typename _Key, typename _Tp, typename _Compare, typename _Alloc>
inline bool operator<=(const flat_map<_Key, _Tp, _Compare, _Alloc>& __x,
                       const flat_map<_Key, _Tp, _Compare, _Alloc>& __y) {
  return !(__y < __x);
}

/// Based on operator<
template<typename _Key, typename _Tp, typename _Compare, typename _Alloc>
inline bool operator>=(const flat_map<_Key, _Tp, _Compare, _Alloc>& __x,
                       const flat_map<_Key, _Tp, _Compare, _Alloc>& __y) {
  return !(__x < __y);
}

}

namespace std {

/// See Galois::flat_map::swap().
template<typename _Key, typename _Tp, typename _Compare, typename _Alloc>
inline void swap(Galois::flat_map<_Key, _Tp, _Compare, _Alloc>& __x,
                 Galois::flat_map<_Key, _Tp, _Compare, _Alloc>& __y) {
  __x.swap(__y);
}

}

#endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/LargeArray.h
/** Large array types -*- C++ -*-
 * @file
 * @section License
 *
 * Galois, a framework to exploit amorphous data-parallelism in irregular
 * programs.
 *
 * Copyright (C) 2012, The University of Texas at Austin. All rights reserved.
 * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
 * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
 * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
 * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
 * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
 * shall University be liable for incidental, special, indirect, direct or
 * consequential damages or loss of profits, interruption of business, or
 * related expenses which may arise from use of Software or Documentation,
 * including but not limited to those resulting from defects in Software and/or
 * Documentation, or loss or inaccuracy of data of any kind.
 *
 * @section Description
 *
 * @author Donald Nguyen <[email protected]>
 */
#ifndef GALOIS_LARGEARRAY_H
#define GALOIS_LARGEARRAY_H

#include "Galois/config.h"
#include "Galois/gstl.h"
#include "Galois/Runtime/ll/gio.h"
#include "Galois/Runtime/mm/Mem.h"

#include <boost/utility.hpp>

#include GALOIS_CXX11_STD_HEADER(utility)

namespace Galois {

/**
 * Large array of objects with proper specialization for void type and
 * supporting various allocation and construction policies.
 *
 * @tparam T value type of container
 */
template<typename T>
class LargeArray: private boost::noncopyable {
  T* m_data;
  size_t m_size;
  // Allocation provenance: 0 = wrapped/none (not owned), 1 = interleaved
  // alloc, 2 = local (largeAlloc).  deallocate()/destroy() key off this.
  int allocated;

public:
  typedef T raw_value_type;
  typedef T value_type;
  typedef size_t size_type;
  typedef ptrdiff_t difference_type;
  typedef value_type& reference;
  typedef const value_type& const_reference;
  typedef value_type* pointer;
  typedef const value_type* const_pointer;
  typedef pointer iterator;
  typedef const_pointer const_iterator;
  const static bool has_value = true;

  // Extra indirection to support incomplete T's
  struct size_of {
    const static size_t value = sizeof(T);
  };

protected:
  // Acquire raw (uninitialized) memory for n elements.  Records the
  // allocation flavor in `allocated` so the matching free is used later.
  // Note: prefault is only consulted for the non-interleaved path.
  void allocate(size_type n, bool interleave, bool prefault) {
    assert(!m_data);
    allocated = interleave ? 1 : 2;
    m_size = n;
    if (interleave)
      m_data = reinterpret_cast<T*>(Galois::Runtime::MM::largeInterleavedAlloc(sizeof(T) * n));
    else if (prefault)
      m_data = reinterpret_cast<T*>(Galois::Runtime::MM::largeAlloc(sizeof(T) * n, true));
    else
      m_data = reinterpret_cast<T*>(Galois::Runtime::MM::largeAlloc(sizeof(T) * n, false));
  }

public:
  /**
   * Wraps existing buffer in LargeArray interface.  The buffer is not owned:
   * `allocated` stays 0, so destroy()/deallocate() leave it untouched.
   */
  LargeArray(void* d, size_t s): m_data(reinterpret_cast<T*>(d)), m_size(s), allocated(0) { }

  LargeArray(): m_data(0), m_size(0), allocated(0) { }

  // Destroys elements then releases memory (both no-ops for wrapped buffers).
  ~LargeArray() {
    destroy();
    deallocate();
  }

  // Unchecked element access (no bounds check, despite the name `at`).
  const_reference at(difference_type x) const { return m_data[x]; }
  reference at(difference_type x) { return m_data[x]; }
  const_reference operator[](size_type x) const { return m_data[x]; }
  reference operator[](size_type x) { return m_data[x]; }
  void set(difference_type x, const_reference v) { m_data[x] = v; }
  size_type size() const { return m_size; }
  iterator begin() { return m_data; }
  const_iterator begin() const { return m_data; }
  iterator end() { return m_data + m_size; }
  const_iterator end() const { return m_data + m_size; }

  //! Allocates interleaved across NUMA (memory) nodes.
  void allocateInterleaved(size_type n) { allocate(n, true, true); }

  /**
   * Allocates using default memory policy (usually first-touch)
   *
   * @param n number of elements to allocate
   * @param prefault Prefault/touch memory to place it local to the currently
   *   executing thread. By default, true because concurrent page-faulting can
   *   be a scalability bottleneck.
   */
  void allocateLocal(size_type n, bool prefault = true) { allocate(n, false, prefault); }

  //! Placement-constructs every element from the same argument pack.
  template<typename... Args>
  void construct(Args&&... args) {
    for (T* ii = m_data, *ei = m_data + m_size; ii != ei; ++ii)
      new (ii) T(std::forward<Args>(args)...);
  }

  //! Placement-constructs only the element at index n.
  template<typename... Args>
  void constructAt(size_type n, Args&&... args) {
    new (&m_data[n]) T(std::forward<Args>(args)...);
  }

  //! Allocate and construct
  template<typename... Args>
  void create(size_type n, Args&&... args) {
    allocateInterleaved(n);
    construct(std::forward<Args>(args)...);
  }

  // Frees the underlying memory with the allocator that produced it.
  // Does not run destructors; see destroy().
  void deallocate() {
    if (!allocated) return;
    if (allocated == 1)
      Galois::Runtime::MM::largeInterleavedFree(m_data, sizeof(T) * m_size);
    else if (allocated == 2)
      Galois::Runtime::MM::largeFree(m_data, sizeof(T) * m_size);
    else
      GALOIS_DIE("Unknown allocation type");
    m_data = 0;
    m_size = 0;
  }

  // Runs destructors on all elements.  Skipped for wrapped (allocated == 0)
  // buffers -- elements constructed into a wrapped buffer via construct()
  // are presumably the wrapper owner's responsibility; confirm callers.
  void destroy() {
    if (!allocated) return;
    if (!m_data) return;
    uninitialized_destroy(m_data, m_data + m_size);
  }

  //! Destroys only the element at index n.
  void destroyAt(size_type n) {
    assert(allocated);
    (&m_data[n])->~T();
  }

  // The following methods are not shared with void specialization
  const_pointer data() const { return m_data; }
  pointer data() { return m_data; }
};

//! Void specialization: same interface, but stores nothing and every
//! operation is a no-op returning 0 where a value is expected.
template<>
class LargeArray<void>: private boost::noncopyable {
public:
  LargeArray(void* d, size_t s) { }
  LargeArray() { }

  typedef void raw_value_type;
  typedef void* value_type;
  typedef size_t size_type;
  typedef ptrdiff_t difference_type;
  typedef value_type reference;
  typedef const value_type const_reference;
  typedef value_type* pointer;
  typedef const value_type* const_pointer;
  typedef pointer iterator;
  typedef const_pointer const_iterator;
  const static bool has_value = false;
  struct size_of {
    const static size_t value = 0;
  };

  const_reference at(difference_type x) const { return 0; }
  reference at(difference_type x) { return 0; }
  const_reference operator[](size_type x) const { return 0; }
  void set(difference_type x, const_reference v) { }
  size_type size() const { return 0; }
  iterator begin() { return 0; }
  const_iterator begin() const { return 0; }
  iterator end() { return 0; }
  const_iterator end() const { return 0; }

  void allocateInterleaved(size_type n) { }
  void allocateLocal(size_type n, bool prefault = true) { }
  template<typename... Args> void construct(Args&&... args) { }
  template<typename... Args> void constructAt(size_type n, Args&&... args) { }
  template<typename... Args> void create(size_type n, Args&&... args) { }

  void deallocate() { }
  void destroy() { }
  void destroyAt(size_type n) { }

  const_pointer data() const { return 0; }
  pointer data() { return 0; }
};

}
#endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/GraphLabExecutor.h
#ifndef GALOIS_GRAPHLABEXECUTOR_H
#define GALOIS_GRAPHLABEXECUTOR_H

#include "Galois/Bag.h"

#include <boost/mpl/has_xxx.hpp>

namespace Galois {
//! Implementation of GraphLab v2/PowerGraph DSL in Galois
namespace GraphLab {

// Compile-time detection of which gather/scatter phases an Operator opts
// into, keyed on nested typedefs (tt_needs_*) in the Operator type.
BOOST_MPL_HAS_XXX_TRAIT_DEF(tt_needs_gather_in_edges)
template<typename T>
struct needs_gather_in_edges: public has_tt_needs_gather_in_edges<T> {};

BOOST_MPL_HAS_XXX_TRAIT_DEF(tt_needs_gather_out_edges)
template<typename T>
struct needs_gather_out_edges: public has_tt_needs_gather_out_edges<T> {};

BOOST_MPL_HAS_XXX_TRAIT_DEF(tt_needs_scatter_in_edges)
template<typename T>
struct needs_scatter_in_edges: public has_tt_needs_scatter_in_edges<T> {};

BOOST_MPL_HAS_XXX_TRAIT_DEF(tt_needs_scatter_out_edges)
template<typename T>
struct needs_scatter_out_edges: public has_tt_needs_scatter_out_edges<T> {};

//! No-payload message type for operators that do not exchange messages.
struct EmptyMessage {
  EmptyMessage& operator+=(const EmptyMessage&) { return *this; }
};

//! Handle passed to Operator::scatter for activating neighbors.  In async
//! mode it forwards to the Galois UserContext; in sync mode it marks the
//! node on a scoreboard and accumulates its message for the next round.
template<typename Graph, typename Operator>
struct Context {
  typedef typename Graph::GraphNode GNode;
  typedef typename Operator::message_type message_type;
  typedef std::pair<GNode,message_type> WorkItem;

private:
  template<typename,typename> friend class AsyncEngine;
  template<typename,typename> friend class SyncEngine;

  // Message slot: first is a 0/1 spin flag guarding second (the payload).
  typedef std::pair<int,message_type> Message;
  typedef std::deque<Message> MyMessages;
  typedef Galois::Runtime::PerPackageStorage<MyMessages> Messages;

  Galois::UserContext<WorkItem>* ctx;   // non-null only in async mode
  Graph* graph;
  Galois::LargeArray<int>* scoreboard;  // 0/1 per node: already scheduled?
  Galois::InsertBag<GNode>* next;       // worklist for the next sync round
  Messages* messages;                   // null when Operator uses EmptyMessage

  Context(Galois::UserContext<WorkItem>* c): ctx(c) { }
#if defined(__IBMCPP__) && __IBMCPP__ <= 1210
public:
#endif
  Context(Graph* g, Galois::LargeArray<int>* s, Galois::InsertBag<GNode>* n, Messages* m):
    graph(g), scoreboard(s), next(n), messages(m) { }

public:
  // Activate `node` with `message`.  Sync mode uses a CAS on the scoreboard
  // so each node is enqueued at most once per round, then spins on the
  // per-package message flag to merge the message.
  void push(GNode node, const message_type& message) {
    if (ctx) {
      ctx->push(WorkItem(node, message));
    } else {
      size_t id = graph->idFromNode(node);
      {
        int val = (*scoreboard)[id];
        if (val == 0 && __sync_bool_compare_and_swap(&(*scoreboard)[id], 0, 1)) {
          next->push(node);
        }
      }
      if (messages) {
        MyMessages& m = *messages->getLocal();
        int val;
        while (true) {
          // Spin until we own the slot (first: 0 -> 1), merge, release.
          val = m[id].first;
          if (val == 0 && __sync_bool_compare_and_swap(&m[id].first, 0, 1)) {
            m[id].second += message;
            m[id].first = 0;
            return;
          }
        }
      }
    }
  }
};

//! Asynchronous engine: every active node is a self-contained work item
//! processed with full init/gather/apply/scatter in one operator call.
template<typename Graph, typename Operator>
class AsyncEngine {
  typedef typename Operator::message_type message_type;
  typedef typename Operator::gather_type gather_type;
  typedef typename Graph::GraphNode GNode;
  typedef typename Graph::in_edge_iterator in_edge_iterator;
  typedef typename Graph::edge_iterator edge_iterator;

  typedef typename Context<Graph,Operator>::WorkItem WorkItem;

  // Seeds the worklist with every node paired with an empty message.
  struct Initialize {
    AsyncEngine* self;
    Galois::InsertBag<WorkItem>& bag;

    Initialize(AsyncEngine* s, Galois::InsertBag<WorkItem>& b): self(s), bag(b) { }

    void operator()(GNode n) {
      bag.push(WorkItem(n, message_type()));
    }
  };

  // One full vertex-program execution per work item.
  struct Process {
    AsyncEngine* self;
    Process(AsyncEngine* s): self(s) { }

    void operator()(const WorkItem& item, Galois::UserContext<WorkItem>& ctx) {
      Operator op(self->origOp);

      GNode node = item.first;
      message_type msg = item.second;

      // Touch edge ranges with MethodFlag::ALL up front to acquire locks
      // before the (NONE-flagged) loops below.
      if (needs_gather_in_edges<Operator>::value || needs_scatter_in_edges<Operator>::value) {
        self->graph.in_edge_begin(node, Galois::MethodFlag::ALL);
      }

      if (needs_gather_out_edges<Operator>::value || needs_scatter_out_edges<Operator>::value) {
        self->graph.edge_begin(node, Galois::MethodFlag::ALL);
      }

      op.init(self->graph, node, msg);

      gather_type sum;
      if (needs_gather_in_edges<Operator>::value) {
        for (in_edge_iterator ii = self->graph.in_edge_begin(node, Galois::MethodFlag::NONE),
            ei = self->graph.in_edge_end(node, Galois::MethodFlag::NONE); ii != ei; ++ii) {
          op.gather(self->graph, node, self->graph.getInEdgeDst(ii), node, sum, self->graph.getInEdgeData(ii));
        }
      }
      if (needs_gather_out_edges<Operator>::value) {
        for (edge_iterator ii = self->graph.edge_begin(node, Galois::MethodFlag::NONE),
            ei = self->graph.edge_end(node, Galois::MethodFlag::NONE); ii != ei; ++ii) {
          op.gather(self->graph, node, node, self->graph.getEdgeDst(ii), sum, self->graph.getEdgeData(ii));
        }
      }

      op.apply(self->graph, node, sum);

      if (!op.needsScatter(self->graph, node))
        return;

      Context<Graph,Operator> context(&ctx);

      if (needs_scatter_in_edges<Operator>::value) {
        for (in_edge_iterator ii = self->graph.in_edge_begin(node, Galois::MethodFlag::NONE),
            ei = self->graph.in_edge_end(node, Galois::MethodFlag::NONE); ii != ei; ++ii) {
          op.scatter(self->graph, node, self->graph.getInEdgeDst(ii), node, context, self->graph.getInEdgeData(ii));
        }
      }
      if (needs_scatter_out_edges<Operator>::value) {
        for (edge_iterator ii = self->graph.edge_begin(node, Galois::MethodFlag::NONE),
            ei = self->graph.edge_end(node, Galois::MethodFlag::NONE); ii != ei; ++ii) {
          op.scatter(self->graph, node, node, self->graph.getEdgeDst(ii), context, self->graph.getEdgeData(ii));
        }
      }
    }
  };

  Graph& graph;
  Operator origOp;

public:
  AsyncEngine(Graph& g, Operator o): graph(g), origOp(o) { }

  void execute() {
    typedef typename Context<Graph,Operator>::WorkItem WorkItem;
    typedef Galois::WorkList::dChunkedFIFO<256> WL;

    Galois::InsertBag<WorkItem> bag;
    Galois::do_all_local(graph, Initialize(this, bag));
    Galois::for_each_local(bag, Process(this), Galois::wl<WL>());
  }
};

//! Synchronous (bulk-synchronous) engine: rounds of init, gather, apply,
//! scatter over the set of active nodes, with per-node Operator state kept
//! in `ops` between phases.
template<typename Graph, typename Operator>
class SyncEngine {
  typedef typename Operator::message_type message_type;
  typedef typename Operator::gather_type gather_type;
  typedef typename Graph::GraphNode GNode;
  typedef typename Graph::in_edge_iterator in_edge_iterator;
  typedef typename Graph::edge_iterator edge_iterator;
  static const bool NeedMessages = !std::is_same<EmptyMessage,message_type>::value;
  typedef Galois::WorkList::dChunkedFIFO<256> WL;
  typedef std::pair<int,message_type> Message;
  typedef std::deque<Message> MyMessages;
  typedef Galois::Runtime::PerPackageStorage<MyMessages> Messages;

  Graph& graph;
  Operator origOp;
  Galois::LargeArray<Operator> ops;   // per-node operator instance
  Messages messages;                  // per-package pending messages
  Galois::LargeArray<int> scoreboard; // per-node "already scheduled" flag
  Galois::InsertBag<GNode> wls[2];    // double-buffered worklists
  Galois::Runtime::LL::SimpleLock<true> lock;

  // Gather phase: accumulate neighbor contributions and apply.
  struct Gather {
    SyncEngine* self;
    typedef int tt_does_not_need_push;
    typedef int tt_does_not_need_aborts;

    Gather(SyncEngine* s): self(s) { }
    void operator()(GNode node, Galois::UserContext<GNode>&) {
      size_t id = self->graph.idFromNode(node);
      Operator& op = self->ops[id];
      gather_type sum;

      if (needs_gather_in_edges<Operator>::value) {
        for (in_edge_iterator ii = self->graph.in_edge_begin(node, Galois::MethodFlag::NONE),
            ei = self->graph.in_edge_end(node, Galois::MethodFlag::NONE); ii != ei; ++ii) {
          op.gather(self->graph, node, self->graph.getInEdgeDst(ii), node, sum, self->graph.getInEdgeData(ii));
        }
      }

      if (needs_gather_out_edges<Operator>::value) {
        for (edge_iterator ii = self->graph.edge_begin(node, Galois::MethodFlag::NONE),
            ei = self->graph.edge_end(node, Galois::MethodFlag::NONE); ii != ei; ++ii) {
          op.gather(self->graph, node, node, self->graph.getEdgeDst(ii), sum, self->graph.getEdgeData(ii));
        }
      }

      op.apply(self->graph, node, sum);
    }
  };

  // Scatter phase: forward updates to neighbors via the sync-mode Context,
  // which fills the next round's worklist and message slots.
  template<typename Container>
  struct Scatter {
    typedef int tt_does_not_need_push;
    typedef int tt_does_not_need_aborts;

    SyncEngine* self;
    Context<Graph,Operator> context;

    Scatter(SyncEngine* s, Container& next):
      self(s),
      context(&self->graph, &self->scoreboard, &next, NeedMessages ? &self->messages : 0)
      { }

    void operator()(GNode node, Galois::UserContext<GNode>&) {
      size_t id = self->graph.idFromNode(node);

      Operator& op = self->ops[id];

      if (!op.needsScatter(self->graph, node))
        return;

      if (needs_scatter_in_edges<Operator>::value) {
        for (in_edge_iterator ii = self->graph.in_edge_begin(node, Galois::MethodFlag::NONE),
            ei = self->graph.in_edge_end(node, Galois::MethodFlag::NONE); ii != ei; ++ii) {
          op.scatter(self->graph, node, self->graph.getInEdgeDst(ii), node, context, self->graph.getInEdgeData(ii));
        }
      }
      if (needs_scatter_out_edges<Operator>::value) {
        for (edge_iterator ii = self->graph.edge_begin(node, Galois::MethodFlag::NONE),
            ei = self->graph.edge_end(node, Galois::MethodFlag::NONE); ii != ei; ++ii) {
          op.scatter(self->graph, node, node, self->graph.getEdgeDst(ii), context, self->graph.getEdgeData(ii));
        }
      }
    }
  };

  // Per-round setup: reset scoreboard (or lazily size message buffers on the
  // first round), drain pending messages into Operator::init, and -- when no
  // gather phase is needed -- apply immediately.
  template<bool IsFirst>
  struct Initialize {
    typedef int tt_does_not_need_push;
    typedef int tt_does_not_need_aborts;

    SyncEngine* self;
    Initialize(SyncEngine* s): self(s) { }

    // Lazily size this package's message deque (package leaders only,
    // excluding thread 0 whose buffer was sized in the constructor).
    void allocateMessages() {
      unsigned tid = Galois::Runtime::LL::getTID();
      if (!Galois::Runtime::LL::isPackageLeader(tid) || tid == 0)
        return;
      MyMessages& m = *self->messages.getLocal();
      self->lock.lock();
      m.resize(self->graph.size());
      self->lock.unlock();
    }

    // Sum and clear the pending messages for node `id` across packages.
    message_type getMessage(size_t id) {
      message_type ret;
      if (NeedMessages) {
        for (unsigned int i = 0; i < self->messages.size(); ++i) {
          if (!Galois::Runtime::LL::isPackageLeader(i))
            continue;
          MyMessages& m = *self->messages.getRemote(i);
          if (m.empty())
            continue;
          ret += m[id].second;
          m[id] = std::make_pair(0, message_type());
          // During initialization, only messages from thread zero
          if (IsFirst)
            break;
        }
      }
      return ret;
    }

    void operator()(GNode n, Galois::UserContext<GNode>&) {
      size_t id = self->graph.idFromNode(n);
      if (IsFirst && NeedMessages) {
        allocateMessages();
      } else if (!IsFirst) {
        self->scoreboard[id] = 0;
      }

      Operator& op = self->ops[id];
      op = self->origOp;
      op.init(self->graph, n, getMessage(id));

      // Hoist as much work as possible behind first barrier
      if (needs_gather_in_edges<Operator>::value || needs_gather_out_edges<Operator>::value)
        return;

      gather_type sum;
      op.apply(self->graph, n, sum);

      if (needs_scatter_in_edges<Operator>::value || needs_scatter_out_edges<Operator>::value)
        return;
    }
  };

  // One BSP round over `cur`, pushing the next round's work into `next`.
  // Phases that the Operator did not opt into are skipped entirely.
  template<bool IsFirst,typename Container1, typename Container2>
  void executeStep(Container1& cur, Container2& next) {
    Galois::for_each_local(cur, Initialize<IsFirst>(this), Galois::wl<WL>());

    if (needs_gather_in_edges<Operator>::value || needs_gather_out_edges<Operator>::value) {
      Galois::for_each_local(cur, Gather(this), Galois::wl<WL>());
    }

    if (needs_scatter_in_edges<Operator>::value || needs_scatter_out_edges<Operator>::value) {
      Galois::for_each_local(cur, Scatter<Container2>(this, next), Galois::wl<WL>());
    }
  }

public:
  SyncEngine(Graph& g, Operator op): graph(g), origOp(op) {
    ops.create(graph.size());
    scoreboard.create(graph.size());
    if (NeedMessages)
      messages.getLocal()->resize(graph.size());
  }

  //! Seed an initial message for `node` before execute() (local buffer only).
  void signal(GNode node, const message_type& msg) {
    if (NeedMessages) {
      MyMessages& m = *messages.getLocal();
      m[graph.idFromNode(node)].second = msg;
    }
  }

  // Run rounds until no node is activated.  The first round processes the
  // whole graph; later rounds process only the scheduled worklist.
  void execute() {
    Galois::Statistic rounds("GraphLabRounds");
    Galois::InsertBag<GNode>* next = &wls[0];
    Galois::InsertBag<GNode>* cur = &wls[1];

    executeStep<true>(graph, *next);
    rounds += 1;
    while (!next->empty()) {
      std::swap(cur, next);
      executeStep<false>(*cur, *next);
      rounds += 1;
      cur->clear();
    }
  }
};

}
}
#endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/NoDerefIterator.h
/** Wrapper around an iterator such that *it == it -*- C++ -*- * @file * @section License * * Galois, a framework to exploit amorphous data-parallelism in irregular * programs. * * Copyright (C) 2013, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. * * @section Description * * @author Donald Nguyen <[email protected]> */ #ifndef GALOIS_NODEREFITERATOR_H #define GALOIS_NODEREFITERATOR_H #include "boost/iterator/iterator_adaptor.hpp" namespace Galois { //! Modify an iterator so that *it == it template<typename Iterator> struct NoDerefIterator : public boost::iterator_adaptor< NoDerefIterator<Iterator>, Iterator, Iterator, boost::use_default, const Iterator&> { NoDerefIterator(): NoDerefIterator::iterator_adaptor_() { } explicit NoDerefIterator(Iterator it): NoDerefIterator::iterator_adaptor_(it) { } const Iterator& dereference() const { return NoDerefIterator::iterator_adaptor_::base_reference(); } Iterator& dereference() { return NoDerefIterator::iterator_adaptor_::base_reference(); } }; //! Convenience function to create {@link NoDerefIterator}. 
template<typename Iterator> NoDerefIterator<Iterator> make_no_deref_iterator(Iterator it) { return NoDerefIterator<Iterator>(it); } } #endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/UnionFind.h
/** Union-find -*- C++ -*- * @file * * A minimum spanning tree algorithm to demonstrate the Galois system. * * @section License * * Galois, a framework to exploit amorphous data-parallelism in irregular * programs. * * Copyright (C) 2012, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. * * @author Donald Nguyen <[email protected]> */ #ifndef GALOIS_UNIONFIND_H #define GALOIS_UNIONFIND_H namespace Galois { /** * Intrusive union-find implementation. Users subclass this to get disjoint * functionality for the subclass object. 
*/ template<typename T> class UnionFindNode { T* findImpl() const { if (isRep()) return m_component; T* rep = m_component; while (rep->m_component != rep) { T* next = rep->m_component; rep = next; } return rep; } protected: T* m_component; UnionFindNode(): m_component(reinterpret_cast<T*>(this)) { } public: typedef UnionFindNode<T> SuperTy; bool isRep() const { return m_component == this; } const T* find() const { return findImpl(); } T* find() { return findImpl(); } T* findAndCompress() { // Basic outline of race in synchronous path compression is that two path // compressions along two different paths to the root can create a cycle // in the union-find tree. Prevent that from happening by compressing // incrementally. if (isRep()) return m_component; T* rep = m_component; T* prev = 0; while (rep->m_component != rep) { T* next = rep->m_component; if (prev && prev->m_component == rep) { prev->m_component = next; } prev = rep; rep = next; } return rep; } //! Lock-free merge. Returns if merge was done. T* merge(T* b) { T* a = m_component; while (true) { a = a->findAndCompress(); b = b->findAndCompress(); if (a == b) return 0; // Avoid cycles by directing edges consistently if (a > b) std::swap(a, b); if (__sync_bool_compare_and_swap(&a->m_component, a, b)) { return b; } } } }; } #endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Mem.h
/** User-visible allocators -*- C++ -*- * @file * @section License * * Galois, a framework to exploit amorphous data-parallelism in irregular * programs. * * Copyright (C) 2012, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. * * @author Andrew Lenharth <[email protected]> */ #ifndef GALOIS_MEM_H #define GALOIS_MEM_H #include "Galois/Runtime/mm/Mem.h" namespace Galois { //! Base allocator for per-iteration allocator typedef Galois::Runtime::MM::SimpleBumpPtrWithMallocFallback<Galois::Runtime::MM::FreeListHeap<Galois::Runtime::MM::SystemBaseAlloc> > IterAllocBaseTy; //! Per-iteration allocator that conforms to STL allocator interface typedef Galois::Runtime::MM::ExternRefGaloisAllocator<char, IterAllocBaseTy> PerIterAllocTy; //! Scalable fixed-sized allocator for T that conforms to STL allocator interface but //! does not support variable sized allocations template<typename Ty> struct GFixedAllocator : public Galois::Runtime::MM::FSBGaloisAllocator<Ty> { }; } #endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/LazyArray.h
/** Lazy Static Array -*- C++ -*- * @file * @section License * * Galois, a framework to exploit amorphous data-parallelism in irregular * programs. * * Copyright (C) 2013, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. * * @section Description * * Implements something close to std::array, but which does not initialize its * elements. It is the user's responsibility to make sure memory is properly * initialized before using. * * @author Andrew Lenharth <[email protected]> */ #ifndef GALOIS_LAZYARRAY_H #define GALOIS_LAZYARRAY_H #include "Galois/config.h" #include "Galois/LazyObject.h" #include <iterator> #include <stdexcept> #include <cstddef> #include GALOIS_CXX11_STD_HEADER(algorithm) #include GALOIS_CXX11_STD_HEADER(utility) #include GALOIS_CXX11_STD_HEADER(type_traits) namespace Galois { /** * This is a container that encapsulates space for a constant size array. The * initialization and destruction of items is explicitly under the control of * the user. */ template<typename _Tp, unsigned _Size> class LazyArray { typedef typename std::aligned_storage<sizeof(_Tp), std::alignment_of<_Tp>::value>::type CharData; LazyObject<_Tp> data_[(_Size > 0 ? 
_Size : 1)]; _Tp* get(size_t __n) { return &data_[__n].get(); } const _Tp* get(size_t __n) const { return &data_[__n].get(); } public: typedef _Tp value_type; typedef size_t size_type; typedef ptrdiff_t difference_type; typedef value_type& reference; typedef const value_type& const_reference; typedef value_type* pointer; typedef const value_type* const_pointer; typedef pointer iterator; typedef const_pointer const_iterator; typedef std::reverse_iterator<iterator> reverse_iterator; typedef std::reverse_iterator<const_iterator> const_reverse_iterator; //iterators: iterator begin() { return iterator(get(0)); } const_iterator begin() const { return const_iterator(get(0)); } iterator end() { return iterator(get(_Size)); } const_iterator end() const { return const_iterator(get(_Size)); } reverse_iterator rbegin() { return reverse_iterator(end()); } const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); } reverse_iterator rend() { return reverse_iterator(begin()); } const_reverse_iterator rend() const { return const_reverse_iterator(begin()); } const_iterator cbegin() const { return begin(); } const_iterator cend() const { return end(); } const_reverse_iterator crbegin() const { return rbegin(); } const_reverse_iterator crend() const { return rend(); } //capacity: size_type size() const { return _Size; } size_type max_size() const { return _Size; } bool empty() const { return _Size == 0; } //element access: reference operator[](size_type __n) { return *get(__n); } const_reference operator[](size_type __n) const { return *get(__n); } reference at(size_type __n) { if (__n >= _Size) throw std::out_of_range("lazyArray::at"); return get(__n); } const_reference at(size_type __n) const { if (__n >= _Size) throw std::out_of_range("lazyArray::at"); return get(__n); } reference front() { return *get(0); } const_reference front() const { return *get(0); } reference back() { return *get(_Size > 0 ? 
_Size - 1 : 0); } const_reference back() const { return *get(_Size > 0 ? _Size - 1 : 0); } pointer data() { return get(0); } const_pointer data() const { return get(0); } //missing: fill swap template<typename... Args> pointer emplace(size_type __n, Args&&... args) { return new (get(__n)) _Tp(std::forward<Args>(args)...); } pointer construct(size_type __n, const _Tp& val) { return emplace(__n, val); } pointer construct(size_type __n, _Tp&& val) { return emplace(__n, std::move(val)); } void destroy(size_type __n) { (get(__n))->~_Tp(); } }; } #endif // GALOIS_LAZYARRAY_H
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Endian.h
/** Endian utility functions -*- C++ -*- * @file * @section License * * Galois, a framework to exploit amorphous data-parallelism in irregular * programs. * * Copyright (C) 2012, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. * * @author Donald Nguyen <[email protected]> */ #ifndef GALOIS_ENDIAN_H #define GALOIS_ENDIAN_H #include "Galois/config.h" #include <stdint.h> #define USE_NAIVE_BYTE_SWAP #ifdef HAVE_ENDIAN_H # include <endian.h> #endif namespace Galois { // NB: Wrap these standard functions with different names because // sometimes le64toh and such are implemented as macros and we don't // want any nasty surprises. 
static inline uint64_t convert_le64(uint64_t x) { #if !defined(HAVE_BIG_ENDIAN) return x; #elif defined(USE_NAIVE_BYTE_SWAP) || !defined(HAVE_LE64TOH) return ((x<<56) & 0xFF00000000000000) | ((x<<40) & 0x00FF000000000000) | ((x<<24) & 0x0000FF0000000000) | ((x<<8 ) & 0x000000FF00000000) | ((x>>8 ) & 0x00000000FF000000) | ((x>>24) & 0x0000000000FF0000) | ((x>>40) & 0x000000000000FF00) | ((x>>56) & 0x00000000000000FF); #else return le64toh(x); #endif } static inline uint32_t convert_le32(uint32_t x) { #if !defined(HAVE_BIG_ENDIAN) return x; #elif defined(USE_NAIVE_BYTE_SWAP) || !defined(HAVE_LE64TOH) return ((x<<24) & 0xFF000000) | ((x<<8 ) & 0x00FF0000) | ((x>>8 ) & 0x0000FF00) | ((x>>24) & 0x000000FF); #else return le32toh(x); #endif } } #endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/MethodFlags.h
/** Galois Conflict flags -*- C++ -*-
 * @file
 * @section License
 *
 * Galois, a framework to exploit amorphous data-parallelism in irregular
 * programs.
 *
 * Copyright (C) 2012, The University of Texas at Austin. All rights reserved.
 * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
 * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
 * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
 * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
 * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
 * shall University be liable for incidental, special, indirect, direct or
 * consequential damages or loss of profits, interruption of business, or
 * related expenses which may arise from use of Software or Documentation,
 * including but not limited to those resulting from defects in Software and/or
 * Documentation, or loss or inaccuracy of data of any kind.
 *
 * @author Andrew Lenharth <[email protected]>
 */
#ifndef GALOIS_METHODFLAGS_H
#define GALOIS_METHODFLAGS_H

namespace Galois {

/** What should the runtime do when executing a method.
 *
 * Various methods take an optional parameter indicating what actions
 * the runtime should do on the user's behalf: (1) checking for conflicts,
 * and/or (2) saving undo information. By default, both are performed (ALL).
 */
enum MethodFlag {
  NONE           = 0, //!< Do nothing on the user's behalf
  CHECK_CONFLICT = 1, //!< Detect conflicting concurrent accesses
  SAVE_UNDO      = 2, //!< Record undo information for rollback
  ALL            = 3, //!< CHECK_CONFLICT | SAVE_UNDO (the default)
  WRITE          = 4  //!< Access intends to write
};

//! Bitwise & for method flags; combines via the underlying integer values.
inline MethodFlag operator&(MethodFlag x, MethodFlag y) {
  const int combined = static_cast<int>(x) & static_cast<int>(y);
  return static_cast<MethodFlag>(combined);
}

//! Bitwise | for method flags; combines via the underlying integer values.
inline MethodFlag operator|(MethodFlag x, MethodFlag y) {
  const int combined = static_cast<int>(x) | static_cast<int>(y);
  return static_cast<MethodFlag>(combined);
}

}
#endif //GALOIS_METHODFLAGS_H
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/UserContext.h
/** User Facing loop api -*- C++ -*-
 * @file
 * @section License
 *
 * Galois, a framework to exploit amorphous data-parallelism in irregular
 * programs.
 *
 * Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
 * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
 * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
 * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
 * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
 * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
 * shall University be liable for incidental, special, indirect, direct or
 * consequential damages or loss of profits, interruption of business, or
 * related expenses which may arise from use of Software or Documentation,
 * including but not limited to those resulting from defects in Software and/or
 * Documentation, or loss or inaccuracy of data of any kind.
 *
 * @author Andrew Lenharth <[email protected]>
 */
#ifndef GALOIS_USERCONTEXT_H
#define GALOIS_USERCONTEXT_H

#include "Galois/Mem.h"
#include "Galois/gdeque.h"

#include "Galois/Runtime/Context.h"
#include "Galois/Runtime/MethodFlags.h"

namespace Galois {

/**
 * This is the object passed to the user's parallel loop. This
 * provides the in-loop api.
 *
 * The double-underscore methods form the executor-facing interface
 * (reset/collect hooks called by the runtime between iterations); the
 * public methods are the user-facing in-loop API.
 */
template<typename T>
class UserContext: private boost::noncopyable {
protected:
  // TODO: move to a separate class dedicated to speculative executors
#ifdef GALOIS_USE_EXP
  using Closure = std::function<void (void)>;
  using UndoLog = Galois::gdeque<Closure, 8>;
  using CommitLog = UndoLog;

  // Deferred actions recorded during an iteration: undoLog is replayed on
  // abort, commitLog on successful commit.
  UndoLog undoLog;
  CommitLog commitLog;
#endif

  //! Allocator stuff
  // IterAllocBaseTy / PerIterAllocTy come from Galois/Mem.h; the per-iteration
  // allocator hands out memory that is reclaimed wholesale by __resetAlloc().
  IterAllocBaseTy IterationAllocatorBase;
  PerIterAllocTy PerIterationAllocator;

  // Executor hook: reclaim all per-iteration allocations at once.
  void __resetAlloc() { IterationAllocatorBase.clear(); }

#ifdef GALOIS_USE_EXP
  // Executor hook: run undo actions in LIFO order (most recently added first)
  // by walking the log backwards from end() to begin().
  void __rollback() {
    for (auto ii = undoLog.end (), ei = undoLog.begin(); ii != ei; ) {
      --ii;
      (*ii)();
    }
  }

  // Executor hook: run commit actions in FIFO (registration) order.
  void __commit() {
    for (auto ii = commitLog.begin (), ei = commitLog.end(); ii != ei; ++ii) {
      (*ii)();
    }
  }

  void __resetUndoLog() { undoLog.clear(); }
  void __resetCommitLog() { commitLog.clear(); }
#endif

  //! push stuff
  // Work items created by push() accumulate here until the executor (or the
  // fastPushBack callback) drains them.
  typedef gdeque<T> PushBufferTy;
  PushBufferTy pushBuffer;

  PushBufferTy& __getPushBuffer() { return pushBuffer; }
  void __resetPushBuffer() { pushBuffer.clear(); }

  // Opaque per-iteration state for deterministic scheduling; `localStateUsed`
  // distinguishes a resumed iteration from a fresh one (see getLocalState).
  void* localState;
  bool localStateUsed;
  void __setLocalState(void *p, bool used) {
    localState = p;
    localStateUsed = used;
  }

  // Once more than this many items are buffered, push() hands the buffer to
  // the fastPushBack callback (if installed) instead of letting it grow.
  static const unsigned int fastPushBackLimit = 64;

  typedef std::function<void(PushBufferTy&)> FastPushBack;
  FastPushBack fastPushBack;
  void __setFastPushBack(FastPushBack f) { fastPushBack = f; }

  // Flag shared with the executor; breakLoop() dereferences it unchecked, so
  // presumably the executor installs it before running iterations — confirm.
  bool* didBreak;

public:
  UserContext()
    :IterationAllocatorBase(),
     PerIterationAllocator(&IterationAllocatorBase),
     didBreak(0)
  { }

  //! Signal break in parallel loop
  void breakLoop() {
    *didBreak = true;
  }

  //! Acquire a per-iteration allocator
  PerIterAllocTy& getPerIterAlloc() {
    return PerIterationAllocator;
  }

  //! Push new work; forwards args to T's constructor in the push buffer.
  //! Flushes eagerly through fastPushBack once the buffer exceeds
  //! fastPushBackLimit.
  template<typename... Args>
  void push(Args&&... args) {
    Galois::Runtime::checkWrite(MethodFlag::WRITE, true);
    pushBuffer.emplace_back(std::forward<Args>(args)...);
    if (fastPushBack && pushBuffer.size() > fastPushBackLimit)
      fastPushBack(pushBuffer);
  }

  //! Force the abort of this iteration
  void abort() { Galois::Runtime::forceAbort(); }

  //! Store and retrieve local state for deterministic execution.
  //! Sets `used` to true when the iteration is being resumed and the
  //! returned pointer holds previously saved state.
  void* getLocalState(bool& used) { used = localStateUsed; return localState; }

#ifdef GALOIS_USE_EXP
  //! Register an action to run (in LIFO order) if this iteration aborts.
  void addUndoAction(const Closure& f) {
    undoLog.push_back(f);
  }

  //! Register an action to run (in FIFO order) when this iteration commits.
  void addCommitAction(const Closure& f) {
    commitLog.push_back(f);
  }
#endif
};

}

#endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/gstl.h
/** Simple STL style algorithms -*- C++ -*-
 * @file
 * @section License
 *
 * Galois, a framework to exploit amorphous data-parallelism in irregular
 * programs.
 *
 * Copyright (C) 2012, The University of Texas at Austin. All rights reserved.
 * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
 * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
 * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
 * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
 * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
 * shall University be liable for incidental, special, indirect, direct or
 * consequential damages or loss of profits, interruption of business, or
 * related expenses which may arise from use of Software or Documentation,
 * including but not limited to those resulting from defects in Software and/or
 * Documentation, or loss or inaccuracy of data of any kind.
 *
 * @author Andrew Lenharth <[email protected]>
 */
#ifndef GALOIS_GSTL_H
#define GALOIS_GSTL_H

#include <algorithm>
#include <iterator>
#include <utility>

namespace Galois {

//! Random-access overload: O(1) bounds check before advancing.
//! BUGFIX: the condition was inverted — it returned b + n exactly when the
//! range was shorter than n (advancing past the end) and e otherwise.
template<typename IterTy, class Distance>
IterTy safe_advance_dispatch(IterTy b, IterTy e, Distance n,
                             std::random_access_iterator_tag) {
  if (std::distance(b, e) > n)
    return b + n;
  else
    return e;
}

//! Input-iterator overload: advance one step at a time, stopping early at e.
template<typename IterTy, class Distance>
IterTy safe_advance_dispatch(IterTy b, IterTy e, Distance n,
                             std::input_iterator_tag) {
  while (b != e && n--)
    ++b;
  return b;
}

/**
 * Like std::advance but returns end if end is closer than the advance amount.
 */
template<typename IterTy, class Distance>
IterTy safe_advance(IterTy b, IterTy e, Distance n) {
  // Dispatch on the iterator category; tag types are stateless, so a
  // value-initialized temporary is all that is needed.
  return safe_advance_dispatch(
      b, e, n, typename std::iterator_traits<IterTy>::iterator_category());
}

/**
 * Finds the midpoint of a range. The first half is always bigger than
 * the second half if the range has an odd length.
 */
template<typename IterTy>
IterTy split_range(IterTy b, IterTy e) {
  std::advance(b, (std::distance(b, e) + 1) / 2);
  return b;
}

/**
 * Returns a continuous block from the range based on the number of
 * divisions and the id of the block requested.
 *
 * @param b   beginning of the whole range
 * @param e   end of the whole range
 * @param id  index of the requested block, in [0, num)
 * @param num total number of blocks the range is divided into
 */
template<typename IterTy>
std::pair<IterTy, IterTy>
block_range(IterTy b, IterTy e, unsigned id, unsigned num) {
  unsigned int dist = std::distance(b, e);
  unsigned int numper = std::max((dist + num - 1) / num, 1U); //round up
  // Clamp both boundaries to dist so trailing blocks of an uneven split come
  // out empty instead of out of range.
  unsigned int A = std::min(numper * id, dist);
  unsigned int B = std::min(numper * (id + 1), dist);
  std::advance(b, A);
  if (dist != B) {
    e = b;
    std::advance(e, B - A);
  }
  return std::make_pair(b, e);
}

//! Destroy a range (call the destructor of each element without deallocating)
template<class InputIterator>
void uninitialized_destroy(InputIterator first, InputIterator last) {
  typedef typename std::iterator_traits<InputIterator>::value_type T;
  for (; first != last; ++first)
    (&*first)->~T();
}

}
#endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/OnlineStats.h
/** Online Stats -*- C++ -*-
 * @file
 * @section License
 *
 * Galois, a framework to exploit amorphous data-parallelism in irregular
 * programs.
 *
 * Copyright (C) 2011, The University of Texas at Austin. All rights reserved.
 * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
 * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
 * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
 * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
 * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
 * shall University be liable for incidental, special, indirect, direct or
 * consequential damages or loss of profits, interruption of business, or
 * related expenses which may arise from use of Software or Documentation,
 * including but not limited to those resulting from defects in Software and/or
 * Documentation, or loss or inaccuracy of data of any kind.
 *
 * @author Andrew Lenharth <[email protected]>
 */
#ifndef GALOIS_ONLINESTATS_H
#define GALOIS_ONLINESTATS_H

#include <cmath>

/**
 * Streaming mean/variance accumulator (Welford's online algorithm):
 * single pass, O(1) state, numerically stabler than the naive
 * sum-of-squares formulation.
 */
class OnlineStat {
  unsigned int n; // number of samples seen so far
  double mean;    // running mean of the samples
  double M2;      // running sum of squared deviations from the mean

public:
  OnlineStat() :n(0), mean(0.0), M2(0.0) {}

  //! Discard all accumulated samples.
  void reset() {
    M2 = mean = 0.0;
    n = 0;
  }

  //! Incorporate one sample (Welford update).
  void insert(double x) {
    n += 1;
    double delta = x - mean;
    mean += delta / n;
    M2 += delta * (x - mean);
  }

  //! Unbiased sample variance, M2/(n-1).
  //! Returns 0 for fewer than two samples (the previous implementation
  //! divided by zero when n == 1 and by n-1 == 0xffffffff when n == 0).
  double getVariance() const {
    if (n < 2)
      return 0.0;
    return M2 / (n - 1);
  }

  //! Sample standard deviation, sqrt(getVariance()).
  //! BUGFIX: the previous implementation returned M2/n, which is a
  //! (population) variance, not a standard deviation, and was also
  //! inconsistent with getVariance()'s n-1 denominator.
  double getStdDeviation() const {
    return std::sqrt(getVariance());
  }

  //! Number of samples inserted since construction/reset.
  unsigned int getCount() const { return n; }

  //! Running mean (0 before any sample is inserted).
  double getMean() const { return mean; }
};

#endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/config.h.in
/* CMake-configured feature header: configure_file() replaces @VARS@ and turns
 * each #cmakedefine into "#define NAME" or "/* #undef NAME *​/" at build time. */
#ifndef CONFIG_H
#define CONFIG_H

/* Version information substituted at configure time. */
#define GALOIS_VERSION_MAJOR @GALOIS_VERSION_MAJOR@
#define GALOIS_VERSION_MINOR @GALOIS_VERSION_MINOR@
#define GALOIS_VERSION_PATCH @GALOIS_VERSION_PATCH@
#define GALOIS_COPYRIGHT_YEAR_STRING "@GALOIS_COPYRIGHT_YEAR@"
#define GALOIS_VERSION_STRING "@GALOIS_VERSION@"

/* Platform probes: byte-order conversion functions and endianness. */
#cmakedefine HAVE_LE64TOH
#cmakedefine HAVE_LE32TOH
#cmakedefine HAVE_ENDIAN_H
#cmakedefine HAVE_BIG_ENDIAN

/* C++11 compiler/library feature probes. */
#cmakedefine HAVE_CXX11_UNIFORM_INT_DISTRIBUTION
#cmakedefine HAVE_CXX11_UNIFORM_REAL_DISTRIBUTION
#cmakedefine HAVE_CXX11_CHRONO
#cmakedefine HAVE_CXX11_ALIGNOF

/* Optional build features selected at configure time. */
#cmakedefine GALOIS_USE_SVNVERSION
#cmakedefine GALOIS_USE_NUMA
#cmakedefine GALOIS_USE_NUMA_OLD
#cmakedefine GALOIS_USE_VTUNE
#cmakedefine GALOIS_USE_PAPI
#cmakedefine GALOIS_USE_HTM
#cmakedefine GALOIS_USE_SEQ_ONLY
#cmakedefine GALOIS_USE_CXX11_COMPAT
#cmakedefine GALOIS_USE_LONGJMP

/* With GALOIS_USE_CXX11_COMPAT, standard headers are redirected to the
 * bundled compatibility shims under Galois/c++11-compat/. */
#ifdef GALOIS_USE_CXX11_COMPAT
#define GALOIS_CXX11_STD_HEADER(name) <Galois/c++11-compat/name.h>
#else
#define GALOIS_CXX11_STD_HEADER(name) <name>
#endif

#endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/TypeTraits.h
/** Galois type traits -*- C++ -*-
 * @file
 * @section License
 *
 * Galois, a framework to exploit amorphous data-parallelism in irregular
 * programs.
 *
 * Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
 * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
 * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
 * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
 * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
 * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
 * shall University be liable for incidental, special, indirect, direct or
 * consequential damages or loss of profits, interruption of business, or
 * related expenses which may arise from use of Software or Documentation,
 * including but not limited to those resulting from defects in Software and/or
 * Documentation, or loss or inaccuracy of data of any kind.
 *
 * @section Description
 *
 * There are two ways to declare a typetrait. First, with a typedef or other
 * valid name declaration:
 * \code
 * struct MyClass {
 *   typedef int tt_needs_parallel_break;
 *   ....
 * };
 * \endcode
 *
 * The second way is by specializing a function:
 * \code
 * namespace Galois {
 * template<>
 * struct needs_parallel_break<MyClass> : public boost::true_type {};
 * }
 * \endcode
 *
 * Since the compiler doesn't check the names of these traits, a good
 * programming practice is to add a <code>static_assert</code> to check if
 * everything is ok:
 * \code
 * struct MyClass {
 *   typedef int tt_needs_parallel_break;
 *   static_assert(Galois::needs_parallel_break<MyClass>::value, "Oops!");
 *   ...
 * };
 * \endcode
 *
 * @author Andrew Lenharth <[email protected]>
 */
#ifndef GALOIS_TYPETRAITS_H
#define GALOIS_TYPETRAITS_H

#include "Galois/Runtime/ll/CompilerSpecific.h"
#include <boost/mpl/has_xxx.hpp>

namespace Galois {

// Classic sizeof-based SFINAE member detector: has_<name><T, Sig>::value is
// true iff T has a member function `func` whose type matches Sig exactly.
// The overload taking type_check<Sig, &W::func>* participates only when
// &W::func is a valid constant of type Sig; otherwise the variadic overload
// is chosen, and the two are distinguished by the size of the return type.
#define GALOIS_HAS_MEM_FUNC(func, name) \
  template<typename T, typename Sig> \
  struct has_##name { \
    typedef char yes[1]; \
    typedef char no[2]; \
    template<typename U, U> struct type_check; \
    template<typename W> static yes& test(type_check<Sig, &W::func>*); \
    template<typename > static no& test(...); \
    static const bool value = sizeof(test<T>(0)) == sizeof(yes); \
  }

// Same trick, but detecting a member function `func` with ANY signature:
// decltype(&W::func) deduces the pointer-to-member type instead of
// requiring the caller to spell it out.
#define GALOIS_HAS_MEM_FUNC_ANY(func, name) \
  template<typename T> \
  struct has_##name { \
    typedef char yes[1]; \
    typedef char no[2]; \
    template<typename U, U> struct type_check; \
    template<typename W> static yes& test(type_check<decltype(&W::func), &W::func>*); \
    template<typename > static no& test(...); \
    static const bool value = sizeof(test<T>(0)) == sizeof(yes); \
  }

// Detector for a nested member *type* named `func`.
#define GALOIS_HAS_MEM_TYPE(func, name) \
  template<typename T> \
  struct has_##name { \
    typedef char yes[1]; \
    typedef char no[2]; \
    template<typename W> static yes& test(typename W::func*); \
    template<typename > static no& test(...); \
    static const bool value = sizeof(test<T>(0)) == sizeof(yes); \
  }

GALOIS_HAS_MEM_FUNC(galoisDeterministicParallelBreak, tf_deterministic_parallel_break);
/**
 * Indicates the operator has a member function that allows a {@link Galois::for_each}
 * loop to be exited deterministically.
 *
 * The function has the following signature:
 * \code
 *  struct T {
 *    bool galoisDeterministicParallelBreak() {
 *      // returns true if loop should end
 *    }
 *  };
 * \endcode
 *
 * This function will be periodically called by the deterministic scheduler.
 * If it returns true, the loop ends as if calling {@link
 * UserContext::breakLoop}, but unlike that function, these breaks are
 * deterministic.
 */
template<typename T>
struct has_deterministic_parallel_break : public has_tf_deterministic_parallel_break<T, bool(T::*)()> {};

GALOIS_HAS_MEM_FUNC_ANY(galoisDeterministicId, tf_deterministic_id);
/**
 * Indicates the operator has a member function that optimizes the generation
 * of unique ids for active elements. This function should be thread-safe.
 *
 * The type conforms to the following:
 * \code
 *  struct T {
 *    uintptr_t galoisDeterministicId(const A& item) const {
 *      // returns a unique identifier for item
 *    }
 *  };
 * \endcode
 */
template<typename T>
struct has_deterministic_id : public has_tf_deterministic_id<T> {};

GALOIS_HAS_MEM_TYPE(GaloisDeterministicLocalState, tf_deterministic_local_state);
/**
 * Indicates the operator has a member type that encapsulates state that is passed between
 * the suspension and resumption of an operator during deterministic scheduling.
 *
 * The type conforms to the following:
 * \code
 *  struct T {
 *    struct GaloisDeterministicLocalState {
 *      int x, y, z; // Local state
 *      GaloisDeterministicLocalState(T& self, Galois::PerIterAllocTy& alloc) {
 *        // initialize local state
 *      }
 *    };
 *
 *    void operator()(const A& item, Galois::UserContext<A>& ctx) {
 *      // An example of using local state
 *      typedef GaloisDeterministicLocalState LS;
 *      bool used;
 *      LS* p = (LS*) ctx.getLocalState(used);
 *      if (used) {
 *        // operator is being resumed; use p
 *      } else {
 *        // operator hasn't been suspended yet; execute normally
 *        // save state into p to be used when operator resumes
 *      }
 *    }
 *  };
 * \endcode
 */
template<typename T>
struct has_deterministic_local_state : public has_tf_deterministic_local_state<T> {};

/**
 * Indicates the operator may request the parallel loop to be suspended and a
 * given function run in serial
 */
BOOST_MPL_HAS_XXX_TRAIT_DEF(tt_needs_parallel_break)
template<typename T>
struct needs_parallel_break : public has_tt_needs_parallel_break<T> {};

/**
 * Indicates the operator does not generate new work and push it on the worklist
 */
BOOST_MPL_HAS_XXX_TRAIT_DEF(tt_does_not_need_push)
template<typename T>
struct does_not_need_push : public has_tt_does_not_need_push<T> {};

/**
 * Indicates the operator may request the access to a per-iteration
 * allocator
 */
BOOST_MPL_HAS_XXX_TRAIT_DEF(tt_needs_per_iter_alloc)
template<typename T>
struct needs_per_iter_alloc : public has_tt_needs_per_iter_alloc<T> {};

/**
 * Indicates the operator doesn't need its execution stats recorded
 */
BOOST_MPL_HAS_XXX_TRAIT_DEF(tt_does_not_need_stats)
template<typename T>
struct does_not_need_stats : public has_tt_does_not_need_stats<T> {};

/**
 * Indicates the operator doesn't need abort support
 */
BOOST_MPL_HAS_XXX_TRAIT_DEF(tt_does_not_need_aborts)
template<typename T>
struct does_not_need_aborts : public has_tt_does_not_need_aborts<T> {};

/**
 * Indicates that the neighborhood set does not change through out i.e. is not
 * dependent on computed values. Examples of such fixed neighborhood is e.g. the
 * neighborhood being all the neighbors of a node in the input graph, while the
 * counter example is the neighborhood being some of the neighbors based on
 * some predicate.
 */
BOOST_MPL_HAS_XXX_TRAIT_DEF(tt_has_fixed_neighborhood)
template <typename T>
struct has_fixed_neighborhood: public has_tt_has_fixed_neighborhood<T> {};

/**
 * Temporary type trait for pre-C++11 compilers, which don't support exact
 * std::is_trivially_constructible.
 */
BOOST_MPL_HAS_XXX_TRAIT_DEF(tt_has_known_trivial_constructor)
template <typename T>
struct has_known_trivial_constructor: public has_tt_has_known_trivial_constructor<T> { };

}
#endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Timer.h
/** Simple timer support -*- C++ -*-
 * @file
 * @section License
 *
 * Galois, a framework to exploit amorphous data-parallelism in irregular
 * programs.
 *
 * Copyright (C) 2011, The University of Texas at Austin. All rights reserved.
 * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
 * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
 * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
 * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
 * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
 * shall University be liable for incidental, special, indirect, direct or
 * consequential damages or loss of profits, interruption of business, or
 * related expenses which may arise from use of Software or Documentation,
 * including but not limited to those resulting from defects in Software and/or
 * Documentation, or loss or inaccuracy of data of any kind.
 *
 * @author Andrew Lenharth <[email protected]>
 */
#ifndef GALOIS_TIMER_H
#define GALOIS_TIMER_H

#include "Galois/config.h"

#ifdef HAVE_CXX11_CHRONO
#include <chrono>
#endif

namespace Galois {

#ifdef HAVE_CXX11_CHRONO
//! Interval timer built on std::chrono. Measures the span between the most
//! recent start()/stop() pair; get() is meaningless before both are called.
class Timer {
  // steady_clock is monotonic, so the measured interval cannot go negative
  // if the system clock is adjusted mid-run.
  typedef std::chrono::steady_clock clockTy;
  //typedef std::chrono::high_resolution_clock clockTy;
  std::chrono::time_point<clockTy> startT, stopT;
public:
  //! Record the beginning of the interval.
  void start() { startT = clockTy::now(); }
  //! Record the end of the interval.
  void stop() { stopT = clockTy::now(); }
  //! Elapsed time between start() and stop(), in milliseconds.
  unsigned long get() const {
    return std::chrono::duration_cast<std::chrono::milliseconds>(stopT-startT).count();
  }
  //! Elapsed time between start() and stop(), in microseconds.
  unsigned long get_usec() const {
    return std::chrono::duration_cast<std::chrono::microseconds>(stopT-startT).count();
  }
};

#else

//! A simple timer (declaration only; defined in the runtime sources when
//! std::chrono is unavailable).
class Timer {
  //This is so that implementations can vary without
  //forcing includes of target specific headers
  unsigned long _start_hi;
  unsigned long _start_low;
  unsigned long _stop_hi;
  unsigned long _stop_low;
public:
  Timer();
  void start();
  void stop();
  // NOTE(review): presumably milliseconds/microseconds respectively, to
  // mirror the chrono implementation above — confirm in the .cpp.
  unsigned long get() const;
  unsigned long get_usec() const;
};

#endif

//! A multi-start time accumulator.
//! Gives the final runtime for a series of intervals
//! (declaration only; defined in the runtime sources).
class TimeAccumulator {
  Timer ltimer;      // times the interval currently in progress
  unsigned long acc; // running total of all completed intervals
public:
  TimeAccumulator();
  void start();
  //!adds the current timed interval to the total
  void stop();
  unsigned long get() const;
  //! Merge another accumulator's (or timer's) total into this one.
  TimeAccumulator& operator+=(const TimeAccumulator& rhs);
  TimeAccumulator& operator+=(const Timer& rhs);
};

}
#endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Atomic.h
/** Atomic Types type -*- C++ -*-
 * @file
 * @section License
 *
 * Galois, a framework to exploit amorphous data-parallelism in irregular
 * programs.
 *
 * Copyright (C) 2012, The University of Texas at Austin. All rights reserved.
 * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
 * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
 * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
 * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
 * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
 * shall University be liable for incidental, special, indirect, direct or
 * consequential damages or loss of profits, interruption of business, or
 * related expenses which may arise from use of Software or Documentation,
 * including but not limited to those resulting from defects in Software and/or
 * Documentation, or loss or inaccuracy of data of any kind.
 *
 * @author M. Amber Hassaan <[email protected]>
 */
#ifndef GALOIS_ATOMIC_H
#define GALOIS_ATOMIC_H

#include "Galois/Runtime/ll/CacheLineStorage.h"

#include <iterator>

namespace Galois {

namespace AtomicImpl {

/**
 * Common implementation. RMW operations are built on the GCC __sync
 * builtins; W selects the storage wrapper (plain DummyWrapper, or
 * CacheLineStorage for the padded variant) and must expose a `data` member.
 */
template<typename T, template <typename _> class W>
class GAtomicImpl {
  // Galois::Runtime::LL::CacheLineStorage<T> val;
  W<T> val;

public:
  //! Initialize with a value
  explicit GAtomicImpl(const T& i): val(i) {}
  //! default constructor
  GAtomicImpl() {}

  //! atomic add and fetch
  T operator+=(const T& rhs) {
    return __sync_add_and_fetch(&val.data, rhs);
  }
  //! atomic sub and fetch
  T operator-=(const T& rhs) {
    return __sync_sub_and_fetch(&(val.data), rhs);
  }
  //! atomic increment and fetch
  T operator++() {
    return __sync_add_and_fetch(&(val.data), 1);
  }
  //! atomic fetch and increment
  T operator++(int) {
    return __sync_fetch_and_add(&(val.data), 1);
  }
  //! atomic decrement and fetch
  T operator--() {
    return __sync_sub_and_fetch(&(val.data), 1);
  }
  //! atomic fetch and decrement
  T operator--(int) {
    return __sync_fetch_and_sub(&(val.data), 1);
  }
  //! conversion operator to base data type
  // NOTE(review): plain (non-atomic) read; relies on T loads being atomic
  // on the target — confirm for multi-word T.
  operator T() const {
    return val.data;
  }
  //! assign from underlying type (plain store, not an atomic exchange)
  T& operator=(const T& i) {
    return val.data = i;
  }
  //! assignment operator (copies the other wrapper's current value)
  T& operator=(const GAtomicImpl& i) {
    return val.data = i.val.data;
  }
  //! direct compare and swap
  bool cas (const T& expected, const T& updated) {
    // Cheap non-atomic pre-check: if the current value already differs,
    // fail fast without paying for the interlocked instruction. The real
    // decision is still made atomically below.
    if (val.data != expected) {
      return false;
    }
#if defined(__INTEL_COMPILER)
    // NOTE(review): icc workaround — reinterprets the operands as
    // ptrdiff_t for the builtin; presumably assumes T is pointer-sized
    // here. Confirm before using GAtomic with wider T under icc.
    return __sync_bool_compare_and_swap(
        &val.data,
        *reinterpret_cast<const ptrdiff_t*>(&expected),
        *reinterpret_cast<const ptrdiff_t*>(&updated));
#else
    return __sync_bool_compare_and_swap (&val.data, expected, updated);
#endif
  }
};

//! Basic atomic
template <typename T, template <typename _> class W>
class GAtomicBase: public GAtomicImpl<T, W> {
  typedef GAtomicImpl<T, W> Super_ty;

public:
  //! Initialize with a value
  explicit GAtomicBase(const T& i): Super_ty (i) {}

  //! default constructor
  GAtomicBase(): Super_ty () {}

  T& operator=(const GAtomicBase& that) {
    return Super_ty::operator=(that);
  }

  T& operator=(const T& that) {
    return Super_ty::operator=(that);
  }
};

//! Specialization for pointers: += / -= advance by a pointer difference
//! rather than by T.
template <typename T, template <typename _> class W>
class GAtomicBase<T*, W>: public GAtomicImpl<T*, W> {
  typedef GAtomicImpl<T*, W> Super_ty;

public:
  typedef typename std::iterator_traits<T*>::difference_type difference_type;

  GAtomicBase(): Super_ty() {}

  GAtomicBase(T* i): Super_ty(i) {}

  T*& operator=(const GAtomicBase& that) {
    return Super_ty::operator=(that);
  }

  T*& operator=(T* that) {
    return Super_ty::operator=(that);
  }

  // NOTE(review): `val` is declared in the private section of GAtomicImpl,
  // so these members look ill-formed if ever instantiated — confirm whether
  // they are used anywhere or whether `val` should be protected.
  T* operator+=(const difference_type& rhs) {
    return __sync_add_and_fetch(&Super_ty::val.data, rhs);
  }

  T* operator-=(const difference_type& rhs) {
    return __sync_sub_and_fetch(&Super_ty::val.data, rhs);
  }
};

//! Specialization for const pointers
template <typename T, template <typename _> class W>
class GAtomicBase<const T*, W>: public GAtomicImpl<const T*, W> {
  typedef GAtomicImpl<const T*, W> Super_ty;

public:
  typedef typename std::iterator_traits<const T*>::difference_type difference_type;

  GAtomicBase(): Super_ty() {}

  GAtomicBase(const T* i): Super_ty(i) {}

  const T*& operator=(const GAtomicBase& that) {
    return Super_ty::operator=(that);
  }

  const T*& operator=(const T* that) {
    return Super_ty::operator=(that);
  }

  // NOTE(review): same private-`val` concern as the mutable-pointer
  // specialization above.
  const T* operator+=(const difference_type& rhs) {
    return __sync_add_and_fetch(&Super_ty::val.data, rhs);
  }

  const T* operator-=(const difference_type& rhs) {
    return __sync_sub_and_fetch(&Super_ty::val.data, rhs);
  }
};

//! Specialization for bools: private inheritance hides the arithmetic
//! operators (+=, ++, ...) that make no sense for bool; only conversion,
//! assignment and cas are re-exposed.
template<template <typename _> class W>
class GAtomicBase<bool, W>: private GAtomicImpl<bool, W> {
  typedef GAtomicImpl<bool, W> Super_ty;

public:
  //! Initialize with a value
  explicit GAtomicBase(bool i): Super_ty(i) {}

  GAtomicBase(): Super_ty() {}

  //! conversion operator to base data type
  operator bool() const {
    return Super_ty::operator bool ();
  }

  //! assignment operator
  bool& operator=(const GAtomicBase<bool, W>& i) {
    return Super_ty::operator=(i);
  }

  //! assign from underlying type
  bool& operator=(bool i) {
    return Super_ty::operator=(i);
  }

  //! direct compare and swap
  bool cas(bool expected, bool updated) {
    return Super_ty::cas(expected, updated);
  }
};

//! Plain storage wrapper with the same `data` member shape as
//! CacheLineStorage, used when no padding is wanted.
template <typename T>
struct DummyWrapper {
  T data;

  explicit DummyWrapper(const T& d): data (d) {}
  DummyWrapper() {}
};

} // end namespace AtomicImpl

/**
 * An atomic wrapper that provides sensible atomic behavior for most
 * primitive data types. Operators return the value of type T so as to
 * retain atomic RMW semantics.
 */
template <typename T>
class GAtomic: public AtomicImpl::GAtomicBase <T, AtomicImpl::DummyWrapper> {
  typedef AtomicImpl::GAtomicBase<T, AtomicImpl::DummyWrapper> Super_ty;

public:
  GAtomic(): Super_ty() {}

  explicit GAtomic(const T& v): Super_ty(v) {}

  T& operator=(const GAtomic& that) {
    return Super_ty::operator=(that);
  }

  T& operator=(const T& that) {
    return Super_ty::operator=(that);
  }
};

/**
 * Cache-line padded version of {@link GAtomic}; avoids false sharing when
 * instances are placed side by side (e.g. one per thread).
 */
template <typename T>
class GAtomicPadded: public AtomicImpl::GAtomicBase<T, Galois::Runtime::LL::CacheLineStorage> {
  typedef AtomicImpl::GAtomicBase<T, Galois::Runtime::LL::CacheLineStorage> Super_ty;

public:
  GAtomicPadded(): Super_ty () {}

  explicit GAtomicPadded(const T& v): Super_ty (v) {}

  T& operator=(const GAtomicPadded& that) {
    return Super_ty::operator=(that);
  }

  T& operator=(const T& that) {
    return Super_ty::operator=(that);
  }
};

}

#endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/PriorityQueue.h
/** TODO -*- C++ -*- * @file * @section License * * Galois, a framework to exploit amorphous data-parallelism in irregular * programs. * * Copyright (C) 2013, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. * * @section Description * * @author <[email protected]> */ #ifndef GALOIS_PRIORITYQUEUE_H #define GALOIS_PRIORITYQUEUE_H #include "Galois/Runtime/ll/PaddedLock.h" #include "Galois/Runtime/ll/CompilerSpecific.h" #include <vector> #include <algorithm> #include <set> #include "Galois/Mem.h" namespace Galois { /** * Thread-safe ordered set. Faster than STL heap operations (about 10%-15% faster on serially) and * can use scalable allocation, e.g., {@link GFixedAllocator}. 
*/ template <typename T, typename Cmp=std::less<T>, typename Alloc=Galois::GFixedAllocator<T> > class ThreadSafeOrderedSet { typedef std::set<T, Cmp, Alloc> Set; public: typedef Set container_type; typedef typename container_type::value_type value_type; typedef typename container_type::reference reference; typedef typename container_type::const_reference const_reference; typedef typename container_type::pointer pointer; typedef typename container_type::size_type size_type; typedef typename container_type::const_iterator iterator; typedef typename container_type::const_iterator const_iterator; typedef typename container_type::const_reverse_iterator reverse_iterator; typedef typename container_type::const_reverse_iterator const_reverse_iterator; typedef Galois::Runtime::LL::SimpleLock<true> Lock_ty; private: GALOIS_ATTRIBUTE_ALIGN_CACHE_LINE Lock_ty mutex; Set orderedSet; public: explicit ThreadSafeOrderedSet(const Cmp& cmp=Cmp(), const Alloc& alloc=Alloc()): orderedSet(cmp, alloc) {} template <typename Iter> ThreadSafeOrderedSet(Iter b, Iter e, const Cmp& cmp=Cmp(), const Alloc& alloc=Alloc()) : orderedSet(cmp, alloc) { for (; b != e; ++b) { orderedSet.insert(*b); } } bool empty() const { mutex.lock(); bool ret = orderedSet.empty(); mutex.unlock(); return ret; } size_type size() const { mutex.lock(); size_type sz = orderedSet.size(); mutex.unlock(); return sz; } value_type top() const { mutex.lock(); value_type x = *orderedSet.begin(); mutex.unlock(); return x; } bool find(const value_type& x) const { mutex.lock(); bool ret = (orderedSet.find(x) != orderedSet.end()); mutex.unlock(); return ret; } void push(const value_type& x) { mutex.lock(); orderedSet.insert(x); mutex.unlock(); } value_type pop() { mutex.lock(); value_type x = *orderedSet.begin(); orderedSet.erase(orderedSet.begin()); mutex.unlock(); return x; } bool remove(const value_type& x) { mutex.lock(); bool ret = false; if (x == *orderedSet.begin()) { orderedSet.erase(orderedSet.begin()); ret = true; } 
else { size_type s = orderedSet.erase(x); ret = (s > 0); } mutex.unlock(); return ret; } const_iterator begin() const { return orderedSet.begin(); } const_iterator end() const { return orderedSet.end(); } }; template <typename T, typename Cmp=std::less<T>, typename Cont=std::vector<T> > class MinHeap { public: typedef Cont container_type; typedef typename container_type::value_type value_type; typedef typename container_type::reference reference; typedef typename container_type::const_reference const_reference; typedef typename container_type::pointer pointer; typedef typename container_type::size_type size_type; typedef typename container_type::const_iterator iterator; typedef typename container_type::const_iterator const_iterator; typedef typename container_type::const_reverse_iterator reverse_iterator; typedef typename container_type::const_reverse_iterator const_reverse_iterator; // typedef typename container_type::const_iterator iterator; protected: struct RevCmp { Cmp cmp; explicit RevCmp(const Cmp& cmp): cmp(cmp) {} bool operator()(const T& left, const T& right) const { return !cmp(left, right); } }; Cont container; RevCmp revCmp; const_reference top_internal() const { assert(!container.empty()); return container.front(); } value_type pop_internal() { assert(!container.empty()); std::pop_heap(container.begin(), container.end(), revCmp); value_type x = container.back(); container.pop_back(); return x; } public: explicit MinHeap(const Cmp& cmp=Cmp(), const Cont& container=Cont()) : container(container), revCmp(cmp) {} template <typename Iter> MinHeap(Iter b, Iter e, const Cmp& cmp=Cmp()) : container(b, e), revCmp(cmp) { std::make_heap(container.begin(), container.end()); } bool empty() const { return container.empty(); } size_type size() const { return container.size(); } const_reference top() const { return container.front(); } void push(const value_type& x) { container.push_back(x); std::push_heap(container.begin(), container.end(), revCmp); } value_type 
pop() { assert(!container.empty()); std::pop_heap(container.begin(), container.end(), revCmp); value_type x = container.back(); container.pop_back(); return x; } bool remove(const value_type& x) { bool ret = false; // TODO: write a better remove method if (x == top()) { pop(); ret = true; } else { typename container_type::iterator nend = std::remove(container.begin(), container.end(), x); ret = (nend != container.end()); container.erase(nend, container.end()); std::make_heap(container.begin(), container.end(), revCmp); } return ret; } bool find(const value_type& x) const { return (std::find(begin(), end(), x) != end()); } const_iterator begin() const { return container.begin(); } const_iterator end() const { return container.end(); } void reserve(size_type s) { container.reserve(s); } }; /** * Thread-safe min heap. */ template <typename T, typename Cmp=std::less<T> > class ThreadSafeMinHeap { public: typedef MinHeap<T, Cmp> container_type; typedef typename container_type::value_type value_type; typedef typename container_type::reference reference; typedef typename container_type::const_reference const_reference; typedef typename container_type::pointer pointer; typedef typename container_type::size_type size_type; typedef typename container_type::const_iterator iterator; typedef typename container_type::const_iterator const_iterator; typedef typename container_type::const_reverse_iterator reverse_iterator; typedef typename container_type::const_reverse_iterator const_reverse_iterator; protected: typedef Galois::Runtime::LL::SimpleLock<true> Lock_ty; GALOIS_ATTRIBUTE_ALIGN_CACHE_LINE Lock_ty mutex; container_type heap; public: explicit ThreadSafeMinHeap(const Cmp& cmp=Cmp()) : heap(cmp) {} template <typename Iter> ThreadSafeMinHeap(Iter b, Iter e, const Cmp& cmp=Cmp()) : heap(b, e, cmp) {} bool empty() const { mutex.lock(); bool ret = heap.empty(); mutex.unlock(); return ret; } size_type size() const { mutex.lock(); size_type sz = heap.size(); mutex.unlock(); return 
sz; } // can't return a reference, because the reference may not be pointing // to a valid location due to vector doubling in size and moving to // another memory location value_type top() const { mutex.lock(); value_type x = heap.top(); mutex.unlock(); return x; } void push(const value_type& x) { mutex.lock(); heap.push(x); mutex.unlock(); } value_type pop() { mutex.lock(); value_type x = heap.pop(); mutex.unlock(); return x; } bool remove(const value_type& x) { // TODO: write a better remove method mutex.lock(); bool ret = heap.remove(x); mutex.unlock(); return ret; } bool find(const value_type& x) const { mutex.lock(); bool ret = heap.find(x); mutex.unlock(); return ret; } // TODO: can't use in parallel context const_iterator begin() const { return heap.begin(); } const_iterator end() const { return heap.end(); } void reserve(size_type s) { heap.reserve(s); } }; } #endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Bag.h
/** Bags -*- C++ -*-
 * @file
 * @section License
 *
 * Galois, a framework to exploit amorphous data-parallelism in irregular
 * programs.
 *
 * Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
 * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
 * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
 * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
 * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
 * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
 * shall University be liable for incidental, special, indirect, direct or
 * consequential damages or loss of profits, interruption of business, or
 * related expenses which may arise from use of Software or Documentation,
 * including but not limited to those resulting from defects in Software and/or
 * Documentation, or loss or inaccuracy of data of any kind.
 *
 * Large unordered collections of things.
 *
 * @author Donald Nguyen <[email protected]>
 * @author Andrew Lenharth <[email protected]>
 */
#ifndef GALOIS_BAG_H
#define GALOIS_BAG_H

#include "Galois/config.h"
#include "Galois/gstl.h"
#include "Galois/Runtime/PerThreadStorage.h"
#include "Galois/Runtime/ll/gio.h"
#include "Galois/Runtime/mm/Mem.h"

#include <boost/iterator/iterator_facade.hpp>
#include GALOIS_CXX11_STD_HEADER(algorithm)

namespace Galois {

/**
 * Bag for only concurrent insertions. This data structure
 * supports scalable concurrent pushes but reading the bag
 * can only be done serially.
 *
 * Storage layout: each thread owns a singly-linked list of chunks
 * ("headers"); a header struct sits at the start of each raw allocation and
 * the T elements follow it in the same allocation. BlockSize == 0 means
 * chunks are whole pages from the page allocator; otherwise chunks of
 * BlockSize bytes come from a fixed-size allocator.
 */
template<typename T, unsigned int BlockSize = 0>
class InsertBag: private boost::noncopyable {

  // Per-chunk bookkeeping; lives at the front of the chunk's allocation.
  struct header {
    header* next;
    T* dbegin; //start of interesting data
    T* dend; //end of valid data
    T* dlast; //end of storage
  };

public:
  /**
   * Forward iterator over all elements: walks threads in TID order, each
   * thread's chunk list in insertion order, and each chunk's elements in
   * place. Serial use only.
   */
  template<typename U>
  class Iterator: public boost::iterator_facade<Iterator<U>, U, boost::forward_traversal_tag> {
    friend class boost::iterator_core_access;

    Galois::Runtime::PerThreadStorage<std::pair<header*,header*> >* hd; // per-thread (first,last) chunk pairs
    unsigned int thr;  // current thread slot
    header* p;         // current chunk (0 == end)
    U* v;              // current element within p

    // Position at the first chunk of thread `thr`; returns false if that
    // thread has no chunks (or thr is out of range).
    bool init_thread() {
      p = thr < hd->size() ? hd->getRemote(thr)->first : 0;
      v = p ? p->dbegin : 0;
      return p;
    }

    // Step to the next element in the current chunk; false at chunk end.
    bool advance_local() {
      if (p) {
        ++v;
        return v != p->dend;
      }
      return false;
    }

    // Step to the next chunk of the current thread; false at list end.
    // NOTE(review): assumes every linked chunk holds at least one element
    // when iterated — confirm against concurrent emplace().
    bool advance_chunk() {
      if (p) {
        p = p->next;
        v = p ? p->dbegin : 0;
      }
      return p;
    }

    // Step to the next thread that has at least one chunk.
    void advance_thread() {
      while (thr < hd->size()) {
        ++thr;
        if (init_thread()) return;
      }
    }

    void increment() {
      if (advance_local()) return;
      if (advance_chunk()) return;
      advance_thread();
    }

    template<typename OtherTy>
    bool equal(const Iterator<OtherTy>& o) const { return hd == o.hd && thr == o.thr && p == o.p && v == o.v; }

    U& dereference() const { return *v; }

  public:
    Iterator(): hd(0), thr(0), p(0), v(0) { }

    template<typename OtherTy>
    Iterator(const Iterator<OtherTy>& o): hd(o.hd), thr(o.thr), p(o.p), v(o.v) { }

    Iterator(Galois::Runtime::PerThreadStorage<std::pair<header*,header*> >* h, unsigned t):
      hd(h), thr(t), p(0), v(0)
    {
      // find first valid item
      if (!init_thread())
        advance_thread();
    }
  };

private:
  Galois::Runtime::MM::FixedSizeAllocator heap;
  // Per-thread (first, last) pointers into that thread's chunk list.
  Galois::Runtime::PerThreadStorage<std::pair<header*,header*> > heads;

  // Append a freshly built chunk to the calling thread's list. Only touches
  // the caller's own per-thread slot, hence safe under concurrent pushes.
  void insHeader(header* h) {
    std::pair<header*,header*>& H = *heads.getLocal();
    if (H.second) {
      H.second->next = h;
      H.second = h;
    } else {
      H.first = H.second = h;
    }
  }

  // Placement-construct a header at the front of raw memory m (size bytes)
  // and set its data pointers to the T-aligned region after the header.
  header* newHeaderFromAllocator(void *m, unsigned size) {
    header* H = new (m) header();
    // Number of T-slots to skip so the element area starts past the header.
    int offset = 1;
    if (sizeof(T) < sizeof(header))
      offset += sizeof(header)/sizeof(T);
    T* a = reinterpret_cast<T*>(m);
    H->dbegin = &a[offset];
    H->dend = H->dbegin;
    H->dlast = &a[(size / sizeof(T))];
    H->next = 0;
    return H;
  }

  // Allocate a chunk: BlockSize bytes from the fixed-size heap, or one full
  // page when BlockSize == 0.
  header* newHeader() {
    if (BlockSize) {
      return newHeaderFromAllocator(heap.allocate(BlockSize), BlockSize);
    } else {
      return newHeaderFromAllocator(Galois::Runtime::MM::pageAlloc(), Galois::Runtime::MM::pageSize);
    }
  }

  // Destroy every element and free every chunk of every thread. Serial only.
  void destruct() {
    for (unsigned x = 0; x < heads.size(); ++x) {
      std::pair<header*,header*>& hpair = *heads.getRemote(x);
      header*& h = hpair.first;
      while (h) {
        uninitialized_destroy(h->dbegin, h->dend);
        header* h2 = h;
        h = h->next;
        if (BlockSize)
          heap.deallocate(h2);
        else
          Galois::Runtime::MM::pageFree(h2);
      }
      hpair.second = 0;
    }
  }

public:
  // static_assert(BlockSize == 0 || BlockSize >= (2 * sizeof(T) + sizeof(header)),
  //     "BlockSize should larger than sizeof(T) + O(1)");

  InsertBag(): heap(BlockSize) { }
  ~InsertBag() {
    destruct();
  }

  //! Serial only: destroys all elements and releases all chunks.
  void clear() {
    destruct();
  }

  typedef T value_type;
  typedef const T& const_reference;
  typedef T& reference;
  typedef Iterator<T> iterator;
  typedef Iterator<const T> const_iterator;
  typedef iterator local_iterator;

  iterator begin() { return iterator(&heads, 0); }
  iterator end() { return iterator(&heads, heads.size()); }
  const_iterator begin() const { return const_iterator(&heads, 0); }
  const_iterator end() const { return const_iterator(&heads, heads.size()); }

  //! Range over only the calling thread's elements.
  local_iterator local_begin() { return local_iterator(&heads, Galois::Runtime::LL::getTID()); }
  local_iterator local_end() { return local_iterator(&heads, Galois::Runtime::LL::getTID() + 1); }

  //! True iff no thread has allocated any chunk. Serial only.
  bool empty() const {
    for (unsigned x = 0; x < heads.size(); ++x) {
      header* h = heads.getRemote(x)->first;
      if (h)
        return false;
    }
    return true;
  }

  //! Thread safe bag insertion
  //! Constructs T in place in the calling thread's current chunk, growing
  //! the chunk list when full; returns a reference to the new element.
  template<typename... Args>
  reference emplace(Args&&... args) {
    header* H = heads.getLocal()->second;
    T* rv;
    if (!H || H->dend == H->dlast) {
      H = newHeader();
      insHeader(H);
    }
    rv = new (H->dend) T(std::forward<Args>(args)...);
    H->dend++;
    return *rv;
  }

  //! Thread safe bag insertion
  reference push(const T& val) { return emplace(val); }
  //! Thread safe bag insertion
  reference push(T&& val) { return emplace(std::move(val)); }

  //! Thread safe bag insertion
  reference push_back(const T& val) { return emplace(val); }
  //! Thread safe bag insertion
  reference push_back(T&& val) { return emplace(std::move(val)); }
};

}

#endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/Version.h
/** Version -*- C++ -*-
 * @file
 * @section License
 *
 * Galois, a framework to exploit amorphous data-parallelism in irregular
 * programs.
 *
 * Copyright (C) 2012, The University of Texas at Austin. All rights reserved.
 * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
 * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
 * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
 * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
 * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
 * shall University be liable for incidental, special, indirect, direct or
 * consequential damages or loss of profits, interruption of business, or
 * related expenses which may arise from use of Software or Documentation,
 * including but not limited to those resulting from defects in Software and/or
 * Documentation, or loss or inaccuracy of data of any kind.
 *
 * @section Description
 *
 * Defines GALOIS_SVNVERSION: taken from the generated svnversion.h when the
 * build enables GALOIS_USE_SVNVERSION, and 0 otherwise.
 *
 * @author Donald Nguyen <[email protected]>
 */
#ifndef GALOIS_VERSION_H
#define GALOIS_VERSION_H

#include "Galois/config.h"

#ifdef GALOIS_USE_SVNVERSION
#include "Galois/svnversion.h"
#else
// Fallback when the build does not embed a repository revision.
#define GALOIS_SVNVERSION 0
#endif

#endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/gslist.h
/** Low-space overhead list -*- C++ -*- * @file * @section License * * Galois, a framework to exploit amorphous data-parallelism in irregular * programs. * * Copyright (C) 2012, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. * * @section Description * * Container for when you want to minimize meta-data overhead but still * want a custom allocator. * * @author Donald Nguyen <[email protected]> */ #ifndef GALOIS_GSLIST_H #define GALOIS_GSLIST_H #include "Galois/Runtime/mm/Mem.h" #include "Galois/FixedSizeRing.h" #include <iterator> namespace Galois { //! Singly linked list. To conserve space, allocator is maintained //! external to the list. 
template<typename T, int ChunkSize=16> class gslist { struct Block: public FixedSizeRing<T,ChunkSize> { Block* next; Block(): next() {} }; Block* first; template<typename HeapTy> Block* alloc_block(HeapTy& heap) { return new (heap.allocate(sizeof(Block))) Block(); } template<typename HeapTy> void free_block(HeapTy& heap, Block* b) { b->~Block(); heap.deallocate(b); } template<typename HeapTy> void extend_first(HeapTy& heap) { Block* b = alloc_block(heap); b->next = first; first = b; } template<typename HeapTy> void shrink_first(HeapTy& heap) { Block* b = first; first = b->next; free_block(heap, b); } public: //! External allocator must be able to allocate this type typedef Block block_type; typedef T value_type; gslist(): first() { } ~gslist() { assert(empty() && "Memory leak if gslist is not empty before destruction"); } class iterator : public std::iterator<std::forward_iterator_tag, T> { Block* b; unsigned offset; void advance() { if (!b) return; ++offset; if (offset == b->size()) { b = b->next; offset = 0; } } public: iterator(Block* _b = 0, unsigned _off = 0): b(_b), offset(_off) {} bool operator==(const iterator& rhs) const { return b == rhs.b && offset == rhs.offset; } bool operator!=(const iterator& rhs) const { return b != rhs.b || offset != rhs.offset; } T& operator*() const { return b->getAt(offset); } iterator& operator++() { advance(); return *this; } iterator operator++(int) { iterator tmp(*this); advance(); return tmp; } }; iterator begin() const { return iterator(first); } iterator end() const { return iterator(); } bool empty() const { return first == NULL; } value_type& front() { return first->front(); } template<typename HeapTy> void push_front(HeapTy& heap, const value_type& v) { if (first && first->push_front(v)) return; extend_first(heap); first->push_front(v); } template<typename HeapTy> void pop_front(HeapTy& heap) { first->pop_front(); if (first->empty()) shrink_first(heap); } template<typename HeapTy> void clear(HeapTy& heap) { while 
(first) { first->clear(); shrink_first(heap); } } }; } #endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/FixedSizeRing.h
/** Fixed-size ring buffer -*- C++ -*- * @file * @section License * * Galois, a framework to exploit amorphous data-parallelism in irregular * programs. * * Copyright (C) 2013, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. * * @author Andrew Lenharth <[email protected]> */ #ifndef GALOIS_FIXEDSIZERING_H #define GALOIS_FIXEDSIZERING_H #include "Galois/config.h" #include "Galois/optional.h" #include "Galois/LazyArray.h" #include <boost/iterator/iterator_facade.hpp> #include <boost/utility.hpp> #include GALOIS_CXX11_STD_HEADER(utility) namespace Galois { //! 
Unordered collection of bounded size template<typename T, unsigned chunksize = 64> class FixedSizeBag: private boost::noncopyable { LazyArray<T, chunksize> datac; unsigned count; T* at(unsigned i) { return &datac[i]; } const T* at(unsigned i) const { return &datac[i]; } bool precondition() const { return count <= chunksize; } public: typedef T value_type; typedef T* pointer; typedef const T* const_pointer; typedef T& reference; typedef const T& const_reference; typedef pointer iterator; typedef const_pointer const_iterator; FixedSizeBag(): count(0) { } ~FixedSizeBag() { clear(); } unsigned size() const { assert(precondition()); return count; } bool empty() const { assert(precondition()); return count == 0; } bool full() const { assert(precondition()); return count == chunksize; } void clear() { assert(precondition()); for (unsigned x = 0; x < count; ++x) datac.destroy(x); count = 0; } template<typename U> pointer push_front(U&& val) { return push_back(std::forward<U>(val)); } template<typename... Args> pointer emplace_front(Args&&... args) { return emplace_back(std::forward<Args>(args)...); } template<typename U> pointer push_back(U&& val) { if (full()) return 0; unsigned end = count; ++count; return datac.construct(end, std::forward<U>(val)); } template<typename... Args> pointer emplace_back(Args&&... 
args) { if (full()) return 0; unsigned end = count; ++count; return datac.emplace(end, std::forward<Args>(args)...); } reference front() { return back(); } const_reference front() const { return back(); } Galois::optional<value_type> extract_front() { return extract_back(); } void pop_front() { pop_back(); } reference back() { assert(precondition()); assert(!empty()); return *at(count - 1); } const_reference back() const { assert(precondition()); assert(!empty()); return *at(count - 1); } Galois::optional<value_type> extract_back() { if (!empty()) { Galois::optional<value_type> retval(back()); pop_back(); return retval; } return Galois::optional<value_type>(); } void pop_back() { assert(precondition()); assert(!empty()); unsigned end = (count - 1); datac.destroy(end); --count; } iterator begin() { return &datac[0]; } iterator end() { return &datac[count]; } const_iterator begin() const { return &datac[0]; } const_iterator end() const { return &datac[count]; } }; //! Ordered collection of bounded size template<typename T, unsigned chunksize = 64> class FixedSizeRing: private boost::noncopyable { LazyArray<T, chunksize> datac; unsigned start; unsigned count; T* at(unsigned i) { return &datac[i]; } const T* at(unsigned i) const { return &datac[i]; } bool precondition() const { return count <= chunksize && start <= chunksize; } template<typename U, bool isForward> class Iterator: public boost::iterator_facade<Iterator<U,isForward>, U, boost::forward_traversal_tag> { friend class boost::iterator_core_access; U* base; unsigned cur; unsigned count; template<typename OtherTy, bool OtherIsForward> bool equal(const Iterator<OtherTy, OtherIsForward>& o) const { return base + cur == o.base + o.cur && count == o.count; } U& dereference() const { return base[cur]; } void increment() { if (--count == 0) { base = 0; cur = 0; } else { cur = isForward ? 
(cur + 1) % chunksize : (cur + chunksize - 1) % chunksize; } } public: Iterator(): base(0), cur(0), count(0) { } template<typename OtherTy, bool OtherIsForward> Iterator(const Iterator<OtherTy, OtherIsForward>& o): base(o.base), cur(o.cur), count(o.count) { } Iterator(U* b, unsigned c, unsigned co): base(b), cur(c), count(co) { if (count == 0) { base = 0; cur = 0; } } }; public: typedef T value_type; typedef T* pointer; typedef T& reference; typedef const T& const_reference; typedef Iterator<T, true> iterator; typedef Iterator<const T, true> const_iterator; typedef Iterator<T, false> reverse_iterator; typedef Iterator<const T, false> const_reverse_iterator; FixedSizeRing(): start(0), count(0) { } ~FixedSizeRing() { clear(); } unsigned size() const { assert(precondition()); return count; } bool empty() const { assert(precondition()); return count == 0; } bool full() const { assert(precondition()); return count == chunksize; } reference getAt(unsigned x) { assert(precondition()); assert(!empty()); return *at((start + x) % chunksize); } const_reference getAt(unsigned x) const { assert(precondition()); assert(!empty()); return *at((start + x) % chunksize); } void clear() { assert(precondition()); for (unsigned x = 0; x < count; ++x) datac.destroy((start + x) % chunksize); count = 0; start = 0; } template<typename U> pointer push_front(U&& val) { if (full()) return 0; start = (start + chunksize - 1) % chunksize; ++count; return datac.construct(start, std::forward<U>(val)); } template<typename... Args> pointer emplace_front(Args&&... args) { if (full()) return 0; start = (start + chunksize - 1) % chunksize; ++count; return datac.emplace(start, std::forward<Args>(args)...); } template<typename U> pointer push_back(U&& val) { if (full()) return 0; unsigned end = (start + count) % chunksize; ++count; return datac.construct(end, std::forward<U>(val)); } template<typename... Args> pointer emplace_back(Args&&... 
args) { if (full()) return 0; unsigned end = (start + count) % chunksize; ++count; return datac.emplace(end, std::forward<Args>(args)...); } reference front() { assert(precondition()); assert(!empty()); return *at(start); } const_reference front() const { assert(precondition()); assert(!empty()); return *at(start); } Galois::optional<value_type> extract_front() { if (!empty()) { Galois::optional<value_type> retval(front()); pop_front(); return retval; } return Galois::optional<value_type>(); } void pop_front() { assert(precondition()); assert(!empty()); datac.destroy(start); start = (start + 1) % chunksize; --count; } reference back() { assert(precondition()); assert(!empty()); return *at((start + count - 1) % chunksize); } const_reference back() const { assert(precondition()); assert(!empty()); return *at((start + count - 1) % chunksize); } Galois::optional<value_type> extract_back() { if (!empty()) { Galois::optional<value_type> retval(back()); pop_back(); return retval; } return Galois::optional<value_type>(); } void pop_back() { assert(precondition()); assert(!empty()); unsigned end = (start + count - 1) % chunksize; datac.destroy(end); --count; } iterator begin() { return iterator(&datac[0], start, count); } iterator end() { return iterator(); } const_iterator begin() const { return const_iterator(&datac[0], start, count); } const_iterator end() const { return const_iterator(); } reverse_iterator rbegin() { return reverse_iterator(&datac[0], (start + count - 1) % chunksize, count); } reverse_iterator rend() { return reverse_iterator(); } const_iterator rbegin() const { const_reverse_iterator(&datac[0], (start + count - 1) % chunksize, count); } const_iterator rend() const { const_reverse_iterator(); } }; } #endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/gdeque.h
/** deque like structure with scalable allocator usage -*- C++ -*-
 * @file
 * @section License
 *
 * Galois, a framework to exploit amorphous data-parallelism in irregular
 * programs.
 *
 * Copyright (C) 2013, The University of Texas at Austin. All rights reserved.
 * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
 * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
 * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
 * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
 * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
 * shall University be liable for incidental, special, indirect, direct or
 * consequential damages or loss of profits, interruption of business, or
 * related expenses which may arise from use of Software or Documentation,
 * including but not limited to those resulting from defects in Software and/or
 * Documentation, or loss or inaccuracy of data of any kind.
 *
 * @author Andrew Lenharth <[email protected]>
 */
#ifndef GALOIS_GDEQUE_H
#define GALOIS_GDEQUE_H

#include "Galois/config.h"
#include "Galois/FixedSizeRing.h"
#include "Galois/Runtime/mm/Mem.h"

#include <boost/iterator/iterator_facade.hpp>

#include GALOIS_CXX11_STD_HEADER(algorithm)
#include GALOIS_CXX11_STD_HEADER(utility)

namespace Galois {

//! Like std::deque but use Galois memory management functionality.
//! Implemented as a doubly-linked list of fixed-capacity chunks; not
//! thread-safe.
template<typename T, unsigned ChunkSize=64, typename ContainerTy=FixedSizeRing<T, ChunkSize> >
class gdeque: private boost::noncopyable {

protected:
  // A chunk: fixed-capacity ring of elements plus links to neighbor chunks.
  struct Block: ContainerTy {
    Block* next;
    Block* prev;
    Block(): next(), prev() {}
  };

  Block* first; // head chunk; invariant: non-empty when num > 0

private:
  Block* last;  // tail chunk; invariant: non-empty when num > 0
  unsigned num; // total element count across all chunks

  Galois::Runtime::MM::FixedSizeAllocator heap; // chunk allocator

  // Placement-construct a chunk in memory from the fixed-size allocator.
  Block* alloc_block() {
    return new (heap.allocate(sizeof(Block))) Block();
  }

  // Class invariant: empty deque has null first/last; non-empty has both set.
  bool precondition() const {
    return (num == 0 && first == NULL && last == NULL)
      || (num > 0 && first != NULL && last != NULL);
  }

  // Destroy a chunk and return its memory to the allocator.
  void free_block(Block* b) {
    b->~Block();
    heap.deallocate(b);
  }

  // Prepend a fresh, empty chunk, updating both list ends as needed.
  void extend_first() {
    Block* b = alloc_block();
    b->next = first;
    if (b->next)
      b->next->prev = b;
    first = b;
    if (!last)
      last = b;
  }

  // Append a fresh, empty chunk, updating both list ends as needed.
  void extend_last() {
    Block* b = alloc_block();
    b->prev = last;
    if (b->prev)
      b->prev->next = b;
    last = b;
    if (!first)
      first = b;
  }

  // Unlink and free the (empty) head chunk.
  void shrink_first() {
    Block* b = first;
    first = b->next;
    if (b->next)
      b->next->prev = 0;
    else
      last = 0;
    free_block(b);
  }

  // Unlink and free the (empty) tail chunk.
  void shrink_last() {
    Block* b = last;
    last = b->prev;
    if (b->prev)
      b->prev->next = 0;
    else
      first = 0;
    free_block(b);
  }

public:
  //! Forward iterator: walks the chunk list, then each chunk's elements.
  //! The default-constructed iterator (null chunk) is the end iterator.
  template<typename U>
  struct Iterator: public boost::iterator_facade<Iterator<U>, U, boost::forward_traversal_tag> {
    friend class boost::iterator_core_access;
    Block* b;        // current chunk (0 == end)
    unsigned offset; // index within the current chunk

  private:
    void increment() {
      if (!b) return;
      ++offset;
      if (offset == b->size()) {
        b = b->next;
        offset = 0;
      }
    }

    template<typename OtherTy>
    bool equal(const Iterator<OtherTy>& o) const { return b == o.b && offset == o.offset; }

    U& dereference() const { return b->getAt(offset); }

  public:
    Iterator(Block* _b = 0, unsigned _off = 0) :b(_b), offset(_off) { }

    template<typename OtherTy>
    Iterator(const Iterator<OtherTy>& o): b(o.b), offset(o.offset) { }
  };

  typedef T value_type;
  typedef T* pointer;
  typedef T& reference;
  typedef const T& const_reference;
  typedef Iterator<T> iterator;
  typedef Iterator<const T> const_iterator;

  gdeque(): first(), last(), num(), heap(sizeof(Block)) { }

  ~gdeque() { clear(); }

  iterator begin() { assert(precondition()); return iterator(first); }
  iterator end() { assert(precondition()); return iterator(); }
  const_iterator begin() const { assert(precondition()); return const_iterator(first); }
  const_iterator end() const { assert(precondition()); return const_iterator(); }

  size_t size() const {
    assert(precondition());
    return num;
  }

  bool empty() const {
    assert(precondition());
    return num == 0;
  }

  //! First element. Precondition: deque is non-empty.
  reference front() {
    assert(!empty());
    return first->front();
  }

  const_reference front() const {
    assert(!empty());
    return first->front();
  }

  //! Last element. Precondition: deque is non-empty.
  reference back() {
    assert(!empty());
    return last->back();
  }

  const_reference back() const {
    assert(!empty());
    return last->back();
  }

  //! Removes the last element, releasing the tail chunk once it empties.
  void pop_back() {
    assert(!empty());
    --num;
    last->pop_back();
    if (last->empty())
      shrink_last();
  }

  //! Removes the first element, releasing the head chunk once it empties.
  void pop_front() {
    assert(!empty());
    --num;
    first->pop_front();
    if (first->empty())
      shrink_first();
  }

  //! Destroys all elements and frees all chunks.
  void clear() {
    assert(precondition());
    Block* b = first;
    while (b) {
      b->clear();
      Block* old = b;
      b = b->next;
      free_block(old);
    }
    first = last = NULL;
    num = 0;
  }

  //FIXME: support alternate insert locations
  //! Inserts n copies of val; only position == end() is supported (asserted).
  //! Returns an iterator to the first inserted element (end() when n == 0).
  iterator insert(iterator position, size_t n, const value_type& val) {
    assert(position == end());
    if (!n)
      return end();
    push_back(val);
    iterator retval = iterator(last, last->size()-1);
    for (size_t x = 1; x < n; ++x)
      push_back(val);
    return retval;
  }

  //! Constructs an element at the back, growing by a chunk when the tail
  //! chunk is full (chunk emplace_back returns null on a full chunk).
  template<typename... Args>
  void emplace_back(Args&&... args) {
    assert(precondition());
    ++num;
    if (last && last->emplace_back(std::forward<Args>(args)...))
      return;
    extend_last();
    pointer p = last->emplace_back(std::forward<Args>(args)...);
    assert(p);
  }

  void push_back(value_type&& v) { emplace_back(std::move(v)); }
  void push_back(const value_type& v) { emplace_back(v); }

  //! Constructs an element at the front, growing by a chunk when the head
  //! chunk is full (chunk emplace_front returns null on a full chunk).
  template<typename... Args>
  void emplace_front(Args&&... args) {
    assert(precondition());
    ++num;
    if (first && first->emplace_front(std::forward<Args>(args)...))
      return;
    extend_first();
    pointer p = first->emplace_front(std::forward<Args>(args)...);
    assert(p);
  }

  void push_front(value_type&& v) { emplace_front(std::move(v)); }
  void push_front(const value_type& v) { emplace_front(v); }
};

}

#endif
0
rapidsai_public_repos/code-share/maxflow/galois/include
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/TwoLevelIteratorA.h
/** Two Level Iterator-*- C++ -*- * @file * @section License * * Galois, a framework to exploit amorphous data-parallelism in irregular * programs. * * Copyright (C) 2013, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. * * @section Description * * @author <[email protected]> * @author Donald Nguyen <[email protected]> */ #ifndef GALOIS_TWOLEVELITERATORA_H #define GALOIS_TWOLEVELITERATORA_H #include "Galois/config.h" #include "Galois/Runtime/ll/gio.h" #include <boost/iterator/iterator_adaptor.hpp> #include <cassert> #include <iterator> #include GALOIS_CXX11_STD_HEADER(type_traits) #include GALOIS_CXX11_STD_HEADER(utility) namespace Galois { /** * Alternate implementation of {@link ChooseTwoLevelIterator}. 
*/ template<class OuterIter, class InnerIter, class CategoryOrTraversal, class InnerBeginFn, class InnerEndFn> class TwoLevelIteratorA : public boost::iterator_adaptor< TwoLevelIteratorA<OuterIter, InnerIter, CategoryOrTraversal, InnerBeginFn, InnerEndFn>, InnerIter, boost::use_default, CategoryOrTraversal > { public: typedef typename TwoLevelIteratorA::iterator_adaptor_::difference_type difference_type; private: OuterIter m_outer_begin; // TODO could skip this field when modeling a forward iterator OuterIter m_outer_end; OuterIter m_outer; InnerBeginFn m_inner_begin_fn; InnerEndFn m_inner_end_fn; #if __cplusplus >= 201103L static_assert(std::is_convertible<InnerIter, typename std::result_of<InnerBeginFn(decltype(*std::declval<OuterIter>()))>::type>::value, "InnerIter should be convertable to result of InnerBeginFn(*OuterIter)"); static_assert(std::is_convertible<InnerIter, typename std::result_of<InnerEndFn(decltype(*std::declval<OuterIter>()))>::type>::value, "InnerIter should be convertable to result of InnerEndFn(*OuterIter)"); #endif friend class boost::iterator_core_access; /** * Update base iterator to beginning of first non-empty inner range after * current one. Also update outer iterators appropriately. */ void seek_forward() { if (this->base_reference() != m_inner_end_fn(*m_outer)) return; ++m_outer; for (; m_outer != m_outer_end; ++m_outer) { this->base_reference() = m_inner_begin_fn(*m_outer); if (this->base_reference() != m_inner_end_fn(*m_outer)) break; } } template<class Iter> void safe_decrement_dispatch(std::forward_iterator_tag, Iter& it, Iter begin) { Iter prev = begin; for (; begin != it; ++begin) prev = begin; return prev; } template<class Iter> void safe_decrement_dispatch(std::bidirectional_iterator_tag, Iter& it, const Iter& begin) { --it; } //! Decrement iterator or return true if it == begin. 
template<class Iter> bool safe_decrement(Iter& it, const Iter& begin) { if (it == begin) return true; safe_decrement_dispatch(typename std::iterator_traits<Iter>::iterator_category(), it, begin); return false; } template<class Iter> typename std::iterator_traits<Iter>::difference_type safe_difference_dispatch(Iter it1, Iter it2, Iter end, std::input_iterator_tag) const { if (it1 == it2) return 0; Iter it1_orig(it1); Iter it2_orig(it2); typename std::iterator_traits<Iter>::difference_type count1 = 0; typename std::iterator_traits<Iter>::difference_type count2 = 0; while (true) { if (it1 != end) { ++count1; if (++it1 == it2_orig) return count1; } if (it2 != end) { ++count2; if (++it2 == it1_orig) return -count2; } } } template<class Iter> typename std::iterator_traits<Iter>::difference_type safe_difference_dispatch(Iter it1, Iter it2, Iter end, std::random_access_iterator_tag) const { return std::distance(it1, it2); } /** * Returns correct distances even for forward iterators when it2 is not * reachable from it1. */ template<class Iter> typename std::iterator_traits<Iter>::difference_type safe_distance(Iter it1, Iter it2, Iter end) const { return safe_difference_dispatch(it1, it2, end, typename std::iterator_traits<Iter>::iterator_category()); } /** * Update base iterator to end of first non-empty inner range before current * one. Also update outer iterators appropriately. 
*/ void seek_backward() { InnerIter end; for (end = m_inner_end_fn(*m_outer); m_inner_begin_fn(*m_outer) == end; ) { bool too_far = safe_decrement(m_outer, m_outer_begin); assert(!too_far); end = m_inner_end_fn(*m_outer); } this->base_reference() = end; } void increment() { ++this->base_reference(); seek_forward(); } void decrement() { if (m_outer == m_outer_end) { bool too_far = safe_decrement(m_outer, m_outer_begin); assert(!too_far); seek_backward(); } else if (!safe_decrement(this->base_reference(), m_inner_begin_fn(*m_outer))) { // Common case return; } else { // Reached end of inner range bool too_far = safe_decrement(m_outer, m_outer_begin); assert(!too_far); seek_backward(); } bool too_far = safe_decrement(this->base_reference(), m_inner_begin_fn(*m_outer)); assert(!too_far); } template<class DiffType = difference_type> void advance_dispatch(DiffType n, std::input_iterator_tag) { if (n < 0) { for (; n; ++n) decrement(); } else if (n > 0) { for (; n; --n) increment(); } } template<class DiffType = difference_type> void jump_forward(DiffType n) { while (n) { difference_type k = std::distance(this->base_reference(), m_inner_end_fn(*m_outer)); difference_type s = std::min(k, n); n -= s; std::advance(this->base_reference(), s); if (s == k) seek_forward(); } } template<class DiffType = difference_type> void jump_backward(DiffType n) { // Note: not the same as jump_forward due to difference between beginning // and end of ranges if (n && m_outer == m_outer_end) { decrement(); --n; } while (n) { difference_type k = std::distance(m_inner_begin_fn(*m_outer), this->base_reference()); if (k == 0) { decrement(); --n; } else if (k <= n) { std::advance(this->base_reference(), -n); n = 0; } else { seek_backward(); n -= k; } } } template<class DiffType = difference_type> void advance_dispatch(DiffType n, std::random_access_iterator_tag) { if (n < 0) { jump_backward(-n); } else if (n > 0) { jump_forward(n); } } void advance(difference_type n) { advance_dispatch(n, typename 
std::iterator_traits<InnerIter>::iterator_category()); } template<class Other> difference_type distance_to_dispatch(Other it2, std::input_iterator_tag) const { // Inline safe_distance here otherwise there is a cyclic dependency: // std::distance -> iterator_adaptor -> distance_to -> safe_distance -> std::distance if (*this == it2) return 0; TwoLevelIteratorA it1(*this); TwoLevelIteratorA it2_orig(it2); difference_type count1 = 0; difference_type count2 = 0; while (true) { if (it1.m_outer != it1.m_outer_end) { ++count1; if (++it1 == it2_orig) return count1; } if (it2.m_outer != it2.m_outer_end) { ++count2; if (++it2 == *this) return -count2; } } } template<class Other> difference_type distance_to_dispatch(const Other& x, std::random_access_iterator_tag) const { if (*this == x) return 0; else if (m_outer == x.m_outer) return safe_distance(this->base_reference(), x.base_reference(), m_inner_end_fn(*m_outer)); else if (safe_distance(m_outer, x.m_outer, m_outer_end) < 0) return -x.distance_to(*this); difference_type me_count = 0; TwoLevelIteratorA me(*this); while (me.m_outer != me.m_outer_end) { difference_type d; if (me.m_outer != x.m_outer) d = std::distance(me.base_reference(), me.m_inner_end_fn(*me.m_outer)); else d = std::distance(me.base_reference(), x.base_reference()); me_count += d; std::advance(me, d); if (me == x) return me_count; } GALOIS_DIE("invalid iterator ", std::distance(m_outer, x.m_outer)); return 0; } template<class OtherOuterIter, class OtherInnerIter, class C, class BF, class EF> difference_type distance_to(const TwoLevelIteratorA<OtherOuterIter, OtherInnerIter, C, BF, EF>& x) const { return distance_to_dispatch(x, typename std::iterator_traits<InnerIter>::iterator_category()); } template<class OtherOuterIter, class OtherInnerIter, class C, class BF, class EF> bool equal(const TwoLevelIteratorA<OtherOuterIter, OtherInnerIter, C, BF, EF>& x) const { // All outer_end iterators are equal if (m_outer == m_outer_end && m_outer == x.m_outer) return 
true; return this->base_reference() == x.base_reference() && m_outer == x.m_outer; } public: TwoLevelIteratorA() { } TwoLevelIteratorA( OuterIter outer_begin, OuterIter outer_end, OuterIter outer, InnerBeginFn inner_begin_fn, InnerEndFn inner_end_fn): m_outer_begin(outer_begin), m_outer_end(outer_end), m_outer(outer), m_inner_begin_fn(inner_begin_fn), m_inner_end_fn(inner_end_fn) { if (m_outer != m_outer_end) { this->base_reference() = m_inner_begin_fn(*m_outer); seek_forward(); } } }; //! Helper functor, returns <code>t.end()</code> struct GetBegin { template<class T> auto operator()(T&& x) const -> decltype(std::forward<T>(x).begin()) { return std::forward<T>(x).begin(); } }; //! Helper functor, returns <code>t.end()</code> struct GetEnd { template<class T> auto operator()(T&& x) const -> decltype(std::forward<T>(x).end()) { return std::forward<T>(x).end(); } }; #if __cplusplus >= 201103L template< class CategoryOrTraversal = std::forward_iterator_tag, class OuterIter, class InnerIter = decltype(std::declval<OuterIter>()->begin()), class InnerBeginFn = GetBegin, class InnerEndFn = GetEnd, class Iter = TwoLevelIteratorA<OuterIter, InnerIter, CategoryOrTraversal, InnerBeginFn, InnerEndFn> > std::pair<Iter,Iter> make_two_level_iterator(OuterIter outer_begin, OuterIter outer_end) { return std::make_pair( Iter(outer_begin, outer_end, outer_begin, InnerBeginFn(), InnerEndFn()), Iter(outer_begin, outer_end, outer_end, InnerBeginFn(), InnerEndFn())); } #else // XXX(ddn): More direct encoding crashes XL 12.1, so lean towards more verbose types template< class CategoryOrTraversal, class OuterIter, class InnerIter, class InnerBeginFn, class InnerEndFn > std::pair< TwoLevelIteratorA<OuterIter, InnerIter, CategoryOrTraversal, InnerBeginFn, InnerEndFn>, TwoLevelIteratorA<OuterIter, InnerIter, CategoryOrTraversal, InnerBeginFn, InnerEndFn> > make_two_level_iterator(OuterIter outer_begin, OuterIter outer_end) { return std::make_pair( TwoLevelIteratorA<OuterIter, InnerIter, 
CategoryOrTraversal, InnerBeginFn, InnerEndFn> (outer_begin, outer_end, outer_begin, InnerBeginFn(), InnerEndFn()), TwoLevelIteratorA<OuterIter, InnerIter, CategoryOrTraversal, InnerBeginFn, InnerEndFn> (outer_begin, outer_end, outer_end, InnerBeginFn(), InnerEndFn())); } #endif } // end namespace Galois #endif
0
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/ParallelSTL/ParallelSTL.h
/** Parallel STL equivalents -*- C++ -*- * @file * This is the only file to include for basic Galois functionality. * * @section License * * Galois, a framework to exploit amorphous data-parallelism in irregular * programs. * * Copyright (C) 2012, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. */ #ifndef GALOIS_PARALLELSTL_PARALLELSTL_H #define GALOIS_PARALLELSTL_PARALLELSTL_H #include "Galois/UserContext.h" #include "Galois/NoDerefIterator.h" #include "Galois/WorkList/WorkList.h" #include "Galois/Runtime/ParallelWork.h" #include "Galois/Runtime/DoAll.h" namespace Galois { //! Parallel versions of STL library algorithms. 
namespace ParallelSTL { template<typename Predicate> struct count_if_helper { Predicate f; ptrdiff_t ret; count_if_helper(Predicate p): f(p), ret(0) { } template<typename T> void operator()(const T& v) { if (f(v)) ++ret; } }; struct count_if_reducer { template<typename CIH> void operator()(CIH& dest, const CIH& src) { dest.ret += src.ret; } }; template<class InputIterator, class Predicate> ptrdiff_t count_if(InputIterator first, InputIterator last, Predicate pred) { return Runtime::do_all_impl(Runtime::makeStandardRange(first, last), count_if_helper<Predicate>(pred), count_if_reducer(), "count_if").ret; } template<typename InputIterator, class Predicate> struct find_if_helper { typedef int tt_does_not_need_stats; typedef int tt_does_not_need_push; typedef int tt_does_not_need_aborts; typedef int tt_needs_parallel_break; typedef Galois::optional<InputIterator> ElementTy; typedef Runtime::PerThreadStorage<ElementTy> AccumulatorTy; AccumulatorTy& accum; Predicate& f; find_if_helper(AccumulatorTy& a, Predicate& p): accum(a), f(p) { } void operator()(const InputIterator& v, UserContext<InputIterator>& ctx) { if (f(*v)) { *accum.getLocal() = v; ctx.breakLoop(); } } }; template<class InputIterator, class Predicate> InputIterator find_if(InputIterator first, InputIterator last, Predicate pred) { typedef find_if_helper<InputIterator,Predicate> HelperTy; typedef typename HelperTy::AccumulatorTy AccumulatorTy; typedef Galois::WorkList::dChunkedFIFO<256> WL; AccumulatorTy accum; HelperTy helper(accum, pred); Runtime::for_each_impl<WL>(Runtime::makeStandardRange( make_no_deref_iterator(first), make_no_deref_iterator(last)), helper, 0); for (unsigned i = 0; i < accum.size(); ++i) { if (*accum.getRemote(i)) return **accum.getRemote(i); } return last; } template<class Iterator> Iterator choose_rand(Iterator first, Iterator last) { size_t dist = std::distance(first,last); if (dist) std::advance(first, rand() % dist); return first; } template<class Compare> struct sort_helper { 
typedef int tt_does_not_need_aborts; Compare comp; //! Not equal in terms of less-than template<class value_type> struct neq_to: public std::binary_function<value_type,value_type,bool> { Compare comp; neq_to(Compare c): comp(c) { } bool operator()(const value_type& a, const value_type& b) const { return comp(a, b) || comp(b, a); } }; sort_helper(Compare c): comp(c) { } template <class RandomAccessIterator, class Context> void operator()(std::pair<RandomAccessIterator,RandomAccessIterator> bounds, Context& ctx) { if (std::distance(bounds.first, bounds.second) <= 1024) { std::sort(bounds.first, bounds.second, comp); } else { typedef typename std::iterator_traits<RandomAccessIterator>::value_type VT; RandomAccessIterator pivot = choose_rand(bounds.first, bounds.second); VT pv = *pivot; pivot = std::partition(bounds.first, bounds.second, std::bind(comp, std::placeholders::_1, pv)); //push the lower bit if (bounds.first != pivot) ctx.push(std::make_pair(bounds.first, pivot)); //adjust the upper bit pivot = std::find_if(pivot, bounds.second, std::bind(neq_to<VT>(comp), std::placeholders::_1, pv)); //push the upper bit if (bounds.second != pivot) ctx.push(std::make_pair(pivot, bounds.second)); } } }; template<typename RandomAccessIterator, class Predicate> std::pair<RandomAccessIterator, RandomAccessIterator> dual_partition(RandomAccessIterator first1, RandomAccessIterator last1, RandomAccessIterator first2, RandomAccessIterator last2, Predicate pred) { typedef std::reverse_iterator<RandomAccessIterator> RI; RI first3(last2), last3(first2); while (true) { while (first1 != last1 && pred(*first1)) ++first1; if (first1 == last1) break; while (first3 != last3 && !pred(*first3)) ++first3; if (first3 == last3) break; std::swap(*first1++, *first3++); } return std::make_pair(first1, first3.base()); } template<typename RandomAccessIterator, class Predicate> struct partition_helper { typedef std::pair<RandomAccessIterator, RandomAccessIterator> RP; struct partition_helper_state { 
RandomAccessIterator first, last; RandomAccessIterator rfirst, rlast; Runtime::LL::SimpleLock<true> Lock; Predicate pred; typename std::iterator_traits<RandomAccessIterator>::difference_type BlockSize() { return 1024; } partition_helper_state(RandomAccessIterator f, RandomAccessIterator l, Predicate p) :first(f), last(l), rfirst(l), rlast(f), pred(p) {} RP takeHigh() { Lock.lock(); unsigned BS = std::min(BlockSize(), std::distance(first,last)); last -= BS; RandomAccessIterator rv = last; Lock.unlock(); return std::make_pair(rv, rv+BS); } RP takeLow() { Lock.lock(); unsigned BS = std::min(BlockSize(), std::distance(first,last)); RandomAccessIterator rv = first; first += BS; Lock.unlock(); return std::make_pair(rv, rv+BS); } void update(RP low, RP high) { Lock.lock(); if (low.first != low.second) { rfirst = std::min(rfirst, low.first); rlast = std::max(rlast, low.second); } if (high.first != high.second) { rfirst = std::min(rfirst, high.first); rlast = std::max(rlast, high.second); } Lock.unlock(); } }; partition_helper(partition_helper_state* s) :state(s) {} partition_helper_state* state; void operator()(unsigned, unsigned) { RP high, low; do { RP parts = dual_partition(low.first, low.second, high.first, high.second, state->pred); low.first = parts.first; high.second = parts.second; if (low.first == low.second) low = state->takeLow(); if (high.first == high.second) high = state->takeHigh(); } while (low.first != low.second && high.first != high.second); state->update(low,high); } }; template<class RandomAccessIterator, class Predicate> RandomAccessIterator partition(RandomAccessIterator first, RandomAccessIterator last, Predicate pred) { if (std::distance(first, last) <= 1024) return std::partition(first, last, pred); typedef partition_helper<RandomAccessIterator, Predicate> P; typename P::partition_helper_state s(first, last, pred); Runtime::on_each_impl(P(&s), 0); if (s.rfirst == first && s.rlast == last) { //perfect ! 
//abort(); return s.first; } return std::partition(s.rfirst, s.rlast, pred); } struct pair_dist { template<typename RP> bool operator()(const RP& x, const RP& y) { return std::distance(x.first, x.second) > std::distance(y.first, y.second); } }; template <class RandomAccessIterator,class Compare> void sort(RandomAccessIterator first, RandomAccessIterator last, Compare comp) { if (std::distance(first, last) <= 1024) { std::sort(first, last, comp); return; } typedef Galois::WorkList::dChunkedFIFO<1> WL; typedef std::pair<RandomAccessIterator,RandomAccessIterator> Pair; Pair initial[1] = { std::make_pair(first, last) }; Runtime::for_each_impl<WL>(Runtime::makeStandardRange(&initial[0], &initial[1]), sort_helper<Compare>(comp), 0); } template<class RandomAccessIterator> void sort(RandomAccessIterator first, RandomAccessIterator last) { Galois::ParallelSTL::sort(first, last, std::less<typename std::iterator_traits<RandomAccessIterator>::value_type>()); } template<typename T, typename BinOp> struct accumulate_helper { T init; BinOp op; accumulate_helper(T i, BinOp o) :init(i), op(o) {} void operator()(const T& v) { init = op(init,v); } }; template<typename BinOp> struct accumulate_helper_reduce { BinOp op; accumulate_helper_reduce(BinOp o) :op(o) {} template<typename T> void operator()(T& dest, const T& src) const { dest.init = op(dest.init, src.init); } }; template <class InputIterator, class T, typename BinaryOperation> T accumulate (InputIterator first, InputIterator last, T init, BinaryOperation binary_op) { return Runtime::do_all_impl(Runtime::makeStandardRange(first, last), accumulate_helper<T,BinaryOperation>(init, binary_op), accumulate_helper_reduce<BinaryOperation>(binary_op), "accumulate").init; } template<class InputIterator, class T> T accumulate(InputIterator first, InputIterator last, T init) { return accumulate(first, last, init, std::plus<T>()); } template<typename T, typename MapFn, typename ReduceFn> struct map_reduce_helper { T init; MapFn fn; ReduceFn 
reduce; map_reduce_helper(T i, MapFn fn, ReduceFn reduce) :init(i), fn(fn), reduce(reduce) {} template<typename U> void operator()(U&& v) { init = reduce(fn(std::forward<U>(v)), init); } }; template<class InputIterator, class MapFn, class T, class ReduceFn> T map_reduce(InputIterator first, InputIterator last, MapFn fn, T init, ReduceFn reduce) { return Runtime::do_all_impl(Runtime::makeStandardRange(first, last), map_reduce_helper<T,MapFn,ReduceFn>(init, fn, reduce), accumulate_helper_reduce<ReduceFn>(reduce), "map_reduce").init; } } } #endif
0
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/c++11-compat/atomic_internal_gcc_generic.h
namespace detail {
/**
 * Strong compare-exchange built on the legacy GCC __sync builtins.
 *
 * The C++11 compare_exchange_strong contract requires that, on failure, the
 * value actually observed at __a be written back to *__e so callers can
 * retry.  __sync_bool_compare_and_swap cannot report that value, so use
 * __sync_val_compare_and_swap and update *__e by hand (BUGFIX: the original
 * left *__e untouched on failure).  The __sync builtins are full barriers,
 * so any requested ordering (_succ/_fail) is satisfied.
 */
template<class _Tp>
bool atomic_compare_exchange_strong(volatile _Tp* __a, _Tp* __e, _Tp* __d, std::memory_order _succ, std::memory_order _fail) {
  static_assert(sizeof(_Tp) <= 8, "Operation undefined on larger types");
  _Tp __observed = __sync_val_compare_and_swap(__a, *__e, *__d);
  if (__observed == *__e)
    return true;
  *__e = __observed; // report the value actually seen, per the C++11 contract
  return false;
}
} // end detail

//! Store *__i into *__a; non-relaxed orders are over-approximated with full
//! barriers on both sides.
template<class _Tp>
void __atomic_store(volatile _Tp* __a, _Tp* __i, std::memory_order _m) {
  switch (_m) {
  case std::memory_order_relaxed:
    *__a = *__i;
    break;
  default:
    __sync_synchronize();
    *__a = *__i;
    __sync_synchronize();
    break;
  }
}

//! Load *__a into *__i; non-relaxed orders are over-approximated with full
//! barriers on both sides.
template<class _Tp>
void __atomic_load(volatile _Tp* __a, _Tp* __i, std::memory_order _m) {
  switch (_m) {
  case std::memory_order_relaxed:
    *__i = *__a;
    break;
  default:
    __sync_synchronize();
    *__i = *__a;
    __sync_synchronize();
    break;
  }
}

//! Const overload of the load above.
template<class _Tp>
void __atomic_load(volatile const _Tp* __a, _Tp* __i, std::memory_order _m) {
  switch (_m) {
  case std::memory_order_relaxed:
    *__i = *__a;
    break;
  default:
    __sync_synchronize();
    *__i = *__a;
    __sync_synchronize();
    break;
  }
}

//! Weak and strong CAS are both mapped to the strong implementation; a
//! strong CAS is a valid implementation of a weak one (it never fails
//! spuriously).
template<class _Tp>
bool __atomic_compare_exchange(volatile _Tp* __a, _Tp* __e, _Tp* __d, bool _weak, std::memory_order _succ, std::memory_order _fail) {
  return detail::atomic_compare_exchange_strong(__a, __e, __d, _succ, _fail);
}

//! Fetch-xor; __sync builtins are full barriers, satisfying any _m.
template<class _Tp>
_Tp __atomic_fetch_xor(volatile _Tp* __a, _Tp __i, std::memory_order _m) {
  return __sync_fetch_and_xor(__a, __i);
}

//! Fetch-or; __sync builtins are full barriers, satisfying any _m.
template<class _Tp>
_Tp __atomic_fetch_or(volatile _Tp* __a, _Tp __i, std::memory_order _m) {
  return __sync_fetch_and_or(__a, __i);
}

//! Fetch-add; __sync builtins are full barriers, satisfying any _m.
template<class _Tp>
_Tp __atomic_fetch_add(volatile _Tp* __a, _Tp __i, std::memory_order _m) {
  return __sync_fetch_and_add(__a, __i);
}

//! Exchange.  __sync_lock_test_and_set is only an *acquire* barrier, so for
//! orders that need release semantics issue a full barrier first (BUGFIX:
//! the original silently downgraded release/seq_cst to acquire, as its XXX
//! comment noted).
template<class _Tp>
_Tp __atomic_exchange(volatile _Tp* __a, _Tp __i, std::memory_order _m) {
  switch (_m) {
  case std::memory_order_relaxed:
  case std::memory_order_consume:
  case std::memory_order_acquire:
    break;
  default: // release, acq_rel, seq_cst need a release fence before the swap
    __sync_synchronize();
    break;
  }
  return __sync_lock_test_and_set(__a, __i);
}
0
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/c++11-compat/array.h
#ifndef GALOIS_C__11_COMPAT_ARRAY_H
#define GALOIS_C__11_COMPAT_ARRAY_H

// Pre-C++11 compatibility shim: provides std::array via Boost's TR1
// implementation for compilers without the C++11 <array> header.
// NOTE(review): injecting std::tr1 names into namespace std is formally
// undefined behavior, but is the conventional trick for this compiler era.
#include <boost/tr1/array.hpp>

namespace std {
using namespace std::tr1;
}
#endif
0
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/c++11-compat/mutex.h
#ifndef GALOIS_C__11_COMPAT_MUTEX_H
#define GALOIS_C__11_COMPAT_MUTEX_H

namespace std {

//! Pre-C++11 replacement for std::lock_guard: RAII wrapper that acquires
//! the given lockable object on construction and releases it on
//! destruction.  The lockable type only needs lock()/unlock() members.
template<typename _Mutex>
class lock_guard {
public:
  typedef _Mutex mutex_type;

  //! Locks __held for the lifetime of this guard.
  explicit lock_guard(mutex_type& __held): _M_held(__held) {
    _M_held.lock();
  }

  //! Releases the lock.
  ~lock_guard() {
    _M_held.unlock();
  }

private:
  // Copying is disabled: declared but never defined (pre-C++11 idiom
  // equivalent to '= delete', which this compat header cannot use).
  lock_guard(const lock_guard&);
  lock_guard& operator=(const lock_guard&);

  mutex_type& _M_held;
};

}
#endif
0
rapidsai_public_repos/code-share/maxflow/galois/include/Galois
rapidsai_public_repos/code-share/maxflow/galois/include/Galois/c++11-compat/tuple.h
#ifndef GALOIS_C__11_COMPAT_TUPLE_H
#define GALOIS_C__11_COMPAT_TUPLE_H

// Pre-C++11 compatibility shim: provides std::tuple (and std::get,
// std::make_tuple, etc.) via Boost's TR1 implementation for compilers
// without the C++11 <tuple> header.
// NOTE(review): injecting std::tr1 names into namespace std is formally
// undefined behavior, but is the conventional trick for this compiler era.
#include <boost/tr1/tuple.hpp>

namespace std {
using namespace std::tr1;
}
#endif
0