| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3 to 239 |
| max_stars_repo_name | string | lengths 5 to 130 |
| max_stars_repo_head_hexsha | string | lengths 40 to 78 |
| max_stars_repo_licenses | sequence | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_path | string | lengths 3 to 239 |
| max_issues_repo_name | string | lengths 5 to 130 |
| max_issues_repo_head_hexsha | string | lengths 40 to 78 |
| max_issues_repo_licenses | sequence | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_path | string | lengths 3 to 239 |
| max_forks_repo_name | string | lengths 5 to 130 |
| max_forks_repo_head_hexsha | string | lengths 40 to 78 |
| max_forks_repo_licenses | sequence | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable |
| content | string | lengths 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
hexsha: 794140981b4b53fb9fa8f39ffcf08faa359aa893 | size: 516 | ext: py | lang: Python
repo: saketkc/riboraptor | path: riboraptor/snakemake/wrappers/uniq_mapping_wrapper.py | head_hexsha: cc17e82b11da743e88ec9b4126a8909705e83c4b | licenses: ["BSD-3-Clause"]
max_stars_count: 10 (2018-04-23T03:50:32.000Z to 2021-10-08T00:44:09.000Z) | max_issues_count: 5 (2018-05-28T06:37:00.000Z to 2021-03-13T15:45:25.000Z) | max_forks_count: 3 (2018-04-27T22:28:21.000Z to 2022-01-29T18:28:41.000Z)
content:

import tempfile
from snakemake.shell import shell
with tempfile.TemporaryDirectory(dir=snakemake.params.tmp_dir) as temp_dir:
shell(
r"""samtools view -b -q 255 \
{snakemake.input} -o {snakemake.output}.temp \
&& samtools sort -@ {snakemake.threads} \
{snakemake.output}.temp -o {snakemake.output} \
-T {temp_dir}/{snakemake.wildcards.sample}_sort \
&& rm -rf {snakemake.output}.temp \
&& samtools index {snakemake.output}
"""
)
avg_line_length: 34.4 | max_line_length: 75 | alphanum_fraction: 0.614341
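The wrapper above reads its inputs from the `snakemake` object that Snakemake injects into `script:`/wrapper scripts. As a rough sketch (rule name, file paths, and thread count are assumptions, not taken from the riboraptor repository), a rule driving it could look like:

rule uniq_mapping:
    input: "bams/{sample}.bam"
    output: "bams/{sample}.uniq.bam"
    params: tmp_dir="/tmp"
    threads: 4
    script: "wrappers/uniq_mapping_wrapper.py"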
hexsha: 794141a49c4075075062bc83e5bf0a2ecffa2747 | size: 38,753 | ext: py | lang: Python
repo: abravalheri/scikit-build | path: skbuild/setuptools_wrap.py | head_hexsha: e02c9b391d54dedd7cf1a5c4177bc15436ab4a28 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:

"""This module provides functionality for wrapping key infrastructure components
from distutils and setuptools.
"""
from __future__ import print_function
import argparse
import copy
import json
import os
import os.path
import platform
import stat
import sys
import warnings
from contextlib import contextmanager
# pylint: disable-next=wrong-import-order
from distutils.errors import DistutilsArgError, DistutilsError, DistutilsGetoptError
from glob import glob
from shutil import copyfile, copymode
# Must be imported before distutils
import setuptools
if sys.version_info >= (3, 0):
from io import StringIO
else:
from StringIO import StringIO
if sys.version_info >= (3, 3):
from shutil import which
else:
from .compat import which
from packaging.requirements import Requirement
from packaging.version import parse as parse_version
from setuptools.dist import Distribution as upstream_Distribution
from . import cmaker
from .command import (
bdist,
bdist_wheel,
build,
build_ext,
build_py,
clean,
egg_info,
generate_source_manifest,
install,
install_lib,
install_scripts,
sdist,
test,
)
from .constants import (
CMAKE_DEFAULT_EXECUTABLE,
CMAKE_INSTALL_DIR,
CMAKE_SPEC_FILE,
set_skbuild_plat_name,
skbuild_plat_name,
)
from .exceptions import SKBuildError, SKBuildGeneratorNotFoundError
from .utils import (
PythonModuleFinder,
mkdir_p,
parse_manifestin,
to_platform_path,
to_unix_path,
)
def create_skbuild_argparser():
"""Create and return a scikit-build argument parser."""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
"--build-type", default="Release", metavar="", help="specify the CMake build type (e.g. Debug or Release)"
)
parser.add_argument("-G", "--generator", metavar="", help="specify the CMake build system generator")
parser.add_argument("-j", metavar="N", type=int, dest="jobs", help="allow N build jobs at once")
parser.add_argument("--cmake-executable", default=None, metavar="", help="specify the path to the cmake executable")
parser.add_argument(
"--install-target",
default=None,
metavar="",
help="specify the CMake target performing the install. " "If not provided, uses the target ``install``",
)
parser.add_argument(
"--skip-generator-test",
action="store_true",
help="skip generator test when a generator is explicitly selected using --generator",
)
return parser
def _is_cmake_configure_argument(arg):
"""Return True if ``arg`` is a relevant argument to pass to cmake when configuring a project."""
for cmake_arg in (
"-C", # initial-cache
"-D", # <var>[:<type>]=<value>
):
if arg.startswith(cmake_arg):
return True
return False
def parse_skbuild_args(args, cmake_args, build_tool_args):
"""
Parse arguments in the scikit-build argument set. Convert specified
arguments to proper format and append to cmake_args and build_tool_args.
Returns the tuple ``(remaining arguments, cmake executable, skip_generator_test)``.
"""
parser = create_skbuild_argparser()
# Consider CMake arguments passed as global setuptools options
cmake_args.extend([arg for arg in args if _is_cmake_configure_argument(arg)])
# ... and remove them from the list
args = [arg for arg in args if not _is_cmake_configure_argument(arg)]
namespace, remaining_args = parser.parse_known_args(args)
# Construct CMake argument list
cmake_args.append("-DCMAKE_BUILD_TYPE:STRING=" + namespace.build_type)
if namespace.generator is not None:
cmake_args.extend(["-G", namespace.generator])
# Construct build tool argument list
build_tool_args.extend(["--config", namespace.build_type])
if namespace.jobs is not None:
build_tool_args.extend(["-j", str(namespace.jobs)])
if namespace.install_target is not None:
build_tool_args.extend(["--install-target", namespace.install_target])
if namespace.generator is None and namespace.skip_generator_test is True:
sys.exit("ERROR: Specifying --skip-generator-test requires --generator to also be specified.")
return remaining_args, namespace.cmake_executable, namespace.skip_generator_test
def parse_args():
"""This function parses the command-line arguments ``sys.argv`` and returns
the tuple ``(setuptools_args, cmake_executable, skip_generator_test, cmake_args, build_tool_args)``
where each ``*_args`` element corresponds to a set of arguments separated by ``--``."""
dutils = []
cmake = []
make = []
argsets = [dutils, cmake, make]
i = 0
separator = "--"
for arg in sys.argv:
if arg == separator:
i += 1
if i >= len(argsets):
sys.exit(
'ERROR: Too many "{}" separators provided '
"(expected at most {}).".format(separator, len(argsets) - 1)
)
else:
argsets[i].append(arg)
dutils, cmake_executable, skip_generator_test = parse_skbuild_args(dutils, cmake, make)
return dutils, cmake_executable, skip_generator_test, cmake, make
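# Illustrative note (the option names below are examples, not an exhaustive list):
# the three "--"-separated groups parsed above correspond to an invocation such as
#
#   python setup.py bdist_wheel --build-type Release -- -DSOME_OPT:BOOL=ON -- -j8
#
# where the first group goes to setuptools/scikit-build, the second group is
# forwarded to the CMake configure step, and the third group is forwarded to the
# underlying build tool.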
@contextmanager
def _capture_output():
oldout, olderr = sys.stdout, sys.stderr
try:
out = [StringIO(), StringIO()]
sys.stdout, sys.stderr = out
yield out
finally:
sys.stdout, sys.stderr = oldout, olderr
out[0] = out[0].getvalue()
out[1] = out[1].getvalue()
def _parse_setuptools_arguments(setup_attrs):
"""This function instantiates a Distribution object and
parses the command line arguments.
It returns the tuple ``(display_only, help_commands, commands, hide_listing, force_cmake, skip_cmake, plat_name)``
where
- display_only is a boolean indicating if an argument like '--help',
'--help-commands' or '--author' was passed.
- help_commands is a boolean indicating if argument '--help-commands'
was passed.
- commands contains the list of commands that were passed.
- hide_listing is a boolean indicating if the list of files being included
in the distribution is displayed or not.
- force_cmake a boolean indicating that CMake should always be executed.
- skip_cmake is a boolean indicating if the execution of CMake should
explicitly be skipped.
- plat_name is a string identifying the platform name to embed in generated
filenames. It defaults to :func:`skbuild.constants.skbuild_plat_name()`.
- build_ext_inplace is a boolean indicating if ``build_ext`` command was
specified along with the --inplace argument.
    Otherwise it raises a DistutilsArgError exception if there are
    any errors on the command line, and it raises DistutilsGetoptError
    if there is any error in the command 'options' attribute.
The code has been adapted from the setup() function available
in distutils/core.py.
"""
setup_attrs = dict(setup_attrs)
setup_attrs["script_name"] = os.path.basename(sys.argv[0])
dist = upstream_Distribution(setup_attrs)
# Update class attribute to also ensure the argument is processed
# when ``setuptools.setup`` is called.
upstream_Distribution.global_options.extend(
[
("hide-listing", None, "do not display list of files being " "included in the distribution"),
("force-cmake", None, "always run CMake"),
("skip-cmake", None, "do not run CMake"),
]
)
# Find and parse the config file(s): they will override options from
# the setup script, but be overridden by the command line.
dist.parse_config_files()
# Parse the command line and override config files; any
# command-line errors are the end user's fault, so turn them into
# SystemExit to suppress tracebacks.
with _capture_output():
result = dist.parse_command_line()
display_only = not result
if not hasattr(dist, "hide_listing"):
dist.hide_listing = False
if not hasattr(dist, "force_cmake"):
dist.force_cmake = False
if not hasattr(dist, "skip_cmake"):
dist.skip_cmake = False
plat_names = set()
for cmd in [dist.get_command_obj(command) for command in dist.commands]:
if getattr(cmd, "plat_name", None) is not None:
plat_names.add(cmd.plat_name)
if not plat_names:
plat_names.add(None)
elif len(plat_names) > 1:
raise SKBuildError("--plat-name is ambiguous: %s" % ", ".join(plat_names))
plat_name = list(plat_names)[0]
build_ext_inplace = dist.get_command_obj("build_ext").inplace
return (
display_only,
dist.help_commands,
dist.commands,
dist.hide_listing,
dist.force_cmake,
dist.skip_cmake,
plat_name,
build_ext_inplace,
)
def _check_skbuild_parameters(skbuild_kw):
cmake_install_dir = skbuild_kw["cmake_install_dir"]
if os.path.isabs(cmake_install_dir):
raise SKBuildError(
(
"\n setup parameter 'cmake_install_dir' is set to "
"an absolute path. A relative path is expected.\n"
" Project Root : {}\n"
" CMake Install Directory: {}\n"
).format(os.getcwd(), cmake_install_dir)
)
cmake_source_dir = skbuild_kw["cmake_source_dir"]
if not os.path.exists(os.path.abspath(cmake_source_dir)):
raise SKBuildError(
(
"\n setup parameter 'cmake_source_dir' set to "
"a nonexistent directory.\n"
" Project Root : {}\n"
" CMake Source Directory: {}\n"
).format(os.getcwd(), cmake_source_dir)
)
def strip_package(package_parts, module_file):
"""Given ``package_parts`` (e.g. ``['foo', 'bar']``) and a
``module_file`` (e.g. ``foo/bar/jaz/rock/roll.py``), starting
from the left, this function will strip the parts of the path
matching the package parts and return a new string
(e.g ``jaz/rock/roll.py``).
The function will work as expected for either Windows or Unix-style
    ``module_file``, independently of the platform.
"""
if not package_parts or os.path.isabs(module_file):
return module_file
package = "/".join(package_parts)
module_dir = os.path.dirname(module_file.replace("\\", "/"))
module_dir = module_dir[: len(package)]
return module_file[len(package) + 1 :] if package != "" and module_dir.startswith(package) else module_file
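# For example, per the docstring above:
#   strip_package(["foo", "bar"], "foo/bar/jaz/rock/roll.py")  ->  "jaz/rock/roll.py"
#   strip_package(["foo", "bar"], "other/pkg/mod.py")          ->  "other/pkg/mod.py"  (unchanged)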
def _package_data_contain_module(module, package_data):
"""Return True if the ``module`` is contained
in the ``package_data``.
``module`` is a tuple of the form
``(package, modulename, module_file)``.
"""
(package, _, module_file) = module
if package not in package_data:
return False
# We need to strip the package because a module entry
# usually looks like this:
#
# ('foo.bar', 'module', 'foo/bar/module.py')
#
# and the entry in package_data would look like this:
#
# {'foo.bar' : ['module.py']}
if strip_package(package.split("."), module_file) in package_data[package]:
return True
return False
def _should_run_cmake(commands, cmake_with_sdist):
"""Return True if at least one command requiring ``cmake`` to run
is found in ``commands``."""
for expected_command in [
"build",
"build_ext",
"develop",
"install",
"install_lib",
"bdist",
"bdist_dumb",
"bdist_egg",
"bdist_rpm",
"bdist_wininst",
"bdist_wheel",
"test",
]:
if expected_command in commands:
return True
if "sdist" in commands and cmake_with_sdist:
return True
return False
def _save_cmake_spec(args):
"""Save the CMake spec to disk"""
# We use JSON here because readability is more important than performance
try:
os.makedirs(os.path.dirname(CMAKE_SPEC_FILE()))
except OSError:
pass
with open(CMAKE_SPEC_FILE(), "w+") as fp:
json.dump(args, fp)
def _load_cmake_spec():
"""Load and return the CMake spec from disk"""
try:
with open(CMAKE_SPEC_FILE()) as fp:
return json.load(fp)
except (OSError, IOError, ValueError):
return None
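# Illustrative shape of the persisted spec (it mirrors the ``cmake_spec`` dict
# built in ``setup()`` below; the executable path and version shown are examples):
#   {"args": ["/usr/bin/cmake", "-DCMAKE_BUILD_TYPE:STRING=Release", ...],
#    "version": "3.22.1",
#    "environment": {"PYTHONNOUSERSITE": null, "PYTHONPATH": null}}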
# pylint:disable=too-many-locals, too-many-branches
def setup(*args, **kw): # noqa: C901
"""This function wraps setup() so that we can run cmake, make,
CMake build, then proceed as usual with setuptools, appending the
CMake-generated output as necessary.
The CMake project is re-configured only if needed. This is achieved by (1) retrieving the environment mapping
associated with the generator set in the ``CMakeCache.txt`` file, (2) saving the CMake configure arguments and
version in :func:`skbuild.constants.CMAKE_SPEC_FILE()`: and (3) re-configuring only if either the generator or
the CMake specs change.
"""
# If any, strip ending slash from each package directory
# Regular setuptools does not support this
# TODO: will become an error in the future
if "package_dir" in kw:
for package, prefix in kw["package_dir"].items():
if prefix.endswith("/"):
msg = "package_dir={{{!r}: {!r}}} ends with a trailing slash, which is not supported by setuptools.".format(
package, prefix
)
warnings.warn(msg, FutureWarning, stacklevel=2)
kw["package_dir"][package] = prefix[:-1]
sys.argv, cmake_executable, skip_generator_test, cmake_args, make_args = parse_args()
# work around https://bugs.python.org/issue1011113
# (patches provided, but no updates since 2014)
cmdclass = kw.get("cmdclass", {})
cmdclass["build"] = cmdclass.get("build", build.build)
cmdclass["build_py"] = cmdclass.get("build_py", build_py.build_py)
cmdclass["build_ext"] = cmdclass.get("build_ext", build_ext.build_ext)
cmdclass["install"] = cmdclass.get("install", install.install)
cmdclass["install_lib"] = cmdclass.get("install_lib", install_lib.install_lib)
cmdclass["install_scripts"] = cmdclass.get("install_scripts", install_scripts.install_scripts)
cmdclass["clean"] = cmdclass.get("clean", clean.clean)
cmdclass["sdist"] = cmdclass.get("sdist", sdist.sdist)
cmdclass["bdist"] = cmdclass.get("bdist", bdist.bdist)
cmdclass["bdist_wheel"] = cmdclass.get("bdist_wheel", bdist_wheel.bdist_wheel)
cmdclass["egg_info"] = cmdclass.get("egg_info", egg_info.egg_info)
cmdclass["generate_source_manifest"] = cmdclass.get(
"generate_source_manifest", generate_source_manifest.generate_source_manifest
)
cmdclass["test"] = cmdclass.get("test", test.test)
kw["cmdclass"] = cmdclass
# Extract setup keywords specific to scikit-build and remove them from kw.
    # Removing the keyword from kw needs to be done here; otherwise, the
# following call to _parse_setuptools_arguments would complain about
# unknown setup options.
parameters = {
"cmake_args": [],
"cmake_install_dir": "",
"cmake_source_dir": "",
"cmake_with_sdist": False,
"cmake_languages": ("C", "CXX"),
"cmake_minimum_required_version": None,
"cmake_process_manifest_hook": None,
"cmake_install_target": "install",
}
skbuild_kw = {param: kw.pop(param, value) for param, value in parameters.items()}
# ... and validate them
try:
_check_skbuild_parameters(skbuild_kw)
except SKBuildError as ex:
import traceback # pylint: disable=import-outside-toplevel
print("Traceback (most recent call last):")
traceback.print_tb(sys.exc_info()[2])
print("")
sys.exit(ex)
# Convert source dir to a path relative to the root
# of the project
cmake_source_dir = skbuild_kw["cmake_source_dir"]
if cmake_source_dir == ".":
cmake_source_dir = ""
if os.path.isabs(cmake_source_dir):
cmake_source_dir = os.path.relpath(cmake_source_dir)
# Skip running CMake in the following cases:
# * flag "--skip-cmake" is provided
# * "display only" argument is provided (e.g '--help', '--author', ...)
# * no command-line arguments or invalid ones are provided
# * no command requiring cmake is provided
    # * no CMakeLists.txt is found
display_only = has_invalid_arguments = help_commands = False
force_cmake = skip_cmake = False
commands = []
try:
(
display_only,
help_commands,
commands,
hide_listing,
force_cmake,
skip_cmake,
plat_name,
build_ext_inplace,
) = _parse_setuptools_arguments(kw)
except (DistutilsArgError, DistutilsGetoptError):
has_invalid_arguments = True
has_cmakelists = os.path.exists(os.path.join(cmake_source_dir, "CMakeLists.txt"))
if not has_cmakelists:
print("skipping skbuild (no CMakeLists.txt found)")
skip_skbuild = (
display_only
or has_invalid_arguments
or not _should_run_cmake(commands, skbuild_kw["cmake_with_sdist"])
or not has_cmakelists
)
if skip_skbuild and not force_cmake:
if help_commands:
# Prepend scikit-build help. Generate option descriptions using
# argparse.
skbuild_parser = create_skbuild_argparser()
arg_descriptions = [line for line in skbuild_parser.format_help().split("\n") if line.startswith(" ")]
print("scikit-build options:")
print("\n".join(arg_descriptions))
print("")
print('Arguments following a "--" are passed directly to CMake ' "(e.g. -DMY_VAR:BOOL=TRUE).")
            print('Arguments following a second "--" are passed directly to ' "the build tool.")
print("")
return setuptools.setup(*args, **kw)
developer_mode = "develop" in commands or "test" in commands or build_ext_inplace
packages = kw.get("packages", [])
package_dir = kw.get("package_dir", {})
package_data = copy.deepcopy(kw.get("package_data", {}))
py_modules = kw.get("py_modules", [])
new_py_modules = {py_module: False for py_module in py_modules}
scripts = kw.get("scripts", [])
new_scripts = {script: False for script in scripts}
data_files = {(parent_dir or "."): set(file_list) for parent_dir, file_list in kw.get("data_files", [])}
# Since CMake arguments provided through the command line have more
# weight and when CMake is given multiple times a argument, only the last
# one is considered, let's prepend the one provided in the setup call.
cmake_args = skbuild_kw["cmake_args"] + cmake_args
# Handle cmake_install_target
# get the target (next item after '--install-target') or return '' if no --install-target
cmake_install_target_from_command = next(
(make_args[index + 1] for index, item in enumerate(make_args) if item == "--install-target"), ""
)
cmake_install_target_from_setup = skbuild_kw["cmake_install_target"]
# Setting target from command takes precedence
# cmake_install_target_from_setup has the default 'install',
# so cmake_install_target would never be empty.
if cmake_install_target_from_command:
cmake_install_target = cmake_install_target_from_command
else:
cmake_install_target = cmake_install_target_from_setup
if sys.platform == "darwin":
# If no ``--plat-name`` argument was passed, set default value.
if plat_name is None:
plat_name = skbuild_plat_name()
(_, version, machine) = plat_name.split("-")
# The loop here allows for CMAKE_OSX_* command line arguments to overload
# values passed with either the ``--plat-name`` command-line argument
# or the ``cmake_args`` setup option.
for cmake_arg in cmake_args:
if "CMAKE_OSX_DEPLOYMENT_TARGET" in cmake_arg:
version = cmake_arg.split("=")[1]
if "CMAKE_OSX_ARCHITECTURES" in cmake_arg:
machine = cmake_arg.split("=")[1]
if set(machine.split(";")) == {"x86_64", "arm64"}:
machine = "universal2"
set_skbuild_plat_name("macosx-{}-{}".format(version, machine))
# Set platform env. variable so that commands (e.g. bdist_wheel)
# uses this information. The _PYTHON_HOST_PLATFORM env. variable is
# used in distutils.util.get_platform() function.
os.environ.setdefault("_PYTHON_HOST_PLATFORM", skbuild_plat_name())
# Set CMAKE_OSX_DEPLOYMENT_TARGET and CMAKE_OSX_ARCHITECTURES if not already
# specified
(_, version, machine) = skbuild_plat_name().split("-")
if not cmaker.has_cmake_cache_arg(cmake_args, "CMAKE_OSX_DEPLOYMENT_TARGET"):
cmake_args.append("-DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=%s" % version)
if not cmaker.has_cmake_cache_arg(cmake_args, "CMAKE_OSX_ARCHITECTURES"):
machine_archs = "x86_64;arm64" if machine == "universal2" else machine
cmake_args.append("-DCMAKE_OSX_ARCHITECTURES:STRING=%s" % machine_archs)
# Install cmake if listed in `setup_requires`
for package in kw.get("setup_requires", []):
if Requirement(package).name == "cmake":
setup_requires = [package]
dist = upstream_Distribution({"setup_requires": setup_requires})
dist.fetch_build_eggs(setup_requires)
# Considering packages associated with "setup_requires" keyword are
# installed in .eggs subdirectory without honoring setuptools "console_scripts"
            # entry_points and without setting the expected executable permissions, we are
# taking care of it below.
import cmake # pylint: disable=import-outside-toplevel
for executable in ["cmake", "cpack", "ctest"]:
executable = os.path.join(cmake.CMAKE_BIN_DIR, executable)
if platform.system().lower() == "windows":
executable += ".exe"
st = os.stat(executable)
permissions = st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
os.chmod(executable, permissions)
cmake_executable = os.path.join(cmake.CMAKE_BIN_DIR, "cmake")
break
# Languages are used to determine a working generator
cmake_languages = skbuild_kw["cmake_languages"]
try:
if cmake_executable is None:
cmake_executable = CMAKE_DEFAULT_EXECUTABLE
cmkr = cmaker.CMaker(cmake_executable)
if not skip_cmake:
cmake_minimum_required_version = skbuild_kw["cmake_minimum_required_version"]
if cmake_minimum_required_version is not None:
if parse_version(cmkr.cmake_version) < parse_version(cmake_minimum_required_version):
raise SKBuildError(
"CMake version {} or higher is required. CMake version {} is being used".format(
cmake_minimum_required_version, cmkr.cmake_version
)
)
# Used to confirm that the cmake executable is the same, and that the environment
# didn't change
cmake_spec = {
"args": [which(CMAKE_DEFAULT_EXECUTABLE)] + cmake_args,
"version": cmkr.cmake_version,
"environment": {
"PYTHONNOUSERSITE": os.environ.get("PYTHONNOUSERSITE"),
"PYTHONPATH": os.environ.get("PYTHONPATH"),
},
}
# skip the configure step for a cached build
env = cmkr.get_cached_generator_env()
if env is None or cmake_spec != _load_cmake_spec():
env = cmkr.configure(
cmake_args,
skip_generator_test=skip_generator_test,
cmake_source_dir=cmake_source_dir,
cmake_install_dir=skbuild_kw["cmake_install_dir"],
languages=cmake_languages,
)
_save_cmake_spec(cmake_spec)
cmkr.make(make_args, install_target=cmake_install_target, env=env)
except SKBuildGeneratorNotFoundError as ex:
sys.exit(ex)
except SKBuildError as ex:
import traceback # pylint: disable=import-outside-toplevel
print("Traceback (most recent call last):")
traceback.print_tb(sys.exc_info()[2])
print("")
sys.exit(ex)
# If needed, set reasonable defaults for package_dir
for package in packages:
if package not in package_dir:
package_dir[package] = package.replace(".", "/")
if "" in package_dir:
package_dir[package] = to_unix_path(os.path.join(package_dir[""], package_dir[package]))
kw["package_dir"] = package_dir
package_prefixes = _collect_package_prefixes(package_dir, packages)
# This hook enables custom processing of the cmake manifest
cmake_manifest = cmkr.install()
process_manifest = skbuild_kw.get("cmake_process_manifest_hook")
if process_manifest is not None:
if callable(process_manifest):
cmake_manifest = process_manifest(cmake_manifest)
else:
raise SKBuildError("The cmake_process_manifest_hook argument should be callable.")
_classify_installed_files(
cmake_manifest,
package_data,
package_prefixes,
py_modules,
new_py_modules,
scripts,
new_scripts,
data_files,
cmake_source_dir,
skbuild_kw["cmake_install_dir"],
)
original_manifestin_data_files = []
if kw.get("include_package_data", False):
original_manifestin_data_files = parse_manifestin(os.path.join(os.getcwd(), "MANIFEST.in"))
for path in original_manifestin_data_files:
_classify_file(
path, package_data, package_prefixes, py_modules, new_py_modules, scripts, new_scripts, data_files
)
if developer_mode:
# Copy packages
for package, package_file_list in package_data.items():
for package_file in package_file_list:
package_file = os.path.join(package_dir[package], package_file)
cmake_file = os.path.join(CMAKE_INSTALL_DIR(), package_file)
if os.path.exists(cmake_file):
_copy_file(cmake_file, package_file, hide_listing)
# Copy modules
for py_module in py_modules:
package_file = py_module + ".py"
cmake_file = os.path.join(CMAKE_INSTALL_DIR(), package_file)
if os.path.exists(cmake_file):
_copy_file(cmake_file, package_file, hide_listing)
else:
_consolidate_package_modules(cmake_source_dir, packages, package_dir, py_modules, package_data, hide_listing)
original_package_data = kw.get("package_data", {}).copy()
_consolidate_package_data_files(original_package_data, package_prefixes, hide_listing)
for data_file in original_manifestin_data_files:
dest_data_file = os.path.join(CMAKE_INSTALL_DIR(), data_file)
_copy_file(data_file, dest_data_file, hide_listing)
kw["package_data"] = package_data
kw["package_dir"] = {
package: (
os.path.join(CMAKE_INSTALL_DIR(), prefix)
if os.path.exists(os.path.join(CMAKE_INSTALL_DIR(), prefix))
else prefix
)
for prefix, package in package_prefixes
}
kw["scripts"] = [
os.path.join(CMAKE_INSTALL_DIR(), script) if mask else script for script, mask in new_scripts.items()
]
kw["data_files"] = [(parent_dir, list(file_set)) for parent_dir, file_set in data_files.items()]
if "zip_safe" not in kw:
kw["zip_safe"] = False
# Adapted from espdev/ITKPythonInstaller/setup.py.in
class BinaryDistribution(upstream_Distribution): # pylint: disable=missing-class-docstring
def has_ext_modules(self): # pylint: disable=no-self-use,missing-function-docstring
return has_cmakelists
kw["distclass"] = BinaryDistribution
print("")
return setuptools.setup(*args, **kw)
def _collect_package_prefixes(package_dir, packages):
"""
Collect the list of prefixes for all packages
The list is used to match paths in the install manifest to packages
specified in the setup.py script.
The list is sorted in decreasing order of prefix length so that paths are
matched with their immediate parent package, instead of any of that
package's ancestors.
For example, consider the project structure below. Assume that the
setup call was made with a package list featuring "top" and "top.bar", but
not "top.not_a_subpackage".
::
top/ -> top/
__init__.py -> top/__init__.py (parent: top)
foo.py -> top/foo.py (parent: top)
bar/ -> top/bar/ (parent: top)
__init__.py -> top/bar/__init__.py (parent: top.bar)
not_a_subpackage/ -> top/not_a_subpackage/ (parent: top)
data_0.txt -> top/not_a_subpackage/data_0.txt (parent: top)
data_1.txt -> top/not_a_subpackage/data_1.txt (parent: top)
The paths in the generated install manifest are matched to packages
according to the parents indicated on the right. Only packages that are
specified in the setup() call are considered. Because of the sort order,
the data files on the bottom would have been mapped to
"top.not_a_subpackage" instead of "top", proper -- had such a package been
specified.
"""
return list(
sorted(
((package_dir[package].replace(".", "/"), package) for package in packages),
key=lambda tup: len(tup[0]),
reverse=True,
)
)
def _classify_installed_files(
install_paths,
package_data,
package_prefixes,
py_modules,
new_py_modules,
scripts,
new_scripts,
data_files,
cmake_source_dir,
_cmake_install_dir,
):
assert not os.path.isabs(cmake_source_dir)
assert cmake_source_dir != "."
install_root = os.path.join(os.getcwd(), CMAKE_INSTALL_DIR())
for path in install_paths:
# if this installed file is not within the project root, complain and
# exit
if not to_platform_path(path).startswith(CMAKE_INSTALL_DIR()):
raise SKBuildError(
(
"\n CMake-installed files must be within the project root.\n"
" Project Root : {}\n"
" Violating File: {}\n"
).format(install_root, to_platform_path(path))
)
# peel off the 'skbuild' prefix
path = to_unix_path(os.path.relpath(path, CMAKE_INSTALL_DIR()))
_classify_file(
path, package_data, package_prefixes, py_modules, new_py_modules, scripts, new_scripts, data_files
)
def _classify_file(path, package_data, package_prefixes, py_modules, new_py_modules, scripts, new_scripts, data_files):
found_package = False
found_module = False
found_script = False
path = to_unix_path(path)
# check to see if path is part of a package
for prefix, package in package_prefixes:
if path.startswith(prefix + "/"):
# peel off the package prefix
path = to_unix_path(os.path.relpath(path, prefix))
package_file_list = package_data.get(package, [])
package_file_list.append(path)
package_data[package] = package_file_list
found_package = True
break
if found_package:
return
# If control reaches this point, then this installed file is not part of
# a package.
# check if path is a module
for module in py_modules:
if path.replace("/", ".") == ".".join((module, "py")):
new_py_modules[module] = True
found_module = True
break
if found_module:
return
# If control reaches this point, then this installed file is not a
# module
# if the file is a script, mark the corresponding script
for script in scripts:
if path == script:
new_scripts[script] = True
found_script = True
break
if found_script:
return
# If control reaches this point, then this installed file is not a
# script
# If control reaches this point, then we have installed files that are
# not part of a package, not a module, nor a script. Without any other
# information, we can only treat it as a generic data file.
parent_dir = os.path.dirname(path)
file_set = data_files.get(parent_dir)
if file_set is None:
file_set = set()
data_files[parent_dir] = file_set
file_set.add(os.path.join(CMAKE_INSTALL_DIR(), path))
def _copy_file(src_file, dest_file, hide_listing=True):
"""Copy ``src_file`` to ``dest_file`` ensuring parent directory exists.
    Messages like `creating directory /path/to/package` and
    `copying directory /src/path/to/package -> path/to/package` are printed to
    standard output only when ``hide_listing`` is False; with the default
    ``hide_listing=True`` they are suppressed.
"""
# Create directory if needed
dest_dir = os.path.dirname(dest_file)
if dest_dir != "" and not os.path.exists(dest_dir):
if not hide_listing:
print("creating directory {}".format(dest_dir))
mkdir_p(dest_dir)
# Copy file
if not hide_listing:
print("copying {} -> {}".format(src_file, dest_file))
copyfile(src_file, dest_file)
copymode(src_file, dest_file)
def _consolidate_package_modules(cmake_source_dir, packages, package_dir, py_modules, package_data, hide_listing):
"""This function consolidates packages having modules located in
both the source tree and the CMake install tree into one location.
The one location is the CMake install tree
(see :func:`.constants.CMAKE_INSTALL_DIR()`).
    Why? This is a necessary evil because ``Setuptools`` keeps track of
    package and module files to install using a dictionary of lists where
    the keys are package names (e.g. ``foo.bar``) and the values are lists of
    module files (e.g. ``['__init__.py', 'baz.py']``). Since this doesn't allow
    the files associated with a given module to be split across multiple
    locations, one location is selected and the files are copied over.
    How? It currently searches for modules across both locations using
    the :class:`.utils.PythonModuleFinder`. Then, with the help
    of :func:`_package_data_contain_module`, it identifies which
    ones are either already included in or missing from the distribution.
Once a module has been identified as ``missing``, it is both copied
into the :func:`.constants.CMAKE_INSTALL_DIR()` and added to the
``package_data`` dictionary so that it can be considered by
the upstream setup function.
"""
try:
# Search for python modules in both the current directory
# and cmake install tree.
modules = PythonModuleFinder(
packages, package_dir, py_modules, alternative_build_base=CMAKE_INSTALL_DIR()
).find_all_modules()
except DistutilsError as msg:
raise SystemExit("error: {}".format(str(msg)))
print("")
for entry in modules:
# Check if module file should be copied into the CMake install tree.
if _package_data_contain_module(entry, package_data):
continue
(package, _, src_module_file) = entry
# Copy missing module file
if os.path.exists(src_module_file):
dest_module_file = os.path.join(CMAKE_INSTALL_DIR(), src_module_file)
_copy_file(src_module_file, dest_module_file, hide_listing)
# Since the mapping in package_data expects the package to be associated
# with a list of files relative to the directory containing the package,
# the following section makes sure to strip the redundant part of the
# module file path.
# The redundant part should be stripped for both cmake_source_dir and
# the package.
package_parts = []
if cmake_source_dir:
package_parts = cmake_source_dir.split(os.path.sep)
package_parts += package.split(".")
stripped_module_file = strip_package(package_parts, src_module_file)
# Update list of files associated with the corresponding package
try:
package_data[package].append(stripped_module_file)
except KeyError:
package_data[package] = [stripped_module_file]
def _consolidate_package_data_files(original_package_data, package_prefixes, hide_listing):
"""This function copies package data files specified using the ``package_data`` keyword
into :func:`.constants.CMAKE_INSTALL_DIR()`.
::
setup(...,
packages=['mypkg'],
package_dir={'mypkg': 'src/mypkg'},
package_data={'mypkg': ['data/*.dat']},
)
Considering that (1) the packages associated with modules located in both the source tree and
the CMake install tree are consolidated into the CMake install tree, and (2) the consolidated
package path set in the ``package_dir`` dictionary and later used by setuptools to package
(or install) modules and data files is :func:`.constants.CMAKE_INSTALL_DIR()`, copying the data files
is required to ensure setuptools can find them when it uses the package directory.
"""
project_root = os.getcwd()
for prefix, package in package_prefixes:
if package not in original_package_data:
continue
raw_patterns = original_package_data[package]
for pattern in raw_patterns:
expanded_package_dir = os.path.join(project_root, prefix, pattern)
for src_data_file in glob(expanded_package_dir):
full_prefix_length = len(os.path.join(project_root, prefix)) + 1
data_file = src_data_file[full_prefix_length:]
dest_data_file = os.path.join(CMAKE_INSTALL_DIR(), prefix, data_file)
_copy_file(src_data_file, dest_data_file, hide_listing)
avg_line_length: 38.521869 | max_line_length: 124 | alphanum_fraction: 0.655949
hexsha: 794141e9fea50d267c030e401fcf94d7135ebe0d | size: 3,004 | ext: py | lang: Python
repo: Dannyaffleck/stock | path: money.py | head_hexsha: 9c6c62b798e4e3306a7bf4a185a0b4fca37cdd33 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2020-07-08T19:59:38.000Z to 2020-07-08T19:59:38.000Z)
content:

import math
from scipy import stats
import numpy as np
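# Descriptive overview: the script fits a linear regression over a sliding
# 4-point window of the price series below, buys with all available principal
# when the fitted slope turns positive, sells the whole position when the slope
# turns negative, and finally reports the remaining principal and share count.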
listy=[1.58,1.57,1.54,1.51,1.51,1.51,1.5099,1.5,1.48,1.44,1.44,1.43,1.44,1.46,1.46,1.46,1.46,1.46,1.46,1.46,1.46,1.46,1.455,1.445,1.44,1.44,1.43,1.46,1.46,1.46,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.45,1.44,1.44,1.44,1.45,1.4477,1.44,1.45,1.45,1.45,1.45,1.45,1.45,1.4499,1.44,1.45,1.45,1.44,1.44,1.44,1.45,1.4496,1.44,1.44,1.44,1.44,1.42,1.42,1.42,1.42,1.42,1.42,1.42,1.42,1.42,1.41,1.4,1.395,1.39,1.39,1.39,1.4,1.41,1.41,1.41,1.41,1.41,1.41,1.42,1.42,1.42,1.42,1.41,1.41,1.41,1.41,1.41,1.41,1.4099,1.41,1.41,1.41,1.4,1.4,1.4,1.4,1.4,1.3999,1.4,1.4,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.4,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.3899,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.4,1.41,1.41,1.41,1.41,1.41,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.3999,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.3994,1.3881,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.3899,1.3899,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.39,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.41,1.41,1.41,1.41,1.42,1.42,1.42,1.42,1.42,1.42,1.42,1.4197,1.42,1.42,1.42,1.42,1.42,1.42,1.42,1.42,1.42,1.42,1.42,1.43,1.43,1.429,1.43,1.43,1.43,1.4277,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.4268,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.425,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.4295,1.43,1.43,1.43,1.4277,1.43,1.43,1.43,1.425,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.43,1.44,1.45,1.46,1.46,1.46,1.46,1.46,1.46,1.46,1.45, 0]
listx = [idx for idx,val in enumerate(listy)]
i=4
principle=6000
shares=0
slope2 = -1
purchaseprice=0
while i<len(listy)-3:
price1=float(listy[i])
#price2=float(list[B+1])
#price3=float(list[B+2])
#slope1= (float(price2)-float(price1))
#slope2= (float(price3)-float(price2))
da_y = np.array(listy[i-4:i])
da_x = np.array(listx[i-4:i])
slope1=slope2
slope2, intercept, r_value, p_value, std_err = stats.linregress(da_x,da_y)
if slope1<=0 and slope2>0:
howmany= math.trunc(principle/price1)
print(f"buy at {howmany}@{price1}")
shares= howmany
principle= principle-(howmany*price1)
purchaseprice= price1
elif slope2<=0 and slope1>0 and purchaseprice != price1:
howmany= shares
print(f"sell at {howmany}@{price1}")
principle +=shares*price1
shares=0
i = i+1
print(f"principle:{principle} shares:{shares}")
print(f"total principle: {principle+(shares*price1)}@{price1}")
avg_line_length: 61.306122 | max_line_length: 1,947 | alphanum_fraction: 0.614181
hexsha: 794142836652ce39830eb0247e3e51ba9ee4e104 | size: 2,557 | ext: py | lang: Python
repo: iam-abbas/numpy | path: doc/source/reference/random/performance.py | head_hexsha: 2fb5e969fded3cd468f2ca01d5b954c953545dd9 | licenses: ["BSD-3-Clause"]
max_stars_count: 20,453 (2015-01-02T09:00:47.000Z to 2022-03-31T23:35:56.000Z) | max_issues_count: 14,862 (2015-01-01T01:28:34.000Z to 2022-03-31T23:48:52.000Z) | max_forks_count: 9,362 (2015-01-01T15:49:43.000Z to 2022-03-31T21:26:51.000Z)
content:

from timeit import repeat
import pandas as pd
import numpy as np
from numpy.random import MT19937, PCG64, PCG64DXSM, Philox, SFC64
PRNGS = [MT19937, PCG64, PCG64DXSM, Philox, SFC64]
funcs = {}
integers = 'integers(0, 2**{bits},size=1000000, dtype="uint{bits}")'
funcs['32-bit Unsigned Ints'] = integers.format(bits=32)
funcs['64-bit Unsigned Ints'] = integers.format(bits=64)
funcs['Uniforms'] = 'random(size=1000000)'
funcs['Normals'] = 'standard_normal(size=1000000)'
funcs['Exponentials'] = 'standard_exponential(size=1000000)'
funcs['Gammas'] = 'standard_gamma(3.0,size=1000000)'
funcs['Binomials'] = 'binomial(9, .1, size=1000000)'
funcs['Laplaces'] = 'laplace(size=1000000)'
funcs['Poissons'] = 'poisson(3.0, size=1000000)'
setup = """
from numpy.random import {prng}, Generator
rg = Generator({prng}())
"""
test = "rg.{func}"
table = {}
for prng in PRNGS:
print(prng)
col = {}
for key in funcs:
t = repeat(test.format(func=funcs[key]),
setup.format(prng=prng().__class__.__name__),
number=1, repeat=3)
col[key] = 1000 * min(t)
col = pd.Series(col)
table[prng().__class__.__name__] = col
npfuncs = {}
npfuncs.update(funcs)
npfuncs['32-bit Unsigned Ints'] = 'randint(2**32,dtype="uint32",size=1000000)'
npfuncs['64-bit Unsigned Ints'] = 'randint(2**64,dtype="uint64",size=1000000)'
setup = """
from numpy.random import RandomState
rg = RandomState()
"""
col = {}
for key in npfuncs:
t = repeat(test.format(func=npfuncs[key]),
setup.format(prng=prng().__class__.__name__),
number=1, repeat=3)
col[key] = 1000 * min(t)
table['RandomState'] = pd.Series(col)
columns = ['MT19937', 'PCG64', 'PCG64DXSM', 'Philox', 'SFC64', 'RandomState']
table = pd.DataFrame(table)
order = np.log(table).mean().sort_values().index
table = table.T
table = table.reindex(columns)
table = table.T
table = table.reindex([k for k in funcs], axis=0)
print(table.to_csv(float_format='%0.1f'))
rel = table.loc[:, ['RandomState']].values @ np.ones(
(1, table.shape[1])) / table
rel.pop('RandomState')
rel = rel.T
rel['Overall'] = np.exp(np.log(rel).mean(1))
rel *= 100
rel = np.round(rel)
rel = rel.T
print(rel.to_csv(float_format='%0d'))
# Cross-platform table
rows = ['32-bit Unsigned Ints','64-bit Unsigned Ints','Uniforms','Normals','Exponentials']
xplat = rel.reindex(rows, axis=0)
xplat = 100 * (xplat / xplat.MT19937.values[:,None])
overall = np.exp(np.log(xplat).mean(0))
xplat = xplat.T.copy()
xplat['Overall']=overall
print(xplat.T.round(1))
avg_line_length: 29.390805 | max_line_length: 90 | alphanum_fraction: 0.663277
hexsha: 794142d46c5bb29cfd5258b33769bbcda530f78d | size: 5,419 | ext: py | lang: Python
repo: ismaila-at-za-ibm/qiskit-terra | path: qiskit/providers/builtinsimulators/statevector_simulator.py | head_hexsha: 08303ec98ac7b33fde55266dc3a74466fbdcae95 | licenses: ["Apache-2.0"]
max_stars_count: 1 (2020-09-03T12:28:44.000Z to 2020-09-03T12:28:44.000Z) | max_issues_count: null | max_forks_count: null
content:

# -*- coding: utf-8 -*-
# Copyright 2017, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=invalid-name
# pylint: disable=arguments-differ
"""Contains a (slow) python statevector simulator.
It simulates the statevector through a quantum circuit. It is exponential in
the number of qubits.
We advise using the C++ simulator or online simulator for larger systems.
The input is a qobj dictionary and the output is a Result object.
The input qobj to this simulator has no shots, no measures, no reset, no noise.
"""
import logging
from math import log2
from qiskit._util import local_hardware_info
from qiskit.providers.builtinsimulators.exceptions import SimulatorError
from qiskit.providers.models import BackendConfiguration
from .qasm_simulator import QasmSimulatorPy
logger = logging.getLogger(__name__)
class StatevectorSimulatorPy(QasmSimulatorPy):
"""Python statevector simulator."""
MAX_QUBITS_MEMORY = int(log2(local_hardware_info()['memory'] * (1024 ** 3) / 16))
DEFAULT_CONFIGURATION = {
'backend_name': 'statevector_simulator',
'backend_version': '1.0.0',
'n_qubits': min(24, MAX_QUBITS_MEMORY),
'url': 'https://github.com/Qiskit/qiskit-terra',
'simulator': True,
'local': True,
'conditional': True,
'open_pulse': False,
'memory': True,
'max_shots': 65536,
'description': 'A Python statevector simulator for qobj files',
'basis_gates': ['u1', 'u2', 'u3', 'cx', 'id', 'snapshot'],
'gates': [
{
'name': 'u1',
'parameters': ['lambda'],
'qasm_def': 'gate u1(lambda) q { U(0,0,lambda) q; }'
},
{
'name': 'u2',
'parameters': ['phi', 'lambda'],
'qasm_def': 'gate u2(phi,lambda) q { U(pi/2,phi,lambda) q; }'
},
{
'name': 'u3',
'parameters': ['theta', 'phi', 'lambda'],
'qasm_def': 'gate u3(theta,phi,lambda) q { U(theta,phi,lambda) q; }'
},
{
'name': 'cx',
'parameters': ['c', 't'],
'qasm_def': 'gate cx c,t { CX c,t; }'
},
{
'name': 'id',
'parameters': ['a'],
'qasm_def': 'gate id a { U(0,0,0) a; }'
},
{
'name': 'snapshot',
'parameters': ['slot'],
'qasm_def': 'gate snapshot(slot) q { TODO }'
}
]
}
# Override base class value to return the final state vector
SHOW_FINAL_STATE = True
def __init__(self, configuration=None, provider=None):
super().__init__(configuration=(configuration or
BackendConfiguration.from_dict(self.DEFAULT_CONFIGURATION)),
provider=provider)
def run(self, qobj, backend_options=None):
"""Run qobj asynchronously.
Args:
qobj (Qobj): payload of the experiment
backend_options (dict): backend options
Returns:
SimulatorsJob: derived from BaseJob
Additional Information::
backend_options: Is a dict of options for the backend. It may contain
* "initial_statevector": vector_like
* "chop_threshold": double
The "initial_statevector" option specifies a custom initial
            statevector for the simulator to be used instead of the all
            zero state. The size of this vector must be correct for the number
of qubits in all experiments in the qobj.
The "chop_threshold" option specifies a trunctation value for
setting small values to zero in the output statevector. The default
value is 1e-15.
Example::
backend_options = {
"initial_statevector": np.array([1, 0, 0, 1j]) / np.sqrt(2),
"chop_threshold": 1e-15
}
"""
return super().run(qobj, backend_options=backend_options)
def _validate(self, qobj):
"""Semantic validations of the qobj which cannot be done via schemas.
Some of these may later move to backend schemas.
1. No shots
2. No measurements in the middle
"""
n_qubits = qobj.config.n_qubits
max_qubits = self.configuration().n_qubits
if n_qubits > max_qubits:
raise SimulatorError('Number of qubits {} '.format(n_qubits) +
'is greater than maximum ({}) '.format(max_qubits) +
'for "{}".'.format(self.name()))
if qobj.config.shots != 1:
logger.info('"%s" only supports 1 shot. Setting shots=1.',
self.name())
qobj.config.shots = 1
for experiment in qobj.experiments:
name = experiment.header.name
if getattr(experiment.config, 'shots', 1) != 1:
logger.info('"%s" only supports 1 shot. '
'Setting shots=1 for circuit "%s".',
self.name(), name)
experiment.config.shots = 1
avg_line_length: 35.887417 | max_line_length: 100 | alphanum_fraction: 0.556745
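As a rough usage sketch for the backend defined above (assuming a Qiskit Terra release of the same era, in which `BasicAer`, `execute`, and `Result.get_statevector` are available; the circuit contents are arbitrary):

from qiskit import BasicAer, QuantumCircuit, execute

qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0, 1)

backend = BasicAer.get_backend("statevector_simulator")
statevector = execute(qc, backend).result().get_statevector(qc)
print(statevector)  # amplitudes of the final 2-qubit state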
hexsha: 7941443e530fa56a6f98d1d552a1199973f0d62d | size: 1,108 | ext: py | lang: Python
repo: amanagarwal33/python | path: kubernetes/test/test_v1alpha1_policy_rule.py | head_hexsha: e31693557f75950805fb4dc5af4cb7434a470e26 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:

# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
    OpenAPI spec version: v1.15.6
    Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v1alpha1_policy_rule import V1alpha1PolicyRule # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1alpha1PolicyRule(unittest.TestCase):
"""V1alpha1PolicyRule unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1alpha1PolicyRule(self):
"""Test V1alpha1PolicyRule"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes.client.models.v1alpha1_policy_rule.V1alpha1PolicyRule() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
avg_line_length: 24.086957 | max_line_length: 124 | alphanum_fraction: 0.705776
hexsha: 79414746c5cda8018cfd20a9e19b525b782170aa | size: 41,797 | ext: py | lang: Python
repo: brianWeng0223/mljar-supervised | path: supervised/tuner/mljar_tuner.py | head_hexsha: d7c6a969792c6efbff264c8743a645b098e24dd2 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:

import os
import copy
import json
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from supervised.tuner.random_parameters import RandomParameters
from supervised.algorithms.registry import AlgorithmsRegistry
from supervised.preprocessing.preprocessing_categorical import PreprocessingCategorical
from supervised.tuner.preprocessing_tuner import PreprocessingTuner
from supervised.tuner.hill_climbing import HillClimbing
from supervised.algorithms.registry import (
BINARY_CLASSIFICATION,
MULTICLASS_CLASSIFICATION,
REGRESSION,
)
from supervised.algorithms.xgboost import xgboost_eval_metric
from supervised.algorithms.lightgbm import lightgbm_eval_metric
from supervised.algorithms.catboost import catboost_eval_metric
from supervised.utils.utils import dump_data
import logging
from supervised.utils.config import LOG_LEVEL
logger = logging.getLogger(__name__)
logger.setLevel(LOG_LEVEL)
class MljarTuner:
def __init__(
self,
tuner_params,
algorithms,
ml_task,
eval_metric,
validation_strategy,
explain_level,
data_info,
golden_features,
features_selection,
train_ensemble,
stack_models,
adjust_validation,
boost_on_errors,
kmeans_features,
mix_encoding,
optuna_time_budget,
optuna_init_params,
optuna_verbose,
n_jobs,
seed,
):
logger.debug("MljarTuner.__init__")
self._start_random_models = tuner_params.get("start_random_models", 5)
self._hill_climbing_steps = tuner_params.get("hill_climbing_steps", 3)
self._top_models_to_improve = tuner_params.get("top_models_to_improve", 3)
self._algorithms = algorithms
self._ml_task = ml_task
self._validation_strategy = validation_strategy
self._explain_level = explain_level
self._data_info = data_info
self._golden_features = golden_features
self._features_selection = features_selection
self._train_ensemble = train_ensemble
self._stack_models = stack_models
self._adjust_validation = adjust_validation
self._boost_on_errors = boost_on_errors
self._kmeans_features = kmeans_features
self._mix_encoding = mix_encoding
self._optuna_time_budget = optuna_time_budget
self._optuna_init_params = optuna_init_params
self._optuna_verbose = optuna_verbose
self._eval_metric = eval_metric
self._n_jobs = n_jobs
self._seed = seed
self._unique_params_keys = []
def _apply_categorical_strategies(self):
if self._data_info is None:
return []
if self._data_info.get("columns_info") is None:
return []
strategies = []
for k, v in self._data_info["columns_info"].items():
# if (
# "categorical" in v
# and PreprocessingTuner.CATEGORICALS_LOO not in strategies
# ):
# strategies += [PreprocessingTuner.CATEGORICALS_LOO]
if (
PreprocessingCategorical.FEW_CATEGORIES in v
and PreprocessingTuner.CATEGORICALS_MIX not in strategies
and self._mix_encoding
):
strategies += [PreprocessingTuner.CATEGORICALS_MIX]
if len(strategies) == 1: # disable loo encoding
# cant add more
# stop
break
return strategies
def _can_apply_kmeans_features(self):
if self._data_info is None:
return False
# are there any continous
continous_cols = 0
for k, v in self._data_info["columns_info"].items():
if "categorical" not in v:
continous_cols += 1
# too little columns
if continous_cols == 0:
return False
# too many columns
if continous_cols > 300:
return False
# all good, can apply kmeans
return True
def _can_apply_golden_features(self):
if self._data_info is None:
return False
# are there any continous
continous_cols = 0
for k, v in self._data_info["columns_info"].items():
if "categorical" not in v:
continous_cols += 1
# too little columns
if continous_cols == 0:
return False
# all good, can apply golden features
return True
def steps(self):
all_steps = []
if self._adjust_validation:
all_steps += ["adjust_validation"]
all_steps += ["simple_algorithms", "default_algorithms"]
if self._start_random_models > 1:
all_steps += ["not_so_random"]
categorical_strategies = self._apply_categorical_strategies()
if PreprocessingTuner.CATEGORICALS_MIX in categorical_strategies:
all_steps += ["mix_encoding"]
if PreprocessingTuner.CATEGORICALS_LOO in categorical_strategies:
all_steps += ["loo_encoding"]
if self._golden_features and self._can_apply_golden_features():
all_steps += ["golden_features"]
if self._kmeans_features and self._can_apply_kmeans_features():
all_steps += ["kmeans_features"]
if self._features_selection:
all_steps += ["insert_random_feature"]
all_steps += ["features_selection"]
for i in range(self._hill_climbing_steps):
all_steps += [f"hill_climbing_{i+1}"]
if self._boost_on_errors:
all_steps += ["boost_on_errors"]
if self._train_ensemble:
all_steps += ["ensemble"]
if self._stack_models:
all_steps += ["stack"]
if self._train_ensemble:
all_steps += ["ensemble_stacked"]
return all_steps
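    # For illustration only: depending on which of the constructor flags checked
    # above are enabled, the returned list could look like
    #   ["adjust_validation", "simple_algorithms", "default_algorithms",
    #    "not_so_random", "golden_features", "insert_random_feature",
    #    "features_selection", "hill_climbing_1", "hill_climbing_2",
    #    "boost_on_errors", "ensemble", "stack", "ensemble_stacked"]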
def get_model_name(self, model_type, models_cnt, special=""):
return f"{models_cnt}_" + special + model_type.replace(" ", "")
def filter_random_feature_model(self, models):
return [m for m in models if "RandomFeature" not in m.get_name()]
def generate_params(
self, step, models, results_path, stacked_models, total_time_limit
):
try:
models_cnt = len(models)
if step == "adjust_validation":
return self.adjust_validation_params(models_cnt)
elif step == "simple_algorithms":
return self.simple_algorithms_params(models_cnt)
elif step == "default_algorithms":
return self.default_params(models_cnt)
elif step == "not_so_random":
return self.get_not_so_random_params(models_cnt)
elif step == "mix_encoding":
return self.get_mix_categorical_strategy(models, total_time_limit)
elif step == "loo_encoding":
return self.get_loo_categorical_strategy(models, total_time_limit)
elif step == "golden_features":
return self.get_golden_features_params(
models, results_path, total_time_limit
)
elif step == "kmeans_features":
return self.get_kmeans_features_params(
models, results_path, total_time_limit
)
elif step == "insert_random_feature":
return self.get_params_to_insert_random_feature(
models, total_time_limit
)
elif step == "features_selection":
return self.get_features_selection_params(
self.filter_random_feature_model(models),
results_path,
total_time_limit,
)
elif "hill_climbing" in step:
return self.get_hill_climbing_params(
self.filter_random_feature_model(models)
)
elif step == "boost_on_errors":
return self.boost_params(models, results_path, total_time_limit)
elif step == "ensemble":
return [
{
"model_type": "ensemble",
"is_stacked": False,
"name": "Ensemble",
"status": "initialized",
"final_loss": None,
"train_time": None,
}
]
elif step == "stack":
return self.get_params_stack_models(stacked_models)
elif step == "ensemble_stacked":
# do we have stacked models?
any_stacked = False
for m in models:
if m._is_stacked:
any_stacked = True
if not any_stacked:
return []
return [
{
"model_type": "ensemble",
"is_stacked": True,
"name": "Ensemble_Stacked",
"status": "initialized",
"final_loss": None,
"train_time": None,
}
]
            # didn't find anything matching the step, return an empty list
return []
except Exception as e:
import traceback
print(str(e), traceback.format_exc())
return []
def get_params_stack_models(self, stacked_models):
if stacked_models is None or len(stacked_models) == 0:
return []
X_train_stacked_path = ""
added_columns = []
# model_types = ["Xgboost", "LightGBM", "CatBoost"]
model_types = [
"Xgboost",
"LightGBM",
"CatBoost",
"Random Forest",
"Extra Trees",
"Neural Network",
]
generated_params = {m: [] for m in model_types}
types_score_order = []
        # reuse old params
for m in stacked_models:
# use only Xgboost, LightGBM and CatBoost as stacked models
if m.get_type() not in model_types:
continue
if m.get_type() not in types_score_order:
types_score_order += [m.get_type()]
if m.params.get("injected_sample_weight", False):
                # don't use the boost_on_errors model for stacking
# there will be additional boost_on_errors step
continue
params = copy.deepcopy(m.params)
params["validation_strategy"]["X_path"] = params["validation_strategy"][
"X_path"
].replace("X.data", "X_stacked.data")
params["name"] = params["name"] + "_Stacked"
params["is_stacked"] = True
params["status"] = "initialized"
params["final_loss"] = None
params["train_time"] = None
params["data_type"] += "_stacked"
if self._optuna_time_budget is not None:
params["optuna_time_budget"] = self._optuna_time_budget
params["optuna_init_params"] = self._optuna_init_params
params["optuna_verbose"] = self._optuna_verbose
if "model_architecture_json" in params["learner"]:
# the new model will be created with wider input size
del params["learner"]["model_architecture_json"]
if self._ml_task == REGRESSION:
# scale added predictions in regression if the target was scaled (in the case of NN)
# this piece of code might not work, leave it as it is, because NN is not used for training with Stacked Data
target_preprocessing = params["preprocessing"]["target_preprocessing"]
scale = None
if "scale_log_and_normal" in target_preprocessing:
scale = "scale_log_and_normal"
elif "scale_normal" in target_preprocessing:
scale = "scale_normal"
if scale is not None:
for col in added_columns:
params["preprocessing"]["columns_preprocessing"][col] = [scale]
generated_params[m.get_type()] += [params]
return_params = []
for i in range(100):
total = 0
for m in types_score_order:
if generated_params[m]:
return_params += [generated_params[m].pop(0)]
total += len(generated_params[m])
if total == 0:
break
return return_params
def adjust_validation_params(self, models_cnt):
generated_params = []
for model_type in ["Decision Tree"]:
models_to_check = 1
logger.info(f"Generate parameters for {model_type} (#{models_cnt + 1})")
params = self._get_model_params(model_type, seed=1)
if params is None:
continue
params["name"] = self.get_model_name(model_type, models_cnt + 1)
params["status"] = "initialized"
params["final_loss"] = None
params["train_time"] = None
params["data_type"] = "original"
unique_params_key = MljarTuner.get_params_key(params)
if unique_params_key not in self._unique_params_keys:
generated_params += [params]
models_cnt += 1
return generated_params
def simple_algorithms_params(self, models_cnt):
generated_params = []
for model_type in ["Baseline", "Decision Tree", "Linear"]:
if model_type not in self._algorithms:
continue
models_to_check = 1
if model_type == "Decision Tree":
models_to_check = min(3, self._start_random_models)
for i in range(models_to_check):
logger.info(f"Generate parameters for {model_type} (#{models_cnt + 1})")
params = self._get_model_params(model_type, seed=i + 1)
if params is None:
continue
params["name"] = self.get_model_name(model_type, models_cnt + 1)
params["status"] = "initialized"
params["final_loss"] = None
params["train_time"] = None
params["data_type"] = "original"
unique_params_key = MljarTuner.get_params_key(params)
if unique_params_key not in self._unique_params_keys:
generated_params += [params]
models_cnt += 1
return generated_params
def skip_if_rows_cols_limit(self, model_type):
max_rows_limit = AlgorithmsRegistry.get_max_rows_limit(
self._ml_task, model_type
)
max_cols_limit = AlgorithmsRegistry.get_max_cols_limit(
self._ml_task, model_type
)
if max_rows_limit is not None:
if self._data_info["rows"] > max_rows_limit:
return True
if max_cols_limit is not None:
if self._data_info["cols"] > max_cols_limit:
return True
return False
def default_params(self, models_cnt):
generated_params = []
for model_type in [
"LightGBM",
"Xgboost",
"CatBoost",
"Neural Network",
"Random Forest",
"Extra Trees",
"Nearest Neighbors",
]:
if model_type not in self._algorithms:
continue
if self.skip_if_rows_cols_limit(model_type):
continue
logger.info(f"Get default parameters for {model_type} (#{models_cnt + 1})")
params = self._get_model_params(
model_type, seed=models_cnt + 1, params_type="default"
)
if params is None:
continue
special = "Default_" if self._optuna_time_budget is None else "Optuna_"
params["name"] = self.get_model_name(
model_type, models_cnt + 1, special=special
)
params["status"] = "initialized"
params["final_loss"] = None
params["train_time"] = None
params["data_type"] = "original"
if self._optuna_time_budget is not None:
params["optuna_time_budget"] = self._optuna_time_budget
params["optuna_init_params"] = self._optuna_init_params
params["optuna_verbose"] = self._optuna_verbose
unique_params_key = MljarTuner.get_params_key(params)
if unique_params_key not in self._unique_params_keys:
generated_params += [params]
models_cnt += 1
return generated_params
def get_not_so_random_params(self, models_cnt):
model_types = [
"Xgboost",
"LightGBM",
"CatBoost",
"Random Forest",
"Extra Trees",
"Neural Network",
"Nearest Neighbors",
]
generated_params = {m: [] for m in model_types}
for model_type in model_types:
if model_type not in self._algorithms:
continue
if self.skip_if_rows_cols_limit(model_type):
continue
# minus 1 because already have 1 default
for i in range(self._start_random_models - 1):
logger.info(
f"Generate not-so-random parameters for {model_type} (#{models_cnt+1})"
)
params = self._get_model_params(model_type, seed=i + 1)
if params is None:
continue
params["name"] = self.get_model_name(model_type, models_cnt + 1)
params["status"] = "initialized"
params["final_loss"] = None
params["train_time"] = None
params["data_type"] = "original"
if self._optuna_time_budget is not None:
params["optuna_time_budget"] = self._optuna_time_budget
params["optuna_init_params"] = self._optuna_init_params
params["optuna_verbose"] = self._optuna_verbose
unique_params_key = MljarTuner.get_params_key(params)
if unique_params_key not in self._unique_params_keys:
generated_params[model_type] += [params]
models_cnt += 1
"""
return_params = []
for i in range(100):
total = 0
for m in ["Xgboost", "LightGBM", "CatBoost"]:
if generated_params[m]:
return_params += [generated_params[m].pop(0)]
total += len(generated_params[m])
if total == 0:
break
rest_params = []
for m in [
"Random Forest",
"Extra Trees",
"Neural Network",
"Nearest Neighbors",
]:
rest_params += generated_params[m]
if rest_params:
np.random.shuffle(rest_params)
return_params += rest_params
"""
return_params = []
for i in range(100):
total = 0
for m in [
"LightGBM",
"Xgboost",
"CatBoost",
"Random Forest",
"Extra Trees",
"Neural Network",
"Nearest Neighbors",
]:
if generated_params[m]:
return_params += [generated_params[m].pop(0)]
total += len(generated_params[m])
if total == 0:
break
return return_params
def get_hill_climbing_params(self, current_models):
df_models, algorithms = self.df_models_algorithms(current_models)
generated_params = []
counts = {model_type: 0 for model_type in algorithms}
for i in range(df_models.shape[0]):
model_type = df_models["model_type"].iloc[i]
counts[model_type] += 1
if counts[model_type] > self._top_models_to_improve:
continue
m = df_models["model"].iloc[i]
for p in HillClimbing.get(
m.params.get("learner"), self._ml_task, len(current_models) + self._seed
):
model_indices = [
int(m.get_name().split("_")[0]) for m in current_models
]
model_max_index = np.max(model_indices)
logger.info(
"Hill climbing step, for model #{0}".format(model_max_index + 1)
)
if p is not None:
all_params = copy.deepcopy(m.params)
all_params["learner"] = p
all_params["name"] = self.get_model_name(
all_params["learner"]["model_type"],
model_max_index + 1 + len(generated_params),
)
if "golden_features" in all_params["preprocessing"]:
all_params["name"] += "_GoldenFeatures"
if "drop_features" in all_params["preprocessing"] and len(
all_params["preprocessing"]["drop_features"]
):
all_params["name"] += "_SelectedFeatures"
all_params["status"] = "initialized"
all_params["final_loss"] = None
all_params["train_time"] = None
unique_params_key = MljarTuner.get_params_key(all_params)
if unique_params_key not in self._unique_params_keys:
generated_params += [all_params]
return generated_params
def get_all_int_categorical_strategy(self, current_models, total_time_limit):
return self.get_categorical_strategy(
current_models, PreprocessingTuner.CATEGORICALS_ALL_INT, total_time_limit
)
def get_mix_categorical_strategy(self, current_models, total_time_limit):
return self.get_categorical_strategy(
current_models, PreprocessingTuner.CATEGORICALS_MIX, total_time_limit
)
def get_loo_categorical_strategy(self, current_models, total_time_limit):
return self.get_categorical_strategy(
current_models, PreprocessingTuner.CATEGORICALS_LOO, total_time_limit
)
def get_categorical_strategy(self, current_models, strategy, total_time_limit):
df_models, algorithms = self.df_models_algorithms(
current_models, time_limit=0.1 * total_time_limit
)
generated_params = []
for m_type in algorithms:
# try to add categorical strategy only for below algorithms
if m_type not in [
"Xgboost",
# "LightGBM", # use built-in categoricals (but need to int encode)
# "Neural Network",
# "Random Forest",
# "Extra Trees",
]:
continue
models = df_models[df_models.model_type == m_type]["model"]
for i in range(min(1, len(models))):
m = models.iloc[i]
params = copy.deepcopy(m.params)
cols_preprocessing = params["preprocessing"]["columns_preprocessing"]
for col, preproc in params["preprocessing"][
"columns_preprocessing"
].items():
new_preproc = []
convert_categorical = False
for p in preproc:
if "categorical" not in p:
new_preproc += [p]
else:
convert_categorical = True
col_data_info = self._data_info["columns_info"].get(col)
few_categories = False
if col_data_info is not None and "few_categories" in col_data_info:
few_categories = True
if convert_categorical:
if strategy == PreprocessingTuner.CATEGORICALS_ALL_INT:
new_preproc += [PreprocessingCategorical.CONVERT_INTEGER]
elif strategy == PreprocessingTuner.CATEGORICALS_LOO:
new_preproc += [PreprocessingCategorical.CONVERT_LOO]
elif strategy == PreprocessingTuner.CATEGORICALS_MIX:
if few_categories:
new_preproc += [
PreprocessingCategorical.CONVERT_ONE_HOT
]
else:
new_preproc += [
PreprocessingCategorical.CONVERT_INTEGER
]
cols_preprocessing[col] = new_preproc
params["preprocessing"]["columns_preprocessing"] = cols_preprocessing
# if there is already a name of categorical strategy in the name
# please remove it to avoid confusion (I hope!)
for st in [
PreprocessingTuner.CATEGORICALS_LOO,
PreprocessingTuner.CATEGORICALS_ALL_INT,
PreprocessingTuner.CATEGORICALS_MIX,
]:
params["name"] = params["name"].replace("_" + st, "")
params["name"] += f"_{strategy}"
params["status"] = "initialized"
params["final_loss"] = None
params["train_time"] = None
params["data_type"] = params.get("data_type", "") + "_" + strategy
if self._optuna_time_budget is not None:
params["optuna_time_budget"] = self._optuna_time_budget
params["optuna_init_params"] = self._optuna_init_params
params["optuna_verbose"] = self._optuna_verbose
if "model_architecture_json" in params["learner"]:
del params["learner"]["model_architecture_json"]
unique_params_key = MljarTuner.get_params_key(params)
if unique_params_key not in self._unique_params_keys:
generated_params += [params]
return generated_params
def df_models_algorithms(
self, current_models, time_limit=None, exclude_golden=False
):
scores = [m.get_final_loss() for m in current_models]
model_types = [m.get_type() for m in current_models]
names = [m.get_name() for m in current_models]
train_times = [m.get_train_time() for m in current_models]
df_models = pd.DataFrame(
{
"model": current_models,
"score": scores,
"model_type": model_types,
"name": names,
"train_time": train_times,
}
)
if time_limit is not None:
df_models = df_models[df_models.train_time < time_limit]
df_models.reset_index(drop=True, inplace=True)
if exclude_golden:
ii = df_models["name"].apply(lambda x: "GoldenFeatures" in x)
df_models = df_models[~ii]
df_models.reset_index(drop=True, inplace=True)
df_models.sort_values(by="score", ascending=True, inplace=True)
model_types = list(df_models.model_type)
u, idx = np.unique(model_types, return_index=True)
algorithms = u[np.argsort(idx)]
return df_models, algorithms
def get_golden_features_params(
self, current_models, results_path, total_time_limit
):
df_models, algorithms = self.df_models_algorithms(
current_models, time_limit=0.1 * total_time_limit
)
generated_params = []
for i in range(min(3, df_models.shape[0])):
m = df_models["model"].iloc[i]
params = copy.deepcopy(m.params)
params["preprocessing"]["golden_features"] = {
"results_path": results_path,
"ml_task": self._ml_task,
}
if (
self._golden_features is not None
and not isinstance(self._golden_features, bool)
and isinstance(self._golden_features, int)
):
params["preprocessing"]["golden_features"][
"features_count"
] = self._golden_features
params["name"] += "_GoldenFeatures"
params["status"] = "initialized"
params["final_loss"] = None
params["train_time"] = None
params["data_type"] = params.get("data_type", "") + "_golden_features"
if self._optuna_time_budget is not None:
params["optuna_time_budget"] = self._optuna_time_budget
params["optuna_init_params"] = self._optuna_init_params
params["optuna_verbose"] = self._optuna_verbose
if "model_architecture_json" in params["learner"]:
del params["learner"]["model_architecture_json"]
unique_params_key = MljarTuner.get_params_key(params)
if unique_params_key not in self._unique_params_keys:
generated_params += [params]
return generated_params
def get_kmeans_features_params(
self, current_models, results_path, total_time_limit
):
df_models, algorithms = self.df_models_algorithms(
current_models, time_limit=0.1 * total_time_limit, exclude_golden=True
)
generated_params = []
for i in range(min(3, df_models.shape[0])):
m = df_models["model"].iloc[i]
params = copy.deepcopy(m.params)
params["preprocessing"]["kmeans_features"] = {"results_path": results_path}
params["name"] += "_KMeansFeatures"
params["status"] = "initialized"
params["final_loss"] = None
params["train_time"] = None
params["data_type"] = params.get("data_type", "") + "_kmeans_features"
if self._optuna_time_budget is not None:
params["optuna_time_budget"] = self._optuna_time_budget
params["optuna_init_params"] = self._optuna_init_params
params["optuna_verbose"] = self._optuna_verbose
if "model_architecture_json" in params["learner"]:
del params["learner"]["model_architecture_json"]
unique_params_key = MljarTuner.get_params_key(params)
if unique_params_key not in self._unique_params_keys:
generated_params += [params]
return generated_params
def time_features_selection(self, current_models, total_time_limit):
df_models, algorithms = self.df_models_algorithms(
current_models, time_limit=0.1 * total_time_limit
)
time_needed = 0
for m_type in algorithms:
if m_type not in [
"Xgboost",
"LightGBM",
"CatBoost",
"Neural Network",
"Random Forest",
"Extra Trees",
]:
continue
models = df_models[df_models.model_type == m_type]["model"]
for i in range(min(1, len(models))):
m = models.iloc[i]
if time_needed == 0:
                    # the best model will be used two times:
                    # once for inserting the random feature
                    # once with the selected features
time_needed += 2.0 * m.get_train_time()
else:
time_needed += m.get_train_time()
return time_needed
def get_params_to_insert_random_feature(self, current_models, total_time_limit):
time_needed = self.time_features_selection(current_models, total_time_limit)
if time_needed > 0.1 * total_time_limit:
print("Not enough time to perform features selection. Skip")
print(
"Time needed for features selection ~", np.round(time_needed), "seconds"
)
print(
f"Please increase total_time_limit to at least ({int(np.round(10.0*time_needed))+60} seconds) to have features selection"
)
return None
df_models, algorithms = self.df_models_algorithms(
current_models, time_limit=0.1 * total_time_limit
)
if df_models.shape[0] == 0:
return None
m = df_models.iloc[0]["model"]
params = copy.deepcopy(m.params)
params["preprocessing"]["add_random_feature"] = True
params["name"] += "_RandomFeature"
params["status"] = "initialized"
params["final_loss"] = None
params["train_time"] = None
params["explain_level"] = 1
if "model_architecture_json" in params["learner"]:
del params["learner"]["model_architecture_json"]
if self._optuna_time_budget is not None:
            # don't tune the algorithm with the random feature inserted
            # the algorithm will be tuned after feature selection
params["optuna_time_budget"] = None
params["optuna_init_params"] = {}
unique_params_key = MljarTuner.get_params_key(params)
if unique_params_key not in self._unique_params_keys:
return [params]
return None
def get_features_selection_params(
self, current_models, results_path, total_time_limit
):
fname = os.path.join(results_path, "drop_features.json")
if not os.path.exists(fname):
return None
        with open(fname, "r") as fin:
            drop_features = json.load(fin)
print("Drop features", drop_features)
        # in case of dropping only one feature (random_feature)
# skip this step
if len(drop_features) <= 1:
return None
df_models, algorithms = self.df_models_algorithms(
current_models, time_limit=0.1 * total_time_limit
)
generated_params = []
for m_type in algorithms:
# try to do features selection only for below algorithms
if m_type not in [
"Xgboost",
"LightGBM",
"CatBoost",
"Neural Network",
"Random Forest",
"Extra Trees",
]:
continue
models = df_models[df_models.model_type == m_type]["model"]
for i in range(min(1, len(models))):
m = models.iloc[i]
params = copy.deepcopy(m.params)
params["preprocessing"]["drop_features"] = drop_features
params["name"] += "_SelectedFeatures"
params["status"] = "initialized"
params["final_loss"] = None
params["train_time"] = None
params["data_type"] = (
params.get("data_type", "") + "_features_selection"
)
if self._optuna_time_budget is not None:
params["optuna_time_budget"] = self._optuna_time_budget
params["optuna_init_params"] = self._optuna_init_params
params["optuna_verbose"] = self._optuna_verbose
if "model_architecture_json" in params["learner"]:
del params["learner"]["model_architecture_json"]
unique_params_key = MljarTuner.get_params_key(params)
if unique_params_key not in self._unique_params_keys:
generated_params += [params]
return generated_params
def _get_model_params(self, model_type, seed, params_type="random"):
model_info = AlgorithmsRegistry.registry[self._ml_task][model_type]
model_params = None
if params_type == "default":
model_params = model_info["default_params"]
model_params["seed"] = seed
else:
model_params = RandomParameters.get(model_info["params"], seed + self._seed)
if model_params is None:
return None
# set eval metric
if model_info["class"].algorithm_short_name == "Xgboost":
model_params["eval_metric"] = xgboost_eval_metric(
self._ml_task, self._eval_metric
)
if model_info["class"].algorithm_short_name == "LightGBM":
metric, custom_metric = lightgbm_eval_metric(
self._ml_task, self._eval_metric
)
model_params["metric"] = metric
model_params["custom_eval_metric_name"] = custom_metric
if model_info["class"].algorithm_short_name == "CatBoost":
model_params["eval_metric"] = catboost_eval_metric(
self._ml_task, self._eval_metric
)
elif model_info["class"].algorithm_short_name in [
"Random Forest",
"Extra Trees",
]:
model_params["eval_metric_name"] = self._eval_metric
model_params["ml_task"] = self._ml_task
required_preprocessing = model_info["required_preprocessing"]
model_additional = model_info["additional"]
preprocessing_params = PreprocessingTuner.get(
required_preprocessing, self._data_info, self._ml_task
)
model_params = {
"additional": model_additional,
"preprocessing": preprocessing_params,
"validation_strategy": self._validation_strategy,
"learner": {
"model_type": model_info["class"].algorithm_short_name,
"ml_task": self._ml_task,
"n_jobs": self._n_jobs,
**model_params,
},
"automl_random_state": self._seed,
}
if self._data_info.get("num_class") is not None:
model_params["learner"]["num_class"] = self._data_info.get("num_class")
model_params["ml_task"] = self._ml_task
model_params["explain_level"] = self._explain_level
return model_params
@staticmethod
def get_params_key(params):
key = "key_"
for main_key in ["preprocessing", "learner", "validation_strategy"]:
key += "_" + main_key
for k in sorted(params[main_key]):
if k in ["seed", "explain_level"]:
continue
key += "_{}_{}".format(k, params[main_key][k])
return key
def add_key(self, model):
if model.get_type() != "Ensemble":
key = MljarTuner.get_params_key(model.params)
self._unique_params_keys += [key]
def boost_params(self, current_models, results_path, total_time_limit):
df_models, algorithms = self.df_models_algorithms(
current_models, time_limit=0.1 * total_time_limit
)
best_model = None
for i in range(df_models.shape[0]):
if df_models["model_type"].iloc[i] in [
"Ensemble",
"Neural Network",
"Nearest Neighbors",
]:
continue
if "RandomFeature" in df_models["model"].iloc[i].get_name():
continue
best_model = df_models["model"].iloc[i]
break
if best_model is None:
return []
# load predictions
oof = best_model.get_out_of_folds()
predictions = oof[[c for c in oof.columns if c.startswith("prediction")]]
y = oof["target"]
if self._ml_task == MULTICLASS_CLASSIFICATION:
oh = OneHotEncoder(sparse=False)
y_encoded = oh.fit_transform(np.array(y).reshape(-1, 1))
residua = np.sum(
np.abs(np.array(y_encoded) - np.array(predictions)), axis=1
)
else:
residua = np.abs(np.array(y) - np.array(predictions).ravel())
df_preds = pd.DataFrame(
{"res": residua, "lp": range(residua.shape[0]), "target": np.array(y)}
)
df_preds = df_preds.sort_values(by="res", ascending=True)
df_preds["order"] = range(residua.shape[0])
df_preds["order"] = (df_preds["order"]) / residua.shape[0] / 5.0 + 0.9
df_preds = df_preds.sort_values(by="lp", ascending=True)
sample_weight_path = os.path.join(
results_path, best_model.get_name() + "_sample_weight.data"
)
dump_data(
sample_weight_path, pd.DataFrame({"sample_weight": df_preds["order"]})
)
generated_params = []
params = copy.deepcopy(best_model.params)
params["validation_strategy"]["sample_weight_path"] = sample_weight_path
params["injected_sample_weight"] = True
params["name"] += "_BoostOnErrors"
params["status"] = "initialized"
params["final_loss"] = None
params["train_time"] = None
params["data_type"] = "boost_on_error"
if "model_architecture_json" in params["learner"]:
del params["learner"]["model_architecture_json"]
if self._optuna_time_budget is not None:
params["optuna_time_budget"] = self._optuna_time_budget
params["optuna_init_params"] = self._optuna_init_params
params["optuna_verbose"] = self._optuna_verbose
unique_params_key = MljarTuner.get_params_key(params)
if unique_params_key not in self._unique_params_keys:
generated_params += [params]
return generated_params
| 38.310724 | 137 | 0.566548 |
794148f576b9e215c3c6963e73dffe98204b7717 | 1,258 | py | Python | configs/_base_/models/ccnet_r50-d8.py | weiyx16/mmsegmentation | 6d35d76195f173fbc6b119a7d7815e67d78024c6 | [
"Apache-2.0"
] | 903 | 2021-06-13T04:45:03.000Z | 2022-03-31T13:21:50.000Z | configs/_base_/models/ccnet_r50-d8.py | weiyx16/mmsegmentation | 6d35d76195f173fbc6b119a7d7815e67d78024c6 | [
"Apache-2.0"
] | 72 | 2021-06-13T13:01:49.000Z | 2022-03-30T09:19:34.000Z | configs/_base_/models/ccnet_r50-d8.py | weiyx16/mmsegmentation | 6d35d76195f173fbc6b119a7d7815e67d78024c6 | [
"Apache-2.0"
] | 159 | 2021-04-13T01:23:15.000Z | 2022-03-31T18:56:09.000Z | # model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 2, 4),
strides=(1, 2, 1, 1),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
decode_head=dict(
type='CCHead',
in_channels=2048,
in_index=3,
channels=512,
recurrence=2,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=1024,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
| 27.955556 | 74 | 0.583466 |
794148f716c4bab5f1ad55cd869525233a9a0d14 | 2,571 | py | Python | Prova-b1 (1700414).py | ithammar/prova-b1 | b30e5a0a74d2440ed8426ab2e40b0795c3348255 | [
"Apache-2.0"
] | null | null | null | Prova-b1 (1700414).py | ithammar/prova-b1 | b30e5a0a74d2440ed8426ab2e40b0795c3348255 | [
"Apache-2.0"
] | null | null | null | Prova-b1 (1700414).py | ithammar/prova-b1 | b30e5a0a74d2440ed8426ab2e40b0795c3348255 | [
"Apache-2.0"
] | null | null | null | ip = ['192.168.1.0'
,'0.168.1.31'
,'192.168.1.32'
,'192.168.1.63'
,'-123.168.1.64'
,'-192.168.1.95'
,'192.168.1.127'
,'192.168.1.159'
,'192.168.1.192'
,'192.168.1.223'
,'008.168.1.255'
,'192.168.1.1'
,'192.168.1.33'
,'192.168.1.65'
,'192.168.1.97'
,'192.168.1.129'
,'355.168.1.161'
,'192.168.1.193'
,'192.168.1.225'
,'-798.168.1.128'
,'192.168.1.191'
,'192.168.1.224'
,'192.168.1.96']
valido = []
invalido = []
for endereco in ip:
    if len(endereco[0:3]) > 1:
        valido.append(endereco)
    else:
        invalido.append(endereco)
print("Ip válido", valido)
print("Ip inválido", invalido)
| 16.914474 | 31 | 0.434461 |
79414a87b94aa3dd9554ef49c289928d6a01cf94 | 827 | py | Python | lab1/src/generators/optimization_task_generator.py | pavponn/optimization-methods | 00db08c1b28a1ffad781fb918869247a4f2ab329 | [
"MIT"
] | null | null | null | lab1/src/generators/optimization_task_generator.py | pavponn/optimization-methods | 00db08c1b28a1ffad781fb918869247a4f2ab329 | [
"MIT"
] | null | null | null | lab1/src/generators/optimization_task_generator.py | pavponn/optimization-methods | 00db08c1b28a1ffad781fb918869247a4f2ab329 | [
"MIT"
] | null | null | null | import numpy as np
from functools import partial
from lab1.src.generators.quadratic_form_generator import generate_matrix_with_condition_number
def generate_function_by_quadratic_matrix(m):
def foo(x):
return sum(m[i][j] * x[i] * x[j] for i in range(len(m)) for j in range(len(m)))
return foo
def generate_grad_by_quadratic_matrix(m):
def grad_component(i, x):
return sum(np.array(m[i]) * np.array(x) * [2 if j == i else 1 for j in range(len(m))])
def grad(x):
return np.array([grad_component(i, x) for i in range(len(m))])
return grad
def generate_optimization_task(n: int, k: float):
matrix = generate_matrix_with_condition_number(n, k)
f = generate_function_by_quadratic_matrix(matrix)
f_grad = generate_grad_by_quadratic_matrix(matrix)
return f, f_grad
| 28.517241 | 94 | 0.709794 |
79414b40b2160d6b8e22e510bd52a09a4873a083 | 1,564 | py | Python | samples/aws_lambda/aws_lambda_oauth.py | misscoded/bolt-python | ed26ea039c37cbd00551e25deac0fb1871c03aed | [
"MIT"
] | 1 | 2020-11-11T19:19:20.000Z | 2020-11-11T19:19:20.000Z | samples/aws_lambda/aws_lambda_oauth.py | misscoded/bolt-python | ed26ea039c37cbd00551e25deac0fb1871c03aed | [
"MIT"
] | null | null | null | samples/aws_lambda/aws_lambda_oauth.py | misscoded/bolt-python | ed26ea039c37cbd00551e25deac0fb1871c03aed | [
"MIT"
] | null | null | null | # ------------------------------------------------
# instead of slack_bolt in requirements.txt
import sys
sys.path.insert(1, "vendor")
# ------------------------------------------------
import logging
from slack_bolt import App
from slack_bolt.adapter.aws_lambda import SlackRequestHandler
from slack_bolt.adapter.aws_lambda.lambda_s3_oauth_flow import LambdaS3OAuthFlow
# process_before_response must be True when running on FaaS
app = App(process_before_response=True, oauth_flow=LambdaS3OAuthFlow(),)
@app.event("app_mention")
def handle_app_mentions(payload, say, logger):
logger.info(payload)
say("What's up?")
@app.command("/hello-bolt-python-lambda")
def respond_to_slack_within_3_seconds(ack):
# This method is for synchronous communication with the Slack API server
ack("Thanks!")
SlackRequestHandler.clear_all_log_handlers()
logging.basicConfig(format="%(asctime)s %(message)s", level=logging.DEBUG)
def handler(event, context):
slack_handler = SlackRequestHandler(app=app)
return slack_handler.handle(event, context)
# # -- OAuth flow -- #
# export SLACK_SIGNING_SECRET=***
# export SLACK_BOT_TOKEN=xoxb-***
# export SLACK_CLIENT_ID=111.111
# export SLACK_CLIENT_SECRET=***
# export SLACK_SCOPES=app_mentions:read,chat:write
# AWS IAM Role: bolt_python_s3_storage
# - AmazonS3FullAccess
# - AWSLambdaBasicExecutionRole
# rm -rf latest_slack_bolt && cp -pr ../../src latest_slack_bolt
# pip install python-lambda
# lambda deploy --config-file aws_lambda_oauth_config.yaml --requirements requirements_oauth.txt
| 29.509434 | 96 | 0.7289 |
79414c4e59a44c50aec463ae1d835c67925ad598 | 17,546 | py | Python | dvc/repo/__init__.py | shizacat/dvc | 11e56dbdf921c6eacf603bcdcc2e7a46ac7a3b88 | [
"Apache-2.0"
] | null | null | null | dvc/repo/__init__.py | shizacat/dvc | 11e56dbdf921c6eacf603bcdcc2e7a46ac7a3b88 | [
"Apache-2.0"
] | null | null | null | dvc/repo/__init__.py | shizacat/dvc | 11e56dbdf921c6eacf603bcdcc2e7a46ac7a3b88 | [
"Apache-2.0"
] | null | null | null | import os
from contextlib import contextmanager
from functools import wraps
from dvc.ignore import CleanTree
from dvc.compat import fspath_py35
from funcy import cached_property, cat, first
from dvc.config import Config
from dvc.exceptions import (
FileMissingError,
IsADirectoryError,
NotDvcRepoError,
OutputNotFoundError,
)
from dvc.path_info import PathInfo
from dvc.remote.base import RemoteActionNotImplemented
from dvc.utils.fs import path_isin
from .graph import check_acyclic, get_pipeline, get_pipelines
def locked(f):
@wraps(f)
def wrapper(repo, *args, **kwargs):
with repo.lock, repo.state:
repo._reset()
ret = f(repo, *args, **kwargs)
# Our graph cache is no longer valid after we release the repo.lock
repo._reset()
return ret
return wrapper
class Repo(object):
DVC_DIR = ".dvc"
from dvc.repo.destroy import destroy
from dvc.repo.install import install
from dvc.repo.add import add
from dvc.repo.remove import remove
from dvc.repo.ls import ls
from dvc.repo.lock import lock as lock_stage
from dvc.repo.move import move
from dvc.repo.run import run
from dvc.repo.imp import imp
from dvc.repo.imp_url import imp_url
from dvc.repo.reproduce import reproduce
from dvc.repo.checkout import _checkout
from dvc.repo.push import push
from dvc.repo.fetch import _fetch
from dvc.repo.pull import pull
from dvc.repo.status import status
from dvc.repo.gc import gc
from dvc.repo.commit import commit
from dvc.repo.diff import diff
from dvc.repo.brancher import brancher
from dvc.repo.get import get
from dvc.repo.get_url import get_url
from dvc.repo.update import update
def __init__(self, root_dir=None):
from dvc.state import State
from dvc.lock import make_lock
from dvc.scm import SCM
from dvc.cache import Cache
from dvc.data_cloud import DataCloud
from dvc.repo.metrics import Metrics
from dvc.repo.params import Params
from dvc.scm.tree import WorkingTree
from dvc.repo.tag import Tag
from dvc.utils.fs import makedirs
root_dir = self.find_root(root_dir)
self.root_dir = os.path.abspath(os.path.realpath(root_dir))
self.dvc_dir = os.path.join(self.root_dir, self.DVC_DIR)
self.config = Config(self.dvc_dir)
no_scm = self.config["core"].get("no_scm", False)
self.scm = SCM(self.root_dir, no_scm=no_scm)
self.tree = WorkingTree(self.root_dir)
self.tmp_dir = os.path.join(self.dvc_dir, "tmp")
makedirs(self.tmp_dir, exist_ok=True)
hardlink_lock = self.config["core"].get("hardlink_lock", False)
self.lock = make_lock(
os.path.join(self.dvc_dir, "lock"),
tmp_dir=os.path.join(self.dvc_dir, "tmp"),
hardlink_lock=hardlink_lock,
friendly=True,
)
# NOTE: storing state and link_state in the repository itself to avoid
# any possible state corruption in 'shared cache dir' scenario.
self.state = State(self)
self.cache = Cache(self)
self.cloud = DataCloud(self)
self.metrics = Metrics(self)
self.params = Params(self)
self.tag = Tag(self)
self._ignore()
@property
def tree(self):
return self._tree
@tree.setter
def tree(self, tree):
self._tree = tree if isinstance(tree, CleanTree) else CleanTree(tree)
# Our graph cache is no longer valid, as it was based on the previous
# tree.
self._reset()
def __repr__(self):
return "{}: '{}'".format(self.__class__.__name__, self.root_dir)
@classmethod
def find_root(cls, root=None):
root_dir = os.path.realpath(root or os.curdir)
if not os.path.isdir(root_dir):
raise NotDvcRepoError("directory '{}' does not exist".format(root))
while True:
dvc_dir = os.path.join(root_dir, cls.DVC_DIR)
if os.path.isdir(dvc_dir):
return root_dir
if os.path.ismount(root_dir):
break
root_dir = os.path.dirname(root_dir)
message = (
"you are not inside of a DVC repository "
"(checked up to mount point '{}')"
).format(root_dir)
raise NotDvcRepoError(message)
@classmethod
def find_dvc_dir(cls, root=None):
root_dir = cls.find_root(root)
return os.path.join(root_dir, cls.DVC_DIR)
@staticmethod
def init(root_dir=os.curdir, no_scm=False, force=False, subdir=False):
from dvc.repo.init import init
init(root_dir=root_dir, no_scm=no_scm, force=force, subdir=subdir)
return Repo(root_dir)
def unprotect(self, target):
return self.cache.local.unprotect(PathInfo(target))
def _ignore(self):
from dvc.updater import Updater
updater = Updater(self.dvc_dir)
flist = (
[self.config.files["local"], updater.updater_file]
+ [self.lock.lockfile, updater.lock.lockfile, self.tmp_dir]
+ self.state.files
)
if path_isin(self.cache.local.cache_dir, self.root_dir):
flist += [self.cache.local.cache_dir]
self.scm.ignore_list(flist)
def check_modified_graph(self, new_stages):
"""Generate graph including the new stage to check for errors"""
# Building graph might be costly for the ones with many DVC-files,
# so we provide this undocumented hack to skip it. See [1] for
# more details. The hack can be used as:
#
# repo = Repo(...)
# repo._skip_graph_checks = True
# repo.add(...)
#
# A user should care about not duplicating outs and not adding cycles,
# otherwise DVC might have an undefined behaviour.
#
# [1] https://github.com/iterative/dvc/issues/2671
if not getattr(self, "_skip_graph_checks", False):
self._collect_graph(self.stages + new_stages)
def collect(self, target, with_deps=False, recursive=False, graph=None):
import networkx as nx
from ..dvcfile import Dvcfile
if not target:
return list(graph) if graph else self.stages
target = os.path.abspath(target)
if recursive and os.path.isdir(target):
stages = nx.dfs_postorder_nodes(graph or self.graph)
return [stage for stage in stages if path_isin(stage.path, target)]
stage = Dvcfile(self, target).load()
# Optimization: do not collect the graph for a specific target
if not with_deps:
return [stage]
pipeline = get_pipeline(get_pipelines(graph or self.graph), stage)
return list(nx.dfs_postorder_nodes(pipeline, stage))
def collect_granular(self, target, *args, **kwargs):
from ..dvcfile import Dvcfile
if not target:
return [(stage, None) for stage in self.stages]
# Optimization: do not collect the graph for a specific .dvc target
if Dvcfile.is_valid_filename(target) and not kwargs.get("with_deps"):
return [(Dvcfile(self, target).load(), None)]
try:
(out,) = self.find_outs_by_path(target, strict=False)
filter_info = PathInfo(os.path.abspath(target))
return [(out.stage, filter_info)]
except OutputNotFoundError:
stages = self.collect(target, *args, **kwargs)
return [(stage, None) for stage in stages]
def used_cache(
self,
targets=None,
all_branches=False,
with_deps=False,
all_tags=False,
all_commits=False,
remote=None,
force=False,
jobs=None,
recursive=False,
):
"""Get the stages related to the given target and collect
the `info` of its outputs.
This is useful to know what files from the cache are _in use_
(namely, a file described as an output on a stage).
The scope is, by default, the working directory, but you can use
`all_branches`/`all_tags`/`all_commits` to expand the scope.
Returns:
A dictionary with Schemes (representing output's location) mapped
to items containing the output's `dumpd` names and the output's
children (if the given output is a directory).
"""
from dvc.cache import NamedCache
cache = NamedCache()
for branch in self.brancher(
all_branches=all_branches,
all_tags=all_tags,
all_commits=all_commits,
):
targets = targets or [None]
pairs = cat(
self.collect_granular(
target, recursive=recursive, with_deps=with_deps
)
for target in targets
)
suffix = "({})".format(branch) if branch else ""
for stage, filter_info in pairs:
used_cache = stage.get_used_cache(
remote=remote,
force=force,
jobs=jobs,
filter_info=filter_info,
)
cache.update(used_cache, suffix=suffix)
return cache
def _collect_graph(self, stages=None):
"""Generate a graph by using the given stages on the given directory
The nodes of the graph are the stage's path relative to the root.
Edges are created when the output of one stage is used as a
dependency in other stage.
The direction of the edges goes from the stage to its dependency:
For example, running the following:
$ dvc run -o A "echo A > A"
$ dvc run -d A -o B "echo B > B"
$ dvc run -d B -o C "echo C > C"
Will create the following graph:
ancestors <--
|
C.dvc -> B.dvc -> A.dvc
| |
| --> descendants
|
------- pipeline ------>
|
v
(weakly connected components)
Args:
stages (list): used to build a graph, if None given, collect stages
in the repository.
Raises:
OutputDuplicationError: two outputs with the same path
StagePathAsOutputError: stage inside an output directory
OverlappingOutputPathsError: output inside output directory
CyclicGraphError: resulting graph has cycles
"""
import networkx as nx
from pygtrie import Trie
from dvc.exceptions import (
OutputDuplicationError,
StagePathAsOutputError,
OverlappingOutputPathsError,
)
G = nx.DiGraph()
stages = stages or self.stages
stages = [stage for stage in stages if stage]
outs = Trie() # Use trie to efficiently find overlapping outs and deps
for stage in stages:
for out in stage.outs:
out_key = out.path_info.parts
# Check for dup outs
if out_key in outs:
dup_stages = [stage, outs[out_key].stage]
raise OutputDuplicationError(str(out), dup_stages)
# Check for overlapping outs
if outs.has_subtrie(out_key):
parent = out
overlapping = first(outs.values(prefix=out_key))
else:
parent = outs.shortest_prefix(out_key).value
overlapping = out
if parent and overlapping:
msg = (
"Paths for outs:\n'{}'('{}')\n'{}'('{}')\n"
"overlap. To avoid unpredictable behaviour, "
"rerun command with non overlapping outs paths."
).format(
str(parent),
parent.stage.relpath,
str(overlapping),
overlapping.stage.relpath,
)
raise OverlappingOutputPathsError(parent, overlapping, msg)
outs[out_key] = out
for stage in stages:
out = outs.shortest_prefix(PathInfo(stage.path).parts).value
if out:
raise StagePathAsOutputError(stage, str(out))
# Building graph
G.add_nodes_from(stages)
for stage in stages:
for dep in stage.deps:
if dep.path_info is None:
continue
dep_key = dep.path_info.parts
overlapping = list(n.value for n in outs.prefixes(dep_key))
if outs.has_subtrie(dep_key):
overlapping.extend(outs.values(prefix=dep_key))
G.add_edges_from((stage, out.stage) for out in overlapping)
check_acyclic(G)
return G
@cached_property
def graph(self):
return self._collect_graph()
@cached_property
def pipelines(self):
return get_pipelines(self.graph)
@cached_property
def stages(self):
"""
Walks down the root directory looking for Dvcfiles,
skipping the directories that are related with
any SCM (e.g. `.git`), DVC itself (`.dvc`), or directories
tracked by DVC (e.g. `dvc add data` would skip `data/`)
NOTE: For large repos, this could be an expensive
operation. Consider using some memoization.
"""
from ..dvcfile import Dvcfile
stages = []
outs = set()
for root, dirs, files in self.tree.walk(self.root_dir):
for fname in files:
path = os.path.join(root, fname)
if not Dvcfile.is_valid_filename(path):
continue
stage = Dvcfile(self, path).load()
stages.append(stage)
for out in stage.outs:
if out.scheme == "local":
outs.add(out.fspath)
dirs[:] = [d for d in dirs if os.path.join(root, d) not in outs]
return stages
def find_outs_by_path(self, path, outs=None, recursive=False, strict=True):
if not outs:
outs = [out for stage in self.stages for out in stage.outs]
abs_path = os.path.abspath(path)
path_info = PathInfo(abs_path)
match = path_info.__eq__ if strict else path_info.isin_or_eq
def func(out):
if out.scheme == "local" and match(out.path_info):
return True
if recursive and out.path_info.isin(path_info):
return True
return False
matched = list(filter(func, outs))
if not matched:
raise OutputNotFoundError(path, self)
return matched
def find_out_by_relpath(self, relpath):
path = os.path.join(self.root_dir, relpath)
(out,) = self.find_outs_by_path(path)
return out
def is_dvc_internal(self, path):
path_parts = os.path.normpath(path).split(os.path.sep)
return self.DVC_DIR in path_parts
@contextmanager
def open_by_relpath(self, path, remote=None, mode="r", encoding=None):
"""Opens a specified resource as a file descriptor"""
cause = None
try:
out = self.find_out_by_relpath(path)
except OutputNotFoundError as exc:
out = None
cause = exc
if out and out.use_cache:
try:
with self._open_cached(out, remote, mode, encoding) as fd:
yield fd
return
except FileNotFoundError as exc:
raise FileMissingError(path) from exc
abs_path = os.path.join(self.root_dir, path)
if os.path.exists(abs_path):
with open(abs_path, mode=mode, encoding=encoding) as fd:
yield fd
return
raise FileMissingError(path) from cause
def _open_cached(self, out, remote=None, mode="r", encoding=None):
if out.isdir():
raise IsADirectoryError("Can't open a dir")
cache_file = self.cache.local.checksum_to_path_info(out.checksum)
cache_file = fspath_py35(cache_file)
if os.path.exists(cache_file):
return open(cache_file, mode=mode, encoding=encoding)
try:
remote_obj = self.cloud.get_remote(remote)
remote_info = remote_obj.checksum_to_path_info(out.checksum)
return remote_obj.open(remote_info, mode=mode, encoding=encoding)
except RemoteActionNotImplemented:
with self.state:
cache_info = out.get_used_cache(remote=remote)
self.cloud.pull(cache_info, remote=remote)
return open(cache_file, mode=mode, encoding=encoding)
def close(self):
self.scm.close()
@locked
def checkout(self, *args, **kwargs):
return self._checkout(*args, **kwargs)
@locked
def fetch(self, *args, **kwargs):
return self._fetch(*args, **kwargs)
def _reset(self):
self.__dict__.pop("graph", None)
self.__dict__.pop("stages", None)
self.__dict__.pop("pipelines", None)
self.__dict__.pop("dvcignore", None)
| 33.043315 | 79 | 0.588567 |
79414c9971f15f961f615dee33bc4718f3428a07 | 1,997 | py | Python | core_dev/_tkinter/configs.py | alexzanderr/_core-dev | 831f69dad524e450c4243b1dd88f26de80e1d444 | [
"MIT"
] | null | null | null | core_dev/_tkinter/configs.py | alexzanderr/_core-dev | 831f69dad524e450c4243b1dd88f26de80e1d444 | [
"MIT"
] | null | null | null | core_dev/_tkinter/configs.py | alexzanderr/_core-dev | 831f69dad524e450c4243b1dd88f26de80e1d444 | [
"MIT"
] | null | null | null |
"""
core/gui/configs.py
configurations 4 tkinter objects
helpful in dev of guis
author: @alexzander
"""
# python
from tkinter import *
# consolas
consolas_10_bold = ('Consolas', 10, 'bold')
consolas_20_bold = ('Consolas', 20, 'bold')
consolas_30_bold = ('Consolas', 30, 'bold')
consolas_40_bold = ('Consolas', 40, 'bold')
consolas_50_bold = ('Consolas', 50, 'bold')
consolas_60_bold = ('Consolas', 60, 'bold')
# cascadia code
cascadia_code_10 = ('Cascadia Code', 10)
cascadia_code_10_bold = ('Cascadia Code', 10, "bold")
cascadia_code_20 = ('Cascadia Code', 20)
cascadia_code_20_bold = ('Cascadia Code', 20, 'bold')
cascadia_code_30 = ('Cascadia Code', 30)
cascadia_code_30_bold = ('Cascadia Code', 30, 'bold')
cascadia_code_40 = ('Cascadia Code', 40)
cascadia_code_40_bold = ('Cascadia Code', 40, 'bold')
cascadia_code_50 = ('Cascadia Code', 50)
cascadia_code_50_bold = ('Cascadia Code', 50, 'bold')
# abadi
abadi_45_bold = ("Abadi", 45, 'bold')
# colors
red = "red"
white = "white"
yellow = "yellow"
blue = "blue"
black = "black"
lightgreen = 'lightgreen'
gray32 = "gray32"
light_golden_rod = "light goldenrod"
gold="gold"
state_blue = "state blue"
indian_red = "indian red"
green = "green"
green2 = "green2"
thistle2 = "thistle2"
gray63 = "gray63"
seashell2 = "seashell2"
gray10 = "gray10"
tomato2 = "tomato2"
pale_green = "pale green"
gold2 = "gold2"
spring_green = "spring green"
green_yellow = "green yellow"
forest_green = "forest green"
brown4 = "brown4"
dark_green = "dark green"
gold3 = "gold3"
DodgerBlue2 = "DodgerBlue2"
gray57 = "gray57"
def config_gui_obj(gui_obj, font: tuple, back=seashell2, fore=gray10,
text="", width=None, height=None):
if height:
gui_obj.config(height=height)
if width:
gui_obj.config(width=width)
    if text:
        gui_obj.config(text=text)
    gui_obj.config(font=font, bg=back, fg=fore)
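# Minimal usage sketch (an assumption, not part of the original module):
# configuring a Label with one of the fonts/colors defined above.
if __name__ == "__main__":
    root = Tk()
    label = Label(root)
    config_gui_obj(label, font=cascadia_code_20_bold, back=gray10, fore=gold,
                   text="hello world")
    label.pack()
    root.mainloop()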
79414ca844b67bc8e27acaa1ab67c650bcc07143 | 2,209 | py | Python | CPD.py | roosevelt/bayes | e85ef972620124e40c8198dbb979af96a4417568 | [
"MIT"
] | null | null | null | CPD.py | roosevelt/bayes | e85ef972620124e40c8198dbb979af96a4417568 | [
"MIT"
] | null | null | null | CPD.py | roosevelt/bayes | e85ef972620124e40c8198dbb979af96a4417568 | [
"MIT"
] | null | null | null | # A conditional probability distribution table to be associated to a node
from Table import Table
class CPD():
# variables is a list of variables of type Variable
# parameters is a list of
def __init__(self, variable, condition_variables=[]):
if len(condition_variables)>0:
condition_variables.sort(key = lambda x: x.name)
self.variable = variable
self.condition_variables = condition_variables
self.table = Table([self.variable]+self.condition_variables)
self.__variable_dict = self.__buildVariableDict(self.variable, self.condition_variables)
def __str__(self):
# Prints this CPD to the stdout
n_line = 20
result = ""
result += ("="*n_line) + " Conditional Probability Table of " + self.variable.name + " (" + str(len(self.variable.values))+ " Values) " + "="*n_line + "\n"
condition_arr = []
for condition_var in self.condition_variables:
condition_arr.append(condition_var.name + " (" + str(len(condition_var.values)) + " Values)")
result += "Possible Values: " + ", ".join(self.variable.values) + "\n"
if len(self.condition_variables)>0:
result += "Parents: " + ", ".join(condition_arr) + "\n"
else:
result += "Parents: None (no parents)\n"
result += "Lines: " + str(len(self.table.data)) +" entries\n\n"
self.table.setFirstVariableToPrint(self.variable)
result += self.table.__str__()
return result
# Set the parameters in the CPD
def setParameters(self, parameters):
# parameters is a list of parameters in the order that it appears on the table
self.table.setParameters(parameters)
def getParameters(self):
return self.table.getParameters()
def getVariableByName(self, name):
return self.__variable_dict[name]
def __buildVariableDict(self, variable, conditional_variable):
variables = [variable] + conditional_variable
variable_dict = {}
for variable in variables:
variable_dict[variable.name] = variable
return variable_dict | 42.480769 | 164 | 0.629697 |
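# Illustrative usage (hypothetical: it assumes a companion Variable class whose
# instances expose `name` and `values`, as CPD expects above):
#
#   rain = Variable("Rain", ["yes", "no"])
#   sprinkler = Variable("Sprinkler", ["on", "off"])
#   cpd = CPD(rain, [sprinkler])              # P(Rain | Sprinkler)
#   cpd.setParameters([0.9, 0.1, 0.2, 0.8])   # one value per table row
#   print(cpd)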
79414d533a95079ded3138fcace04a738721313e | 28,451 | py | Python | retro_star/packages/rdchiral/rdchiral/backup/main.py | wangxr0526/retro_star | 4ac696278e565a46a9839a0aef65526fe0a705b5 | [
"MIT"
] | 65 | 2020-06-27T04:28:21.000Z | 2022-03-30T11:18:22.000Z | retro_star/packages/rdchiral/rdchiral/backup/main.py | gmh14/data_efficient_grammar | 7a25cecddc9510deeac7fd816715fd7b69103d85 | [
"MIT"
] | 15 | 2020-07-07T13:17:05.000Z | 2022-03-22T12:52:29.000Z | retro_star/packages/rdchiral/rdchiral/backup/main.py | wangxr0526/retro_star | 4ac696278e565a46a9839a0aef65526fe0a705b5 | [
"MIT"
] | 14 | 2020-06-30T09:22:13.000Z | 2022-03-30T11:18:28.000Z | from __future__ import print_function
import sys
import os
import re
import copy
import rdkit.Chem as Chem
import rdkit.Chem.AllChem as AllChem
from rdkit.Chem.rdchem import ChiralType, BondType, BondDir
from rdchiral.utils import vprint, PLEVEL, atoms_are_different
from rdchiral.initialization import rdchiralReaction, rdchiralReactants
from rdchiral.chiral import template_atom_could_have_been_tetra, copy_chirality,\
atom_chirality_matches
from rdchiral.clean import canonicalize_outcome_smiles, combine_enantiomers_into_racemic
from rdchiral.bonds import BondDirOpposite, restore_bond_stereo_to_sp2_atom
'''
This file contains the main functions for running reactions.
An incomplete description of expected behavior is as follows:
(1) RDKit's native RunReactants is called on an achiral version of the molecule,
which has had all tetrahedral centers and bond directions stripped.
(2) For each outcome, we examine the correspondence between atoms in the
reactants and atoms in the reactant template for reasons to exclude the
current outcome. The way we do so is through the react_atom_idx property in
the generated products. This is one of the
few properties always copied over to the products here:
https://github.com/rdkit/rdkit/blob/master/Code/GraphMol/ChemReactions/ReactionRunner.cpp
A previous version of this code did so through the Isotope label of each atom,
before the react_atom_idx was added to the ReactionRunner.cpp code.
The following conditions are checked:
TETRAHEDRAL ATOMS
(a) If a reactant atom is a tetrahedral center with specified chirality
and the reactant template atom is NOT chiral but is defined in a way
that it could have been specified, reject this outcome
(b) If a reactant atom is a tetrahedral center with specified chirality
and the reactant template atom is NOT chiral and is not defined in
a way where it could have been (i.e., is generalized without spec.
neighbors), then keep the match.
    (c) If a reactant atom is achiral but the reactant template atom is chiral,
the match is still allowed to happen. We might want to change this later
or let it be an option.
(d) If a reactant atom is a tetrahedral center with specified chirality
and the reactant template also has its chirality specified, let the
match happen if the chirality matches.
DOUBLE BONDS
(a) If a reactant double bond is defined with directionality specified and
the reactant template is unspecified but COULD have been (i.e.,
neighbors of sp2 carbons are specified), reject this outcome
(b) If a reactant double bond is defined with directionality specified and
        the reactant template is unspecified but could NOT have been (in the
case of generalization), allow the match to occur. This is what we
default to when half the bond is specified, like in "C=C/O"
note: reactants are checked for implicit bond stereo based on rings
(c) If a reactant double bond has implicit cis due to ring membership, it is
still allowed to match an unspecified template double bond. Might lead
to some weird edge cases, but mostly makes sense.
(3) For each outcome, merge all products into a single molecule. During this
process, we check for bonds that are missing in the product. These are those
that were present in the reactants but were NOT matched in the reactant
template.
(4) For each outcome, examine product atoms to correct tetrahedral chirality.
(5) For each outcome, examine product double bonds to correct cis/trans-ness
'''
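# Minimal usage sketch (the SMARTS/SMILES below are illustrative placeholders,
# not taken from this package's tests):
#
#   from rdchiral.main import rdchiralRunText
#   outcomes = rdchiralRunText(
#       '[C:1](=[O:2])[OH:3]>>[C:1](=[O:2])[O:3]C',  # hypothetical esterification template
#       'OC(=O)[C@@H](N)C',
#   )
#   # -> list of canonicalized product SMILES strings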
def rdchiralRunText(reaction_smarts, reactant_smiles, **kwargs):
'''Run from SMARTS string and SMILES string. This is NOT recommended
for library application, since initialization is pretty slow. You should
separately initialize the template and molecules and call run()'''
rxn = rdchiralReaction(reaction_smarts)
reactants = rdchiralReactants(reactant_smiles)
return rdchiralRun(rxn, reactants, **kwargs)
def rdchiralRun(rxn, reactants, keep_mapnums=False, combine_enantiomers=True, return_mapped=False):
'''
    rxn = rdchiralReaction (rdkit reaction + auxiliary information)
    reactants = rdchiralReactants (rdkit mol + auxiliary information)
note: there is a fair amount of initialization (assigning stereochem), most
importantly assigning atom map numbers to the reactant atoms. It is
HIGHLY recommended to use the custom classes for initialization.
'''
assert not return_mapped
# New: reset atom map numbers for templates in case they have been overwritten
# by previous uses of this template!
rxn.reset()
###############################################################################
# Run naive RDKit on ACHIRAL version of molecules
outcomes = rxn.rxn.RunReactants((reactants.reactants_achiral,))
if PLEVEL >= (1): print('Using naive RunReactants, {} outcomes'.format(len(outcomes)))
if not outcomes:
return []
###############################################################################
###############################################################################
# Initialize, now that there is at least one outcome
final_outcomes = set()
mapped_outcomes = {}
# We need to keep track of what map numbers correspond to which atoms
# note: all reactant atoms must be mapped, so this is safe
atoms_r = reactants.atoms_r
# Copy reaction template so we can play around with map numbers
template_r, template_p = rxn.template_r, rxn.template_p
    # Get molAtomMapNum->atom dictionary for template reactants and products
atoms_rt_map = rxn.atoms_rt_map
# TODO: cannot change atom map numbers in atoms_rt permanently?
atoms_pt_map = rxn.atoms_pt_map
###############################################################################
for outcome in outcomes:
###############################################################################
# Look for new atoms in products that were not in
# reactants (e.g., LGs for a retro reaction)
if PLEVEL >= (2): print('Processing {}'.format(str([Chem.MolToSmiles(x, True) for x in outcome])))
unmapped = 900
for m in outcome:
for a in m.GetAtoms():
# Assign map number to outcome based on react_atom_idx
if a.HasProp('react_atom_idx'):
a.SetAtomMapNum(reactants.idx_to_mapnum(int(a.GetProp('react_atom_idx'))))
if not a.GetAtomMapNum():
a.SetAtomMapNum(unmapped)
unmapped += 1
if PLEVEL >= 2: print('Added {} map numbers to product'.format(unmapped-900))
###############################################################################
###############################################################################
# Check to see if reactants should not have been matched (based on chirality)
# Define map num -> reactant template atom map
atoms_rt = {a.GetAtomMapNum(): atoms_rt_map[a.GetIntProp('old_mapno')] \
for m in outcome for a in m.GetAtoms() if a.HasProp('old_mapno')}
# Set map numbers of reactant template to be consistent with reactant/product molecules
# note: this is okay to do within the loop, because ALL atoms must be matched
# in the templates, so the atommapnum will get overwritten every time
[a.SetAtomMapNum(i) for (i, a) in atoms_rt.items()]
# Make sure each atom matches
# note: this is a little weird because atom_chirality_matches takes three values,
# -1 (both tetra but opposite), 0 (not a match), and +1 (both tetra and match)
# and we only want to continue if they all equal -1 or all equal +1
prev = None
skip_outcome = False
for match in (atom_chirality_matches(atoms_rt[i], atoms_r[i]) for i in atoms_rt):
if match == 0:
if PLEVEL >= 2: print('Chirality violated! Should not have gotten this match')
skip_outcome = True
break
elif match == 2: # ambiguous case
continue
elif prev is None:
prev = match
elif match != prev:
if PLEVEL >= 2: print('Part of the template matched reactant chirality, part is inverted! Should not match')
skip_outcome = True
break
if skip_outcome:
continue
if PLEVEL >= 2: print('Chirality matches! Just checked with atom_chirality_matches')
# Check bond chirality - iterate through reactant double bonds where
# chirality is specified (or not). atoms defined by map number
skip_outcome = False
for atoms, dirs, is_implicit in reactants.atoms_across_double_bonds:
if all(i in atoms_rt for i in atoms):
# All atoms definining chirality were matched to the reactant template
# So, check if it is consistent with how the template is defined
#...but /=/ should match \=\ since they are both trans...
matched_atom_map_nums = tuple(atoms_rt[i].GetAtomMapNum() for i in atoms)
# Convert atoms_rt to original template's atom map numbers:
matched_atom_map_nums = tuple(rxn.atoms_rt_idx_to_map[atoms_rt[i].GetIdx()] for i in atoms)
if matched_atom_map_nums not in rxn.required_rt_bond_defs:
continue # this can happen in ring openings, for example
dirs_template = rxn.required_rt_bond_defs[matched_atom_map_nums]
if dirs != dirs_template and \
(BondDirOpposite[dirs[0]], BondDirOpposite[dirs[1]]) != dirs_template and \
not (dirs_template == (BondDir.NONE, BondDir.NONE) and is_implicit):
if PLEVEL >= 5: print('Reactant bond chirality does not match template!')
if PLEVEL >= 5: print('Based on map numbers...')
if PLEVEL >= 5: print(' rct: {} -> {}'.format(matched_atom_map_nums, dirs))
if PLEVEL >= 5: print(' tmp: {} -> {}'.format(matched_atom_map_nums, dirs_template))
if PLEVEL >= 5: print('skipping this outcome, should not have matched...')
skip_outcome = True
break
if skip_outcome:
continue
###############################################################################
###############################################################################
# Convert product(s) to single product so that all
# reactions can be treated as pseudo-intramolecular
# But! check for ring openings mistakenly split into multiple
# This can be diagnosed by duplicate map numbers (i.e., SMILES)
mapnums = [a.GetAtomMapNum() for m in outcome for a in m.GetAtoms() if a.GetAtomMapNum()]
if len(mapnums) != len(set(mapnums)): # duplicate?
if PLEVEL >= 1: print('Found duplicate mapnums in product - need to stitch')
# need to do a fancy merge
merged_mol = Chem.RWMol(outcome[0])
merged_map_to_id = {a.GetAtomMapNum(): a.GetIdx() for a in outcome[0].GetAtoms() if a.GetAtomMapNum()}
for j in range(1, len(outcome)):
new_mol = outcome[j]
for a in new_mol.GetAtoms():
if a.GetAtomMapNum() not in merged_map_to_id:
merged_map_to_id[a.GetAtomMapNum()] = merged_mol.AddAtom(a)
for b in new_mol.GetBonds():
bi = b.GetBeginAtom().GetAtomMapNum()
bj = b.GetEndAtom().GetAtomMapNum()
                    if PLEVEL >= 10: print('stitching bond between {} and {} in stitch has chirality {}, {}'.format(
bi, bj, b.GetStereo(), b.GetBondDir()
))
if not merged_mol.GetBondBetweenAtoms(
merged_map_to_id[bi], merged_map_to_id[bj]):
merged_mol.AddBond(merged_map_to_id[bi],
merged_map_to_id[bj], b.GetBondType())
merged_mol.GetBondBetweenAtoms(
merged_map_to_id[bi], merged_map_to_id[bj]
).SetStereo(b.GetStereo())
merged_mol.GetBondBetweenAtoms(
merged_map_to_id[bi], merged_map_to_id[bj]
).SetBondDir(b.GetBondDir())
outcome = merged_mol.GetMol()
if PLEVEL >= 1: print('Merged editable mol, converted back to real mol, {}'.format(Chem.MolToSmiles(outcome, True)))
else:
new_outcome = outcome[0]
for j in range(1, len(outcome)):
new_outcome = AllChem.CombineMols(new_outcome, outcome[j])
outcome = new_outcome
if PLEVEL >= 2: print('Converted all outcomes to single molecules')
###############################################################################
###############################################################################
# Figure out which atoms were matched in the templates
# atoms_rt and atoms_p will be outcome-specific.
atoms_pt = {a.GetAtomMapNum(): atoms_pt_map[a.GetIntProp('old_mapno')] \
for a in outcome.GetAtoms() if a.HasProp('old_mapno')}
atoms_p = {a.GetAtomMapNum(): a for a in outcome.GetAtoms() if a.GetAtomMapNum()}
# Set map numbers of product template
# note: this is okay to do within the loop, because ALL atoms must be matched
# in the templates, so the map numbers will get overwritten every time
# This makes it easier to check parity changes
[a.SetAtomMapNum(i) for (i, a) in atoms_pt.items()]
###############################################################################
###############################################################################
# Check for missing bonds. These are bonds that are present in the reactants,
# not specified in the reactant template, and not in the product. Accidental
# fragmentation can occur for intramolecular ring openings
missing_bonds = []
for (i, j, b) in reactants.bonds_by_mapnum:
if i in atoms_p and j in atoms_p:
# atoms from reactant bond show up in product
if not outcome.GetBondBetweenAtoms(atoms_p[i].GetIdx(), atoms_p[j].GetIdx()):
#...but there is not a bond in the product between those atoms
if i not in atoms_rt or j not in atoms_rt or not template_r.GetBondBetweenAtoms(atoms_rt[i].GetIdx(), atoms_rt[j].GetIdx()):
# the reactant template did not specify a bond between those atoms (e.g., intentionally destroy)
missing_bonds.append((i, j, b))
if missing_bonds:
if PLEVEL >= 1: print('Product is missing non-reacted bonds that were present in reactants!')
outcome = Chem.RWMol(outcome)
rwmol_map_to_id = {a.GetAtomMapNum(): a.GetIdx() for a in outcome.GetAtoms() if a.GetAtomMapNum()}
for (i, j, b) in missing_bonds:
outcome.AddBond(rwmol_map_to_id[i], rwmol_map_to_id[j])
new_b = outcome.GetBondBetweenAtoms(rwmol_map_to_id[i], rwmol_map_to_id[j])
new_b.SetBondType(b.GetBondType())
new_b.SetBondDir(b.GetBondDir())
new_b.SetIsAromatic(b.GetIsAromatic())
outcome = outcome.GetMol()
atoms_p = {a.GetAtomMapNum(): a for a in outcome.GetAtoms() if a.GetAtomMapNum()}
else:
if PLEVEL >= 3: print('No missing bonds')
###############################################################################
# Now that we've fixed any bonds, connectivity is set. This is a good time
        # to update the property cache, since all that is left is fixing atom/bond
# stereochemistry.
try:
Chem.SanitizeMol(outcome)
outcome.UpdatePropertyCache()
except ValueError as e:
if PLEVEL >= 1: print('{}, {}'.format(Chem.MolToSmiles(outcome, True), e))
continue
###############################################################################
# Correct tetra chirality in the outcome
tetra_copied_from_reactants = []
for a in outcome.GetAtoms():
# Participants in reaction core (from reactants) will have old_mapno
# Spectators present in reactants will have react_atom_idx
# ...so new atoms will have neither!
if not a.HasProp('old_mapno'):
# Not part of the reactants template
if not a.HasProp('react_atom_idx'):
# Atoms only appear in product template - their chirality
# should be properly instantiated by RDKit...hopefully...
if PLEVEL >= 4: print('Atom {} created by product template, should have right chirality'.format(a.GetAtomMapNum()))
else:
if PLEVEL >= 4: print('Atom {} outside of template, copy chirality from reactants'.format(a.GetAtomMapNum()))
copy_chirality(atoms_r[a.GetAtomMapNum()], a)
if a.GetChiralTag() != ChiralType.CHI_UNSPECIFIED:
tetra_copied_from_reactants.append(a)
else:
# Part of reactants and reaction core
if template_atom_could_have_been_tetra(atoms_rt[a.GetAtomMapNum()]):
if PLEVEL >= 3: print('Atom {} was in rct template (could have been tetra)'.format(a.GetAtomMapNum()))
if template_atom_could_have_been_tetra(atoms_pt[a.GetAtomMapNum()]):
if PLEVEL >= 3: print('Atom {} in product template could have been tetra, too'.format(a.GetAtomMapNum()))
# Was the product template specified?
if atoms_pt[a.GetAtomMapNum()].GetChiralTag() == ChiralType.CHI_UNSPECIFIED:
# No, leave unspecified in product
if PLEVEL >= 3: print('...but it is not specified in product, so destroy chirality')
a.SetChiralTag(ChiralType.CHI_UNSPECIFIED)
else:
# Yes
if PLEVEL >= 3: print('...and product is specified')
# Was the reactant template specified?
if atoms_rt[a.GetAtomMapNum()].GetChiralTag() == ChiralType.CHI_UNSPECIFIED:
# No, so the reaction introduced chirality
if PLEVEL >= 3: print('...but reactant template was not, so copy from product template')
copy_chirality(atoms_pt[a.GetAtomMapNum()], a)
else:
# Yes, so we need to check if chirality should be preserved or inverted
if PLEVEL >= 3: print('...and reactant template was, too! copy from reactants')
copy_chirality(atoms_r[a.GetAtomMapNum()], a)
if atom_chirality_matches(atoms_pt[a.GetAtomMapNum()], atoms_rt[a.GetAtomMapNum()]) == -1:
if PLEVEL >= 3: print('but! reactant template and product template have opposite stereochem, so invert')
a.InvertChirality()
else:
# Reactant template chiral, product template not - the
# reaction is supposed to destroy chirality, so leave
# unspecified
if PLEVEL >= 3: print('If reactant template could have been ' +
'chiral, but the product template could not, then we dont need ' +
'to worry about specifying product atom chirality')
else:
if PLEVEL >= 3: print('Atom {} could not have been chiral in reactant template'.format(a.GetAtomMapNum()))
if not template_atom_could_have_been_tetra(atoms_pt[a.GetAtomMapNum()]):
                        if PLEVEL >= 3: print('Atom {} also could not have been chiral in product template'.format(a.GetAtomMapNum()))
if PLEVEL >= 3: print('...so, copy chirality from reactant instead')
copy_chirality(atoms_r[a.GetAtomMapNum()], a)
if a.GetChiralTag() != ChiralType.CHI_UNSPECIFIED:
tetra_copied_from_reactants.append(a)
else:
                        if PLEVEL >= 3: print('Atom {} could/does have product template chirality!'.format(a.GetAtomMapNum()))
if PLEVEL >= 3: print('...so, copy chirality from product template')
copy_chirality(atoms_pt[a.GetAtomMapNum()], a)
if PLEVEL >= 3: print('New chiral tag {}'.format(a.GetChiralTag()))
if skip_outcome:
if PLEVEL >= 2: print('Skipping this outcome - chirality broken?')
continue
if PLEVEL >= 2: print('After attempting to re-introduce chirality, outcome = {}'.format(Chem.MolToSmiles(outcome, True)))
###############################################################################
###############################################################################
# Correct bond directionality in the outcome
for b in outcome.GetBonds():
if b.GetBondType() != BondType.DOUBLE:
continue
# Ring double bonds do not need to be touched(?)
if b.IsInRing():
continue
ba = b.GetBeginAtom()
bb = b.GetEndAtom()
# Is it possible at all to specify this bond?
if ba.GetDegree() == 1 or bb.GetDegree() == 1:
continue
if PLEVEL >= 5: print('Looking at outcome bond {}={}'.format(ba.GetAtomMapNum(), bb.GetAtomMapNum()))
if ba.HasProp('old_mapno') and bb.HasProp('old_mapno'):
# Need to rely on templates for bond chirality, both atoms were
# in the reactant template
if PLEVEL >= 5: print('Both atoms in this double bond were in the reactant template')
if (ba.GetIntProp('old_mapno'), bb.GetIntProp('old_mapno')) in \
rxn.required_bond_defs_coreatoms:
if PLEVEL >= 5: print('and reactant template *could* have specified the chirality!')
                    if PLEVEL >= 5: print('...product should be properly instantiated')
continue
if PLEVEL >= 5: print('But it was impossible to have specified chirality (e.g., aux C=C for context)')
elif not ba.HasProp('react_atom_idx') and not bb.HasProp('react_atom_idx'):
# The atoms were both created by the product template, so any bond
# stereochemistry should have been instantiated by the product template
# already...hopefully...otherwise it isn't specific enough?
continue
# Need to copy from reactants, this double bond was simply carried over,
            # *although* one of the atoms could have reacted and been an auxiliary
# atom in the reaction, e.g., C/C=C(/CO)>>C/C=C(/C[Br])
if PLEVEL >= 5: print('Restoring cis/trans character of bond {}={} from reactants'.format(
ba.GetAtomMapNum(), bb.GetAtomMapNum()))
# Start with setting the BeginAtom
begin_atom_specified = restore_bond_stereo_to_sp2_atom(ba, reactants.bond_dirs_by_mapnum)
if not begin_atom_specified:
# don't bother setting other side of bond, since we won't be able to
# fully specify this bond as cis/trans
continue
# Look at other side of the bond now, the EndAtom
end_atom_specified = restore_bond_stereo_to_sp2_atom(bb, reactants.bond_dirs_by_mapnum)
if not end_atom_specified:
# note: this can happen if C=C/C-N turns into C=C/C=N
if PLEVEL >= 1:
print(reactants.bond_dirs_by_mapnum)
print(ba.GetAtomMapNum())
print(bb.GetAtomMapNum())
print(Chem.MolToSmiles(reactants.reactants, True))
print(Chem.MolToSmiles(outcome, True))
print('Uh oh, looks like bond direction is only specified for half of this bond?')
###############################################################################
#Keep track of the reacting atoms for later use in grouping
# atoms_diff = {x:atoms_are_different(atoms_r[x],atoms_p[x]) for x in atoms_rt}
#make tuple of changed atoms
# atoms_changed = tuple([x for x in atoms_diff.keys() if atoms_diff[x] == True])
mapped_outcome = Chem.MolToSmiles(outcome, True)
if not keep_mapnums:
for a in outcome.GetAtoms():
a.SetAtomMapNum(0)
# Now, check to see if we have destroyed chirality
# this occurs when chirality was not actually possible (e.g., due to
# symmetry) but we had assigned a tetrahedral center originating
# from the reactants.
# ex: SMILES C(=O)1C[C@H](Cl)CCC1
# SMARTS [C:1]-[C;H0;D3;+0:2](-[C:3])=[O;H0;D1;+0]>>[C:1]-[CH2;D2;+0:2]-[C:3]
skip_outcome = False
if len(tetra_copied_from_reactants) > 0:
Chem.AssignStereochemistry(outcome, cleanIt=True, force=True)
for a in tetra_copied_from_reactants:
if a.GetChiralTag() == ChiralType.CHI_UNSPECIFIED:
if PLEVEL >= 2: print('Auxiliary reactant atom was chiral, now is broken -> skip outcome')
skip_outcome = True
break
if skip_outcome:
continue
smiles = Chem.MolToSmiles(outcome, True)
smiles_new = canonicalize_outcome_smiles(smiles)
if smiles_new is None:
continue
final_outcomes.add(smiles_new)
# mapped_outcomes[smiles_new] = (mapped_outcome, atoms_changed)
###############################################################################
# One last fix for consolidating multiple stereospecified products...
if combine_enantiomers:
final_outcomes = combine_enantiomers_into_racemic(final_outcomes)
###############################################################################
if return_mapped:
return list(final_outcomes), mapped_outcomes
else:
return list(final_outcomes)
if __name__ == '__main__':
# Directly use SMILES/SMARTS
reaction_smiles = 'O=[C:1]1[CH2:2][CH2:3][CH:4]([NH:5][C:6]([O:7][C:8]([CH3:9])([CH3:10])[CH3:11])=[O:12])[CH2:13][CH2:14]1.[CH2:15]1[CH2:16][O:17][CH2:18][CH2:19][NH:20]1>>[C@@H:1]1([N:20]2[CH2:15][CH2:16][O:17][CH2:18][CH2:19]2)[CH2:2][CH2:3][C@H:4]([NH:5][C:6]([O:7][C:8]([CH3:9])([CH3:10])[CH3:11])=[O:12])[CH2:13][CH2:14]1'
retro_smarts = '([C@H;+0:1].[C@H;+0:2]-[N;H0;+0:3])>>O=[C;H0;+0:2].[CH;+0:1].[NH;+0:3]'
product = '[C@@H:1]1([N:20]2[CH2:15][CH2:16][O:17][CH2:18][CH2:19]2)[CH2:2][CH2:3][C@H:4]([NH:5][C:6]([O:7][C:8]([CH3:9])([CH3:10])[CH3:11])=[O:12])[CH2:13][CH2:14]1'
outcomes = rdchiralRunText(retro_smarts, product)
print(outcomes)
    # Get list of atoms that changed as well; rdchiralRun needs the initialized
    # reaction/reactant wrappers (the same objects rdchiralRunText builds internally)
    rxn = rdchiralReaction(retro_smarts)
    reactants = rdchiralReactants(product)
    outcomes, mapped_outcomes = rdchiralRun(rxn, reactants, return_mapped=True)
print(outcomes, mapped_outcomes)
| 53.88447 | 332 | 0.570876 |
79414e44f59b0a269e7d908655dfd6f531b1f763 | 43,860 | py | Python | pystarboundmap/data.py | apocalyptech/pystarboundmap | 2f96cd88c67a1896c85ea20fbeb9b7027c05dacc | [
"BSD-3-Clause"
] | 6 | 2019-01-21T15:54:37.000Z | 2021-03-01T04:28:23.000Z | pystarboundmap/data.py | apocalyptech/pystarboundmap | 2f96cd88c67a1896c85ea20fbeb9b7027c05dacc | [
"BSD-3-Clause"
] | 4 | 2019-04-09T01:05:37.000Z | 2021-06-24T23:29:42.000Z | pystarboundmap/data.py | apocalyptech/pystarboundmap | 2f96cd88c67a1896c85ea20fbeb9b7027c05dacc | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# vim: set expandtab tabstop=4 shiftwidth=4:
#
# Python Starbound Mapper (pystarboundmap)
# Copyright (C) 2018 CJ Kucera
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the development team nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL CJ KUCERA BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import io
import re
import json
import mmap
import struct
import starbound
from PIL import Image
from PyQt5 import QtGui
def read_config(config_data):
"""
Attempts to parse a starbound .config file. These are very nearly JSON,
but include comments prefixed by //, which isn't allowed in JSON, so
the JSON parser fails. https://pypi.org/project/json5/ might be able
to parse these, actually, but as its README mentions, it is SUPER slow.
Way slower than even the gigantic list of comment special cases below.
"""
out_lines = []
df = io.StringIO(config_data.decode('utf-8'))
odf = io.StringIO()
in_comment = False
for line in df.readlines():
# ack, multiline comments in /objects/generic/statuspod/statuspod.object
# also in /objects/ancient/hologramgalaxy/hologramgalaxy.object, and
# unfortunately that one necessitates some stripping (though stripping
# is no more CPU-intensive than hardcoding the few instances)
if line.lstrip()[:2] == '/*':
if line.rstrip()[-2:] != '*/':
in_comment = True
else:
if in_comment:
if line.lstrip()[:2] == '*/':
in_comment = False
else:
idx = line.find('//')
if idx == -1:
print(line, file=odf)
else:
print(line[0:idx], file=odf)
# This list of patterns allows us to load all the data we care about
# (that I'm aware of anyway) but I've moved to just stripping out
# anything after // automatically. That shaves about a second off of
# our startup time. Doubtless there are image processing speedups
# which would probably account for the majority of the loadtime)
#elif line[:3] != '// ':
# found_pattern = False
# for pattern in [
# ' // ',
# # special case for /objects/biome/foundry/lavatanklarge/lavatanklarge.object
# '//FIRE',
# # special cases for /objects/biome/tentacle/tentaclespawner1/tentaclespawner1.object
# '//type',
# '//additional',
# '//relative',
# '//[x,y] size',
# '//total',
# # special case for /objects/avian/sawblade/sawblade.object
# '//mollys',
# # special case for /objects/avian/birdgroundlantern/birdgroundlantern.object
# '//"interactive"',
# # special cases for /objects/outpost/signstore/signdispenser.object
# '//"openSounds"',
# '//"closeSounds"',
# # special case for /objects/glitch/medievalspikes/medievalspikes.object
# '//TODO',
# # special case for /objects/themed/island/islandhammock/islandhammock.object
# '//"sitCoverImage"',
# # special case for /objects/protectorate/objects/protectoratewindbanner3/protectoratewindbanner3.object
# '//"soundEffect"',
# # special cases for /objects/protectorate/objects/protectoratelobbyvending/protectoratelobbyvending.object
# '//"onSound"',
# '//"offSound"',
# # special case for /objects/spawner/spawners/spawner_human.object
# '//6000,',
# # special cases for /objects/spawner/colonydeed/colonydeed.object
# '//whether',
# '//delay',
# '//cooldown',
# '//scan',
# '//length',
# '//seconds',
# # special cases for /objects/spawner/invisiblemonsterspawner.object
# '//level',
# '//options',
# '//only',
# # special case for /objects/crafting/upgradeablecraftingobjects/craftingwheel/craftingwheel.object
# '//this',
# ]:
# idx = line.find(pattern)
# if idx != -1:
# found_pattern = True
# break
# if found_pattern:
# print(line[0:idx], file=odf)
# else:
# print(line, file=odf)
odf.seek(0)
return json.load(odf)
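# Minimal illustrative example of read_config(): the _demo_* helper below is a
# hypothetical addition (not referenced elsewhere) showing that //-style comments
# are stripped before the text reaches the JSON parser.
def _demo_read_config():
    sample = b'{\n    // comments like this appear in Starbound .config files\n    "materialName": "demo"\n}'
    return read_config(sample)  # -> {'materialName': 'demo'}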
class Material(object):
"""
Holds info about a material. Right now we're ignoring all the
fancy rendering options and pretending that everything is the
very first (top left) tile, and we're not drawing edges or the
like.
"""
def __init__(self, info, path, full_path, pakdata, crop_parameters):
self.info = info
self.name = info['materialName']
self.path = path
self.full_path = full_path
self.pakdata = pakdata
self.crop_parameters = crop_parameters
self._image = None
self._bgimage = None
self._midimage = None
@property
def image(self):
"""
Loads the image dynamically on-demand.
"""
if not self._image:
df = io.BytesIO(self.pakdata.get(
'{}/{}'.format(self.path, self.info['renderParameters']['texture'])
))
full_image = Image.open(df)
cropped = full_image.crop(self.crop_parameters)
df = io.BytesIO()
cropped.save(df, format='png')
self._image = QtGui.QPixmap()
            if not self._image.loadFromData(df.getvalue()):
self._image = None
# TODO: handle these properly
raise Exception('Could not load material {}'.format(self.name))
return self._image
@property
def bgimage(self):
"""
Loads the background version dynamically on-demand.
"""
if not self._bgimage:
self._bgimage = StarboundData.highlight_pixmap(
self.image.copy(), 0, 0, 0, 192,
)
return self._bgimage
@property
def midimage(self):
"""
Loads the midrange version dynamically on-demand.
"""
if not self._midimage:
self._midimage = StarboundData.highlight_pixmap(
self.image.copy(), 0, 0, 0, 96,
)
return self._midimage
class Matmod(object):
"""
Holds info about a matmod. Right now we're ignoring all the
fancy rendering options and rendering the whole shebang, though
we're only using the very first (top left) tile.
"""
def __init__(self, info, full_path, pakdata):
self.info = info
self.name = info['modName']
self.full_path = full_path
self.pakdata = pakdata
self._image = None
self._bgimage = None
self._midimage = None
@property
def image(self):
"""
Loads the image dynamically on-demand.
"""
if not self._image:
df = io.BytesIO(self.pakdata.get(
'/tiles/mods/{}'.format(self.info['renderParameters']['texture'])
))
full_image = Image.open(df)
cropped = full_image.crop((0, 8, 16, 24))
df = io.BytesIO()
cropped.save(df, format='png')
self._image = QtGui.QPixmap()
if not self._image.loadFromData(df.getvalue()):
self._image = None
# TODO: Handle this
raise Exception('Could not load material {}'.format(self.name))
return self._image
@property
def bgimage(self):
"""
Loads the background version dynamically on-demand.
"""
if not self._bgimage:
self._bgimage = StarboundData.highlight_pixmap(
self.image.copy(), 0, 0, 0, 90,
)
return self._bgimage
@property
def midimage(self):
"""
Loads the midrange version dynamically on-demand.
"""
if not self._midimage:
self._midimage = StarboundData.highlight_pixmap(
self.image.copy(), 0, 0, 0, 45,
)
return self._midimage
class Plant(object):
"""
Class to hold plant info. This is more basic than all our other
objects because map plant entities seem to only ever reference the
PNG directly.
"""
def __init__(self, pathname, pakdata):
self.pathname = pathname
self.pakdata = pakdata
self._image = None
self._hi_image = None
@property
def image(self):
"""
Loads the image dynamically on-demand.
"""
if not self._image:
self._image = QtGui.QPixmap()
self._image.loadFromData(self.pakdata.get(self.pathname))
return self._image
@property
def hi_image(self):
"""
Loads the highlighted version dynamically on-demand.
"""
if not self._hi_image:
self._hi_image = StarboundData.highlight_pixmap(
self.image.copy(), 255, 255, 255, 100,
)
return self._hi_image
class SBObjectOrientation(object):
"""
Info about a specific orientation. Note that we're ignoring
    color variations - just grabbing the top-left image for now.
"""
def __init__(self, info, frames, path, pakdata):
self.info = info
self.offset = (0, 0)
self.anchor = (0, 0)
self.pakdata = pakdata
self._image = None
self._hi_image = None
# Grab offset, if we can
if 'imagePosition' in info:
self.offset = tuple(info['imagePosition'])
# Figure out what property holds the image filename
if 'dualImage' in info:
file_string = info['dualImage']
elif 'image' in info:
file_string = info['image']
elif 'imageLayers' in info:
# TODO: not actually sure what the Right thing to do here is.
# Just taking the first one in the list.
file_string = info['imageLayers'][0]['image']
elif 'leftImage' in info:
# TODO: Not sure here either - there'll also be a rightImage.
# I assume that the direction is specified somehow by the map
# data. Just taking the left version for now
file_string = info['leftImage']
else:
raise Exception('Not sure what to do with {}'.format(path))
# Grab the actual image filename and frame info file
image_file = file_string.split(':')[0]
self.info_frames = self.get_frame(path, image_file, frames, pakdata)
if image_file[0] == '/':
self.full_image_file = image_file
else:
self.full_image_file = '{}/{}'.format(path, image_file)
def get_frame(self, path, image_file, frames, pakdata):
"""
Given a path and image filename, read in frames if possible
"""
base_filename = image_file.rsplit('.', 1)[0]
if base_filename not in frames:
full_filename = '{}/{}.frames'.format(path, base_filename)
try:
frames[base_filename] = read_config(pakdata.get(full_filename))
except KeyError:
if 'default' not in frames:
full_filename = '{}/default.frames'.format(path)
try:
frames['default'] = read_config(pakdata.get(full_filename))
except KeyError:
frames['default'] = None
frames[base_filename] = frames['default']
return frames[base_filename]
@property
def image(self):
"""
Loads the image dynamically on-demand.
"""
if not self._image:
df = io.BytesIO(self.pakdata.get(self.full_image_file))
full_image = Image.open(df)
if self.info_frames:
(width, height) = tuple(self.info_frames['frameGrid']['size'])
else:
(width, height) = full_image.size
cropped = full_image.crop((0, 0, width, height))
df = io.BytesIO()
cropped.save(df, format='png')
self._image = QtGui.QPixmap()
self._image.loadFromData(df.getvalue())
return self._image
@property
def hi_image(self):
"""
Loads the highlighted version dynamically on-demand.
"""
if not self._hi_image:
self._hi_image = StarboundData.highlight_pixmap(
self.image.copy(), 255, 255, 255, 100,
)
return self._hi_image
class SBObject(object):
"""
Class to hold info about a starbound "object". We're ignoring a lot
of information about the object, and like our other graphics, just
taking the top-left image in the graphics files.
"""
def __init__(self, info, filename, path, pakdata):
self.info = info
self.orientations = []
self.frames = {}
self.full_path = '{}/{}'.format(path, filename)
for o in info['orientations']:
self.orientations.append(
SBObjectOrientation(o, self.frames, path, pakdata)
)
def get_image_path(self, orientation):
"""
Returns the path to the graphic used
"""
if orientation < len(self.orientations):
orient = self.orientations[orientation]
else:
orient = self.orientations[0]
return orient.full_image_file
def get_image(self, orientation):
"""
Returns a tuple for displaying the image of this object, at the
given orientation. The first element will be the image data
(as a QPixmap), the second two will be the x and y offsets at which
it should be rendered.
"""
if orientation < len(self.orientations):
orient = self.orientations[orientation]
else:
orient = self.orientations[0]
return (orient.image, orient.offset[0], orient.offset[1])
def get_hi_image(self, orientation):
"""
Returns the highlighted image data for the specified orientation
(as a QPixmap). This doesn't provide offset info because it's only
used while hovering, and the position data is already set at that
point
"""
if orientation < len(self.orientations):
orient = self.orientations[orientation]
else:
orient = self.orientations[0]
return orient.hi_image
class Liquid(object):
"""
Class to hold info about a liquid. Not much in here, honestly
"""
def __init__(self, info):
self.info = info
self.name = info['name']
self.overlay = QtGui.QColor(*info['color'])
class PakTree(object):
"""
Tree-based dict so we can "browse" pak contents by directory.
Makes no distinction between directories and files.
"""
def __init__(self):
self.top = {}
def add_path(self, pathname):
"""
Adds a path to our tree
"""
parts = pathname.lower().split('/')[1:]
cur = self.top
for part in parts:
if part not in cur:
cur[part] = {}
cur = cur[part]
def get_all_in_path(self, path):
"""
Gets all "files" within the given path.
"""
parts = path.lower().split('/')[1:]
cur = self.top
for part in parts:
if part not in cur:
return []
cur = cur[part]
return sorted(cur.keys())
def get_all_matching_ext(self, path, ext):
"""
Gets all "files" within the given path which match the given
extension. Note that "extension" is being used here a bit
generously - be sure to pass in a leading dot if you want it
to *actually* be an extension.
"""
to_ret = []
for item in self.get_all_in_path(path):
if item.endswith(ext):
to_ret.append(item)
return to_ret
def get_all_recurs_matching_ext(self, path, ext):
"""
Searches recursively through the tree, starting at `path`, to
find all files matching the given extension. `ext` can either
be a single extension, or a set of extensions. Returns a list
of tuples - the first element is the *full* path the file is
found in, and the second is the name of the file
"""
cur = self.top
for part in path.lower().split('/')[1:]:
if part not in cur:
return []
cur = cur[part]
if type(ext) != set:
ext = set([ext])
return self._search_in(path.lower(), cur, ext)
def _search_in(self, cur_path, node, exts):
"""
Inner recursive function for `get_all_recurs_matching_ext`
"""
to_ret = []
for name, children in node.items():
parts = name.rsplit('.', 1)
if len(parts) == 2 and parts[1] in exts:
to_ret.append((cur_path, name))
elif len(children) > 0:
to_ret.extend(self._search_in(
'{}/{}'.format(cur_path, name),
children,
exts,
))
return to_ret
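# Minimal illustrative example (hypothetical helper, not referenced elsewhere):
# PakTree only records path components, so it can be exercised without a real .pak file.
def _demo_paktree():
    tree = PakTree()
    tree.add_path('/tiles/mods/grass.matmod')
    tree.add_path('/tiles/mods/grass.png')
    assert tree.get_all_in_path('/tiles/mods') == ['grass.matmod', 'grass.png']
    assert tree.get_all_matching_ext('/tiles/mods', '.matmod') == ['grass.matmod']
    assert tree.get_all_recurs_matching_ext('/tiles', 'matmod') == [('/tiles/mods', 'grass.matmod')]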
class Bookmark(object):
"""
Class to hold info about a bookmark
"""
def __init__(self, bookmark):
"""
`bookmark` should be the dict acquired by looping through the
player's "teleportBookmarks"
"""
self.name = bookmark['bookmarkName']
target = bookmark['target']
list_data = target[0]
self.uuid = target[1]
self.filename = StarboundData.world_string_to_filename(list_data)
def __lt__(self, other):
"""
Makes this object sortable (by bookmark name)
"""
return self.name.lower() < other.name.lower()
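# Minimal illustrative example (hypothetical helper): the dict below mimics the shape
# of a single entry from a player's "teleportBookmarks" list.
def _demo_bookmark():
    bookmark = Bookmark({
        'bookmarkName': 'Home Base',
        'target': ['CelestialWorld:-1234:5678:90:4', 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'],
    })
    return bookmark.name, bookmark.filename  # ('Home Base', '-1234_5678_90_4.world')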
class Player(object):
"""
Wrapper class for the player save dict, to provide a helper function
or two.
"""
def __init__(self, playerdict, base_universe):
self.playerdict = playerdict
self.base_universe = base_universe
self.name = playerdict.data['identity']['name']
self.uuid = playerdict.data['uuid']
# Figure out our current location
self.cur_world_filename = None
self.cur_world_loc = None
context_path = os.path.join(base_universe, '{}.clientcontext'.format(self.uuid))
if os.path.exists(context_path):
with open(context_path, 'rb') as df:
context = starbound.read_sbvj01(df)
if 'reviveWarp' in context.data:
self.cur_world_filename = StarboundData.world_string_to_filename(
context.data['reviveWarp']['world'])
if self.cur_world_filename:
self.cur_world_loc = tuple(context.data['reviveWarp']['target'])
# Load in bookmarks
self.bookmarks = {}
        # TODO: Check that the universeMap dict always has just one key
# (a uuid or something)
for k, v in self.playerdict.data['universeMap'].items():
for bookmark_data in v['teleportBookmarks']:
bookmark = Bookmark(bookmark_data)
if bookmark.filename:
if bookmark.filename not in self.bookmarks:
self.bookmarks[bookmark.filename] = []
self.bookmarks[bookmark.filename].append(bookmark)
def get_systems(self):
"""
Returns a list of tuples of the form:
((x, y, z), systemdict)
Describing all systems known to this player.
(I'm using x, y, z because I imagine that those are maybe
supposed to be coordinates, but in reality I suspect they're
effectively random.)
"""
        # TODO: Check that the universeMap dict always has just one key
# (a uuid or something)
for k, v in self.playerdict.data['universeMap'].items():
# universeMap keys:
# systems
# teleportBookmarks
systemlist = v['systems']
return systemlist
def get_worlds(self, data, progress_callback=None):
"""
Given a StarboundData object `data`, returns a list of all worlds
known to the user, as a list of tuples of the form:
(mtime, config.WorldNameCache.WorldName tuple, filename)
Note that this has to actually load in world files to get the names
on the first runthrough, but we *do* now cache the name information,
so subsequent listings should be much faster.
`progress_callback` can be used to specify a function to call to update
the value of a progress bar as we go through (note that we do NOT
currently support a "real" 0 -> 100% progress bar, since the logic here
is a bit weird and branches off depending on what we find, as we go.
Tearing it apart to be able to provide a total-number-of-files-to-load-
from-disk count before actually processing is more work than I care to
deal with at the moment).
"""
worlds = []
(world_dict, extra_uuid) = data.get_worlds()
# Get our world name cache
cache = data.config.worldname_cache
# Add in our own spaceship, if we've got it
ship_path = os.path.join(data.base_player, '{}.shipworld'.format(self.playerdict.data['uuid']))
if os.path.exists(ship_path):
ship_mtime = os.path.getmtime(ship_path)
if ship_path not in cache or cache[ship_path].mtime != ship_mtime:
(world, worlddf) = StarboundData.open_world(ship_path)
cache.register_other(
ship_path,
'Starship',
'Your Starship',
'aaaaa',
world,
ship_mtime,
)
worlddf.close()
if progress_callback:
progress_callback()
worlds.append((
ship_mtime,
cache[ship_path],
ship_path,
))
# Loop through all systems we've explored
for (coords, systemdict) in self.get_systems():
base_system_name = '{}_{}_{}'.format(*coords)
if base_system_name in world_dict:
detected_system_name = None
for planet in systemdict['mappedPlanets']:
if planet['planet'] in world_dict[base_system_name]:
for filename in world_dict[base_system_name][planet['planet']]:
world_mtime = os.path.getmtime(filename)
if filename not in cache or cache[filename].mtime != world_mtime:
(world, worlddf) = StarboundData.open_world(filename)
cache.register_planet(filename,
world_name=StarboundData.strip_colors(world.info.name),
world_type=world.info.description,
biome_types=', '.join(world.info.world_biomes),
sort_name=StarboundData.world_name_to_sortable(world.info.name),
world_obj=world,
mtime=world_mtime,
)
worlddf.close()
if progress_callback:
progress_callback()
# This is the only way I can find to try and associate a system
# to its name (only really useful in the uuid checks below). Alas!
if not detected_system_name:
detected_system_name = re.sub(
r'^(.*?) (I|II|III|IV|V|VI|VII|VIII|IX|X|XI|XII)( .*)?$',
r'\1',
cache[filename].world_name)
worlds.append((
world_mtime,
cache[filename],
filename,
))
# Now loop through any extra worlds we found via UUID
if not detected_system_name:
detected_system_name = '(Unknown System)'
for uuid in systemdict['mappedObjects'].keys():
if uuid in extra_uuid:
(filename, description) = extra_uuid[uuid]
other_mtime = os.path.getmtime(filename)
if filename not in cache or cache[filename].mtime != other_mtime:
if description.startswith('unique-'):
description = description[7:]
(world, worlddf) = StarboundData.open_world(filename)
cache.register_other(filename,
world_name='{} - {}'.format(detected_system_name, description),
extra_desc='Non-Planet System Object',
sort_name='{} 99 - {}'.format(detected_system_name, description).lower(),
world_obj=world,
mtime=other_mtime,
)
worlddf.close()
if progress_callback:
progress_callback()
worlds.append((
other_mtime,
cache[filename],
filename,
))
# Save our cache, if anything's changed
if cache.changed:
cache.save()
# Return our list
return worlds
class StarboundData(object):
"""
Master class to hold the starbound data that we're interested in.
"""
base_game = None
base_storage = None
base_player = None
base_universe = None
base_pak = None
class World(starbound.World):
"""
Simple little wrapper class because I want to keep track of the filename
inside the World object, which py-starbound isn't going to care about.
"""
def __init__(self, stream, filename):
super().__init__(stream)
self.filename = filename
self.base_filename = os.path.basename(filename)
world_name_sortable_conversions = [
('^green;I^white;', '01'),
('^green;II^white;', '02'),
('^green;III^white;', '03'),
('^green;IV^white;', '04'),
('^green;V^white;', '05'),
('^green;VI^white;', '06'),
('^green;VII^white;', '07'),
('^green;VIII^white;', '08'),
('^green;IX^white;', '09'),
('^green;X^white;', '10'),
('^green;XI^white;', '11'),
('^green;XII^white;', '12'),
]
def __init__(self, config):
"""
`config` should be a Config object (which will have the base game
installation directory info).
"""
self.config = config
self.base_game = config.starbound_data_dir
self.base_storage = os.path.join(self.base_game, 'storage')
self.base_player = os.path.join(self.base_storage, 'player')
self.base_universe = os.path.join(self.base_storage, 'universe')
self.base_pak = os.path.join(self.base_game, 'assets', 'packed.pak')
# Read in the data file
pakdf = open(self.base_pak, 'rb')
self.pakdf = pakdf
if pakdf:
paktree = PakTree()
pakdata = starbound.SBAsset6(pakdf)
# py-starbound doesn't let you "browse" inside the pakfile's
# internal "directory", so we're doing it by hand here
pakdata.read_index()
for path in pakdata.index.keys():
paktree.add_path(path)
# Cropping parameters for our various material templates.
# TODO: obviously if we want to render things *correctly*
# we'd have to actually parse/understand these. Instead
# we're just grabbing the top-left image, basically.
crop_params = {
'/tiles/classicmaterialtemplate.config': (4, 12, 12, 20),
'/tiles/platformtemplate.config': (8, 0, 16, 8),
'/tiles/girdertemplate.config': (1, 1, 9, 9),
'/tiles/screwtemplate.config': (2, 14, 10, 22),
'/tiles/columntemplate.config': (2, 14, 10, 22),
'/tiles/rowtemplate.config': (2, 14, 10, 22),
# Out of all of these, this will be the one that's Most
# Wrong. I think this is space station stuff
'/tiles/slopedmaterialtemplate.config': (24, 0, 32, 8),
# These two are quite wrong, of course, since they're supposed
# to "join up" properly. For pipes I chose a tile which is
# the straight horizontal image for most, though note that it's
# a vertical image for tentacle pipes.
'/tiles/pipetemplate.config': (68, 36, 76, 44),
'/tiles/railtemplate.config': (3, 5, 11, 13),
}
# Load in our materials
self.materials = {}
obj_list = paktree.get_all_recurs_matching_ext('/tiles', 'material')
for idx, (obj_path, obj_name) in enumerate(obj_list):
matpath = '{}/{}'.format(obj_path, obj_name)
material = read_config(pakdata.get(matpath))
if 'renderTemplate' in material:
if material['renderTemplate'] in crop_params:
self.materials[material['materialId']] = Material(
material,
obj_path,
matpath,
pakdata,
crop_params[material['renderTemplate']],
)
else:
print('Unhandled material render template: {}'.format(material['renderTemplate']))
else:
print('No render template found for {}'.format(matpath))
# Load in our material mods.
self.matmods = {}
for idx, matmod_name in enumerate(paktree.get_all_matching_ext('/tiles/mods', '.matmod')):
# All matmods, at least in the base game, are classicmaterialtemplate
matmodpath = '/tiles/mods/{}'.format(matmod_name)
matmod = read_config(pakdata.get(matmodpath))
self.matmods[matmod['modId']] = Matmod(matmod, matmodpath, pakdata)
# Load in object data (this also populates some item names, for container reporting)
self.items = {}
self.objects = {}
obj_list = paktree.get_all_recurs_matching_ext('/objects', 'object')
for idx, (obj_path, obj_name) in enumerate(obj_list):
obj_full_path = '{}/{}'.format(obj_path, obj_name)
obj_json = read_config(pakdata.get(obj_full_path))
self.objects[obj_json['objectName']] = SBObject(obj_json, obj_name, obj_path, pakdata)
self.items[obj_json['objectName']] = StarboundData.strip_colors(obj_json['shortdescription'])
# Load in plant data
# The Entities seem to actually only references these by PNG path, so
# I guess that's what we'll do too.
self.plants = {}
img_list = paktree.get_all_recurs_matching_ext('/plants', 'png')
for idx, (img_path, img_name) in enumerate(img_list):
img_full_path = '{}/{}'.format(img_path, img_name)
self.plants[img_full_path] = Plant(img_full_path, pakdata)
# Load in liquid data
self.liquids = {}
liquid_list = paktree.get_all_recurs_matching_ext('/liquids', 'liquid')
for idx, (liquid_path, liquid_name) in enumerate(liquid_list):
liquid_full_path = '{}/{}'.format(liquid_path, liquid_name)
liquid = read_config(pakdata.get(liquid_full_path))
self.liquids[liquid['liquidId']] = Liquid(liquid)
# Load in extra item name mapping (just for reporting container contents)
# (have verified that none of these "overwrite" the mappings set up by
# the object processing)
item_list = paktree.get_all_recurs_matching_ext('/items', set([
# There may be some things in here which shouldn't be, but whatever.
# Might make more sense to *exclude* extensions instead? That
# list would be a bit shorter: animation, combofinisher,
# config, frames, lua, png, weaponability, weaponcolors
'activeitem', 'augment', 'back', 'beamaxe', 'chest',
'consumable', 'currency', 'flashlight', 'harvestingtool',
'head', 'inspectiontool', 'instrument', 'item', 'legs',
'liqitem', 'matitem', 'miningtool', 'painttool',
'thrownitem', 'tillingtool', 'unlock', 'wiretool',
]))
for item_path, item_name in item_list:
item_full_path = '{}/{}'.format(item_path, item_name)
item = read_config(pakdata.get(item_full_path))
self.items[item['itemName']] = StarboundData.strip_colors(item['shortdescription'])
def get_all_players(self):
"""
Returns a list of tuples describing all players. Tuples will be of the form
(timestamp, Player object)
and will be sorted so that the most recently-modified players are first.
"""
entries = []
with os.scandir(self.base_player) as it:
for entry in it:
if entry.name.endswith('.player'):
player = self.get_player(entry.path)
entries.append((entry.stat().st_mtime, player))
# TODO: sorting by mtime, because that's how Starbound does it. Should
# we at least provide the option for alphabetical?
return sorted(entries, reverse=True)
def get_player(self, player_file):
"""
Returns player data, given the specified player file
"""
player = None
with open(os.path.join(self.base_player, player_file), 'rb') as playerdf:
player = Player(starbound.read_sbvj01(playerdf), self.base_universe)
return player
def get_worlds(self):
"""
Get available worlds from the `universe` dir. Useful when trying to find out
what worlds are available for a given user, since there's otherwise not really
a directory of those, apart from what planets have been "visited" (but that
doesn't actually have anything to do with what planets have been landed on,
and thus which worlds have maps).
Returns a tuple - the first element will be a nested dict with the top-level
keys being the string "coordinates" of the system, as three underscore-
separated numbers, and the next-level keys being the number of the planet.
The value for that key will be a list of filenames.
The second element of the tuple will be a dict whose keys are UUIDs. The
values will be a tuple whose first element is the filenames, and the second
element is the descriptive text found in the filename. (These will be random
encounters (if they've been saved) or space stations or the like, and don't
really have any useful text to show the user other than what's in the filename.)
"""
worlds = {}
extra_uuids = {}
for filename in os.listdir(self.base_universe):
match = re.match(r'([-0-9]+_[-0-9]+_[-0-9]+)_(\d+)(_(\d+))?.world', filename)
if match:
system = match.group(1)
planet_num = int(match.group(2))
if match.group(4) is None:
moon_num = None
else:
moon_num = int(match.group(4))
if system not in worlds:
worlds[system] = {}
if planet_num not in worlds[system]:
worlds[system][planet_num] = []
worlds[system][planet_num].append(os.path.join(self.base_universe, filename))
else:
match = re.match(r'(.*)-([0-9a-f]{32})-(\d+).(temp)?world', filename)
if match:
description = match.group(1)
uuid = match.group(2)
num = int(match.group(3))
is_temp = match.group(4)
extra_uuids[uuid] = (os.path.join(self.base_universe, filename), description)
return (worlds, extra_uuids)
def close(self):
"""
Closes our open filehandle
"""
if self.pakdf:
self.pakdf.close()
@staticmethod
def world_name_to_sortable(name):
"""
Given a raw world name (with color highlights and everything), convert
it to a string that will sort properly using regular ol' alphanumerics.
This is basically just converting the roman numerals into numbers.
"""
for (old, new) in StarboundData.world_name_sortable_conversions:
if old in name:
return StarboundData.strip_colors(name.replace(old, new).lower())
return name.lower()
@staticmethod
def open_world(filename):
"""
Given a `filename`, returns a tuple where the first element is
a World object, and the second is a filehandle which should be
closed once the app is through with it (this will actually be
an mmap object).
"""
with open(filename, 'rb') as worlddf:
worldmm = mmap.mmap(worlddf.fileno(), 0, access=mmap.ACCESS_READ)
world = StarboundData.World(worldmm, filename)
return (world, worldmm)
@staticmethod
def strip_colors(input_string):
"""
Strips color information from a string
"""
        return re.sub(r'\^\w+?;', '', input_string)
@staticmethod
def world_string_to_filename(world_desc):
"""
Converts a world description string (colon-delimited, starting with
CelestialWorld, ClientShipWorld, or InstanceWorld) into a filename.
Note that this does *not* return the data directory as well -- if
you intend to open a file with this (as opposed to just checking
filenames), be sure to check for `shipworld` in the file name, to
know to load from the `player` dir instead of `universe`.
"""
parts = world_desc.split(':')
world_type = parts[0]
if world_type == 'CelestialWorld':
if len(parts) < 5:
raise Exception('Not sure what to do with world string: {}'.format(world_desc))
coords = (parts[1], parts[2], parts[3])
planet = parts[4]
if len(parts) == 6:
moon = parts[5]
return '{}_{}_{}_{}_{}.world'.format(
*coords,
planet,
moon,
)
else:
return '{}_{}_{}_{}.world'.format(
*coords,
planet,
)
elif world_type == 'ClientShipWorld':
# Hardly seems worth it to bookmark your own ship, but it *is*
# possible, so we'll support it.
return '{}.shipworld'.format(parts[1])
elif world_type == 'InstanceWorld':
if len(parts) < 4:
raise Exception('Not sure what to do with world_string: {}'.format(world_desc))
inner_desc = parts[1]
target_uuid = parts[2]
suffix = parts[3]
if target_uuid != '-' and suffix != '-':
# Bookmarks to The Outpost (and perhaps others?) have blank info here,
# and we couldn't load them anyway, so just skip 'em
# TODO: is it always "unique" as a prefix?
return 'unique-{}-{}-{}.world'.format(
inner_desc,
target_uuid,
suffix,
)
else:
print('Unknown world type: {}'.format(world_type))
return None
@staticmethod
def highlight_pixmap(pixmap, r, g, b, a):
"""
Given a QPixmap `pixmap`, highlight it with the given color.
For convenience, returns `pixmap`, though of course the reference
will not have changed.
"""
painter = QtGui.QPainter(pixmap)
painter.setCompositionMode(painter.CompositionMode_SourceAtop)
painter.setBrush(QtGui.QBrush(QtGui.QColor(r, g, b, a)))
painter.setPen(QtGui.QPen(QtGui.QColor(0, 0, 0, 0)))
painter.drawRect(0, 0, pixmap.width(), pixmap.height())
return pixmap
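# Minimal illustrative example (hypothetical helper) for the pure string utilities above;
# the world name and world string are made up for the example.
def _demo_string_helpers():
    assert StarboundData.strip_colors('^green;IV^white; Prime') == 'IV Prime'
    assert StarboundData.world_name_to_sortable('Delta ^green;IV^white;') == 'delta 04'
    assert StarboundData.world_string_to_filename('CelestialWorld:1:-2:3:4') == '1_-2_3_4.world'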
| 41.494797 | 131 | 0.549293 |
79414ebd68dd088d446ed8c2e069ffb1ac55e433 | 1,078 | py | Python | tests/models/convert_deeplab.py | juanCastrillo/gluon2pytorch | dc73055f0c74dbc45a70f21057fa161123826d86 | [
"MIT"
] | 73 | 2018-11-01T03:07:11.000Z | 2021-03-03T01:48:58.000Z | tests/models/convert_deeplab.py | juanCastrillo/gluon2pytorch | dc73055f0c74dbc45a70f21057fa161123826d86 | [
"MIT"
] | 5 | 2018-11-02T06:45:33.000Z | 2019-09-24T06:54:59.000Z | tests/models/convert_deeplab.py | juanCastrillo/gluon2pytorch | dc73055f0c74dbc45a70f21057fa161123826d86 | [
"MIT"
] | 5 | 2019-01-29T00:03:24.000Z | 2021-01-12T14:18:59.000Z | import torch
import mxnet as mx
import numpy as np
import gluoncv
from gluon2pytorch import gluon2pytorch
def check_error(gluon_output, pytorch_output, epsilon=1e-4):
if not isinstance(pytorch_output, tuple):
pytorch_output = [pytorch_output]
gluon_output = [gluon_output]
for p, g in zip(pytorch_output, gluon_output):
pytorch_output = p.data.numpy()
gluon_output = g.asnumpy()
        error = np.max(np.abs(pytorch_output - gluon_output))
print('Error:', error)
assert error < epsilon
return error
if __name__ == '__main__':
net = gluoncv.model_zoo.DeepLabV3Plus(nclass=4, crop_size=224)
net.hybridize()
net.collect_params().initialize()
pytorch_model = gluon2pytorch(net, [(1, 3, 224, 224)], dst_dir='../tmp/', pytorch_module_name='densenet169')
pytorch_model.eval()
input_np = np.random.uniform(0, 1, (1, 3, 224, 224))
gluon_output = net(mx.nd.array(input_np))[0]
pytorch_output = pytorch_model(torch.FloatTensor(input_np))
check_error(gluon_output, pytorch_output)
| 27.641026 | 112 | 0.688312 |
79414edadfcaf8acac04abcaa8d6abc98fef06ee | 1,827 | py | Python | methods/crout.py | eechava6/NumericalAnalysis | 1b44349fe4c5e24413c3d5faeca7d227272814ec | [
"MIT"
] | null | null | null | methods/crout.py | eechava6/NumericalAnalysis | 1b44349fe4c5e24413c3d5faeca7d227272814ec | [
"MIT"
] | null | null | null | methods/crout.py | eechava6/NumericalAnalysis | 1b44349fe4c5e24413c3d5faeca7d227272814ec | [
"MIT"
] | null | null | null | import ast
import json
import numpy as np
from methods.utils import progressiveSustitution, regresiveSustitutions, isSquared
def crout(A,b):
A = ast.literal_eval(A)
b = ast.literal_eval(b)
A = np.array(A).astype(float)
b = np.array(b).astype(float)
pivots = []
res = {}
times = A[:, 0].size
U = np.zeros((times, times))
L = np.identity(times)
cont = 0
# Validates if matrix is squared
if (not isSquared(A)):
res["source"] = 'Not square matrix!'
res["error"] = True
return res
# Determines if det is 0
if (np.linalg.det(A) == 0):
res["source"] = 'Determinant is 0'
res["error"] = True
return res
# L,U = inicializa(n,0)
for d in range(0, times):
U[d, d] = 1
    for d in range(0, times):  # stages of the factorization
        # Compute column d of L
for j in range(d, times):
sum0 = sum([L[j, s] * U[s, d] for s in range(0, j)])
L[j, d] = A[j, d] - sum0
        # Compute row d of U
for j in range(d+1, times):
sum1 = sum([L[d, s] * U[s, j] for s in range(0, d)])
U[d, j] = (A[d, j] - sum1) / L[d, d]
cont = cont+1
pivots.append({'step': cont, "L": json.dumps(L.tolist()), "U": json.dumps(U.tolist())})
LB = np.concatenate([L,b.reshape((A.shape[0],1)) ], axis=1)
size = LB[:, 0].size
pro = progressiveSustitution(LB, size)
pro = np.array(pro).astype(float)
UB = np.concatenate([U, pro.reshape((U.shape[0], 1))], axis=1)
size2 = UB[:, 0].size
results = regresiveSustitutions(UB, size2 - 1)
res["pivots"] = pivots
res["error"] = False
res["results"] = results
return res
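# Minimal illustrative usage (hypothetical values): crout() receives the matrix and the
# right-hand side as string literals because the calling layer submits them that way.
# res['results'] holds the solution of A x = b; res['pivots'] records each factor step.
def _demo_crout():
    return crout('[[4, -2, 1], [20, -7, 12], [-8, 13, 17]]', '[11, 70, 17]')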
| 26.478261 | 97 | 0.517241 |
79414fca6d0fe84b4d185e39963d8601049c65a2 | 1,176 | py | Python | satchless/payment/migrations/0001_initial.py | styleseat/satchless | 884d0256c6af9b1de596d3875ee12dc02ecfaf8a | [
"BSD-4-Clause"
] | 1 | 2017-11-26T18:53:40.000Z | 2017-11-26T18:53:40.000Z | satchless/payment/migrations/0001_initial.py | styleseat/satchless | 884d0256c6af9b1de596d3875ee12dc02ecfaf8a | [
"BSD-4-Clause"
] | 13 | 2015-01-22T23:47:52.000Z | 2022-01-13T20:22:34.000Z | satchless/payment/migrations/0001_initial.py | styleseat/satchless | 884d0256c6af9b1de596d3875ee12dc02ecfaf8a | [
"BSD-4-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('order', '__first__'),
]
operations = [
migrations.CreateModel(
name='PaymentVariant',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('subtype_attr', models.CharField(max_length=500, editable=False)),
('name', models.CharField(max_length=128, verbose_name='name')),
('description', models.TextField(verbose_name='description', blank=True)),
('price', models.DecimalField(verbose_name='unit price', max_digits=12, decimal_places=4)),
('amount', models.DecimalField(verbose_name='payment applied', max_digits=12, decimal_places=4)),
('order', models.ForeignKey(to='order.Order', on_delete=models.PROTECT)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
| 36.75 | 114 | 0.597789 |
794150c90dc32247a1c99ca8125acec906964a86 | 489 | py | Python | test/sample.py | vibhatha/PythonMPI | e01bb0d4a53059c2bd77f74494db6d2d29844aea | [
"Apache-2.0"
] | null | null | null | test/sample.py | vibhatha/PythonMPI | e01bb0d4a53059c2bd77f74494db6d2d29844aea | [
"Apache-2.0"
] | null | null | null | test/sample.py | vibhatha/PythonMPI | e01bb0d4a53059c2bd77f74494db6d2d29844aea | [
"Apache-2.0"
] | null | null | null | from mpi4py import MPI
class Initial:
comm = []
rank = 0
def __init__(self):
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
        if rank == 0:
            data = {'a': 7, 'b': 3.14}
            # isend/irecv are non-blocking and return Request objects
            req = comm.isend(data, dest=1, tag=11)
            print("Sending : " + str(data) + ": Rank " + str(rank))
            req.wait()
        elif rank == 1:
            req = comm.irecv(source=0, tag=11)
            data = req.wait()  # wait() blocks until the message arrives and returns it
            print("Receiving : " + str(data) + ": Rank " + str(rank))
init = Initial()
| 22.227273 | 69 | 0.480573 |
7941518cf181e7c3d5b3a0ec477f26ee70fc8fd0 | 3,943 | py | Python | code/pretty_print.py | RahulSChand/Multi-Granularity-Hierarchical-Attention-Fusion-Networks-for-Question-Answering---TensorFlow | 0febc6e982026248be3759753b8be404311b5673 | [
"Apache-2.0"
] | 8 | 2019-05-03T15:33:28.000Z | 2021-12-27T17:31:30.000Z | code/pretty_print.py | xuwd11/QANet | 209915452cb924f73ae451f50b366a291dafaf63 | [
"MIT"
] | null | null | null | code/pretty_print.py | xuwd11/QANet | 209915452cb924f73ae451f50b366a291dafaf63 | [
"MIT"
] | 3 | 2019-05-29T07:39:15.000Z | 2022-02-28T14:20:13.000Z | # Copyright 2018 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains functions to pretty-print a SQuAD example"""
from colorama import Fore, Back, Style
from vocab import _PAD
# See here for more colorama formatting options:
# https://pypi.python.org/pypi/colorama
def yellowtext(s):
"""Yellow text"""
return Fore.YELLOW + Style.BRIGHT + s + Style.RESET_ALL + Fore.RESET
def greentext(s):
"""Green text"""
return Fore.GREEN + Style.BRIGHT + s + Style.RESET_ALL + Fore.RESET
def redtext(s):
"""Red text"""
return Fore.RED + Style.BRIGHT + s + Style.RESET_ALL + Fore.RESET
def redback(s):
"""Red background"""
return Back.RED + s + Back.RESET
def magentaback(s):
"""Magenta background"""
return Back.MAGENTA + s + Back.RESET
def print_example(word2id, context_tokens, qn_tokens, true_ans_start, true_ans_end, pred_ans_start, pred_ans_end, true_answer, pred_answer, f1, em):
"""
Pretty-print the results for one example.
Inputs:
word2id: dictionary mapping word (string) to word id (int)
context_tokens, qn_tokens: lists of strings, no padding.
Note these do *not* contain UNKs.
true_ans_start, true_ans_end, pred_ans_start, pred_ans_end: ints
true_answer, pred_answer: strings
f1: float
em: bool
"""
# Get the length (no padding) of this context
curr_context_len = len(context_tokens)
# Highlight out-of-vocabulary tokens in context_tokens
context_tokens = [w if w in word2id else "_%s_" % w for w in context_tokens]
# Highlight the true answer green.
# If the true answer span isn't in the range of the context_tokens, then this context has been truncated
truncated = False
for loc in range(true_ans_start, true_ans_end+1):
if loc in range(curr_context_len):
context_tokens[loc] = greentext(context_tokens[loc])
else:
truncated = True
# Check that the predicted span is within the range of the context_tokens
assert pred_ans_start in range(curr_context_len)
assert pred_ans_end in range(curr_context_len)
# Highlight the predicted start and end positions
# Note: the model may predict the end position as before the start position, in which case the predicted answer is an empty string.
context_tokens[pred_ans_start] = magentaback(context_tokens[pred_ans_start])
context_tokens[pred_ans_end] = redback(context_tokens[pred_ans_end])
# Print out the context
print("CONTEXT: (%s is true answer, %s is predicted start, %s is predicted end, _underscores_ are unknown tokens). Length: %i" % (greentext("green text"), magentaback("magenta background"), redback("red background"), len(context_tokens)))
print(" ".join(context_tokens))
# Print out the question, true and predicted answer, F1 and EM score
question = " ".join(qn_tokens)
print(yellowtext("{:>20}: {}".format("QUESTION", question)))
if truncated:
print(redtext("{:>20}: {}".format("TRUE ANSWER", true_answer)))
print(redtext("{:>22}(True answer was truncated from context)".format("")))
else:
print(yellowtext("{:>20}: {}".format("TRUE ANSWER", true_answer)))
print(yellowtext("{:>20}: {}".format("PREDICTED ANSWER", pred_answer)))
print(yellowtext("{:>20}: {:4.3f}".format("F1 SCORE ANSWER", f1)))
print(yellowtext("{:>20}: {}".format("EM SCORE", em)))
print("")
| 39.43 | 242 | 0.699721 |
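A minimal usage sketch for the pretty-printer above; every value below (the vocabulary, tokens, span indices and scores) is invented for illustration and is not taken from the SQuAD data the script normally runs on.

# Toy call to print_example, assuming colorama is installed and the module above
# is importable as pretty_print.
from pretty_print import print_example
word2id = {"the": 0, "cat": 1, "sat": 2, "on": 3, "mat": 4}
context_tokens = ["the", "cat", "sat", "on", "the", "mat"]
qn_tokens = ["where", "did", "the", "cat", "sit", "?"]
print_example(word2id, context_tokens, qn_tokens,
              true_ans_start=3, true_ans_end=5,
              pred_ans_start=3, pred_ans_end=5,
              true_answer="on the mat", pred_answer="on the mat",
              f1=1.0, em=True)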
7941525edbc411712852dc8e10229f9d1a9b098f | 7,946 | py | Python | inference.py | NeerajAI/PICK-pytorch | 61deb7c1e11df30c8f03726c061a2866234ac770 | [
"MIT"
] | null | null | null | inference.py | NeerajAI/PICK-pytorch | 61deb7c1e11df30c8f03726c061a2866234ac770 | [
"MIT"
] | null | null | null | inference.py | NeerajAI/PICK-pytorch | 61deb7c1e11df30c8f03726c061a2866234ac770 | [
"MIT"
] | null | null | null | import os
print(os.getcwd())
# os.path.dirname(os.path.abspath("__file__"))
path = '/Volumes/Extreme SSD/MLWork/DocAI/PICK-pytorch'
os.chdir(path)
# os.chdir('../')
# path = '/Users/neerajyadav/Documents/pycv/PICK-pytorch/'
"""Convert files of a selected directory in jpg format"""
import converter
# !pip install easyocr
import easyocr
#download the model
reader = easyocr.Reader(['en'], gpu = True)
# show an image
import PIL
from PIL import ImageDraw
from PIL import Image
import cv2
import pandas as pd
from pandas import DataFrame
import json
import glob
# import xlrd
import csv
import argparse
import torch
from tqdm import tqdm
from pathlib import Path
from torch.utils.data.dataloader import DataLoader
from allennlp.data.dataset_readers.dataset_utils.span_utils import bio_tags_to_spans
from parse_config import ConfigParser
import model.pick as pick_arch_module
from data_utils.pick_dataset import PICKDataset
from data_utils.pick_dataset import BatchCollateFn
from utils.util import iob_index_to_str, text_index_to_str
import converter
import shutil, os
### convert image into transcript file
"""Select jpg files and convert into transcript files"""
filenames = glob.glob("../TestImage/*.jpg")
filenamesj = glob.glob("../TestImage/*.jpeg")
filenames = filenames + filenamesj
filenames.sort()
def draw_boxes(image, bounds, color='green', width=1):
draw = ImageDraw.Draw(image)
for bound in bounds:
p0, p1, p2, p3 = bound[0]
draw.line([*p0, *p1, *p2, *p3, *p0], fill=color , width=width)
# if bound[1] == "ToTAL" or bound[1] =="TOTAL" or bound[1]=="TOTAL" or bound[1] =="Total Payable;" or bound[1] =="Total Payable:" or bound[1] =="Total Payable:" or bound[1]=='Total' or bound[1]=='TOTAL' or bound[1]=="Totz' Ingi, 0f GST" or bound[1]=="Total Sales (Inclusive of GST)" or bound[1]=="Net Total (MYR)":
# draw.line([*p0, *p1, *p2, *p3, *p0], fill=color, width=width)
# print(bound[0])
# image.save("temp.jpg")
return image
# draw_boxes(im, bounds)
def concatenate_list_data(list):
result= ''
for element in list:
result = result +str(element)
return result
for s in filenames:
# s = "Invoice0.jpg"
filen = s.split(".")[0]
print(filen)
im = PIL.Image.open(s).convert('RGB')
# Doing OCR. Get bounding boxes.
bounds = reader.readtext(s)
im = PIL.Image.open(s).convert('RGB')
df = pd.DataFrame()
CoordinatesValue = []
for i in bounds:
Coordinates =[]
CoordinatesValue=[]
temp_df = pd.DataFrame()
Coordinates.append(concatenate_list_data(i[0]).replace("][",",").replace("[","").replace("]","").replace(" ",""))
# print(i[1])
CoordinatesValue.append(i[1])
temp_df = DataFrame(zip(Coordinates,CoordinatesValue),columns = ['Coordinates', 'Value'])
# print(temp_df)
df = df.append(temp_df)
# print(item[0])
combine_lambda = lambda x: '{},{}'.format(x.Coordinates, x.Value)
df['Result'] = df.apply(combine_lambda, axis = 1)
dfnew= df['Result']
dfnew = dfnew[0].str.split(',', expand=True)
dfnew.insert(0,'name_of_column','')
dfnew['name_of_column'] = 1
# dfnew.to_csv(str(filen)+".tsv", sep = ',',index=False ,header=False )
dfnew.to_csv(str(filen)+".tsv",sep = ',',index=False,header=False, quotechar='',escapechar='\\',quoting=csv.QUOTE_NONE, )
### copy file from source folder to destination folder ###
for f in filenames:
shutil.copy(f, 'test_img/')
filetsv = glob.glob("/Volumes/Extreme SSD/MLWork/DocAI/TestImage/*.tsv")
for f in filetsv:
shutil.copy(f, 'test_boxes_and_transcripts/')
### inference code #####
gpu = -1  # gpu id fixed to -1 here, so inference runs on the CPU
device = torch.device(f'cuda:{gpu}' if gpu != -1 else 'cpu')
savedCheckpoint = 'saved/models/PICK_Default/test_999/model_best.pth'
checkpoint = torch.load(savedCheckpoint, map_location=device)
config = checkpoint['config']
state_dict = checkpoint['state_dict']
monitor_best = checkpoint['monitor_best']
print('Loading checkpoint: {} \nwith saved mEF {:.4f} ...'.format(savedCheckpoint, monitor_best))
# prepare model for testing
pick_model = config.init_obj('model_arch', pick_arch_module)
pick_model = pick_model.to(device)
pick_model.load_state_dict(state_dict)
pick_model.eval()
## pick ocr transcript file and image in below folders
out_img_path = "test_img/"
out_box_path = "test_boxes_and_transcripts/"
# setup dataset and data_loader instances
batch_size_val=1
test_dataset = PICKDataset(boxes_and_transcripts_folder=out_box_path,
images_folder=out_img_path,
resized_image_size=(480, 960),
ignore_error=False,
training=False)
test_data_loader = DataLoader(test_dataset, batch_size=batch_size_val, shuffle=False,
num_workers=0, collate_fn=BatchCollateFn(training=False)) ## have changed the number of workers to zero
# setup output path
output_folder = 'output'
output_path = Path(output_folder)
output_path.mkdir(parents=True, exist_ok=True)
with torch.no_grad():
for step_idx, input_data_item in enumerate(test_data_loader):
for key, input_value in input_data_item.items():
if input_value is not None and isinstance(input_value, torch.Tensor):
input_data_item[key] = input_value.to(device)
# For easier debug.
image_names = input_data_item["filenames"]
# print('image names')
# print(image_names)
output = pick_model(**input_data_item)
# print(output)
logits = output['logits'] # (B, N*T, out_dim)
# print(logits)
new_mask = output['new_mask']
# print(new_mask)
image_indexs = input_data_item['image_indexs'] # (B,)
text_segments = input_data_item['text_segments'] # (B, num_boxes, T)
mask = input_data_item['mask']
# List[(List[int], torch.Tensor)]
best_paths = pick_model.decoder.crf_layer.viterbi_tags(logits, mask=new_mask, logits_batch_first=True)
# print('best_paths')
# print(best_paths)
predicted_tags = []
for path, score in best_paths:
# print(path,score)
predicted_tags.append(path)
# convert iob index to iob string
decoded_tags_list = iob_index_to_str(predicted_tags)
# union text as a sequence and convert index to string
decoded_texts_list = text_index_to_str(text_segments, mask)
# print(decoded_texts_list)
for decoded_tags, decoded_texts, image_index in zip(decoded_tags_list, decoded_texts_list, image_indexs):
# List[ Tuple[str, Tuple[int, int]] ]
spans = bio_tags_to_spans(decoded_tags, [])
spans = sorted(spans, key=lambda x: x[1][0])
entities = [] # exists one to many case
# print(spans)
for entity_name, range_tuple in spans:
entity = dict(entity_name=entity_name,
text=''.join(decoded_texts[range_tuple[0]:range_tuple[1] + 1]))
entities.append(entity)
result_file = output_path.joinpath(Path(test_dataset.files_list[image_index]).stem + '.txt')
# print(entities)
with result_file.open(mode='w') as f:
for item in entities:
f.write('{}\t{}\n'.format(item['entity_name'], item['text']))
print(item['entity_name'],item['text'])
# dir = 'path/to/dir'
try:
dir = out_img_path
for f in os.listdir(dir):
os.remove(os.path.join(dir, f))
except:
pass
try:
dir = out_box_path
for f in os.listdir(dir):
os.remove(os.path.join(dir, f))
except:
pass
| 38.386473 | 322 | 0.654669 |
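For reference, each line the script above writes into the .tsv transcript files ends up in the box-and-transcript layout PICK consumes; this is inferred from the DataFrame manipulation above, and the coordinates below are invented example values, not real OCR output.

# Layout per line: index,x1,y1,x2,y2,x3,y3,x4,y4,transcript
example_row = "1,88,34,310,34,310,70,88,70,TOTAL 12.50"
parts = example_row.split(",")
line_idx = parts[0]                      # constant 1 in the script above
box = [int(v) for v in parts[1:9]]       # four (x, y) corners of the OCR box
transcript = ",".join(parts[9:])         # remaining fields form the text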
7941528580afb67fafdad4a8aea90e41a7f98e63 | 1,563 | py | Python | examples/example.py | harlev/jsonbox-python | 09ce4334b7167084cc814080995178c737016ef1 | [
"MIT"
] | 13 | 2019-09-17T01:13:52.000Z | 2020-08-03T13:07:38.000Z | examples/example.py | harlev/jsonbox-python | 09ce4334b7167084cc814080995178c737016ef1 | [
"MIT"
] | null | null | null | examples/example.py | harlev/jsonbox-python | 09ce4334b7167084cc814080995178c737016ef1 | [
"MIT"
] | 2 | 2019-09-19T15:20:34.000Z | 2020-04-27T11:40:43.000Z | from jsonbox import JsonBox
# generate unique box id
MY_BOX_ID = JsonBox.get_new_box_id()
# create instance
jb = JsonBox()
data = [{"name": "first", "age": 25}, {"name": "second", "age": 19}]
# write data
result = jb.write(data, MY_BOX_ID)
# get record id of written data
record_ids = jb.get_record_id(result)
# read record
print(jb.read(MY_BOX_ID, record_ids[0]))
# read all records in box
print(jb.read(MY_BOX_ID))
# get metadata for box
print(jb.get_meta(MY_BOX_ID))
# read all records in box with sort
print(jb.read(MY_BOX_ID, sort_by="age"))
# read records in box with sort matching query (see documentation for syntax)
print(jb.read(MY_BOX_ID, query="name:firs*"))
print(jb.read(MY_BOX_ID, query="age:=19"))
# read records with limit
print(jb.read(MY_BOX_ID, limit=1))
# read records with skip
print(jb.read(MY_BOX_ID, skip=1))
# update data
data = {"name": "Bob", "age": 23}
jb.update(data, MY_BOX_ID, record_ids[0])
# read updated data
print(jb.read(MY_BOX_ID))
print(jb.read(MY_BOX_ID, record_ids[0]))
# delete records matching to query
print(jb.delete(MY_BOX_ID, query="age:=23"))
# delete records
jb.delete(MY_BOX_ID, record_ids[1])
# write to a private box
MY_PRIVATE_BOX_ID = JsonBox.get_new_box_id()
api_key = jb.get_new_api_key()
result = jb.write(data, MY_PRIVATE_BOX_ID, api_key=api_key)
record_id = jb.get_record_id(result)
# update a private box
data = {"name": "David", "age": 35}
jb.update(data, MY_PRIVATE_BOX_ID, record_id, api_key=api_key)
# delete a private box
jb.delete(MY_PRIVATE_BOX_ID, record_id, api_key=api_key)
| 24.046154 | 77 | 0.733845 |
794153040273ea8af84655d60f568285136bcd95 | 31,395 | py | Python | pandas/tests/frame/indexing/test_where.py | bamford/pandas | a474af5b4526dc333f62260587a1d8ef494df57c | [
"BSD-3-Clause"
] | 1 | 2022-01-26T19:37:10.000Z | 2022-01-26T19:37:10.000Z | pandas/tests/frame/indexing/test_where.py | bamford/pandas | a474af5b4526dc333f62260587a1d8ef494df57c | [
"BSD-3-Clause"
] | 1 | 2021-12-01T03:10:17.000Z | 2021-12-23T20:27:21.000Z | pandas/tests/frame/indexing/test_where.py | bamford/pandas | a474af5b4526dc333f62260587a1d8ef494df57c | [
"BSD-3-Clause"
] | null | null | null | from datetime import datetime
from hypothesis import (
given,
settings,
)
import numpy as np
import pytest
from pandas.compat import np_version_under1p19
from pandas.core.dtypes.common import is_scalar
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Series,
StringDtype,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
from pandas._testing._hypothesis import OPTIONAL_ONE_OF_ALL
@pytest.fixture(params=["default", "float_string", "mixed_float", "mixed_int"])
def where_frame(request, float_string_frame, mixed_float_frame, mixed_int_frame):
if request.param == "default":
return DataFrame(np.random.randn(5, 3), columns=["A", "B", "C"])
if request.param == "float_string":
return float_string_frame
if request.param == "mixed_float":
return mixed_float_frame
if request.param == "mixed_int":
return mixed_int_frame
def _safe_add(df):
# only add to the numeric items
def is_ok(s):
return (
issubclass(s.dtype.type, (np.integer, np.floating)) and s.dtype != "uint8"
)
return DataFrame(dict((c, s + 1) if is_ok(s) else (c, s) for c, s in df.items()))
class TestDataFrameIndexingWhere:
def test_where_get(self, where_frame, float_string_frame):
def _check_get(df, cond, check_dtypes=True):
other1 = _safe_add(df)
rs = df.where(cond, other1)
rs2 = df.where(cond.values, other1)
for k, v in rs.items():
exp = Series(np.where(cond[k], df[k], other1[k]), index=v.index)
tm.assert_series_equal(v, exp, check_names=False)
tm.assert_frame_equal(rs, rs2)
# dtypes
if check_dtypes:
assert (rs.dtypes == df.dtypes).all()
# check getting
df = where_frame
if df is float_string_frame:
msg = "'>' not supported between instances of 'str' and 'int'"
with pytest.raises(TypeError, match=msg):
df > 0
return
cond = df > 0
_check_get(df, cond)
def test_where_upcasting(self):
# upcasting case (GH # 2794)
df = DataFrame(
{
c: Series([1] * 3, dtype=c)
for c in ["float32", "float64", "int32", "int64"]
}
)
df.iloc[1, :] = 0
result = df.dtypes
expected = Series(
[
np.dtype("float32"),
np.dtype("float64"),
np.dtype("int32"),
np.dtype("int64"),
],
index=["float32", "float64", "int32", "int64"],
)
# when we don't preserve boolean casts
#
# expected = Series({ 'float32' : 1, 'float64' : 3 })
tm.assert_series_equal(result, expected)
def test_where_alignment(self, where_frame, float_string_frame, mixed_int_frame):
# aligning
def _check_align(df, cond, other, check_dtypes=True):
rs = df.where(cond, other)
for i, k in enumerate(rs.columns):
result = rs[k]
d = df[k].values
c = cond[k].reindex(df[k].index).fillna(False).values
if is_scalar(other):
o = other
else:
if isinstance(other, np.ndarray):
o = Series(other[:, i], index=result.index).values
else:
o = other[k].values
new_values = d if c.all() else np.where(c, d, o)
expected = Series(new_values, index=result.index, name=k)
# since we can't always have the correct numpy dtype
# as numpy doesn't know how to downcast, don't check
tm.assert_series_equal(result, expected, check_dtype=False)
# dtypes
# can't check dtype when other is an ndarray
if check_dtypes and not isinstance(other, np.ndarray):
assert (rs.dtypes == df.dtypes).all()
df = where_frame
if df is float_string_frame:
msg = "'>' not supported between instances of 'str' and 'int'"
with pytest.raises(TypeError, match=msg):
df > 0
return
# other is a frame
cond = (df > 0)[1:]
_check_align(df, cond, _safe_add(df))
# check other is ndarray
cond = df > 0
warn = None
if df is mixed_int_frame:
warn = FutureWarning
with tm.assert_produces_warning(warn, match="Downcasting integer-dtype"):
_check_align(df, cond, (_safe_add(df).values))
# integers are upcast, so don't check the dtypes
cond = df > 0
check_dtypes = all(not issubclass(s.type, np.integer) for s in df.dtypes)
_check_align(df, cond, np.nan, check_dtypes=check_dtypes)
def test_where_invalid(self):
# invalid conditions
df = DataFrame(np.random.randn(5, 3), columns=["A", "B", "C"])
cond = df > 0
err1 = (df + 1).values[0:2, :]
msg = "other must be the same shape as self when an ndarray"
with pytest.raises(ValueError, match=msg):
df.where(cond, err1)
err2 = cond.iloc[:2, :].values
other1 = _safe_add(df)
msg = "Array conditional must be same shape as self"
with pytest.raises(ValueError, match=msg):
df.where(err2, other1)
with pytest.raises(ValueError, match=msg):
df.mask(True)
with pytest.raises(ValueError, match=msg):
df.mask(0)
def test_where_set(self, where_frame, float_string_frame):
# where inplace
def _check_set(df, cond, check_dtypes=True):
dfi = df.copy()
econd = cond.reindex_like(df).fillna(True)
expected = dfi.mask(~econd)
return_value = dfi.where(cond, np.nan, inplace=True)
assert return_value is None
tm.assert_frame_equal(dfi, expected)
            # dtypes (and confirm upcasts)
if check_dtypes:
for k, v in df.dtypes.items():
if issubclass(v.type, np.integer) and not cond[k].all():
v = np.dtype("float64")
assert dfi[k].dtype == v
df = where_frame
if df is float_string_frame:
msg = "'>' not supported between instances of 'str' and 'int'"
with pytest.raises(TypeError, match=msg):
df > 0
return
cond = df > 0
_check_set(df, cond)
cond = df >= 0
_check_set(df, cond)
# aligning
cond = (df >= 0)[1:]
_check_set(df, cond)
def test_where_series_slicing(self):
# GH 10218
# test DataFrame.where with Series slicing
df = DataFrame({"a": range(3), "b": range(4, 7)})
result = df.where(df["a"] == 1)
expected = df[df["a"] == 1].reindex(df.index)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("klass", [list, tuple, np.array])
def test_where_array_like(self, klass):
# see gh-15414
df = DataFrame({"a": [1, 2, 3]})
cond = [[False], [True], [True]]
expected = DataFrame({"a": [np.nan, 2, 3]})
result = df.where(klass(cond))
tm.assert_frame_equal(result, expected)
df["b"] = 2
expected["b"] = [2, np.nan, 2]
cond = [[False, True], [True, False], [True, True]]
result = df.where(klass(cond))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"cond",
[
[[1], [0], [1]],
Series([[2], [5], [7]]),
DataFrame({"a": [2, 5, 7]}),
[["True"], ["False"], ["True"]],
[[Timestamp("2017-01-01")], [pd.NaT], [Timestamp("2017-01-02")]],
],
)
def test_where_invalid_input_single(self, cond):
# see gh-15414: only boolean arrays accepted
df = DataFrame({"a": [1, 2, 3]})
msg = "Boolean array expected for the condition"
with pytest.raises(ValueError, match=msg):
df.where(cond)
@pytest.mark.parametrize(
"cond",
[
[[0, 1], [1, 0], [1, 1]],
Series([[0, 2], [5, 0], [4, 7]]),
[["False", "True"], ["True", "False"], ["True", "True"]],
DataFrame({"a": [2, 5, 7], "b": [4, 8, 9]}),
[
[pd.NaT, Timestamp("2017-01-01")],
[Timestamp("2017-01-02"), pd.NaT],
[Timestamp("2017-01-03"), Timestamp("2017-01-03")],
],
],
)
def test_where_invalid_input_multiple(self, cond):
# see gh-15414: only boolean arrays accepted
df = DataFrame({"a": [1, 2, 3], "b": [2, 2, 2]})
msg = "Boolean array expected for the condition"
with pytest.raises(ValueError, match=msg):
df.where(cond)
def test_where_dataframe_col_match(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
cond = DataFrame([[True, False, True], [False, False, True]])
result = df.where(cond)
expected = DataFrame([[1.0, np.nan, 3], [np.nan, np.nan, 6]])
tm.assert_frame_equal(result, expected)
# this *does* align, though has no matching columns
cond.columns = ["a", "b", "c"]
result = df.where(cond)
expected = DataFrame(np.nan, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_where_ndframe_align(self):
msg = "Array conditional must be same shape as self"
df = DataFrame([[1, 2, 3], [4, 5, 6]])
cond = [True]
with pytest.raises(ValueError, match=msg):
df.where(cond)
expected = DataFrame([[1, 2, 3], [np.nan, np.nan, np.nan]])
out = df.where(Series(cond))
tm.assert_frame_equal(out, expected)
cond = np.array([False, True, False, True])
with pytest.raises(ValueError, match=msg):
df.where(cond)
expected = DataFrame([[np.nan, np.nan, np.nan], [4, 5, 6]])
out = df.where(Series(cond))
tm.assert_frame_equal(out, expected)
def test_where_bug(self):
# see gh-2793
df = DataFrame(
{"a": [1.0, 2.0, 3.0, 4.0], "b": [4.0, 3.0, 2.0, 1.0]}, dtype="float64"
)
expected = DataFrame(
{"a": [np.nan, np.nan, 3.0, 4.0], "b": [4.0, 3.0, np.nan, np.nan]},
dtype="float64",
)
result = df.where(df > 2, np.nan)
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(result > 2, np.nan, inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
def test_where_bug_mixed(self, any_signed_int_numpy_dtype):
# see gh-2793
df = DataFrame(
{
"a": np.array([1, 2, 3, 4], dtype=any_signed_int_numpy_dtype),
"b": np.array([4.0, 3.0, 2.0, 1.0], dtype="float64"),
}
)
expected = DataFrame(
{"a": [np.nan, np.nan, 3.0, 4.0], "b": [4.0, 3.0, np.nan, np.nan]},
dtype="float64",
)
result = df.where(df > 2, np.nan)
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(result > 2, np.nan, inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
def test_where_bug_transposition(self):
# see gh-7506
a = DataFrame({0: [1, 2], 1: [3, 4], 2: [5, 6]})
b = DataFrame({0: [np.nan, 8], 1: [9, np.nan], 2: [np.nan, np.nan]})
do_not_replace = b.isna() | (a > b)
expected = a.copy()
expected[~do_not_replace] = b
result = a.where(do_not_replace, b)
tm.assert_frame_equal(result, expected)
a = DataFrame({0: [4, 6], 1: [1, 0]})
b = DataFrame({0: [np.nan, 3], 1: [3, np.nan]})
do_not_replace = b.isna() | (a > b)
expected = a.copy()
expected[~do_not_replace] = b
result = a.where(do_not_replace, b)
tm.assert_frame_equal(result, expected)
def test_where_datetime(self):
# GH 3311
df = DataFrame(
{
"A": date_range("20130102", periods=5),
"B": date_range("20130104", periods=5),
"C": np.random.randn(5),
}
)
stamp = datetime(2013, 1, 3)
msg = "'>' not supported between instances of 'float' and 'datetime.datetime'"
with pytest.raises(TypeError, match=msg):
df > stamp
result = df[df.iloc[:, :-1] > stamp]
expected = df.copy()
expected.loc[[0, 1], "A"] = np.nan
expected.loc[:, "C"] = np.nan
tm.assert_frame_equal(result, expected)
def test_where_none(self):
# GH 4667
# setting with None changes dtype
df = DataFrame({"series": Series(range(10))}).astype(float)
df[df > 7] = None
expected = DataFrame(
{"series": Series([0, 1, 2, 3, 4, 5, 6, 7, np.nan, np.nan])}
)
tm.assert_frame_equal(df, expected)
# GH 7656
df = DataFrame(
[
{"A": 1, "B": np.nan, "C": "Test"},
{"A": np.nan, "B": "Test", "C": np.nan},
]
)
msg = "boolean setting on mixed-type"
with pytest.raises(TypeError, match=msg):
df.where(~isna(df), None, inplace=True)
def test_where_empty_df_and_empty_cond_having_non_bool_dtypes(self):
# see gh-21947
df = DataFrame(columns=["a"])
cond = df
assert (cond.dtypes == object).all()
result = df.where(cond)
tm.assert_frame_equal(result, df)
def test_where_align(self):
def create():
df = DataFrame(np.random.randn(10, 3))
df.iloc[3:5, 0] = np.nan
df.iloc[4:6, 1] = np.nan
df.iloc[5:8, 2] = np.nan
return df
# series
df = create()
expected = df.fillna(df.mean())
result = df.where(pd.notna(df), df.mean(), axis="columns")
tm.assert_frame_equal(result, expected)
return_value = df.where(pd.notna(df), df.mean(), inplace=True, axis="columns")
assert return_value is None
tm.assert_frame_equal(df, expected)
df = create().fillna(0)
expected = df.apply(lambda x, y: x.where(x > 0, y), y=df[0])
result = df.where(df > 0, df[0], axis="index")
tm.assert_frame_equal(result, expected)
result = df.where(df > 0, df[0], axis="rows")
tm.assert_frame_equal(result, expected)
# frame
df = create()
expected = df.fillna(1)
result = df.where(
pd.notna(df), DataFrame(1, index=df.index, columns=df.columns)
)
tm.assert_frame_equal(result, expected)
def test_where_complex(self):
# GH 6345
expected = DataFrame([[1 + 1j, 2], [np.nan, 4 + 1j]], columns=["a", "b"])
df = DataFrame([[1 + 1j, 2], [5 + 1j, 4 + 1j]], columns=["a", "b"])
df[df.abs() >= 5] = np.nan
tm.assert_frame_equal(df, expected)
def test_where_axis(self, using_array_manager):
# GH 9736
df = DataFrame(np.random.randn(2, 2))
mask = DataFrame([[False, False], [False, False]])
s = Series([0, 1])
expected = DataFrame([[0, 0], [1, 1]], dtype="float64")
result = df.where(mask, s, axis="index")
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, s, axis="index", inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
expected = DataFrame([[0, 1], [0, 1]], dtype="float64")
result = df.where(mask, s, axis="columns")
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, s, axis="columns", inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
# Upcast needed
df = DataFrame([[1, 2], [3, 4]], dtype="int64")
mask = DataFrame([[False, False], [False, False]])
s = Series([0, np.nan])
expected = DataFrame([[0, 0], [np.nan, np.nan]], dtype="float64")
result = df.where(mask, s, axis="index")
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, s, axis="index", inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
warn = FutureWarning if using_array_manager else None
expected = DataFrame([[0, np.nan], [0, np.nan]])
with tm.assert_produces_warning(warn, match="Downcasting integer-dtype"):
result = df.where(mask, s, axis="columns")
tm.assert_frame_equal(result, expected)
expected = DataFrame(
{
0: np.array([0, 0], dtype="int64"),
1: np.array([np.nan, np.nan], dtype="float64"),
}
)
result = df.copy()
return_value = result.where(mask, s, axis="columns", inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
def test_where_axis_multiple_dtypes(self):
# Multiple dtypes (=> multiple Blocks)
df = pd.concat(
[
DataFrame(np.random.randn(10, 2)),
DataFrame(np.random.randint(0, 10, size=(10, 2)), dtype="int64"),
],
ignore_index=True,
axis=1,
)
mask = DataFrame(False, columns=df.columns, index=df.index)
s1 = Series(1, index=df.columns)
s2 = Series(2, index=df.index)
result = df.where(mask, s1, axis="columns")
expected = DataFrame(1.0, columns=df.columns, index=df.index)
expected[2] = expected[2].astype("int64")
expected[3] = expected[3].astype("int64")
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, s1, axis="columns", inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
result = df.where(mask, s2, axis="index")
expected = DataFrame(2.0, columns=df.columns, index=df.index)
expected[2] = expected[2].astype("int64")
expected[3] = expected[3].astype("int64")
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, s2, axis="index", inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
# DataFrame vs DataFrame
d1 = df.copy().drop(1, axis=0)
expected = df.copy()
expected.loc[1, :] = np.nan
result = df.where(mask, d1)
tm.assert_frame_equal(result, expected)
result = df.where(mask, d1, axis="index")
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, d1, inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, d1, inplace=True, axis="index")
assert return_value is None
tm.assert_frame_equal(result, expected)
d2 = df.copy().drop(1, axis=1)
expected = df.copy()
expected.loc[:, 1] = np.nan
result = df.where(mask, d2)
tm.assert_frame_equal(result, expected)
result = df.where(mask, d2, axis="columns")
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, d2, inplace=True)
assert return_value is None
tm.assert_frame_equal(result, expected)
result = df.copy()
return_value = result.where(mask, d2, inplace=True, axis="columns")
assert return_value is None
tm.assert_frame_equal(result, expected)
def test_where_callable(self):
# GH 12533
df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
result = df.where(lambda x: x > 4, lambda x: x + 1)
exp = DataFrame([[2, 3, 4], [5, 5, 6], [7, 8, 9]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.where(df > 4, df + 1))
# return ndarray and scalar
result = df.where(lambda x: (x % 2 == 0).values, lambda x: 99)
exp = DataFrame([[99, 2, 99], [4, 99, 6], [99, 8, 99]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.where(df % 2 == 0, 99))
# chain
result = (df + 2).where(lambda x: x > 8, lambda x: x + 10)
exp = DataFrame([[13, 14, 15], [16, 17, 18], [9, 10, 11]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, (df + 2).where((df + 2) > 8, (df + 2) + 10))
def test_where_tz_values(self, tz_naive_fixture, frame_or_series):
obj1 = DataFrame(
DatetimeIndex(["20150101", "20150102", "20150103"], tz=tz_naive_fixture),
columns=["date"],
)
obj2 = DataFrame(
DatetimeIndex(["20150103", "20150104", "20150105"], tz=tz_naive_fixture),
columns=["date"],
)
mask = DataFrame([True, True, False], columns=["date"])
exp = DataFrame(
DatetimeIndex(["20150101", "20150102", "20150105"], tz=tz_naive_fixture),
columns=["date"],
)
if frame_or_series is Series:
obj1 = obj1["date"]
obj2 = obj2["date"]
mask = mask["date"]
exp = exp["date"]
result = obj1.where(mask, obj2)
tm.assert_equal(exp, result)
def test_df_where_change_dtype(self):
# GH#16979
df = DataFrame(np.arange(2 * 3).reshape(2, 3), columns=list("ABC"))
mask = np.array([[True, False, False], [False, False, True]])
result = df.where(mask)
expected = DataFrame(
[[0, np.nan, np.nan], [np.nan, np.nan, 5]], columns=list("ABC")
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [{}, {"other": None}])
def test_df_where_with_category(self, kwargs):
# GH#16979
df = DataFrame(np.arange(2 * 3).reshape(2, 3), columns=list("ABC"))
mask = np.array([[True, False, False], [False, False, True]])
# change type to category
df.A = df.A.astype("category")
df.B = df.B.astype("category")
df.C = df.C.astype("category")
result = df.where(mask, **kwargs)
A = pd.Categorical([0, np.nan], categories=[0, 3])
B = pd.Categorical([np.nan, np.nan], categories=[1, 4])
C = pd.Categorical([np.nan, 5], categories=[2, 5])
expected = DataFrame({"A": A, "B": B, "C": C})
tm.assert_frame_equal(result, expected)
# Check Series.where while we're here
result = df.A.where(mask[:, 0], **kwargs)
expected = Series(A, name="A")
tm.assert_series_equal(result, expected)
def test_where_categorical_filtering(self):
# GH#22609 Verify filtering operations on DataFrames with categorical Series
df = DataFrame(data=[[0, 0], [1, 1]], columns=["a", "b"])
df["b"] = df["b"].astype("category")
result = df.where(df["a"] > 0)
expected = df.copy()
expected.loc[0, :] = np.nan
tm.assert_equal(result, expected)
def test_where_ea_other(self):
# GH#38729/GH#38742
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
arr = pd.array([7, pd.NA, 9])
ser = Series(arr)
mask = np.ones(df.shape, dtype=bool)
mask[1, :] = False
# TODO: ideally we would get Int64 instead of object
result = df.where(mask, ser, axis=0)
expected = DataFrame({"A": [1, pd.NA, 3], "B": [4, pd.NA, 6]}).astype(object)
tm.assert_frame_equal(result, expected)
ser2 = Series(arr[:2], index=["A", "B"])
expected = DataFrame({"A": [1, 7, 3], "B": [4, pd.NA, 6]})
expected["B"] = expected["B"].astype(object)
result = df.where(mask, ser2, axis=1)
tm.assert_frame_equal(result, expected)
def test_where_interval_noop(self):
# GH#44181
df = DataFrame([pd.Interval(0, 0)])
res = df.where(df.notna())
tm.assert_frame_equal(res, df)
ser = df[0]
res = ser.where(ser.notna())
tm.assert_series_equal(res, ser)
@pytest.mark.parametrize(
"dtype",
[
"timedelta64[ns]",
"datetime64[ns]",
"datetime64[ns, Asia/Tokyo]",
"Period[D]",
],
)
def test_where_datetimelike_noop(self, dtype):
# GH#45135, analogue to GH#44181 for Period don't raise on no-op
# For td64/dt64/dt64tz we already don't raise, but also are
# checking that we don't unnecessarily upcast to object.
ser = Series(np.arange(3) * 10 ** 9, dtype=np.int64).view(dtype)
df = ser.to_frame()
mask = np.array([False, False, False])
res = ser.where(~mask, "foo")
tm.assert_series_equal(res, ser)
mask2 = mask.reshape(-1, 1)
res2 = df.where(~mask2, "foo")
tm.assert_frame_equal(res2, df)
res3 = ser.mask(mask, "foo")
tm.assert_series_equal(res3, ser)
res4 = df.mask(mask2, "foo")
tm.assert_frame_equal(res4, df)
def test_where_try_cast_deprecated(frame_or_series):
obj = DataFrame(np.random.randn(4, 3))
obj = tm.get_obj(obj, frame_or_series)
mask = obj > 0
with tm.assert_produces_warning(FutureWarning):
# try_cast keyword deprecated
obj.where(mask, -1, try_cast=False)
def test_where_int_downcasting_deprecated(using_array_manager, request):
# GH#44597
if not using_array_manager:
mark = pytest.mark.xfail(
reason="After fixing a bug in can_hold_element, we don't go through "
"the deprecated path, and also up-cast both columns to int32 "
"instead of just 1."
)
request.node.add_marker(mark)
arr = np.arange(6).astype(np.int16).reshape(3, 2)
df = DataFrame(arr)
mask = np.zeros(arr.shape, dtype=bool)
mask[:, 0] = True
msg = "Downcasting integer-dtype"
warn = FutureWarning if not using_array_manager else None
with tm.assert_produces_warning(warn, match=msg):
res = df.where(mask, 2 ** 17)
expected = DataFrame({0: arr[:, 0], 1: np.array([2 ** 17] * 3, dtype=np.int32)})
tm.assert_frame_equal(res, expected)
def test_where_copies_with_noop(frame_or_series):
# GH-39595
result = frame_or_series([1, 2, 3, 4])
expected = result.copy()
col = result[0] if frame_or_series is DataFrame else result
where_res = result.where(col < 5)
where_res *= 2
tm.assert_equal(result, expected)
where_res = result.where(col > 5, [1, 2, 3, 4])
where_res *= 2
tm.assert_equal(result, expected)
def test_where_string_dtype(frame_or_series):
# GH40824
obj = frame_or_series(
["a", "b", "c", "d"], index=["id1", "id2", "id3", "id4"], dtype=StringDtype()
)
filtered_obj = frame_or_series(
["b", "c"], index=["id2", "id3"], dtype=StringDtype()
)
filter_ser = Series([False, True, True, False])
result = obj.where(filter_ser, filtered_obj)
expected = frame_or_series(
[pd.NA, "b", "c", pd.NA],
index=["id1", "id2", "id3", "id4"],
dtype=StringDtype(),
)
tm.assert_equal(result, expected)
def test_where_bool_comparison():
# GH 10336
df_mask = DataFrame(
{"AAA": [True] * 4, "BBB": [False] * 4, "CCC": [True, False, True, False]}
)
result = df_mask.where(df_mask == False) # noqa:E712
expected = DataFrame(
{
"AAA": np.array([np.nan] * 4, dtype=object),
"BBB": [False] * 4,
"CCC": [np.nan, False, np.nan, False],
}
)
tm.assert_frame_equal(result, expected)
def test_where_none_nan_coerce():
# GH 15613
expected = DataFrame(
{
"A": [Timestamp("20130101"), pd.NaT, Timestamp("20130103")],
"B": [1, 2, np.nan],
}
)
result = expected.where(expected.notnull(), None)
tm.assert_frame_equal(result, expected)
def test_where_non_keyword_deprecation(frame_or_series):
# GH 41485
obj = frame_or_series(range(5))
msg = (
"In a future version of pandas all arguments of "
f"{frame_or_series.__name__}.where except for the arguments 'cond' "
"and 'other' will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = obj.where(obj > 1, 10, False)
expected = frame_or_series([10, 10, 2, 3, 4])
tm.assert_equal(expected, result)
def test_where_columns_casting():
# GH 42295
df = DataFrame({"a": [1.0, 2.0], "b": [3, np.nan]})
expected = df.copy()
result = df.where(pd.notnull(df), None)
# make sure dtypes don't change
tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize("as_cat", [True, False])
def test_where_period_invalid_na(frame_or_series, as_cat, request):
# GH#44697
idx = pd.period_range("2016-01-01", periods=3, freq="D")
if as_cat:
idx = idx.astype("category")
obj = frame_or_series(idx)
# NA value that we should *not* cast to Period dtype
tdnat = pd.NaT.to_numpy("m8[ns]")
mask = np.array([True, True, False], ndmin=obj.ndim).T
if as_cat:
msg = (
r"Cannot setitem on a Categorical with a new category \(NaT\), "
"set the categories first"
)
if np_version_under1p19:
mark = pytest.mark.xfail(
reason="When evaluating the f-string to generate the exception "
"message, numpy somehow ends up trying to cast None to int, so "
"ends up raising TypeError but with an unrelated message."
)
request.node.add_marker(mark)
else:
msg = "value should be a 'Period'"
with pytest.raises(TypeError, match=msg):
obj.where(mask, tdnat)
with pytest.raises(TypeError, match=msg):
obj.mask(mask, tdnat)
def test_where_nullable_invalid_na(frame_or_series, any_numeric_ea_dtype):
# GH#44697
arr = pd.array([1, 2, 3], dtype=any_numeric_ea_dtype)
obj = frame_or_series(arr)
mask = np.array([True, True, False], ndmin=obj.ndim).T
msg = r"Invalid value '.*' for dtype (U?Int|Float)\d{1,2}"
for null in tm.NP_NAT_OBJECTS + [pd.NaT]:
# NaT is an NA value that we should *not* cast to pd.NA dtype
with pytest.raises(TypeError, match=msg):
obj.where(mask, null)
with pytest.raises(TypeError, match=msg):
obj.mask(mask, null)
@given(data=OPTIONAL_ONE_OF_ALL)
@settings(deadline=None) # GH 44969
def test_where_inplace_casting(data):
# GH 22051
df = DataFrame({"a": data})
df_copy = df.where(pd.notnull(df), None).copy()
df.where(pd.notnull(df), None, inplace=True)
tm.assert_equal(df, df_copy)
| 33.794403 | 86 | 0.561777 |
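A quick toy illustration of the where/mask semantics these tests exercise, using only the public pandas API (this snippet is not part of the test suite):

import pandas as pd
df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
print(df.where(df > 2))            # entries where the condition is False become NaN
print(df.where(df > 2, other=0))   # ...or are replaced by `other`
print(df.mask(df > 2, other=0))    # mask is the complement: replace where the condition is True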
794153a6dae8319d1477085dab64349fd63be00b | 488 | py | Python | 4_factory/factory_method/chicago_style_veggie_pizza.py | hypersport/Head-First-Design-Patterns-Python | 0c8b831ae89ebbbef8b203b96508deb7e3063590 | [
"MIT"
] | null | null | null | 4_factory/factory_method/chicago_style_veggie_pizza.py | hypersport/Head-First-Design-Patterns-Python | 0c8b831ae89ebbbef8b203b96508deb7e3063590 | [
"MIT"
] | null | null | null | 4_factory/factory_method/chicago_style_veggie_pizza.py | hypersport/Head-First-Design-Patterns-Python | 0c8b831ae89ebbbef8b203b96508deb7e3063590 | [
"MIT"
] | null | null | null | from pizza import Pizza
class ChicagoStyleVeggiePizza(Pizza):
def __init__(self):
self.name = 'Chicago Deep Dish Veggie Pizza'
self.dough = 'Extra Thick Crust Dough'
self.sauce = 'Plum Tomato Sauce'
self.toppings.append('Shredded Mozzarella Cheese')
self.toppings.append('Black Olives')
self.toppings.append('Spinach')
self.toppings.append('Eggplant')
def cut(self):
print('Cutting the pizza into square slices')
| 30.5 | 58 | 0.661885 |
794154f50a1146dc3da93628f68025f25ca71000 | 2,322 | py | Python | examples/classfiler.py | ParanoiaSYT/Qulab-backup | 09ec5457145b3789d4c1ac02c43dd3e6dfafc96f | [
"MIT"
] | null | null | null | examples/classfiler.py | ParanoiaSYT/Qulab-backup | 09ec5457145b3789d4c1ac02c43dd3e6dfafc96f | [
"MIT"
] | null | null | null | examples/classfiler.py | ParanoiaSYT/Qulab-backup | 09ec5457145b3789d4c1ac02c43dd3e6dfafc96f | [
"MIT"
] | null | null | null | """
Classfiler
Acquire the state of qubits by support vector classification.
"""
import asyncio
from sklearn import svm
from qulab.sugar import getDHT, mount
from qulab.utils import ShutdownBlocker
class Classfiler:
def __init__(self, N=100):
self.clfs = [[svm.SVC(kernel='linear'), 0] for i in range(N)]
    def fit(self, s0, s1, index=0):
s0, s1 = s0[index], s1[index]
y = [0] * len(s0) + [1] * len(s1)
x = list(s0) + list(s1)
self.clfs[index][0].fit(x, y)
self.clfs[index][1] = self.clfs[index][0].score(x, y)
def predict(self, data):
"""
data: Iterable
        data[0], data[1], ... are the measurement data for Q0, Q1, ... respectively
"""
ret = 0
for i, ((clf, _), s) in enumerate(zip(self.clfs, data)):
ret += clf.predict(s) << i
return ret
async def start(args):
dht = await getDHT()
dev = Classfiler(args.num)
await mount(dev, args.name)
await asyncio.sleep(1)
print(title, dht.port, await dht.get(args.name))
def main(args):
loop = asyncio.get_event_loop()
asyncio.ensure_future(start(args), loop=loop)
try:
loop.run_forever()
finally:
loop.close()
if __name__ == '__main__':
import argparse
import subprocess
import sys
parser = argparse.ArgumentParser(description='Run an classfiler server.')
parser.add_argument('--name',
'-n',
default='Classfiler',
help='server name')
parser.add_argument('--num',
'-N',
type=int,
default=100,
help='number of qubits')
parser.add_argument('--no-retry', action='store_true', help='no retry')
args = parser.parse_args()
title = f'{args.name}'
if args.no_retry:
main(args)
else:
with ShutdownBlocker(title):
cmd = [
sys.executable, __file__, '-n', args.name, '-N', args.num,
'--no-retry'
]
while True:
proc = subprocess.Popen(cmd)
proc.wait()
if proc.returncode == 0:
break
| 26.386364 | 78 | 0.502584 |
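A rough usage sketch for the Classfiler above. The data layout is an assumption on my part: s0[i] and s1[i] are taken to be 2-D feature samples measured on qubit i when it was prepared in state 0 and state 1 respectively; none of the values come from the repository.

import numpy as np
clf = Classfiler(N=2)
# two qubits, 50 calibration shots each, well-separated toy clusters
s0 = [np.random.randn(50, 2), np.random.randn(50, 2)]
s1 = [np.random.randn(50, 2) + 3.0, np.random.randn(50, 2) + 3.0]
for i in range(2):
    clf.fit(s0, s1, index=i)
# classify 5 new shots per qubit; returns one integer state code per shot
codes = clf.predict([np.random.randn(5, 2), np.random.randn(5, 2) + 3.0])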
7941564b740bcf755a18a016b1c654423a1ddaf8 | 875 | py | Python | app/core/migrations/0006_auto_20200509_1055.py | FullCycleRoid/poll_app | cf53823e02cc67f6722f785d99ef227d341a6eb4 | [
"MIT"
] | null | null | null | app/core/migrations/0006_auto_20200509_1055.py | FullCycleRoid/poll_app | cf53823e02cc67f6722f785d99ef227d341a6eb4 | [
"MIT"
] | null | null | null | app/core/migrations/0006_auto_20200509_1055.py | FullCycleRoid/poll_app | cf53823e02cc67f6722f785d99ef227d341a6eb4 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.15 on 2020-05-09 10:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0005_auto_20200508_1842'),
]
operations = [
migrations.RenameModel(
old_name='Vote',
new_name='Choice',
),
migrations.RenameField(
model_name='poll',
old_name='is_published',
new_name='is_active',
),
migrations.RenameField(
model_name='poll',
old_name='title',
new_name='name',
),
migrations.AddField(
model_name='question',
name='question_type',
field=models.CharField(choices=[('1', 'Text answer'), ('2', 'One choice option'), ('3', 'Multiple choice option')], default=1, max_length=1),
),
]
| 26.515152 | 153 | 0.545143 |
794157847fcd8df463598493706175785cbfb4f3 | 89 | py | Python | business_house/game/urls.py | vikasgoyal09/flp_07_2021 | f255091f131b93a727ba8f4bd2cee6f65ce47bad | [
"Apache-2.0"
] | null | null | null | business_house/game/urls.py | vikasgoyal09/flp_07_2021 | f255091f131b93a727ba8f4bd2cee6f65ce47bad | [
"Apache-2.0"
] | null | null | null | business_house/game/urls.py | vikasgoyal09/flp_07_2021 | f255091f131b93a727ba8f4bd2cee6f65ce47bad | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
#Enter url paths
] | 12.714286 | 28 | 0.707865 |
7941579de60caf0bdcc0a4821f2a6fd2646c3ee3 | 2,606 | py | Python | tests/srcga/main.py | chriscoombs/faropt | 46e3037959f0e3163567db2438ab700844dbebfe | [
"MIT-0"
] | 14 | 2020-08-24T21:34:35.000Z | 2022-03-01T06:21:22.000Z | tests/srcga/main.py | chriscoombs/faropt | 46e3037959f0e3163567db2438ab700844dbebfe | [
"MIT-0"
] | 1 | 2021-04-03T09:57:04.000Z | 2022-02-23T15:29:14.000Z | tests/srcga/main.py | chriscoombs/faropt | 46e3037959f0e3163567db2438ab700844dbebfe | [
"MIT-0"
] | 3 | 2021-02-16T23:23:00.000Z | 2022-03-12T21:39:33.000Z |
# This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import array
import random
import json
import numpy
from deap import algorithms
from deap import base
from deap import creator
from deap import tools
from utils import *
# gr*.json contains the distance map in list of list style in JSON format
# Optimal solutions are : gr17 = 2085, gr24 = 1272, gr120 = 6942
with open("/tmp/tsp/gr17.json", "r") as tsp_data:
tsp = json.load(tsp_data)
distance_map = tsp["DistanceMatrix"]
IND_SIZE = tsp["TourSize"]
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", array.array, typecode='i', fitness=creator.FitnessMin)
toolbox = base.Toolbox()
# Attribute generator
toolbox.register("indices", random.sample, range(IND_SIZE), IND_SIZE)
# Structure initializers
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.indices)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
def evalTSP(individual):
distance = distance_map[individual[-1]][individual[0]]
for gene1, gene2 in zip(individual[0:-1], individual[1:]):
distance += distance_map[gene1][gene2]
return distance,
toolbox.register("mate", tools.cxPartialyMatched)
toolbox.register("mutate", tools.mutShuffleIndexes, indpb=0.05)
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("evaluate", evalTSP)
def main():
random.seed(169)
pop = toolbox.population(n=300)
hof = tools.HallOfFame(1)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
algorithms.eaSimple(pop, toolbox, 0.7, 0.2, 40, stats=stats,
halloffame=hof)
log_metric('fstar',int(hof[0].fitness.values[0]))
return pop, stats, hof
if __name__ == "__main__":
main() | 32.575 | 86 | 0.711435 |
794158787b27c79aa5b47b70eb9057e31bda0337 | 12,905 | py | Python | sdk/python/pulumi_aws_native/codepipeline/custom_action_type.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 29 | 2021-09-30T19:32:07.000Z | 2022-03-22T21:06:08.000Z | sdk/python/pulumi_aws_native/codepipeline/custom_action_type.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 232 | 2021-09-30T19:26:26.000Z | 2022-03-31T23:22:06.000Z | sdk/python/pulumi_aws_native/codepipeline/custom_action_type.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 4 | 2021-11-10T19:42:01.000Z | 2022-02-05T10:15:49.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['CustomActionTypeArgs', 'CustomActionType']
@pulumi.input_type
class CustomActionTypeArgs:
def __init__(__self__, *,
category: pulumi.Input[str],
input_artifact_details: pulumi.Input['CustomActionTypeArtifactDetailsArgs'],
output_artifact_details: pulumi.Input['CustomActionTypeArtifactDetailsArgs'],
provider: pulumi.Input[str],
version: pulumi.Input[str],
configuration_properties: Optional[pulumi.Input[Sequence[pulumi.Input['CustomActionTypeConfigurationPropertiesArgs']]]] = None,
settings: Optional[pulumi.Input['CustomActionTypeSettingsArgs']] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input['CustomActionTypeTagArgs']]]] = None):
"""
The set of arguments for constructing a CustomActionType resource.
"""
pulumi.set(__self__, "category", category)
pulumi.set(__self__, "input_artifact_details", input_artifact_details)
pulumi.set(__self__, "output_artifact_details", output_artifact_details)
pulumi.set(__self__, "provider", provider)
pulumi.set(__self__, "version", version)
if configuration_properties is not None:
pulumi.set(__self__, "configuration_properties", configuration_properties)
if settings is not None:
pulumi.set(__self__, "settings", settings)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def category(self) -> pulumi.Input[str]:
return pulumi.get(self, "category")
@category.setter
def category(self, value: pulumi.Input[str]):
pulumi.set(self, "category", value)
@property
@pulumi.getter(name="inputArtifactDetails")
def input_artifact_details(self) -> pulumi.Input['CustomActionTypeArtifactDetailsArgs']:
return pulumi.get(self, "input_artifact_details")
@input_artifact_details.setter
def input_artifact_details(self, value: pulumi.Input['CustomActionTypeArtifactDetailsArgs']):
pulumi.set(self, "input_artifact_details", value)
@property
@pulumi.getter(name="outputArtifactDetails")
def output_artifact_details(self) -> pulumi.Input['CustomActionTypeArtifactDetailsArgs']:
return pulumi.get(self, "output_artifact_details")
@output_artifact_details.setter
def output_artifact_details(self, value: pulumi.Input['CustomActionTypeArtifactDetailsArgs']):
pulumi.set(self, "output_artifact_details", value)
@property
@pulumi.getter
def provider(self) -> pulumi.Input[str]:
return pulumi.get(self, "provider")
@provider.setter
def provider(self, value: pulumi.Input[str]):
pulumi.set(self, "provider", value)
@property
@pulumi.getter
def version(self) -> pulumi.Input[str]:
return pulumi.get(self, "version")
@version.setter
def version(self, value: pulumi.Input[str]):
pulumi.set(self, "version", value)
@property
@pulumi.getter(name="configurationProperties")
def configuration_properties(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['CustomActionTypeConfigurationPropertiesArgs']]]]:
return pulumi.get(self, "configuration_properties")
@configuration_properties.setter
def configuration_properties(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['CustomActionTypeConfigurationPropertiesArgs']]]]):
pulumi.set(self, "configuration_properties", value)
@property
@pulumi.getter
def settings(self) -> Optional[pulumi.Input['CustomActionTypeSettingsArgs']]:
return pulumi.get(self, "settings")
@settings.setter
def settings(self, value: Optional[pulumi.Input['CustomActionTypeSettingsArgs']]):
pulumi.set(self, "settings", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['CustomActionTypeTagArgs']]]]:
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['CustomActionTypeTagArgs']]]]):
pulumi.set(self, "tags", value)
warnings.warn("""CustomActionType is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""", DeprecationWarning)
class CustomActionType(pulumi.CustomResource):
warnings.warn("""CustomActionType is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""", DeprecationWarning)
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
category: Optional[pulumi.Input[str]] = None,
configuration_properties: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CustomActionTypeConfigurationPropertiesArgs']]]]] = None,
input_artifact_details: Optional[pulumi.Input[pulumi.InputType['CustomActionTypeArtifactDetailsArgs']]] = None,
output_artifact_details: Optional[pulumi.Input[pulumi.InputType['CustomActionTypeArtifactDetailsArgs']]] = None,
provider: Optional[pulumi.Input[str]] = None,
settings: Optional[pulumi.Input[pulumi.InputType['CustomActionTypeSettingsArgs']]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CustomActionTypeTagArgs']]]]] = None,
version: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Resource Type definition for AWS::CodePipeline::CustomActionType
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: CustomActionTypeArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Resource Type definition for AWS::CodePipeline::CustomActionType
:param str resource_name: The name of the resource.
:param CustomActionTypeArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(CustomActionTypeArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
category: Optional[pulumi.Input[str]] = None,
configuration_properties: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CustomActionTypeConfigurationPropertiesArgs']]]]] = None,
input_artifact_details: Optional[pulumi.Input[pulumi.InputType['CustomActionTypeArtifactDetailsArgs']]] = None,
output_artifact_details: Optional[pulumi.Input[pulumi.InputType['CustomActionTypeArtifactDetailsArgs']]] = None,
provider: Optional[pulumi.Input[str]] = None,
settings: Optional[pulumi.Input[pulumi.InputType['CustomActionTypeSettingsArgs']]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CustomActionTypeTagArgs']]]]] = None,
version: Optional[pulumi.Input[str]] = None,
__props__=None):
pulumi.log.warn("""CustomActionType is deprecated: CustomActionType is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""")
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = CustomActionTypeArgs.__new__(CustomActionTypeArgs)
if category is None and not opts.urn:
raise TypeError("Missing required property 'category'")
__props__.__dict__["category"] = category
__props__.__dict__["configuration_properties"] = configuration_properties
if input_artifact_details is None and not opts.urn:
raise TypeError("Missing required property 'input_artifact_details'")
__props__.__dict__["input_artifact_details"] = input_artifact_details
if output_artifact_details is None and not opts.urn:
raise TypeError("Missing required property 'output_artifact_details'")
__props__.__dict__["output_artifact_details"] = output_artifact_details
if provider is None and not opts.urn:
raise TypeError("Missing required property 'provider'")
__props__.__dict__["provider"] = provider
__props__.__dict__["settings"] = settings
__props__.__dict__["tags"] = tags
if version is None and not opts.urn:
raise TypeError("Missing required property 'version'")
__props__.__dict__["version"] = version
super(CustomActionType, __self__).__init__(
'aws-native:codepipeline:CustomActionType',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'CustomActionType':
"""
Get an existing CustomActionType resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = CustomActionTypeArgs.__new__(CustomActionTypeArgs)
__props__.__dict__["category"] = None
__props__.__dict__["configuration_properties"] = None
__props__.__dict__["input_artifact_details"] = None
__props__.__dict__["output_artifact_details"] = None
__props__.__dict__["provider"] = None
__props__.__dict__["settings"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["version"] = None
return CustomActionType(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def category(self) -> pulumi.Output[str]:
return pulumi.get(self, "category")
@property
@pulumi.getter(name="configurationProperties")
def configuration_properties(self) -> pulumi.Output[Optional[Sequence['outputs.CustomActionTypeConfigurationProperties']]]:
return pulumi.get(self, "configuration_properties")
@property
@pulumi.getter(name="inputArtifactDetails")
def input_artifact_details(self) -> pulumi.Output['outputs.CustomActionTypeArtifactDetails']:
return pulumi.get(self, "input_artifact_details")
@property
@pulumi.getter(name="outputArtifactDetails")
def output_artifact_details(self) -> pulumi.Output['outputs.CustomActionTypeArtifactDetails']:
return pulumi.get(self, "output_artifact_details")
@property
@pulumi.getter
def provider(self) -> pulumi.Output[str]:
return pulumi.get(self, "provider")
@property
@pulumi.getter
def settings(self) -> pulumi.Output[Optional['outputs.CustomActionTypeSettings']]:
return pulumi.get(self, "settings")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Sequence['outputs.CustomActionTypeTag']]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter
def version(self) -> pulumi.Output[str]:
return pulumi.get(self, "version")
| 47.09854 | 202 | 0.682294 |
79415895cb81db5ee25a0aef3b0ace8d97d7b060 | 910 | py | Python | creational/singleton/singleton_1.py | samuelsoaress/design-patterns-python | a8737d205f3dc78b48e847d98fb702668c187a6f | [
"MIT"
] | 16 | 2020-06-29T17:32:11.000Z | 2022-03-29T20:22:01.000Z | creational/singleton/singleton_1.py | samuelsoaress/design-patterns-python | a8737d205f3dc78b48e847d98fb702668c187a6f | [
"MIT"
] | null | null | null | creational/singleton/singleton_1.py | samuelsoaress/design-patterns-python | a8737d205f3dc78b48e847d98fb702668c187a6f | [
"MIT"
] | 14 | 2020-07-21T14:31:24.000Z | 2022-03-04T20:26:33.000Z | """
The Singleton's intent is to ensure that a class has only one
instance and to provide a global point of access to it.
When discussing which patterns to drop, we found
that we still love them all.
(Not really—I'm in favor of dropping Singleton.
Its use is almost always a design smell.)
- Erich Gamma, in an interview for informIT
http://www.informit.com/articles/article.aspx?p=1404056
"""
class AppSettings:
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super().__new__(cls, *args, **kwargs)
return cls._instance
def __init__(self) -> None:
""" O init será chamado todas as vezes """
        self.tema = 'The dark theme'
self.font = '18px'
if __name__ == "__main__":
as1 = AppSettings()
    as1.tema = 'The light theme'
print(as1.tema)
as2 = AppSettings()
print(as1.tema)
| 26 | 67 | 0.665934 |
7941597e40e7b431b13c35e58a88b00b669f1c2b | 464 | py | Python | elasticsearch_django/signals.py | octoenergy/elasticsearch-django | 4b24fb8bb5729d950c8d56740f8be0acb336de1c | [
"MIT"
] | 87 | 2016-09-04T06:24:04.000Z | 2022-02-01T01:43:47.000Z | elasticsearch_django/signals.py | octoenergy/elasticsearch-django | 4b24fb8bb5729d950c8d56740f8be0acb336de1c | [
"MIT"
] | 28 | 2016-12-09T22:48:29.000Z | 2021-04-07T11:01:34.000Z | elasticsearch_django/signals.py | octoenergy/elasticsearch-django | 4b24fb8bb5729d950c8d56740f8be0acb336de1c | [
"MIT"
] | 31 | 2017-01-30T12:31:47.000Z | 2022-02-03T17:22:03.000Z | import django.dispatch
# signal fired just before calling model.index_search_document
# providing_args=["instance", "index"]
pre_index = django.dispatch.Signal()
# signal fired just before calling model.update_search_document
# providing_args=["instance", "index", "update_fields"]
pre_update = django.dispatch.Signal()
# signal fired just before calling model.delete_search_document
# providing_args=["instance", "index"]
pre_delete = django.dispatch.Signal()
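# Usage sketch (assumed caller code, not part of this module): receivers attach
# through the standard django.dispatch API, for example:
#
#     from django.dispatch import receiver
#     from elasticsearch_django.signals import pre_index
#
#     @receiver(pre_index)
#     def on_pre_index(sender, **kwargs):
#         ...  # kwargs include "instance" and "index", per the notes above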
| 33.142857 | 63 | 0.790948 |
794159b0d458f8020b3593d2b82e9c6557a1ab21 | 8,518 | py | Python | GasBotty/models/googlenet.py | GreenCUBIC/GasBotty | 158f5991201c80bf4cbbbb9deabc9954ff19bbb1 | [
"MIT"
] | null | null | null | GasBotty/models/googlenet.py | GreenCUBIC/GasBotty | 158f5991201c80bf4cbbbb9deabc9954ff19bbb1 | [
"MIT"
] | null | null | null | GasBotty/models/googlenet.py | GreenCUBIC/GasBotty | 158f5991201c80bf4cbbbb9deabc9954ff19bbb1 | [
"MIT"
] | null | null | null | import warnings
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import load_state_dict_from_url
__all__ = ['GoogLeNet', 'googlenet']
model_urls = {
# GoogLeNet ported from TensorFlow
'googlenet': 'https://download.pytorch.org/models/googlenet-1378be20.pth',
}
_GoogLeNetOutputs = namedtuple('GoogLeNetOutputs', ['logits', 'aux_logits2', 'aux_logits1'])
def googlenet(pretrained=False, progress=True, **kwargs):
r"""GoogLeNet (Inception v1) model architecture from
`"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
aux_logits (bool): If True, adds two auxiliary branches that can improve training.
Default: *False* when pretrained is True otherwise *True*
transform_input (bool): If True, preprocesses the input according to the method with which it
was trained on ImageNet. Default: *False*
"""
if pretrained:
if 'transform_input' not in kwargs:
kwargs['transform_input'] = True
if 'aux_logits' not in kwargs:
kwargs['aux_logits'] = False
if kwargs['aux_logits']:
warnings.warn('auxiliary heads in the pretrained googlenet model are NOT pretrained, '
'so make sure to train them')
original_aux_logits = kwargs['aux_logits']
kwargs['aux_logits'] = True
kwargs['init_weights'] = False
model = GoogLeNet(**kwargs)
state_dict = load_state_dict_from_url(model_urls['googlenet'],
progress=progress)
model.load_state_dict(state_dict)
if not original_aux_logits:
model.aux_logits = False
del model.aux1, model.aux2
return model
return GoogLeNet(**kwargs)
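# Usage sketch (assumes the usual torchvision-style call; kept as a comment so the
# module itself stays import-safe):
#
#     model = googlenet(pretrained=True, progress=True)
#     model.eval()
#     logits = model(torch.randn(1, 3, 224, 224))  # -> N x 1000, matching the shape notes in forward()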
class GoogLeNet(nn.Module):
def __init__(self, num_classes=1000, aux_logits=True, transform_input=False, init_weights=True):
super(GoogLeNet, self).__init__()
self.aux_logits = aux_logits
self.transform_input = transform_input
self.conv1 = BasicConv2d(3, 64, kernel_size=7, stride=2, padding=3)
self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.conv2 = BasicConv2d(64, 64, kernel_size=1)
self.conv3 = BasicConv2d(64, 192, kernel_size=3, padding=1)
self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.inception3a = Inception(192, 64, 96, 128, 16, 32, 32)
self.inception3b = Inception(256, 128, 128, 192, 32, 96, 64)
self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.inception4a = Inception(480, 192, 96, 208, 16, 48, 64)
self.inception4b = Inception(512, 160, 112, 224, 24, 64, 64)
self.inception4c = Inception(512, 128, 128, 256, 24, 64, 64)
self.inception4d = Inception(512, 112, 144, 288, 32, 64, 64)
self.inception4e = Inception(528, 256, 160, 320, 32, 128, 128)
self.maxpool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.inception5a = Inception(832, 256, 160, 320, 32, 128, 128)
self.inception5b = Inception(832, 384, 192, 384, 48, 128, 128)
if aux_logits:
self.aux1 = InceptionAux(512, num_classes)
self.aux2 = InceptionAux(528, num_classes)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout(0.2)
self.fc = nn.Linear(1024, num_classes)
if init_weights:
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
import scipy.stats as stats
X = stats.truncnorm(-2, 2, scale=0.01)
values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
values = values.view(m.weight.size())
with torch.no_grad():
m.weight.copy_(values)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
if self.transform_input:
x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
# N x 3 x 224 x 224
x = self.conv1(x)
# N x 64 x 112 x 112
x = self.maxpool1(x)
# N x 64 x 56 x 56
x = self.conv2(x)
# N x 64 x 56 x 56
x = self.conv3(x)
# N x 192 x 56 x 56
x = self.maxpool2(x)
# N x 192 x 28 x 28
x = self.inception3a(x)
# N x 256 x 28 x 28
x = self.inception3b(x)
# N x 480 x 28 x 28
x = self.maxpool3(x)
# N x 480 x 14 x 14
x = self.inception4a(x)
# N x 512 x 14 x 14
if self.training and self.aux_logits:
aux1 = self.aux1(x)
x = self.inception4b(x)
# N x 512 x 14 x 14
x = self.inception4c(x)
# N x 512 x 14 x 14
x = self.inception4d(x)
# N x 528 x 14 x 14
if self.training and self.aux_logits:
aux2 = self.aux2(x)
x = self.inception4e(x)
# N x 832 x 14 x 14
x = self.maxpool4(x)
# N x 832 x 7 x 7
x = self.inception5a(x)
# N x 832 x 7 x 7
x = self.inception5b(x)
# N x 1024 x 7 x 7
x = self.avgpool(x)
# N x 1024 x 1 x 1
x = torch.flatten(x, 1)
# N x 1024
x = self.dropout(x)
x = self.fc(x)
# N x 1000 (num_classes)
if self.training and self.aux_logits:
return _GoogLeNetOutputs(x, aux2, aux1)
return x
class Inception(nn.Module):
def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
super(Inception, self).__init__()
self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1)
self.branch2 = nn.Sequential(
BasicConv2d(in_channels, ch3x3red, kernel_size=1),
BasicConv2d(ch3x3red, ch3x3, kernel_size=3, padding=1)
)
self.branch3 = nn.Sequential(
BasicConv2d(in_channels, ch5x5red, kernel_size=1),
BasicConv2d(ch5x5red, ch5x5, kernel_size=3, padding=1)
)
self.branch4 = nn.Sequential(
nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=True),
BasicConv2d(in_channels, pool_proj, kernel_size=1)
)
def forward(self, x):
branch1 = self.branch1(x)
branch2 = self.branch2(x)
branch3 = self.branch3(x)
branch4 = self.branch4(x)
outputs = [branch1, branch2, branch3, branch4]
return torch.cat(outputs, 1)
class InceptionAux(nn.Module):
def __init__(self, in_channels, num_classes):
super(InceptionAux, self).__init__()
self.conv = BasicConv2d(in_channels, 128, kernel_size=1)
self.fc1 = nn.Linear(2048, 1024)
self.fc2 = nn.Linear(1024, num_classes)
def forward(self, x):
# aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14
x = F.adaptive_avg_pool2d(x, (4, 4))
# aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4
x = self.conv(x)
# N x 128 x 4 x 4
x = torch.flatten(x, 1)
# N x 2048
x = F.relu(self.fc1(x), inplace=True)
# N x 1024
x = F.dropout(x, 0.7, training=self.training)
# N x 1024
x = self.fc2(x)
# N x 1000 (num_classes)
return x
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return F.relu(x, inplace=True)
| 36.401709 | 102 | 0.570439 |
79415a90ab516f79360812d00d703d26f2b66c3f | 2,024 | py | Python | openstack_dashboard/dashboards/project/access_and_security/api_access/tests.py | ameoba/horizon | ff9e367c98a8bb79f10914abffaaa04b0a461819 | [
"Apache-2.0"
] | 2 | 2019-12-29T09:20:13.000Z | 2020-01-01T13:12:34.000Z | openstack_dashboard/dashboards/project/access_and_security/api_access/tests.py | yongquanf/horizon | 9aad7fd6f66588fed7c27b720642e47a4a12854b | [
"Apache-2.0"
] | 1 | 2015-03-12T01:03:44.000Z | 2015-03-12T01:03:44.000Z | openstack_dashboard/dashboards/project/access_and_security/api_access/tests.py | yongquanf/horizon | 9aad7fd6f66588fed7c27b720642e47a4a12854b | [
"Apache-2.0"
] | 4 | 2015-05-05T08:17:28.000Z | 2020-02-05T10:47:06.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse # noqa
from django.http import HttpRequest # noqa
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
EC2_URL = reverse("horizon:project:access_and_security:api_access:ec2")
class APIAccessTests(test.TestCase):
def test_ec2_download_view(self):
creds = self.ec2.first()
cert = self.certs.first()
self.mox.StubOutWithMock(api.keystone, "list_ec2_credentials")
self.mox.StubOutWithMock(api.nova, "get_x509_credentials")
self.mox.StubOutWithMock(api.nova, "get_x509_root_certificate")
self.mox.StubOutWithMock(api.keystone, "create_ec2_credentials")
api.keystone.list_ec2_credentials(IsA(HttpRequest), self.user.id) \
.AndReturn([])
api.nova.get_x509_credentials(IsA(HttpRequest)).AndReturn(cert)
api.nova.get_x509_root_certificate(IsA(HttpRequest)) \
.AndReturn(cert)
api.keystone.create_ec2_credentials(IsA(HttpRequest),
self.user.id,
self.tenant.id).AndReturn(creds)
self.mox.ReplayAll()
res = self.client.get(EC2_URL)
self.assertEqual(res.status_code, 200)
self.assertEqual(res['content-type'], 'application/zip')
| 38.923077 | 78 | 0.685771 |
79415bcf24662c2027f8821ada37e500c3b6dc7c | 3,389 | py | Python | bridge/bridge_vxlan.py | ZirakZaheer/ZeroTrust-Ebpf | b0f1968e5c9fa307a84b62a76f02847845a458db | [
"Apache-2.0"
] | 2 | 2018-11-17T08:16:47.000Z | 2022-01-21T16:45:46.000Z | bridge/bridge_vxlan.py | ZirakZaheer/ZeroTrust-Ebpf | b0f1968e5c9fa307a84b62a76f02847845a458db | [
"Apache-2.0"
] | null | null | null | bridge/bridge_vxlan.py | ZirakZaheer/ZeroTrust-Ebpf | b0f1968e5c9fa307a84b62a76f02847845a458db | [
"Apache-2.0"
] | null | null | null | from bcc import BPF
from builtins import input
from ctypes import c_int
from pyroute2 import IPRoute, IPDB
from simulation import Simulation
from netaddr import IPAddress
ipr = IPRoute()
ipdb = IPDB(nl=ipr)
num_hosts = 2
null = open("/dev/null", "w")
class BridgeSimulation(Simulation):
def __init__(self, ipdb):
super(BridgeSimulation, self).__init__(ipdb)
def start(self):
# Ingress = attached to tc ingress class on bridge
        # Egress = attached to tc egress class on namespace (outside) interface
# Loading bpf functions/maps.
bridge_code = BPF(src_file="bridge.c")
ingress_fn = bridge_code.load_func("handle_ingress", BPF.SCHED_CLS)
egress_fn = bridge_code.load_func("handle_egress", BPF.SCHED_CLS)
mac2host = bridge_code.get_table("mac2host")
conf = bridge_code.get_table("conf")
        tunkey2if = bridge_code.get_table("tunkey2if")
        if2tunkey = bridge_code.get_table("if2tunkey")
# Creating dummy interface behind which ebpf code will do bridging.
ebpf_bridge = ipdb.create(ifname="ebpf_br", kind="vxlan",vxlan_id=0,vxlan_link=ipdb.interfaces.eno1,vxlan_port=4789,vxlan_collect_metadata=True).up().commit()
#ebpf_bridge = ipdb.create(ifname="ebpf_br", kind="dummy").up().commit()
ipr.tc("add", "ingress", ebpf_bridge.index, "ffff:")
ipr.tc("add-filter", "bpf", ebpf_bridge.index, ":1", fd=egress_fn.fd,
name=egress_fn.name, parent="ffff:", action="drop", classid=1)
# Passing bridge index number to dataplane module
conf[c_int(0)] = c_int(ebpf_bridge.index)
        # Set up namespaces and their interfaces for demonstration.
host_info = []
ipaddrs = []
for i in range(0, num_hosts):
print("Launching host %i of %i" % (i + 1, num_hosts))
ipaddr = "172.16.1.%d/24" % (100 + i)
ipaddrs.append(ipaddr)
host_info.append(self._create_ns("host%d" % i, ipaddr=ipaddr,
disable_ipv6=True))
# For each namespace that want to connect to the ebpf bridge
# We link it to the dummy interface behind which we run ebpf learning/forwarding code
# logically: Attaching individual namespace interface into the ebpf bridge.
# programmatically: running ebpf engress code on each interface
temp_index=1
for host in host_info:
for i in range(0, num_hosts):
                if i != host[1].index:  # assuming interface indexes line up with host numbers 0..num_hosts-1
                    if2tunkey_key = if2tunkey.Key(host[1].index)
                    # netaddr.IPAddress expects a plain address, so strip the /24 prefix length
                    if2tunkey_remote_ipv4 = IPAddress(ipaddrs[i].split('/')[0])
                    if2tunkey[if2tunkey_key] = if2tunkey_remote_ipv4
ipr.tc("add", "ingress", host[1].index, "ffff:")
ipr.tc("add-filter", "bpf", host[1].index, ":1", fd=ingress_fn.fd,
name=ingress_fn.name, parent="ffff:", action="drop", classid=1)
# Passing namespace interface info to dataplane module.
conf[c_int(temp_index)] = c_int(host[1].index)
temp_index=temp_index+1
try:
sim = BridgeSimulation(ipdb)
sim.start()
input("Press enter to quit:")
except Exception as e:
    print(str(e))
if "sim" in locals():
for p in sim.processes: p.kill(); p.wait(); p.release()
finally:
if "ebpf_br" in ipdb.interfaces: ipdb.interfaces["ebpf_br"].remove().commit()
if "sim" in locals(): sim.release()
ipdb.release()
null.close()
| 41.329268 | 159 | 0.65152 |
79415c4c07b97469f459af099f04cba937832ddf | 304 | py | Python | setup.py | EberhardtRafael/Group-Theory-Calculations | 1c466720c26be1f311f0b2aa737bbd4acfa9b401 | [
"MIT"
] | null | null | null | setup.py | EberhardtRafael/Group-Theory-Calculations | 1c466720c26be1f311f0b2aa737bbd4acfa9b401 | [
"MIT"
] | null | null | null | setup.py | EberhardtRafael/Group-Theory-Calculations | 1c466720c26be1f311f0b2aa737bbd4acfa9b401 | [
"MIT"
] | null | null | null | from distutils.core import setup, Extension
setup(name = "YoungTab",
version = "1.0",
long_description = "blablabla",
url = "",
author = "blabla",
author_email = "bla@bla",
license = "",
ext_modules = [Extension("YoungTab", sources=["YoungTableauxBib.c"])],
)
| 25.333333 | 76 | 0.592105 |
79415d46f26cd43d82213d66eb092b74e3586753 | 6,655 | py | Python | test/functional/p2p_invalid_tx.py | aentan/ain | 1d6db33159de1c8c7930d29a0ab0902f42b728c1 | [
"MIT"
] | null | null | null | test/functional/p2p_invalid_tx.py | aentan/ain | 1d6db33159de1c8c7930d29a0ab0902f42b728c1 | [
"MIT"
] | null | null | null | test/functional/p2p_invalid_tx.py | aentan/ain | 1d6db33159de1c8c7930d29a0ab0902f42b728c1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid transactions.
In this test we connect to one node over p2p, and test tx requests."""
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
)
from test_framework.mininode import P2PDataStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
wait_until,
)
from data import invalid_txs
class InvalidTxRequestTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[
"-acceptnonstdtxn=1",
]]
self.setup_clean_chain = True
def bootstrap_p2p(self, *, num_connections=1):
"""Add a P2P connection to the node.
Helper to connect and wait for version handshake."""
for _ in range(num_connections):
self.nodes[0].add_p2p_connection(P2PDataStore())
def reconnect_p2p(self, **kwargs):
"""Tear down and bootstrap the P2P connection to the node.
The node gets disconnected several times in this test. This helper
method reconnects the p2p and restarts the network thread."""
self.nodes[0].disconnect_p2ps()
self.bootstrap_p2p(**kwargs)
def run_test(self):
node = self.nodes[0] # convenience reference to the node
self.bootstrap_p2p() # Add one p2p connection to the node
best_block = self.nodes[0].getbestblockhash()
tip = int(best_block, 16)
best_block_time = self.nodes[0].getblock(best_block)['time']
block_time = best_block_time + 1
self.log.info("Create a new block with an anyone-can-spend coinbase.")
height = 1
block = create_block(tip, create_coinbase(height), block_time)
block.solve()
# Save the coinbase for later
block1 = block
tip = block.sha256
node.p2p.send_blocks_and_test([block], node, success=True)
self.log.info("Mature the block.")
self.nodes[0].generate(100)
# Iterate through a list of known invalid transaction types, ensuring each is
# rejected. Some are consensus invalid and some just violate policy.
for BadTxTemplate in invalid_txs.iter_all_templates():
self.log.info("Testing invalid transaction: %s", BadTxTemplate.__name__)
template = BadTxTemplate(spend_block=block1)
tx = template.get_tx()
node.p2p.send_txs_and_test(
[tx], node, success=False,
expect_disconnect=template.expect_disconnect,
reject_reason=template.reject_reason,
)
if template.expect_disconnect:
self.log.info("Reconnecting to peer")
self.reconnect_p2p()
# Make two p2p connections to provide the node with orphans
# * p2ps[0] will send valid orphan txs (one with low fee)
# * p2ps[1] will send an invalid orphan tx (and is later disconnected for that)
self.reconnect_p2p(num_connections=2)
self.log.info('Test orphan transaction handling ... ')
# Create a root transaction that we withhold until all dependent transactions
# are sent out and in the orphan cache
SCRIPT_PUB_KEY_OP_TRUE = b'\x51\x75' * 15 + b'\x51'
tx_withhold = CTransaction()
tx_withhold.vin.append(CTxIn(outpoint=COutPoint(block1.vtx[0].sha256, 0)))
tx_withhold.vout.append(CTxOut(nValue=50 * COIN - 12000, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
tx_withhold.calc_sha256()
# Our first orphan tx with some outputs to create further orphan txs
tx_orphan_1 = CTransaction()
tx_orphan_1.vin.append(CTxIn(outpoint=COutPoint(tx_withhold.sha256, 0)))
tx_orphan_1.vout = [CTxOut(nValue=10 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE)] * 3
tx_orphan_1.calc_sha256()
# A valid transaction with low fee
tx_orphan_2_no_fee = CTransaction()
tx_orphan_2_no_fee.vin.append(CTxIn(outpoint=COutPoint(tx_orphan_1.sha256, 0)))
tx_orphan_2_no_fee.vout.append(CTxOut(nValue=10 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
# A valid transaction with sufficient fee
tx_orphan_2_valid = CTransaction()
tx_orphan_2_valid.vin.append(CTxIn(outpoint=COutPoint(tx_orphan_1.sha256, 1)))
tx_orphan_2_valid.vout.append(CTxOut(nValue=10 * COIN - 12000, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
tx_orphan_2_valid.calc_sha256()
# An invalid transaction with negative fee
tx_orphan_2_invalid = CTransaction()
tx_orphan_2_invalid.vin.append(CTxIn(outpoint=COutPoint(tx_orphan_1.sha256, 2)))
tx_orphan_2_invalid.vout.append(CTxOut(nValue=11 * COIN, scriptPubKey=SCRIPT_PUB_KEY_OP_TRUE))
self.log.info('Send the orphans ... ')
# Send valid orphan txs from p2ps[0]
node.p2p.send_txs_and_test([tx_orphan_1, tx_orphan_2_no_fee, tx_orphan_2_valid], node, success=False)
# Send invalid tx from p2ps[1]
node.p2ps[1].send_txs_and_test([tx_orphan_2_invalid], node, success=False)
assert_equal(0, node.getmempoolinfo()['size']) # Mempool should be empty
assert_equal(2, len(node.getpeerinfo())) # p2ps[1] is still connected
self.log.info('Send the withhold tx ... ')
with node.assert_debug_log(expected_msgs=["bad-txns-in-belowout"]):
node.p2p.send_txs_and_test([tx_withhold], node, success=True)
# Transactions that should end up in the mempool
expected_mempool = {
t.hash
for t in [
tx_withhold, # The transaction that is the root for all orphans
tx_orphan_1, # The orphan transaction that splits the coins
tx_orphan_2_valid, # The valid transaction (with sufficient fee)
]
}
# Transactions that do not end up in the mempool
        # tx_orphan_2_no_fee, because it has too low fee (p2ps[0] is not disconnected for relaying that tx)
        # tx_orphan_2_invalid, because it has negative fee (p2ps[1] is disconnected for relaying that tx)
wait_until(lambda: 1 == len(node.getpeerinfo()), timeout=12) # p2ps[1] is no longer connected
assert_equal(expected_mempool, set(node.getrawmempool()))
if __name__ == '__main__':
InvalidTxRequestTest().main()
| 43.214286 | 109 | 0.675733 |
79415e4a9b3906a41e2f172a12f58ca3d40110ab | 142 | py | Python | Myapi/apps.py | jyothiprakashpanaik/codeforces_api | a339253123770b89ba79e1d6fbcd9ca4f020716d | [
"MIT"
] | null | null | null | Myapi/apps.py | jyothiprakashpanaik/codeforces_api | a339253123770b89ba79e1d6fbcd9ca4f020716d | [
"MIT"
] | null | null | null | Myapi/apps.py | jyothiprakashpanaik/codeforces_api | a339253123770b89ba79e1d6fbcd9ca4f020716d | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class MyapiConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'Myapi'
| 20.285714 | 56 | 0.753521 |
79415ebbd25bc49636c071b5ff06512570ae468e | 467 | py | Python | python-dsa/combination-1.py | abhishek-parashar/Right-From-Scratch | e596344b0db95cfdeba876676885f062ef5f7c23 | [
"Apache-2.0"
] | null | null | null | python-dsa/combination-1.py | abhishek-parashar/Right-From-Scratch | e596344b0db95cfdeba876676885f062ef5f7c23 | [
"Apache-2.0"
] | null | null | null | python-dsa/combination-1.py | abhishek-parashar/Right-From-Scratch | e596344b0db95cfdeba876676885f062ef5f7c23 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 29 15:51:01 2020
@author: Abhishek Parashar
"""
def subsetutil(arr, subset, index,k,ans):
if len(subset)==k:
ans.append(subset[:])
return
for i in range(index,len(arr)):
subset.append(arr[i])
subsetutil(arr,subset,i+1,k,ans)
subset.pop(-1)
return ans
subset=[]
index=0
k=2
arr=[1,2,3]
ans=[]
subsetutil(arr, subset, index,k,ans)
print(*ans)
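# For arr = [1, 2, 3] and k = 2 the recursion yields every 2-element combination,
# so the script prints: [1, 2] [1, 3] [2, 3]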
| 18.68 | 42 | 0.56531 |
79416032d3e7ec70faef8bd7533eb6c7a6cc3a1b | 2,527 | py | Python | ensysmod/schemas/energy_commodity.py | NOWUM/EnSysMod | 18c8a2198db3510e667c1f0298d00a3dfcb0aab7 | [
"MIT"
] | 1 | 2021-12-10T19:41:01.000Z | 2021-12-10T19:41:01.000Z | ensysmod/schemas/energy_commodity.py | NOWUM/EnSysMod | 18c8a2198db3510e667c1f0298d00a3dfcb0aab7 | [
"MIT"
] | 83 | 2021-10-20T22:54:28.000Z | 2022-03-24T19:07:06.000Z | ensysmod/schemas/energy_commodity.py | NOWUM/EnSysMod | 18c8a2198db3510e667c1f0298d00a3dfcb0aab7 | [
"MIT"
] | null | null | null | from typing import Optional
from pydantic import BaseModel, Field, validator
from ensysmod.schemas import Dataset
from ensysmod.util import validators
class EnergyCommodityBase(BaseModel):
"""
Shared attributes for an energy commodity. Used as a base class for all schemas.
"""
name: str = Field(...,
description="The unique name of the energy commodity inside this dataset. "
"It is needed to add energy components of this specific commodity.",
example="Electricity")
unit: str = Field(...,
description="Unit of the energy commodity. "
"Every provided data for this commodity must be in this unit.",
example="GW")
description: Optional[str] = Field(None,
description="Description of the energy commodity."
"Can be used as detailed description of the energy commodity.",
example="Electricity")
# validators
_valid_name = validator("name", allow_reuse=True)(validators.validate_name)
_valid_unit = validator("unit", allow_reuse=True)(validators.validate_unit)
_valid_description = validator("description", allow_reuse=True)(validators.validate_description)
class EnergyCommodityCreate(EnergyCommodityBase):
"""
Attributes to receive via API on creation of an energy commodity.
"""
ref_dataset: int = Field(...,
description="Reference to the dataset where that energy commodity belongs to.",
example=1)
# validators
_valid_ref_dataset = validator("ref_dataset", allow_reuse=True)(validators.validate_ref_dataset_required)
class EnergyCommodityUpdate(EnergyCommodityBase):
"""
Attributes to receive via API on update of an energy commodity.
"""
name: Optional[str] = Field(None, description="New Name of the energy commodity.", example="Electricity")
unit: Optional[str] = Field(None, description="New Unit of the energy commodity.", example="GW")
class EnergyCommodity(EnergyCommodityBase):
"""
Attributes to return via API for an energy commodity.
"""
id: int = Field(..., description="The unique ID of the energy commodity.")
dataset: Dataset = Field(..., description="Dataset object where the energy commodity belongs to.")
class Config:
orm_mode = True
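# Usage sketch (illustrative values only, mirroring the Field examples above):
#
#     commodity = EnergyCommodityCreate(
#         name="Electricity",
#         unit="GW",
#         description="Electricity",
#         ref_dataset=1,
#     )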
| 41.42623 | 114 | 0.636723 |
7941605a1a1e82821bd20707ec476e001624d51c | 10,595 | py | Python | tests/unit_tests/test_binance.py | Vashiru/pycryptobot | a4cf67cbb7467acc1d4aa7aab17cd65e8dbb80fe | [
"Apache-2.0"
] | 1 | 2021-06-14T18:21:49.000Z | 2021-06-14T18:21:49.000Z | tests/unit_tests/test_binance.py | Vashiru/pycryptobot | a4cf67cbb7467acc1d4aa7aab17cd65e8dbb80fe | [
"Apache-2.0"
] | 1 | 2021-04-29T20:44:33.000Z | 2021-04-29T20:44:33.000Z | tests/unit_tests/test_binance.py | Vashiru/pycryptobot | a4cf67cbb7467acc1d4aa7aab17cd65e8dbb80fe | [
"Apache-2.0"
] | null | null | null | import json
import os
import sys
import pandas
import pytest
import urllib3
BINANCE_CONFIG_JSON = 'binance_config.json'
MOCK_MARKET = 'BTCEUR'
# disable insecure ssl warning
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
sys.path.append('.')
# pylint: disable=import-error
from models.exchange.binance import AuthAPI, PublicAPI
def test_instantiate_authapi_without_error():
api_key = "0000000000000000000000000000000000000000000000000000000000000000"
api_secret = "0000000000000000000000000000000000000000000000000000000000000000"
exchange = AuthAPI(api_key, api_secret)
assert type(exchange) is AuthAPI
def test_instantiate_authapi_with_api_key_error():
api_key = "ERROR"
api_secret = "0000000000000000000000000000000000000000000000000000000000000000"
with pytest.raises(SystemExit) as execinfo:
AuthAPI(api_key, api_secret)
assert str(execinfo.value) == 'Binance API key is invalid'
def test_instantiate_authapi_with_api_secret_error():
api_key = "0000000000000000000000000000000000000000000000000000000000000000"
api_secret = "ERROR"
with pytest.raises(SystemExit) as execinfo:
AuthAPI(api_key, api_secret)
assert str(execinfo.value) == 'Binance API secret is invalid'
def test_instantiate_authapi_with_api_url_error():
api_key = "0000000000000000000000000000000000000000000000000000000000000000"
api_secret = "0000000000000000000000000000000000000000000000000000000000000000"
api_url = "ERROR"
with pytest.raises(ValueError) as execinfo:
AuthAPI(api_key, api_secret, api_url)
assert str(execinfo.value) == 'Binance API URL is invalid'
def test_instantiate_publicapi_without_error():
exchange = PublicAPI()
assert type(exchange) is PublicAPI
def test_config_json_exists_and_valid():
file = _get_config_file()
assert os.path.exists(file) == True
with open(file) as config_file:
config = json.load(config_file)
if 'api_key' in config and 'api_secret' in config and 'api_pass' in config and 'api_url' in config:
api_key = config['api_key']
api_secret = config['api_secret']
api_url = config['api_url']
AuthAPI(api_key, api_secret, api_url)
elif 'api_key' in config['binance'] and 'api_secret' in config['binance'] and 'api_url' in config['binance']:
api_key = config['binance']['api_key']
api_secret = config['binance']['api_secret']
api_url = config['binance']['api_url']
AuthAPI(api_key, api_secret, api_url)
def test_get_account(mocker):
client_response = {
'makerCommission': 10,
'takerCommission': 10,
'buyerCommission': 0,
'sellerCommission': 0,
'canTrade': True,
'canWithdraw': True,
'canDeposit': True,
'updateTime': 1620861508183,
'accountType': 'SPOT',
'balances': [
{'asset': 'BTC', 'free': '0.00000000', 'locked': '0.00000000'},
{'asset': 'LTC', 'free': '0.00000000', 'locked': '0.24944000'}
],
'permissions': ['SPOT']
}
with open(_get_config_file()) as config_file:
config = json.load(config_file)
api_key = ''
api_secret = ''
api_url = ''
if 'api_key' in config and 'api_secret' in config and 'api_pass' in config and 'api_url' in config:
api_key = config['api_key']
api_secret = config['api_secret']
api_url = config['api_url']
AuthAPI(api_key, api_secret, api_url)
elif 'api_key' in config['binance'] and 'api_secret' in config['binance'] and 'api_url' in config['binance']:
api_key = config['binance']['api_key']
api_secret = config['binance']['api_secret']
api_url = config['binance']['api_url']
AuthAPI(api_key, api_secret, api_url)
exchange = AuthAPI(api_key, api_secret, api_url)
assert type(exchange) is AuthAPI
mocker.patch("models.exchange.binance.Client.get_account", return_value=client_response)
df = exchange.getAccount()
assert type(df) is pandas.core.frame.DataFrame
actual = df.columns.to_list()
expected = ['currency', 'balance', 'hold', 'available']
assert len(actual) == len(expected)
assert all([a == b for a, b in zip(actual, expected)])
def test_get_fees_with_market(mocker):
client_response = {'success': True, 'tradeFee': [{'maker': 0.001, 'symbol': 'CHZUSDT', 'taker': 0.001}]}
with open(_get_config_file()) as config_file:
config = json.load(config_file)
api_key = ''
api_secret = ''
api_url = ''
if 'api_key' in config and 'api_secret' in config and 'api_pass' in config and 'api_url' in config:
api_key = config['api_key']
api_secret = config['api_secret']
api_url = config['api_url']
AuthAPI(api_key, api_secret, api_url)
elif 'api_key' in config['binance'] and 'api_secret' in config['binance'] and 'api_url' in config['binance']:
api_key = config['binance']['api_key']
api_secret = config['binance']['api_secret']
api_url = config['binance']['api_url']
AuthAPI(api_key, api_secret, api_url)
exchange = AuthAPI(api_key, api_secret, api_url)
assert type(exchange) is AuthAPI
mocker.patch("models.exchange.binance.Client.get_trade_fee", return_value=client_response)
df = exchange.getFees(MOCK_MARKET)
assert type(df) is pandas.core.frame.DataFrame
assert len(df) == 1
actual = df.columns.to_list()
expected = ['maker_fee_rate', 'taker_fee_rate', 'usd_volume', 'market']
assert len(actual) == len(expected)
assert all([a == b for a, b in zip(actual, expected)])
def test_get_taker_fee_with_market(mocker):
client_response = {'success': True, 'tradeFee': [{'maker': 0.001, 'symbol': 'CHZUSDT', 'taker': 0.001}]}
with open(_get_config_file()) as config_file:
config = json.load(config_file)
api_key = ''
api_secret = ''
api_url = ''
if 'api_key' in config and 'api_secret' in config and 'api_pass' in config and 'api_url' in config:
api_key = config['api_key']
api_secret = config['api_secret']
api_url = config['api_url']
AuthAPI(api_key, api_secret, api_url)
elif 'api_key' in config['binance'] and 'api_secret' in config['binance'] and 'api_url' in config['binance']:
api_key = config['binance']['api_key']
api_secret = config['binance']['api_secret']
api_url = config['binance']['api_url']
AuthAPI(api_key, api_secret, api_url)
exchange = AuthAPI(api_key, api_secret, api_url)
assert type(exchange) is AuthAPI
mocker.patch("models.exchange.binance.Client.get_trade_fee", return_value=client_response)
fee = exchange.getTakerFee(MOCK_MARKET)
assert type(fee) is float
assert fee == 0.001
def test_get_maker_fee_with_market(mocker):
client_response = {'success': True, 'tradeFee': [{'maker': 0.001, 'symbol': 'CHZUSDT', 'taker': 0.001}]}
with open(_get_config_file()) as config_file:
config = json.load(config_file)
api_key = ''
api_secret = ''
api_url = ''
if 'api_key' in config and 'api_secret' in config and 'api_pass' in config and 'api_url' in config:
api_key = config['api_key']
api_secret = config['api_secret']
api_url = config['api_url']
AuthAPI(api_key, api_secret, api_url)
elif 'api_key' in config['binance'] and 'api_secret' in config['binance'] and 'api_url' in config['binance']:
api_key = config['binance']['api_key']
api_secret = config['binance']['api_secret']
api_url = config['binance']['api_url']
AuthAPI(api_key, api_secret, api_url)
exchange = AuthAPI(api_key, api_secret, api_url)
assert type(exchange) is AuthAPI
mocker.patch("models.exchange.binance.Client.get_trade_fee", return_value=client_response)
fee = exchange.getMakerFee(MOCK_MARKET)
assert type(fee) is float
assert fee == 0.001
@pytest.mark.skip(reason="further work required to get this working")
def test_get_orders(mocker):
client_response = [
{
'symbol': 'CHZUSDT',
'orderId': 123456789,
'orderListId': -1,
'clientOrderId': 'SOME-CLIENT-ORDER-ID',
'price': '0.00000000',
'origQty': '31.30000000',
'executedQty': '31.30000000',
'cummulativeQuoteQty': '15.68161300',
'status': 'FILLED',
'timeInForce': 'GTC',
'type': 'MARKET',
'side': 'SELL',
'stopPrice': '0.00000000',
'icebergQty': '0.00000000',
'time': 1616845743872,
'updateTime': 1616845743872,
'isWorking': True,
'origQuoteOrderQty': '0.00000000'
}
]
with open(_get_config_file()) as config_file:
config = json.load(config_file)
api_key = ''
api_secret = ''
api_url = ''
if 'api_key' in config and 'api_secret' in config and 'api_pass' in config and 'api_url' in config:
api_key = config['api_key']
api_secret = config['api_secret']
api_url = config['api_url']
AuthAPI(api_key, api_secret, api_url)
elif 'api_key' in config['binance'] and 'api_secret' in config['binance'] and 'api_url' in config['binance']:
api_key = config['binance']['api_key']
api_secret = config['binance']['api_secret']
api_url = config['binance']['api_url']
AuthAPI(api_key, api_secret, api_url)
exchange = AuthAPI(api_key, api_secret, api_url)
assert type(exchange) is AuthAPI
mocker.patch("models.exchange.binance.Client.get_all_orders", return_value=client_response)
df = exchange.getOrders(MOCK_MARKET)
assert len(df) > 0
actual = df.columns.to_list()
expected = ['created_at', 'market', 'action', 'type', 'size', 'filled', 'status', 'price']
# order is not important, but no duplicate
assert len(actual) == len(expected)
diff = set(actual) ^ set(expected)
assert not diff
def _get_config_file():
filename = BINANCE_CONFIG_JSON
path_to_current_file = os.path.realpath(__file__)
current_directory = os.path.split(path_to_current_file)[0]
path_to_file = os.path.join(current_directory, filename)
return path_to_file
| 38.111511 | 117 | 0.645871 |
7941608be729fd6cab62c09df4392d9a490bb339 | 1,646 | py | Python | legacy/enums.py | ParikhKadam/zenml | 867e4d4c982a50447bd182b30af37f2141dac5a4 | [
"Apache-2.0"
] | 1,275 | 2020-11-19T14:18:25.000Z | 2021-08-13T07:31:39.000Z | legacy/enums.py | ParikhKadam/zenml | 867e4d4c982a50447bd182b30af37f2141dac5a4 | [
"Apache-2.0"
] | 62 | 2020-11-30T16:06:14.000Z | 2021-08-10T08:34:52.000Z | legacy/enums.py | ParikhKadam/zenml | 867e4d4c982a50447bd182b30af37f2141dac5a4 | [
"Apache-2.0"
] | 75 | 2020-12-22T19:15:08.000Z | 2021-08-13T03:07:50.000Z | # Copyright (c) ZenML GmbH 2020. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
from enum import Enum
class PipelineStatusTypes(Enum):
NotStarted = 1
Failed = 2
Succeeded = 3
Running = 4
class GDPComponent(Enum):
SplitGen = 1
SplitStatistics = 2
SplitSchema = 3
Sequencer = 4
SequencerStatistics = 5
SequencerSchema = 6
PreTransform = 7
PreTransformStatistics = 8
PreTransformSchema = 9
Transform = 10
Trainer = 11
Evaluator = 12
ResultPackager = 13
ModelValidator = 14
Deployer = 15
DataGen = 16
Inferrer = 17
DataStatistics = 18
DataSchema = 19
Tokenizer = 20
class MLMetadataTypes(Enum):
sqlite = 1
mysql = 2
mock = 3
class ArtifactStoreTypes(Enum):
local = 1
gcs = 2
class StepTypes(Enum):
base = 1
data = 2
sequencer = 3
preprocesser = 4
split = 5
trainer = 6
evaluator = 7
deployer = 8
inferrer = 9
class GCPGPUTypes(Enum):
K80 = 1
V100 = 2
P100 = 3
class ImagePullPolicy(Enum):
Always = 1
Never = 2
IfNotPresent = 3
| 20.320988 | 70 | 0.658566 |
79416092bdc4240a1dc60355635e39c726530136 | 434 | py | Python | rackattack/virtual/sh.py | shlomimatichin/rackattack-virtual | c800646c6f07dd04493dc313a3770f192a542d4c | [
"Apache-2.0"
] | null | null | null | rackattack/virtual/sh.py | shlomimatichin/rackattack-virtual | c800646c6f07dd04493dc313a3770f192a542d4c | [
"Apache-2.0"
] | null | null | null | rackattack/virtual/sh.py | shlomimatichin/rackattack-virtual | c800646c6f07dd04493dc313a3770f192a542d4c | [
"Apache-2.0"
] | 1 | 2020-01-29T08:05:00.000Z | 2020-01-29T08:05:00.000Z | import subprocess
import logging
_LOGGER = logging.getLogger("sh")
def run(* args, ** kwargs):
try:
return subprocess.check_output(
* args, stderr=subprocess.STDOUT, close_fds=True, ** kwargs)
except subprocess.CalledProcessError as e:
_LOGGER.exception(
"Return code:%(returncode)d Output was:\n%(output)s",
dict(output=e.output, returncode=e.returncode))
raise
| 27.125 | 72 | 0.645161 |
794160d7ee1d9d5a8b568991b34cc7af4cb0069b | 397 | py | Python | h/migrations/versions/f48100c9af86_add_authority_column_to_user_table.py | tgiardina/rpp-h | fece590f901b052a59c19a24acfeba52cee33c84 | [
"BSD-2-Clause"
] | 1 | 2020-06-19T01:49:39.000Z | 2020-06-19T01:49:39.000Z | h/migrations/versions/f48100c9af86_add_authority_column_to_user_table.py | tgiardina/rpp-h | fece590f901b052a59c19a24acfeba52cee33c84 | [
"BSD-2-Clause"
] | 5 | 2019-10-31T14:23:18.000Z | 2019-11-15T19:24:27.000Z | h/migrations/versions/f48100c9af86_add_authority_column_to_user_table.py | tgiardina/rpp-h | fece590f901b052a59c19a24acfeba52cee33c84 | [
"BSD-2-Clause"
] | null | null | null | """
Add authority column to user table
Revision ID: f48100c9af86
Revises: 64cf31f9f721
Create Date: 2016-08-15 18:10:23.511861
"""
import sqlalchemy as sa
from alembic import op
revision = "f48100c9af86"
down_revision = "64cf31f9f721"
def upgrade():
op.add_column("user", sa.Column("authority", sa.UnicodeText(), nullable=True))
def downgrade():
op.drop_column("user", "authority")
| 18.045455 | 82 | 0.730479 |
794161645fabe5c593ccf4e7b7ed6d58bf879753 | 1,504 | py | Python | NucleationModel/gridifier.py | MFrassek/CommittorEAE | 88a467e4500bc9ab69834209f4eaec9f2d0d7a61 | [
"MIT"
] | null | null | null | NucleationModel/gridifier.py | MFrassek/CommittorEAE | 88a467e4500bc9ab69834209f4eaec9f2d0d7a61 | [
"MIT"
] | null | null | null | NucleationModel/gridifier.py | MFrassek/CommittorEAE | 88a467e4500bc9ab69834209f4eaec9f2d0d7a61 | [
"MIT"
] | null | null | null | import numpy as np
class Gridifier():
def __init__(self, base_snapshots, resolution):
self._minima = np.amin(base_snapshots, axis=0)
self._maxima = np.amax(base_snapshots, axis=0)
self._spans = np.array(
[span if span != 0 else 1 for span in self._maxima - self._minima])
self._resolution = resolution
self._inverse_spans_times_resolution = \
1 / self._spans * (self._resolution - 1)
@property
def resolution(self):
return self._resolution
@property
def minima(self):
return self._minima
@property
def maxima(self):
return self._maxima
def gridify_snapshots(self, snapshots):
"""Take a list of snapshots and bin all entries to the closest
gridpoint.
"""
return self.convert_array_to_int_array(
self.broadcast_round_to_closest_integer(
self.broadcast_rescale_to_resolution_range(
self.broadcast_shift_values_to_start_at_minimum(
snapshots))))
def broadcast_shift_values_to_start_at_minimum(self, snapshots):
return snapshots - self.minima
def broadcast_rescale_to_resolution_range(self, snapshots):
return snapshots * self._inverse_spans_times_resolution
def broadcast_round_to_closest_integer(self, snapshots):
return np.floor(snapshots + 0.5)
def convert_array_to_int_array(self, snapshots):
return snapshots.astype(int)
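# Worked example (synthetic numbers, not part of the module): with base snapshots
# [[0, 10], [2, 30]] and resolution=5, the spans are [2, 20], so the snapshot
# [1.0, 20.0] is shifted to [1, 10], rescaled to [2.0, 2.0] and binned to [2, 2].
#
#     g = Gridifier(np.array([[0.0, 10.0], [2.0, 30.0]]), resolution=5)
#     g.gridify_snapshots(np.array([[1.0, 20.0]]))  # -> array([[2, 2]])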
| 32 | 79 | 0.664229 |
794161c33ac87b1919a09ea01235202bbbdac8c8 | 648 | py | Python | ansible/roles/db/molecule/default/tests/test_default.py | Otus-DevOps-2020-11/AndreyAgafonov_infra | 19e3d20479c0a713678d6ca7e346d689bf8e9ac9 | [
"MIT"
] | null | null | null | ansible/roles/db/molecule/default/tests/test_default.py | Otus-DevOps-2020-11/AndreyAgafonov_infra | 19e3d20479c0a713678d6ca7e346d689bf8e9ac9 | [
"MIT"
] | 2 | 2020-12-16T13:42:37.000Z | 2021-02-14T14:41:31.000Z | ansible/roles/db/molecule/default/tests/test_default.py | barmank32/barmank32_infra | 062b1c5cceb4991d53b3c71dd2a787255f87b23b | [
"MIT"
] | 1 | 2021-01-29T14:50:38.000Z | 2021-01-29T14:50:38.000Z | import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
# check if MongoDB is enabled and running
def test_mongo_running_and_enabled(host):
mongo = host.service("mongod")
assert mongo.is_running
assert mongo.is_enabled
# check if configuration file contains the required line
def test_config_file(host):
config_file = host.file('/etc/mongod.conf')
assert config_file.contains('bindIp: 0.0.0.0')
assert config_file.is_file
def test_mongo_socket(host):
    assert host.socket("tcp://0.0.0.0:27017").is_listening
| 29.454545 | 63 | 0.763889 |
794163ea82060bd7617b43b1f61cb53f04bd20ea | 14,862 | py | Python | lib/nibLib/ui/glyphs.py | jenskutilek/nibLib | 7d0880828f46c05dd2512463c02e8f9728c13027 | [
"MIT"
] | 11 | 2020-01-21T13:18:07.000Z | 2020-12-13T20:57:28.000Z | lib/nibLib/ui/glyphs.py | jenskutilek/nibLib | 7d0880828f46c05dd2512463c02e8f9728c13027 | [
"MIT"
] | 1 | 2020-01-21T14:40:46.000Z | 2020-01-21T17:13:54.000Z | lib/nibLib/ui/glyphs.py | jenskutilek/nibLib | 7d0880828f46c05dd2512463c02e8f9728c13027 | [
"MIT"
] | null | null | null | from __future__ import division, print_function
from math import degrees, pi, radians
import vanilla
from defconAppKit.windows.baseWindow import BaseWindowController
from nibLib import DEBUG, def_angle_key, def_width_key, def_height_key, \
def_local_key, def_guide_key, def_super_key, def_model_key, \
rf_guide_key
from nibLib.pens import nib_models
def UpdateCurrentGlyphView():
pass
# This is development code, copy it to __init__.py after editing
class JKNib(BaseWindowController):
def __init__(self, glyph, font):
self.model = "Superellipse"
self.angle = radians(30)
self.width = 60
self.height = 2
self.superness = 2.5
self.line_join = "round" # bevel, round
self.guide_layer = None
self.nib_pen = nib_models[self.model]
self._draw_nib_faces = False
self._draw_in_preview_mode = False
self.glyph = glyph
self.font = font
# window dimensions
width = 300
height = 322
self.w = vanilla.FloatingWindow((width, height), "Nib Simulator")
col = 60
y = 10
self.w.model_label = vanilla.TextBox((8, y, col-8, 20), "Model")
self.w.model_select = vanilla.PopUpButton(
(col, y, -48, 20),
nib_models.keys(),
callback=self._model_select_callback,
)
y += 32
self.w.angle_label = vanilla.TextBox((8, y, col-8, 20), "Angle")
self.w.angle_slider = vanilla.Slider(
(col, y, -48, 20),
minValue=0,
maxValue=pi,
value=radians(30),
tickMarkCount=7,
callback=self._nib_angle_callback,
stopOnTickMarks=False,
)
self.w.angle_text = vanilla.TextBox(
(-40, y, -8, 20),
"%i" % int(round(degrees(self.angle)))
)
y += 24
self.w.width_label = vanilla.TextBox((8, y, col-8, 20), "Width")
self.w.width_slider = vanilla.Slider(
(col, y, -48, 20),
minValue=0,
maxValue=200,
value=self.width,
# tickMarkCount=7,
callback=self._nib_width_callback,
# stopOnTickMarks=False,
)
self.w.width_text = vanilla.TextBox(
(-40, y, -8, 20),
"%i" % self.width
)
y += 24
self.w.height_label = vanilla.TextBox((8, y, col-8, 20), "Height")
self.w.height_slider = vanilla.Slider(
(col, y, -48, 20),
minValue=1,
maxValue=200,
value=self.height,
# tickMarkCount=7,
callback=self._nib_height_callback,
# stopOnTickMarks=False,
)
self.w.height_text = vanilla.TextBox(
(-40, y, -8, 20),
"%i" % self.height
)
y += 24
self.w.superness_label = vanilla.TextBox((8, y, col-8, 20), "Super")
self.w.superness_slider = vanilla.Slider(
(col, y, -48, 20),
minValue=1.01,
maxValue=15.0,
value=self.superness,
callback=self._nib_superness_callback,
)
self.w.superness_text = vanilla.TextBox(
(-40, y, -8, 20),
"%0.2f" % self.superness
)
y += 32
self.w.guide_label = vanilla.TextBox((8, y, col-8, 20), "Guide")
self.w.guide_select = vanilla.PopUpButton(
(col, y, -48, 20),
[]
# callback=self._guide_select_callback,
)
y += 32
self.w.glyph_local = vanilla.CheckBox(
(col, y, -40, 20),
"Glyph Uses Local Parameters",
callback=self._glyph_local_callback,
value=False,
)
y += 32
self.w.display_label = vanilla.TextBox((8, y, col-8, 20), "Display")
self.w.draw_space = vanilla.CheckBox(
(col, y, -48, 20),
"Draw In Space Center",
callback=self._draw_space_callback,
value=False,
)
y += 24
self.w.draw_preview = vanilla.CheckBox(
(col, y, -48, 20),
"Draw In Preview Mode",
callback=self._draw_preview_callback,
value=False,
)
y += 24
self.w.draw_faces = vanilla.CheckBox(
(col, y, -48, 20),
"Draw Nib Faces In RGB",
callback=self._draw_faces_callback,
value=False,
)
y += 32
self.w.trace_outline = vanilla.Button(
(col, y, 120, 20),
title="Trace Outline",
callback=self._trace_callback
)
self.observers = [
("_preview", "drawBackground"),
("_preview", "drawInactive"),
("_previewFull", "drawPreview"),
("_glyph_changed", "currentGlyphChanged"),
("_font_changed", "fontBecameCurrent"),
("_font_resign", "fontResignCurrent"),
]
self.envSpecificInit()
self._update_layers()
# self._update_ui()
# self.w.trace_outline.enable(False)
self.w.open()
UpdateCurrentGlyphView()
def envSpecificInit(self):
pass
def windowCloseCallback(self, sender):
if self.font is not None:
self.save_settings()
self.envSpecificQuit()
super(JKNib, self).windowCloseCallback(sender)
UpdateCurrentGlyphView()
def envSpecificQuit(self):
pass
def _update_layers(self):
if self.font is None:
self.font_layers = []
else:
self.font_layers = self.getLayerList()
self.w.guide_select.setItems(self.font_layers)
if self.font_layers:
last_layer = len(self.font_layers) - 1
self.w.guide_select.set(last_layer)
self.guide_layer = self.font_layers[last_layer]
def getLayerList(self):
return []
def _update_ui(self):
# print("_update_ui")
i = 0
for i, model in enumerate(self.w.model_select.getItems()):
if model == self.model:
break
self.w.model_select.set(i)
self.nib_pen = nib_models[self.model]
self.w.angle_slider.set(self.angle)
self.w.angle_text.set("%i" % int(round(degrees(self.angle))))
self.w.width_slider.set(self.width)
self.w.width_text.set("%i" % self.width)
self.w.width_slider.setMinValue(self.height + 1)
self.w.height_slider.set(self.height)
self.w.height_text.set("%i" % self.height)
self.w.height_slider.set(self.height)
self.w.superness_text.set("%0.2f" % self.superness)
self.w.superness_slider.set(self.superness)
if self.font is None:
self.w.guide_select.setItems([])
else:
if self.guide_layer in self.font_layers:
self.w.guide_select.setItems(self.font_layers)
self.w.guide_select.set(
self.font_layers.index(self.guide_layer)
)
else:
self._update_layers()
self.check_secondary_ui()
def check_secondary_ui(self):
if self.model == "Superellipse":
self.w.superness_slider.enable(True)
else:
self.w.superness_slider.enable(False)
if self.model == "Rectangle":
self.w.draw_faces.enable(True)
else:
self.w.draw_faces.enable(False)
def _model_select_callback(self, sender):
self.model = self.w.model_select.getItems()[sender.get()]
self.nib_pen = nib_models[self.model]
self.check_secondary_ui()
UpdateCurrentGlyphView()
def _nib_angle_callback(self, sender):
angle = int(round(degrees(sender.get())))
self.angle = radians(angle)
self.w.angle_text.set("%i" % angle)
UpdateCurrentGlyphView()
def _nib_width_callback(self, sender):
self.width = int(round(sender.get()))
self.w.width_text.set("%i" % self.width)
self.w.height_slider.setMaxValue(self.width)
UpdateCurrentGlyphView()
def _nib_height_callback(self, sender):
self.height = int(round(sender.get()))
self.w.height_text.set("%i" % self.height)
self.w.width_slider.setMinValue(self.height)
UpdateCurrentGlyphView()
def _nib_superness_callback(self, sender):
self.superness = sender.get()
self.w.superness_text.set("%0.2f" % self.superness)
UpdateCurrentGlyphView()
def _glyph_local_callback(self, sender):
value = sender.get()
#print("Local:", value)
self.save_to_lib(self.glyph, def_local_key, False)
if not value:
self.load_settings()
def _draw_space_callback(self, sender):
pass
def _draw_preview_callback(self, sender):
self._draw_in_preview_mode = sender.get()
UpdateCurrentGlyphView()
def _draw_faces_callback(self, sender):
self._draw_nib_faces = sender.get()
UpdateCurrentGlyphView()
def get_guide_representation(self, glyph, font, angle):
# TODO: Rotate, add extreme points, rotate back
return glyph.copy()
def _trace_callback(self, sender):
if self.guide_layer is None:
self._update_layers()
return
guide_glyph = self.glyph.getLayer(self.guide_layer)
        glyph = self.get_guide_representation(
            guide_glyph,
            font=guide_glyph.font,
            angle=self.angle
        )
p = self.nib_pen(
self.font,
self.angle,
self.width,
self.height,
self._draw_nib_faces,
nib_superness=self.superness,
trace=True
)
glyph.draw(p)
p.trace_path(self.glyph)
def _setup_draw(self, preview=False):
if preview:
fill(0)
stroke(0)
else:
fill(0.6, 0.7, 0.9, 0.5)
stroke(0.6, 0.7, 0.9)
# strokeWidth(self.height)
# strokeWidth(1)
strokeWidth(0)
stroke(None)
lineJoin(self.line_join)
def _draw_preview_glyph(self, preview=False):
if self.guide_layer is None:
self._update_layers()
return
        guide_glyph = self.glyph.getLayer(self.guide_layer)
        glyph = self.get_guide_representation(
            guide_glyph,
            font=guide_glyph.font,
            angle=self.angle
        )
save()
self._setup_draw(preview=preview)
# TODO: Reuse pen object.
# Needs modifications to the pens before possible.
p = self.nib_pen(
self.font,
self.angle,
self.width,
self.height,
self._draw_nib_faces,
nib_superness=self.superness
)
glyph.draw(p)
restore()
def save_to_lib(self, font_or_glyph, libkey, value):
pass
def load_from_lib(self, font_or_glyph, libkey, attr=None):
pass
def save_settings(self):
has_local_settings = self.w.glyph_local.get()
if has_local_settings:
# print("Saving settings to", self.glyph)
for setting, value in [
(def_angle_key, degrees(self.angle)),
(def_width_key, self.width),
(def_height_key, self.height),
(def_guide_key, self.guide_layer),
(def_local_key, has_local_settings),
(def_super_key, self.superness),
(def_model_key, self.model),
]:
self.save_to_lib(self.glyph, setting, value)
else:
for setting in [
def_angle_key,
def_width_key,
def_height_key,
def_guide_key,
def_local_key
]:
self.save_to_lib(self.glyph, setting, None)
# print("Saving settings to", self.font)
for setting, value in [
(def_angle_key, degrees(self.angle)),
(def_width_key, self.width),
(def_height_key, self.height),
(def_guide_key, self.guide_layer),
(def_super_key, self.superness),
(def_model_key, self.model),
]:
self.save_to_lib(self.font, setting, value)
def load_settings(self):
has_local_settings = self.load_from_lib(self.glyph, def_local_key)
if has_local_settings:
# print("Loading settings from glyph", self.glyph)
self.w.glyph_local.set(True)
self.angle = radians(self.load_from_lib(self.glyph, def_angle_key))
for setting, attr in [
(def_width_key, "width"),
(def_height_key, "height"),
(def_guide_key, "guide_layer"),
(def_super_key, "superness"),
(def_model_key, "model"),
]:
self.load_from_lib(self.glyph, setting, attr)
else:
# print("Loading settings from font", self.font)
self.w.glyph_local.set(False)
angle = self.load_from_lib(self.font, def_angle_key)
if angle is not None:
self.angle = radians(angle)
for setting, attr in [
(def_width_key, "width"),
(def_height_key, "height"),
(def_guide_key, "guide_layer"),
(def_super_key, "superness"),
(def_model_key, "model"),
]:
self.load_from_lib(self.font, setting, attr)
self._update_ui()
# This is the Glyphs-specific code, it should stay here.
class JKNibGlyphs(JKNib):
user_data_attr = "userData"
def __init__(self, layer, font):
super(JKNibGlyphs, self).__init__(layer, font)
def envSpecificInit(self):
pass
def envSpecificQuit(self):
pass
def getLayerList(self):
return [layer.name for layer in self.glyph.parent.layers]
def save_to_lib(self, font_or_glyph, libkey, value):
if value is None:
if font_or_glyph.userData and libkey in font_or_glyph.userData:
del font_or_glyph.userData[libkey]
else:
if font_or_glyph.userData and libkey in font_or_glyph.userData:
if font_or_glyph.userData[libkey] != value:
font_or_glyph.userData[libkey] = value
else:
font_or_glyph.userData[libkey] = value
def load_from_lib(self, font_or_glyph, libkey, attr=None):
if font_or_glyph is None:
return False
value = font_or_glyph.userData.get(libkey, None)
if attr is not None:
if value is not None:
setattr(self, attr, value)
return value
if __name__ == "__main__":
JKNibGlyphs(Layer, Font)
| 31.487288 | 79 | 0.56049 |
794165a498c6030defd2735f36215372dc324cb6 | 1,340 | py | Python | 2020/day9.py | tangarts/advent-of-code | 5879fbec1a5377d1288666a357b029f6345d4a5d | [
"MIT"
] | null | null | null | 2020/day9.py | tangarts/advent-of-code | 5879fbec1a5377d1288666a357b029f6345d4a5d | [
"MIT"
] | null | null | null | 2020/day9.py | tangarts/advent-of-code | 5879fbec1a5377d1288666a357b029f6345d4a5d | [
"MIT"
] | null | null | null | #%%
from advent_of_code.core import parse_input
raw = """35
20
15
25
47
40
62
55
65
95
102
117
150
182
127
219
299
277
309
576"""
def two_sum(array: list, t: int) -> int:
cache = {}
for i in range(len(array)):
if array[i] not in cache:
cache[t - array[i]] = array[i]
else:
return array[i] + cache[array[i]]
return -1
test = parse_input(raw, parser=int)
# %%
def part1(numbers, window):
for i in range(window + 1, len(numbers)):
if two_sum(numbers[i - window : i], numbers[i]) == -1:
return numbers[i]
day9 = parse_input("data/input9.txt", sep="\n", parser=int, test=False)
assert part1(test, 5) == 127
assert part1(day9, 25) == 1930745883
# %%
# part 2
# find a contiguous set of at least two numbers in
# your list which sum to the invalid number from step 1.
def part2(numbers, target):
left, right = 0, 0
total = 0
while left < len(numbers) - 1 or right < len(numbers) - 1:
while total < target:
total += numbers[right]
right += 1
if total > target:
total -= numbers[left]
left += 1
if total == target:
return min(numbers[left:right]) + max(numbers[left:right])
# %%
assert part2(test, 127) == 62
assert part2(day9, 1930745883) == 268878261
# %%
| 16.962025 | 71 | 0.580597 |
794165fe60812838e9973f27330464f674f0cab1 | 8,973 | py | Python | pytorch_lightning/callbacks/gpu_stats_monitor.py | GabrielePicco/pytorch-lightning | 0d6dfd42d8965347a258e3d20e83bddd344e718f | [
"Apache-2.0"
] | 4 | 2021-07-27T14:39:02.000Z | 2022-03-07T10:57:13.000Z | pytorch_lightning/callbacks/gpu_stats_monitor.py | GabrielePicco/pytorch-lightning | 0d6dfd42d8965347a258e3d20e83bddd344e718f | [
"Apache-2.0"
] | 2 | 2021-07-03T07:07:32.000Z | 2022-03-10T16:07:20.000Z | pytorch_lightning/callbacks/gpu_stats_monitor.py | GabrielePicco/pytorch-lightning | 0d6dfd42d8965347a258e3d20e83bddd344e718f | [
"Apache-2.0"
] | 1 | 2022-01-08T14:06:27.000Z | 2022-01-08T14:06:27.000Z | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
GPU Stats Monitor
=================
Monitor and logs GPU stats during training.
"""
import os
import shutil
import subprocess
import time
from typing import Any, Dict, List, Tuple
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities import DeviceType, rank_zero_only
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.parsing import AttributeDict
class GPUStatsMonitor(Callback):
r"""
Automatically monitors and logs GPU stats during training stage. ``GPUStatsMonitor``
is a callback and in order to use it you need to assign a logger in the ``Trainer``.
Args:
memory_utilization: Set to ``True`` to monitor used, free and percentage of memory
utilization at the start and end of each step. Default: ``True``.
gpu_utilization: Set to ``True`` to monitor percentage of GPU utilization
at the start and end of each step. Default: ``True``.
intra_step_time: Set to ``True`` to monitor the time of each step. Default: ``False``.
inter_step_time: Set to ``True`` to monitor the time between the end of one step
and the start of the next step. Default: ``False``.
fan_speed: Set to ``True`` to monitor percentage of fan speed. Default: ``False``.
        temperature: Set to ``True`` to monitor the memory and gpu temperature in degrees Celsius.
Default: ``False``.
Raises:
MisconfigurationException:
If NVIDIA driver is not installed, not running on GPUs, or ``Trainer`` has no logger.
Example::
>>> from pytorch_lightning import Trainer
>>> from pytorch_lightning.callbacks import GPUStatsMonitor
>>> gpu_stats = GPUStatsMonitor() # doctest: +SKIP
>>> trainer = Trainer(callbacks=[gpu_stats]) # doctest: +SKIP
    GPU stats are mainly based on the `nvidia-smi --query-gpu` command. The description of the queries is as follows:
- **fan.speed** – The fan speed value is the percent of maximum speed that the device's fan is currently
intended to run at. It ranges from 0 to 100 %. Note: The reported speed is the intended fan speed.
If the fan is physically blocked and unable to spin, this output will not match the actual fan speed.
Many parts do not report fan speeds because they rely on cooling via fans in the surrounding enclosure.
- **memory.used** – Total memory allocated by active contexts.
- **memory.free** – Total free memory.
- **utilization.gpu** – Percent of time over the past sample period during which one or more kernels was
executing on the GPU. The sample period may be between 1 second and 1/6 second depending on the product.
- **utilization.memory** – Percent of time over the past sample period during which global (device) memory was
being read or written. The sample period may be between 1 second and 1/6 second depending on the product.
- **temperature.gpu** – Core GPU temperature, in degrees C.
- **temperature.memory** – HBM memory temperature, in degrees C.
"""
def __init__(
self,
memory_utilization: bool = True,
gpu_utilization: bool = True,
intra_step_time: bool = False,
inter_step_time: bool = False,
fan_speed: bool = False,
temperature: bool = False
):
super().__init__()
if shutil.which('nvidia-smi') is None:
raise MisconfigurationException(
'Cannot use GPUStatsMonitor callback because NVIDIA driver is not installed.'
)
self._log_stats = AttributeDict({
'memory_utilization': memory_utilization,
'gpu_utilization': gpu_utilization,
'intra_step_time': intra_step_time,
'inter_step_time': inter_step_time,
'fan_speed': fan_speed,
'temperature': temperature
})
def on_train_start(self, trainer, pl_module) -> None:
if not trainer.logger:
raise MisconfigurationException('Cannot use GPUStatsMonitor callback with Trainer that has no logger.')
if trainer._device_type != DeviceType.GPU:
raise MisconfigurationException(
'You are using GPUStatsMonitor but are not running on GPU'
f' since gpus attribute in Trainer is set to {trainer.gpus}.'
)
self._gpu_ids = ','.join(map(str, trainer.data_parallel_device_ids))
def on_train_epoch_start(self, trainer, pl_module) -> None:
self._snap_intra_step_time = None
self._snap_inter_step_time = None
@rank_zero_only
def on_train_batch_start(self, trainer, pl_module, batch: Any, batch_idx: int, dataloader_idx: int) -> None:
if self._log_stats.intra_step_time:
self._snap_intra_step_time = time.time()
if not self._should_log(trainer):
return
gpu_stat_keys = self._get_gpu_stat_keys()
gpu_stats = self._get_gpu_stats([k for k, _ in gpu_stat_keys])
logs = self._parse_gpu_stats(self._gpu_ids, gpu_stats, gpu_stat_keys)
if self._log_stats.inter_step_time and self._snap_inter_step_time:
# First log at beginning of second step
logs['batch_time/inter_step (ms)'] = (time.time() - self._snap_inter_step_time) * 1000
trainer.logger.log_metrics(logs, step=trainer.global_step)
@rank_zero_only
def on_train_batch_end(
self, trainer, pl_module, outputs: Any, batch: Any, batch_idx: int, dataloader_idx: int
) -> None:
if self._log_stats.inter_step_time:
self._snap_inter_step_time = time.time()
if not self._should_log(trainer):
return
gpu_stat_keys = self._get_gpu_stat_keys() + self._get_gpu_device_stat_keys()
gpu_stats = self._get_gpu_stats([k for k, _ in gpu_stat_keys])
logs = self._parse_gpu_stats(self._gpu_ids, gpu_stats, gpu_stat_keys)
if self._log_stats.intra_step_time and self._snap_intra_step_time:
logs['batch_time/intra_step (ms)'] = (time.time() - self._snap_intra_step_time) * 1000
trainer.logger.log_metrics(logs, step=trainer.global_step)
def _get_gpu_stats(self, queries: List[str]) -> List[List[float]]:
"""Run nvidia-smi to get the gpu stats"""
gpu_query = ','.join(queries)
format = 'csv,nounits,noheader'
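        # Example of the shell command this builds (gpu ids assumed to be '0,1', two queries):
        #   nvidia-smi --query-gpu=utilization.gpu,memory.used --format=csv,nounits,noheader --id=0,1
        # Each stdout line then describes one GPU, e.g. "45, 2318", and is parsed into floats below.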
result = subprocess.run(
[shutil.which('nvidia-smi'), f'--query-gpu={gpu_query}', f'--format={format}', f'--id={self._gpu_ids}'],
encoding="utf-8",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, # for backward compatibility with python version 3.6
check=True
)
def _to_float(x: str) -> float:
try:
return float(x)
except ValueError:
return 0.
stats = result.stdout.strip().split(os.linesep)
stats = [[_to_float(x) for x in s.split(', ')] for s in stats]
return stats
@staticmethod
def _parse_gpu_stats(gpu_ids: str, stats: List[List[float]], keys: List[Tuple[str, str]]) -> Dict[str, float]:
"""Parse the gpu stats into a loggable dict"""
logs = {}
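        # Illustrative sketch (values assumed): with gpu_ids '0,1', keys [('utilization.gpu', '%')]
        # and stats [[45.0], [50.0]], this returns
        # {'gpu_id: 0/utilization.gpu (%)': 45.0, 'gpu_id: 1/utilization.gpu (%)': 50.0}.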
for i, gpu_id in enumerate(gpu_ids.split(',')):
for j, (x, unit) in enumerate(keys):
logs[f'gpu_id: {gpu_id}/{x} ({unit})'] = stats[i][j]
return logs
def _get_gpu_stat_keys(self) -> List[Tuple[str, str]]:
"""Get the GPU stats keys"""
stat_keys = []
if self._log_stats.gpu_utilization:
stat_keys.append(('utilization.gpu', '%'))
if self._log_stats.memory_utilization:
stat_keys.extend([('memory.used', 'MB'), ('memory.free', 'MB'), ('utilization.memory', '%')])
return stat_keys
def _get_gpu_device_stat_keys(self) -> List[Tuple[str, str]]:
"""Get the device stats keys"""
stat_keys = []
if self._log_stats.fan_speed:
stat_keys.append(('fan.speed', '%'))
if self._log_stats.temperature:
stat_keys.extend([('temperature.gpu', '°C'), ('temperature.memory', '°C')])
return stat_keys
@staticmethod
def _should_log(trainer) -> bool:
return (trainer.global_step + 1) % trainer.log_every_n_steps == 0 or trainer.should_stop
| 41.734884 | 116 | 0.657751 |
794166af9acc4f3af2f9facaed68d01ec46e1839 | 67,033 | py | Python | packages/vaex-core/vaex/expression.py | maartenbreddels/vaex | 633970528cb5091ef376dbca2e4721cd42525419 | [
"MIT"
] | 337 | 2016-02-11T07:36:35.000Z | 2018-12-10T07:17:35.000Z | packages/vaex-core/vaex/expression.py | maartenbreddels/vaex | 633970528cb5091ef376dbca2e4721cd42525419 | [
"MIT"
] | 127 | 2016-07-06T15:43:14.000Z | 2018-12-11T18:46:27.000Z | packages/vaex-core/vaex/expression.py | maartenbreddels/vaex | 633970528cb5091ef376dbca2e4721cd42525419 | [
"MIT"
] | 29 | 2016-10-05T14:15:28.000Z | 2018-11-29T10:17:00.000Z | import ast
import copy
import os
import base64
import datetime
from pydoc import doc
import time
import cloudpickle as pickle
import functools
import operator
import six
import collections
import weakref
from future.utils import with_metaclass
import numpy as np
import pandas as pd
import tabulate
import pyarrow as pa
import vaex.hash
import vaex.serialize
from vaex.utils import _ensure_strings_from_expressions, _ensure_string_from_expression
from vaex.column import ColumnString, _to_string_sequence
from .hash import counter_type_from_dtype
from vaex.datatype import DataType
from vaex.docstrings import docsubst
from . import expresso
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
try:
collectionsAbc = collections.abc
except AttributeError:
collectionsAbc = collections
# TODO: repeated from dataframe.py
default_shape = 128
PRINT_MAX_COUNT = 10
expression_namespace = {}
expression_namespace['nan'] = np.nan
_binary_ops = [
dict(code="+", name='add', op=operator.add),
dict(code="in", name='contains', op=operator.contains),
dict(code="/", name='truediv', op=operator.truediv),
dict(code="//", name='floordiv', op=operator.floordiv),
dict(code="&", name='and', op=operator.and_),
dict(code="^", name='xor', op=operator.xor),
dict(code="|", name='or', op=operator.or_),
dict(code="**", name='pow', op=operator.pow),
dict(code="is", name='is', op=operator.is_),
dict(code="is not", name='is_not', op=operator.is_not),
dict(code="<<", name='lshift', op=operator.lshift),
dict(code="%", name='mod', op=operator.mod),
dict(code="*", name='mul', op=operator.mul),
dict(code=">>", name='rshift', op=operator.rshift),
dict(code="-", name='sub', op=operator.sub),
dict(code="<", name='lt', op=operator.lt),
dict(code="<=", name='le', op=operator.le),
dict(code="==", name='eq', op=operator.eq),
dict(code="!=", name='ne', op=operator.ne),
dict(code=">=", name='ge', op=operator.ge),
dict(code=">", name='gt', op=operator.gt),
]
if hasattr(operator, 'div'):
_binary_ops.append(dict(code="/", name='div', op=operator.div))
if hasattr(operator, 'matmul'):
_binary_ops.append(dict(code="@", name='matmul', op=operator.matmul))
reversable = 'add sub mul matmul truediv floordiv mod divmod pow lshift rshift and xor or'.split()
_unary_ops = [
dict(code="~", name='invert', op=operator.invert),
dict(code="-", name='neg', op=operator.neg),
dict(code="+", name='pos', op=operator.pos),
]
class Meta(type):
def __new__(upperattr_metaclass, future_class_name,
future_class_parents, attrs):
# attrs = {}
for op in _binary_ops:
def wrap(op=op):
def f(a, b):
self = a
# print(op, a, b)
if isinstance(b, str) and self.dtype.is_datetime:
b = np.datetime64(b)
if self.df.is_category(self.expression) and self.df._future_behaviour and not isinstance(b, Expression):
labels = self.df.category_labels(self.expression)
if b not in labels:
raise ValueError(f'Value {b} not present in {labels}')
b = labels.index(b)
a = self.index_values()
try:
stringy = isinstance(b, str) or b.is_string()
except:
# this can happen when expression is a literal, like '1' (used in propagate_unc)
# which causes the dtype to fail
stringy = False
if stringy:
if isinstance(b, str):
b = repr(b)
if op['code'] == '==':
expression = 'str_equals({0}, {1})'.format(a.expression, b)
elif op['code'] == '!=':
expression = 'str_notequals({0}, {1})'.format(a.expression, b)
elif op['code'] == '+':
expression = 'str_cat({0}, {1})'.format(a.expression, b)
else:
raise ValueError('operand %r not supported for string comparison' % op['code'])
return Expression(self.ds, expression=expression)
else:
if isinstance(b, Expression):
assert b.ds == a.ds
b = b.expression
elif isinstance(b, (np.timedelta64)):
unit, step = np.datetime_data(b.dtype)
assert step == 1
b = b.astype(np.uint64).item()
b = f'scalar_timedelta({b}, {unit!r})'
elif isinstance(b, (np.datetime64)):
b = f'scalar_datetime("{b}")'
elif isinstance(b, np.ndarray) and b.ndim == 0 and vaex.dtype_of(b).is_datetime:
b = f'scalar_datetime("{b}")'
elif isinstance(b, np.ndarray) and b.ndim == 0 and vaex.dtype_of(b).is_timedelta:
unit, step = np.datetime_data(b.dtype)
assert step == 1
b = b.astype(np.uint64).item()
b = f'scalar_timedelta({b}, {unit!r})'
expression = '({0} {1} {2})'.format(a.expression, op['code'], b)
return Expression(self.ds, expression=expression)
attrs['__%s__' % op['name']] = f
if op['name'] in reversable:
def f(a, b):
self = a
if isinstance(b, str):
if op['code'] == '+':
expression = 'str_cat({1}, {0})'.format(a.expression, repr(b))
else:
raise ValueError('operand %r not supported for string comparison' % op['code'])
return Expression(self.ds, expression=expression)
else:
if isinstance(b, Expression):
assert b.ds == a.ds
b = b.expression
expression = '({2} {1} {0})'.format(a.expression, op['code'], b)
return Expression(self.ds, expression=expression)
attrs['__r%s__' % op['name']] = f
wrap(op)
for op in _unary_ops:
def wrap(op=op):
def f(a):
self = a
expression = '{0}({1})'.format(op['code'], a.expression)
return Expression(self.ds, expression=expression)
attrs['__%s__' % op['name']] = f
wrap(op)
return type(future_class_name, future_class_parents, attrs)
class DateTime(object):
"""DateTime operations
Usually accessed using e.g. `df.birthday.dt.dayofweek`
"""
def __init__(self, expression):
self.expression = expression
class TimeDelta(object):
"""TimeDelta operations
Usually accessed using e.g. `df.delay.td.days`
"""
def __init__(self, expression):
self.expression = expression
class StringOperations(object):
"""String operations.
Usually accessed using e.g. `df.name.str.lower()`
"""
def __init__(self, expression):
self.expression = expression
class StringOperationsPandas(object):
"""String operations using Pandas Series (much slower)"""
def __init__(self, expression):
self.expression = expression
class StructOperations(collections.abc.Mapping):
"""Struct Array operations.
Usually accessed using e.g. `df.name.struct.get('field1')`
"""
def __init__(self, expression):
self.expression = expression
self._array = self.expression.values
def __iter__(self):
for name in self.keys():
yield name
def __getitem__(self, key):
"""Return struct field by either field name (string) or index position (index).
In case of ambiguous field names, a `LookupError` is raised.
"""
self._assert_struct_dtype()
return self.get(key)
def __len__(self):
"""Return the number of struct fields contained in struct array.
"""
self._assert_struct_dtype()
return len(self._array.type)
def keys(self):
"""Return all field names contained in struct array.
:returns: list of field names.
Example:
>>> import vaex
>>> import pyarrow as pa
>>> array = pa.StructArray.from_arrays(arrays=[[1,2], ["a", "b"]], names=["col1", "col2"])
>>> df = vaex.from_arrays(array=array)
>>> df
# array
0 {'col1': 1, 'col2': 'a'}
1 {'col1': 2, 'col2': 'b'}
>>> df.array.struct.keys()
["col1", "col2"]
"""
self._assert_struct_dtype()
return [field.name for field in self._array.type]
def values(self):
"""Return all fields as vaex expressions.
:returns: list of vaex expressions corresponding to each field in struct.
Example:
>>> import vaex
>>> import pyarrow as pa
>>> array = pa.StructArray.from_arrays(arrays=[[1,2], ["a", "b"]], names=["col1", "col2"])
>>> df = vaex.from_arrays(array=array)
>>> df
# array
0 {'col1': 1, 'col2': 'a'}
1 {'col1': 2, 'col2': 'b'}
>>> df.array.struct.values()
[Expression = struct_get(array, 0)
Length: 2 dtype: int64 (expression)
-----------------------------------
0 1
1 2,
Expression = struct_get(array, 1)
Length: 2 dtype: string (expression)
------------------------------------
0 a
1 b]
"""
self._assert_struct_dtype()
return [self[i] for i in range(len(self))]
def items(self):
"""Return all fields with names along with corresponding vaex expressions.
:returns: list of tuples with field names and fields as vaex expressions.
Example:
>>> import vaex
>>> import pyarrow as pa
>>> array = pa.StructArray.from_arrays(arrays=[[1,2], ["a", "b"]], names=["col1", "col2"])
>>> df = vaex.from_arrays(array=array)
>>> df
# array
0 {'col1': 1, 'col2': 'a'}
1 {'col1': 2, 'col2': 'b'}
>>> df.array.struct.items()
[('col1',
Expression = struct_get(array, 0)
Length: 2 dtype: int64 (expression)
-----------------------------------
0 1
1 2),
('col2',
Expression = struct_get(array, 1)
Length: 2 dtype: string (expression)
------------------------------------
0 a
1 b)]
"""
self._assert_struct_dtype()
return list(zip(self.keys(), self.values()))
@property
def dtypes(self):
"""Return all field names along with corresponding types.
:returns: a pandas series with keys as index and types as values.
Example:
>>> import vaex
>>> import pyarrow as pa
>>> array = pa.StructArray.from_arrays(arrays=[[1,2], ["a", "b"]], names=["col1", "col2"])
>>> df = vaex.from_arrays(array=array)
>>> df
# array
0 {'col1': 1, 'col2': 'a'}
1 {'col1': 2, 'col2': 'b'}
>>> df.array.struct.dtypes
col1 int64
col2 string
dtype: object
"""
self._assert_struct_dtype()
dtypes = (field.type for field in self._array.type)
vaex_dtypes = [DataType(x) for x in dtypes]
return pd.Series(vaex_dtypes, index=self.keys())
def _assert_struct_dtype(self):
"""Ensure that struct operations are only called on valid struct dtype.
"""
from vaex.struct import assert_struct_dtype
assert_struct_dtype(self._array)
class Expression(with_metaclass(Meta)):
"""Expression class"""
def __init__(self, ds, expression, ast=None, _selection=False):
import vaex.dataframe
self.ds : vaex.dataframe.DataFrame = ds
assert not isinstance(ds, Expression)
if isinstance(expression, Expression):
expression = expression.expression
if expression is None and ast is None:
raise ValueError('Not both expression and the ast can be None')
self._ast = ast
self._expression = expression
self.df._expressions.append(weakref.ref(self))
self._ast_names = None
self._selection = _selection # selection have an extra scope
@property
def _label(self):
        '''If a column is an invalid identifier, the expression is df['long name']
This will return 'long name' in that case, otherwise simply the expression
'''
ast = self.ast
if isinstance(ast, expresso._ast.Subscript):
value = ast.slice.value
if isinstance(value, expresso.ast_Str):
return value.s
if isinstance(value, str): # py39+
return value
return self.expression
def fingerprint(self):
fp = vaex.cache.fingerprint(self.expression, self.df.fingerprint(dependencies=self.dependencies()))
return f'expression-{fp}'
def copy(self, df=None):
"""Efficiently copies an expression.
Expression objects have both a string and AST representation. Creating
the AST representation involves parsing the expression, which is expensive.
Using copy will deepcopy the AST when the expression was already parsed.
:param df: DataFrame for which the expression will be evaluated (self.df if None)
"""
# expression either has _expression or _ast not None
if df is None:
df = self.df
if self._expression is not None:
expression = Expression(df, self._expression)
if self._ast is not None:
expression._ast = copy.deepcopy(self._ast)
elif self._ast is not None:
expression = Expression(df, copy.deepcopy(self._ast))
if self._ast is not None:
expression._ast = self._ast
return expression
@property
def ast(self):
"""Returns the abstract syntax tree (AST) of the expression"""
if self._ast is None:
self._ast = expresso.parse_expression(self.expression)
return self._ast
@property
def ast_names(self):
if self._ast_names is None:
self._ast_names = expresso.names(self.ast)
return self._ast_names
@property
def _ast_slices(self):
return expresso.slices(self.ast)
@property
def expression(self):
if self._expression is None:
self._expression = expresso.node_to_string(self.ast)
return self._expression
@expression.setter
def expression(self, value):
# if we reassign to expression, we clear the ast cache
if value != self._expression:
self._expression = value
self._ast = None
def __bool__(self):
"""Cast expression to boolean. Only supports (<expr1> == <expr2> and <expr1> != <expr2>)
The main use case for this is to support assigning to traitlets. e.g.:
>>> bool(expr1 == expr2)
This will return True when expr1 and expr2 are exactly the same (in string representation). And similarly for:
>>> bool(expr != expr2)
All other cases will return True.
"""
# this is to make traitlets detect changes
import _ast
if isinstance(self.ast, _ast.Compare) and len(self.ast.ops) == 1 and isinstance(self.ast.ops[0], _ast.Eq):
return expresso.node_to_string(self.ast.left) == expresso.node_to_string(self.ast.comparators[0])
if isinstance(self.ast, _ast.Compare) and len(self.ast.ops) == 1 and isinstance(self.ast.ops[0], _ast.NotEq):
return expresso.node_to_string(self.ast.left) != expresso.node_to_string(self.ast.comparators[0])
return True
@property
def df(self):
# lets gradually move to using .df
return self.ds
@property
def dtype(self):
return self.df.data_type(self)
# TODO: remove this method?
def data_type(self, array_type=None, axis=0):
return self.df.data_type(self, axis=axis)
@property
def shape(self):
return self.df._shape_of(self)
@property
def ndim(self):
return 1 if self.dtype.is_list else len(self.df._shape_of(self))
def to_arrow(self, convert_to_native=False):
'''Convert to Apache Arrow array (will byteswap/copy if convert_to_native=True).'''
values = self.values
return vaex.array_types.to_arrow(values, convert_to_native=convert_to_native)
def __arrow_array__(self, type=None):
values = self.to_arrow()
return pa.array(values, type=type)
def to_numpy(self, strict=True):
"""Return a numpy representation of the data"""
values = self.values
return vaex.array_types.to_numpy(values, strict=strict)
def to_dask_array(self, chunks="auto"):
import dask.array as da
import uuid
dtype = self.dtype
chunks = da.core.normalize_chunks(chunks, shape=self.shape, dtype=dtype.numpy)
name = 'vaex-expression-%s' % str(uuid.uuid1())
def getitem(df, item):
assert len(item) == 1
item = item[0]
start, stop, step = item.start, item.stop, item.step
assert step in [None, 1]
return self.evaluate(start, stop, parallel=False)
if hasattr(da.core, "getem"):
dsk = da.core.getem(name, chunks, getitem=getitem, shape=self.shape, dtype=dtype.numpy)
dsk[name] = self
return da.Array(dsk, name, chunks, dtype=dtype.numpy)
else:
dsk = da.core.graph_from_arraylike(self, name=name, chunks=chunks, getitem=getitem, shape=self.shape, dtype=dtype.numpy)
return da.Array(dsk, name, chunks, dtype=dtype.numpy)
def to_pandas_series(self):
"""Return a pandas.Series representation of the expression.
Note: Pandas is likely to make a memory copy of the data.
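        Example (minimal sketch):
        >>> df = vaex.from_arrays(x=[1, 2, 3])
        >>> df.x.to_pandas_series()  # doctest: +SKIP
        0    1
        1    2
        2    3
        dtype: int64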
"""
import pandas as pd
return pd.Series(self.values)
def __getitem__(self, slicer):
"""Provides row and optional field access (struct arrays) via bracket notation.
Examples:
>>> import vaex
>>> import pyarrow as pa
>>> array = pa.StructArray.from_arrays(arrays=[[1, 2, 3], ["a", "b", "c"]], names=["col1", "col2"])
>>> df = vaex.from_arrays(array=array, integer=[5, 6, 7])
>>> df
# array integer
0 {'col1': 1, 'col2': 'a'} 5
1 {'col1': 2, 'col2': 'b'} 6
2 {'col1': 3, 'col2': 'c'} 7
>>> df.integer[1:]
Expression = integer
Length: 2 dtype: int64 (column)
-------------------------------
0 6
1 7
>>> df.array[1:]
Expression = array
Length: 2 dtype: struct<col1: int64, col2: string> (column)
-----------------------------------------------------------
0 {'col1': 2, 'col2': 'b'}
1 {'col1': 3, 'col2': 'c'}
>>> df.array[:, "col1"]
Expression = struct_get(array, 'col1')
Length: 3 dtype: int64 (expression)
-----------------------------------
0 1
1 2
2 3
>>> df.array[1:, ["col1"]]
Expression = struct_project(array, ['col1'])
Length: 2 dtype: struct<col1: int64> (expression)
-------------------------------------------------
0 {'col1': 2}
1 {'col1': 3}
>>> df.array[1:, ["col2", "col1"]]
Expression = struct_project(array, ['col2', 'col1'])
Length: 2 dtype: struct<col2: string, col1: int64> (expression)
---------------------------------------------------------------
0 {'col2': 'b', 'col1': 2}
1 {'col2': 'c', 'col1': 3}
"""
if isinstance(slicer, slice):
indices = slicer
fields = None
elif isinstance(slicer, tuple) and len(slicer) == 2:
indices, fields = slicer
else:
raise NotImplementedError
if indices != slice(None):
expr = self.df[indices][self.expression]
else:
expr = self
if fields is None:
return expr
elif isinstance(fields, (int, str)):
if self.dtype.is_struct:
return expr.struct.get(fields)
elif self.ndim == 2:
if not isinstance(fields, int):
raise TypeError(f'Expected an integer, not {type(fields)}')
else:
return expr.getitem(fields)
else:
raise TypeError(f'Only getting struct fields or 2d columns supported')
elif isinstance(fields, (tuple, list)):
return expr.struct.project(fields)
else:
raise TypeError("Invalid type provided. Needs to be None, str or list/tuple.")
def __abs__(self):
"""Returns the absolute value of the expression"""
return self.abs()
@property
def dt(self):
"""Gives access to datetime operations via :py:class:`DateTime`"""
return DateTime(self)
@property
def td(self):
"""Gives access to timedelta operations via :py:class:`TimeDelta`"""
return TimeDelta(self)
@property
def str(self):
"""Gives access to string operations via :py:class:`StringOperations`"""
return StringOperations(self)
@property
def str_pandas(self):
"""Gives access to string operations via :py:class:`StringOperationsPandas` (using Pandas Series)"""
return StringOperationsPandas(self)
@property
def struct(self):
"""Gives access to struct operations via :py:class:`StructOperations`"""
return StructOperations(self)
@property
def values(self):
return self.evaluate()
def derivative(self, var, simplify=True):
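        '''Return the symbolic derivative of this expression with respect to var (computed on the expression AST); simplify applies basic simplification.'''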
var = _ensure_string_from_expression(var)
return self.__class__(self.ds, expresso.derivative(self.ast, var, simplify=simplify))
    def expand(self, stop=[]):
        """Expand the expression such that no virtual columns occur, only normal columns.
Example:
>>> df = vaex.example()
>>> r = np.sqrt(df.data.x**2 + df.data.y**2)
>>> r.expand().expression
'sqrt(((x ** 2) + (y ** 2)))'
"""
stop = _ensure_strings_from_expressions(stop)
def translate(id):
if id in self.ds.virtual_columns and id not in stop:
return self.ds.virtual_columns[id]
expr = expresso.translate(self.ast, translate)
return Expression(self.ds, expr)
def dependencies(self):
'''Get all dependencies of this expression, including ourselves'''
return self.variables(ourself=True)
def variables(self, ourself=False, expand_virtual=True, include_virtual=True):
"""Return a set of variables this expression depends on.
Example:
>>> df = vaex.example()
>>> r = np.sqrt(df.data.x**2 + df.data.y**2)
>>> r.variables()
{'x', 'y'}
"""
variables = set()
def record(varname):
# always do this for selection
if self._selection and self.df.has_selection(varname):
selection = self.df.get_selection(varname)
variables.update(selection.dependencies(self.df))
# do this recursively for virtual columns
if varname in self.ds.virtual_columns and varname not in variables:
if (include_virtual and (varname != self.expression)) or (varname == self.expression and ourself):
variables.add(varname)
if expand_virtual:
variables.update(self.df[self.df.virtual_columns[varname]].variables(ourself=include_virtual, include_virtual=include_virtual))
# we usually don't want to record ourself
elif varname != self.expression or ourself:
variables.add(varname)
expresso.translate(self.ast, record)
        # df is a builtin, don't record it; if df is a column name, it will be collected as
# df['df']
variables -= {'df'}
for varname in self._ast_slices:
if varname in self.df.virtual_columns and varname not in variables:
if (include_virtual and (f"df['{varname}']" != self.expression)) or (f"df['{varname}']" == self.expression and ourself):
variables.add(varname)
if expand_virtual:
if varname in self.df.virtual_columns:
variables |= self.df[self.df.virtual_columns[varname]].variables(ourself=include_virtual, include_virtual=include_virtual)
elif f"df['{varname}']" != self.expression or ourself:
variables.add(varname)
return variables
    def _graph(self):
        """Return a graph containing the dependencies of this expression
Structure is:
[<string expression>, <function name if callable>, <function object if callable>, [subgraph/dependencies, ....]]
"""
expression = self.expression
def walk(node):
if isinstance(node, six.string_types):
if node in self.ds.virtual_columns:
ex = Expression(self.ds, self.ds.virtual_columns[node])
return [node, None, None, [ex._graph()]]
else:
return node
else:
fname, node_repr, deps = node
if len(node_repr) > 30: # clip too long expressions
node_repr = node_repr[:26] + ' ....'
deps = [walk(dep) for dep in deps]
obj = self.ds.functions.get(fname)
# we don't want the wrapper, we want the underlying object
if isinstance(obj, Function):
obj = obj.f
if isinstance(obj, FunctionSerializablePickle):
obj = obj.f
return [node_repr, fname, obj, deps]
return walk(expresso._graph(expression))
def _graphviz(self, dot=None):
"""Return a graphviz.Digraph object with a graph of the expression"""
from graphviz import Graph, Digraph
node = self._graph()
dot = dot or Digraph(comment=self.expression)
def walk(node):
if isinstance(node, six.string_types):
dot.node(node, node)
return node, node
else:
node_repr, fname, fobj, deps = node
node_id = node_repr
dot.node(node_id, node_repr)
for dep in deps:
dep_id, dep = walk(dep)
dot.edge(node_id, dep_id)
return node_id, node
walk(node)
return dot
def __str__(self):
return self.expression
# def __array__(self, dtype=None):
# '''For casting to a numpy array
# Example:
# >>> np.array(ds.x**2)
# '''
# return self.ds.evaluate(self)
def tolist(self, i1=None, i2=None):
'''Short for expr.evaluate().tolist()'''
values = self.evaluate(i1=i1, i2=i2)
if isinstance(values, (pa.Array, pa.ChunkedArray)):
return values.to_pylist()
return values.tolist()
if not os.environ.get('VAEX_DEBUG', ''):
def __repr__(self):
return self._repr_plain_()
def _repr_plain_(self):
from .formatting import _format_value
def format(values):
for i in range(len(values)):
value = values[i]
yield _format_value(value)
colalign = ("right",) * 2
try:
N = len(self.ds)
if N <= PRINT_MAX_COUNT:
values = format(self.evaluate(0, N))
values = tabulate.tabulate([[i, k] for i, k in enumerate(values)], tablefmt='plain', colalign=colalign)
else:
values_head = format(self.evaluate(0, PRINT_MAX_COUNT//2))
values_tail = format(self.evaluate(N - PRINT_MAX_COUNT//2, N))
values_head = list(zip(range(PRINT_MAX_COUNT//2), values_head)) +\
list(zip(range(N - PRINT_MAX_COUNT//2, N), values_tail))
values = tabulate.tabulate([k for k in values_head], tablefmt='plain', colalign=colalign)
values = values.split('\n')
width = max(map(len, values))
separator = '\n' + '...'.center(width, ' ') + '\n'
values = "\n".join(values[:PRINT_MAX_COUNT//2]) + separator + "\n".join(values[PRINT_MAX_COUNT//2:]) + '\n'
except Exception as e:
values = 'Error evaluating: %r' % e
expression = self.expression
if len(expression) > 60:
expression = expression[:57] + '...'
info = 'Expression = ' + expression + '\n'
dtype = self.dtype
        # virtual columns are also returned by get_column_names(hidden=True), so check them first
        if self.expression in self.ds.virtual_columns:
            state = "virtual column"
        elif self.expression in self.ds.get_column_names(hidden=True):
            state = "column"
        else:
            state = "expression"
line = 'Length: {:,} dtype: {} ({})\n'.format(len(self.ds), dtype, state)
info += line
info += '-' * (len(line)-1) + '\n'
info += values
return info
def count(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, edges=False, progress=None):
'''Shortcut for ds.count(expression, ...), see `Dataset.count`'''
kwargs = dict(locals())
del kwargs['self']
kwargs['expression'] = self.expression
return self.ds.count(**kwargs)
def sum(self, axis=None, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
'''Sum elements over given axis.
If no axis is given, it will sum over all axes.
For non list elements, this is a shortcut for ds.sum(expression, ...), see `Dataset.sum`.
>>> list_data = [1, 2, None], None, [], [1, 3, 4, 5]
>>> df = vaex.from_arrays(some_list=pa.array(list_data))
>>> df.some_list.sum().item() # will sum over all axis
16
>>> df.some_list.sum(axis=1).tolist() # sums the list elements
[3, None, 0, 13]
:param int axis: Axis over which to determine the unique elements (None will flatten arrays or lists)
'''
expression = self
if axis is None:
dtype = self.dtype
if dtype.is_list:
axis = [0]
while dtype.is_list:
axis.append(axis[-1] + 1)
dtype = dtype.value_type
elif self.ndim > 1:
axis = list(range(self.ndim))
else:
axis = [0]
elif not isinstance(axis, list):
axis = [axis]
axis = list(set(axis)) # remove repeated elements
dtype = self.dtype
if self.ndim > 1:
array_axes = axis.copy()
if 0 in array_axes:
array_axes.remove(0)
expression = expression.array_sum(axis=array_axes)
for i in array_axes:
axis.remove(i)
del i
del array_axes
elif 1 in axis:
if self.dtype.is_list:
expression = expression.list_sum()
if axis:
axis.remove(1)
else:
raise ValueError(f'axis=1 not supported for dtype={dtype}')
if axis and axis[0] != 0:
raise ValueError(f'Only axis 0 or 1 is supported')
if expression.ndim > 1:
raise ValueError(f'Cannot sum non-scalar (ndim={expression.ndim})')
if axis is None or 0 in axis:
kwargs = dict(locals())
del kwargs['self']
del kwargs['axis']
del kwargs['dtype']
kwargs['expression'] = expression.expression
return self.ds.sum(**kwargs)
else:
return expression
def mean(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
'''Shortcut for ds.mean(expression, ...), see `Dataset.mean`'''
kwargs = dict(locals())
del kwargs['self']
kwargs['expression'] = self.expression
return self.ds.mean(**kwargs)
def std(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
'''Shortcut for ds.std(expression, ...), see `Dataset.std`'''
kwargs = dict(locals())
del kwargs['self']
kwargs['expression'] = self.expression
return self.ds.std(**kwargs)
def var(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
'''Shortcut for ds.std(expression, ...), see `Dataset.var`'''
kwargs = dict(locals())
del kwargs['self']
kwargs['expression'] = self.expression
return self.ds.var(**kwargs)
def skew(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
'''Shortcut for df.skew(expression, ...), see `DataFrame.skew`'''
kwargs = dict(locals())
del kwargs['self']
kwargs['expression'] = self.expression
return self.df.skew(**kwargs)
def kurtosis(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
'''Shortcut for df.kurtosis(expression, ...), see `DataFrame.kurtosis`'''
kwargs = dict(locals())
del kwargs['self']
kwargs['expression'] = self.expression
return self.df.kurtosis(**kwargs)
def minmax(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
'''Shortcut for ds.minmax(expression, ...), see `Dataset.minmax`'''
kwargs = dict(locals())
del kwargs['self']
kwargs['expression'] = self.expression
return self.ds.minmax(**kwargs)
def min(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
'''Shortcut for ds.min(expression, ...), see `Dataset.min`'''
kwargs = dict(locals())
del kwargs['self']
kwargs['expression'] = self.expression
return self.ds.min(**kwargs)
def max(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
'''Shortcut for ds.max(expression, ...), see `Dataset.max`'''
kwargs = dict(locals())
del kwargs['self']
kwargs['expression'] = self.expression
return self.ds.max(**kwargs)
    def nop(self):
        """Evaluates the expression and drops the result; useful for benchmarking, since vaex is usually lazy"""
return self.ds.nop(self.expression)
@property
def transient(self):
"""If this expression is not transient (e.g. on disk) optimizations can be made"""
return self.expand().expression not in self.ds.columns
@property
def masked(self):
"""Alias to df.is_masked(expression)"""
return self.ds.is_masked(self.expression)
@docsubst
def value_counts(self, dropna=False, dropnan=False, dropmissing=False, ascending=False, progress=False, axis=None):
"""Computes counts of unique values.
WARNING:
* If the expression/column is not categorical, it will be converted on the fly
* dropna is False by default, it is True by default in pandas
:param dropna: {dropna}
:param dropnan: {dropnan}
:param dropmissing: {dropmissing}
        :param ascending: when False (default) it will report the most frequently occurring item first
:param progress: {progress}
:param bool axis: Axis over which to determine the unique elements (None will flatten arrays or lists)
:returns: Pandas series containing the counts
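        Example (minimal sketch; exact ordering may vary):
        >>> df = vaex.from_arrays(colors=['red', 'green', 'blue', 'green'])
        >>> df.colors.value_counts()  # doctest: +SKIP
        green    2
        red      1
        blue     1
        dtype: int64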
"""
from pandas import Series
if axis is not None:
raise ValueError('only axis=None is supported')
if dropna:
dropnan = True
dropmissing = True
data_type = self.data_type()
data_type_item = self.data_type(axis=-1)
transient = self.transient or self.ds.filtered or self.ds.is_masked(self.expression)
if self.is_string() and not transient:
# string is a special case, only ColumnString are not transient
ar = self.ds.columns[self.expression]
if not isinstance(ar, ColumnString):
transient = True
counter_type = counter_type_from_dtype(data_type_item, transient)
counters = [None] * self.ds.executor.thread_pool.nthreads
def map(thread_index, i1, i2, selection_masks, blocks):
ar = blocks[0]
if len(ar) == 0:
return 0
if counters[thread_index] is None:
counters[thread_index] = counter_type(1)
if data_type.is_list and axis is None:
try:
ar = ar.values
except AttributeError: # pyarrow ChunkedArray
ar = ar.combine_chunks().values
if data_type_item.is_string:
ar = _to_string_sequence(ar)
else:
ar = vaex.array_types.to_numpy(ar)
if np.ma.isMaskedArray(ar):
mask = np.ma.getmaskarray(ar)
counters[thread_index].update(ar, mask)
else:
counters[thread_index].update(ar)
return 0
def reduce(a, b):
return a+b
progressbar = vaex.utils.progressbars(progress, title="value counts")
self.ds.map_reduce(map, reduce, [self.expression], delay=False, progress=progressbar, name='value_counts', info=True, to_numpy=False)
counters = [k for k in counters if k is not None]
counter = counters[0]
for other in counters[1:]:
counter.merge(other)
if data_type_item.is_object:
# for dtype=object we use the old interface
# since we don't care about multithreading (cannot release the GIL)
key_values = counter.extract()
keys = list(key_values.keys())
counts = list(key_values.values())
if counter.has_nan and not dropnan:
keys = [np.nan] + keys
counts = [counter.nan_count] + counts
if counter.has_null and not dropmissing:
keys = [None] + keys
counts = [counter.null_count] + counts
if dropmissing and None in keys:
# we still can have a None in the values
index = keys.index(None)
keys.pop(index)
counts.pop(index)
counts = np.array(counts)
keys = np.array(keys)
else:
keys = counter.key_array()
counts = counter.counts()
if isinstance(keys, (vaex.strings.StringList32, vaex.strings.StringList64)):
keys = vaex.strings.to_arrow(keys)
deletes = []
if counter.has_nan:
null_offset = 1
else:
null_offset = 0
if dropmissing and counter.has_null:
deletes.append(counter.null_index)
if dropnan and counter.has_nan:
deletes.append(counter.nan_index)
if vaex.array_types.is_arrow_array(keys):
indices = np.delete(np.arange(len(keys)), deletes)
keys = keys.take(indices)
else:
keys = np.delete(keys, deletes)
if not dropmissing and counter.has_null:
mask = np.zeros(len(keys), dtype=np.uint8)
mask[null_offset] = 1
keys = np.ma.array(keys, mask=mask)
counts = np.delete(counts, deletes)
order = np.argsort(counts)
if not ascending:
order = order[::-1]
counts = counts[order]
keys = keys.take(order)
keys = keys.tolist()
if None in keys:
index = keys.index(None)
keys.pop(index)
keys = ["missing"] + keys
counts = counts.tolist()
count_null = counts.pop(index)
counts = [count_null] + counts
return Series(counts, index=keys)
@docsubst
def unique(self, dropna=False, dropnan=False, dropmissing=False, selection=None, axis=None, limit=None, limit_raise=True, array_type='list', progress=None, delay=False):
"""Returns all unique values.
:param dropna: {dropna}
:param dropnan: {dropnan}
:param dropmissing: {dropmissing}
:param selection: {selection}
:param bool axis: Axis over which to determine the unique elements (None will flatten arrays or lists)
:param int limit: {limit}
:param bool limit_raise: {limit_raise}
:param bool array_type: {array_type}
:param progress: {progress}
:param bool delay: {delay}
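        Example (minimal sketch; order of the returned values may vary):
        >>> df = vaex.from_arrays(x=[1, 1, 2, 3])
        >>> df.x.unique()  # doctest: +SKIP
        [1, 2, 3]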
"""
return self.ds.unique(self, dropna=dropna, dropnan=dropnan, dropmissing=dropmissing, selection=selection, array_type=array_type, axis=axis, limit=limit, limit_raise=limit_raise, progress=progress, delay=delay)
@docsubst
def nunique(self, dropna=False, dropnan=False, dropmissing=False, selection=None, axis=None, limit=None, limit_raise=True, progress=None, delay=False):
"""Counts number of unique values, i.e. `len(df.x.unique()) == df.x.nunique()`.
:param dropna: {dropna}
:param dropnan: {dropnan}
:param dropmissing: {dropmissing}
:param selection: {selection}
:param bool axis: Axis over which to determine the unique elements (None will flatten arrays or lists)
:param int limit: {limit}
:param bool limit_raise: {limit_raise}
:param progress: {progress}
:param bool delay: {delay}
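        Example (minimal sketch):
        >>> df = vaex.from_arrays(x=[1, 1, 2, 2, 3])
        >>> df.x.nunique()  # doctest: +SKIP
        3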
"""
def key_function():
fp = vaex.cache.fingerprint(self.fingerprint(), dropna, dropnan, dropmissing, selection, axis, limit)
return f'nunique-{fp}'
@vaex.cache._memoize(key_function=key_function, delay=delay)
def f():
value = self.unique(dropna=dropna, dropnan=dropnan, dropmissing=dropmissing, selection=selection, axis=axis, limit=limit, limit_raise=limit_raise, array_type=None, progress=progress, delay=delay)
if delay:
return value.then(len)
else:
return len(value)
return f()
    def countna(self):
        """Returns the number of Not Available (N/A) values in the expression.
This includes missing values and np.nan values.
"""
return self.isna().sum().item() # so the output is int, not array
def countnan(self):
"""Returns the number of NaN values in the expression."""
return self.isnan().sum().item() # so the output is int, not array
def countmissing(self):
"""Returns the number of missing values in the expression."""
return self.ismissing().sum().item() # so the output is int, not array
def evaluate(self, i1=None, i2=None, out=None, selection=None, parallel=True, array_type=None):
return self.ds.evaluate(self, i1, i2, out=out, selection=selection, array_type=array_type, parallel=parallel)
    # TODO: it is not so elegant that we need to have a custom version of this;
    # reconsider how the meta class auto-adds this method
def fillna(self, value, fill_nan=True, fill_masked=True):
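        '''Returns an array where NaN values (if fill_nan) and missing/masked values (if fill_masked) are replaced by value.'''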
expression = self._upcast_for(value)
return self.ds.func.fillna(expression, value=value, fill_nan=fill_nan, fill_masked=fill_masked)
def _upcast_for(self, value):
# make sure the dtype is compatible with value
expression = self
dtype = self.dtype
if dtype == int:
required_dtype = vaex.utils.required_dtype_for_int(value, signed=dtype.is_signed)
if required_dtype.itemsize > dtype.numpy.itemsize:
expression = self.astype(str(required_dtype))
return expression
def fillmissing(self, value):
'''Returns an array where missing values are replaced by value.
        See :meth:`ismissing` for the definition of missing values.
'''
expression = self._upcast_for(value)
return self.df.func.fillmissing(expression, value=value)
def clip(self, lower=None, upper=None):
return self.ds.func.clip(self, lower, upper)
def jit_metal(self, verbose=False):
from .metal import FunctionSerializableMetal
f = FunctionSerializableMetal.build(self.expression, df=self.ds, verbose=verbose, compile=self.ds.is_local())
function = self.ds.add_function('_jit', f, unique=True)
return function(*f.arguments)
def jit_numba(self, verbose=False):
f = FunctionSerializableNumba.build(self.expression, df=self.ds, verbose=verbose, compile=self.ds.is_local())
function = self.ds.add_function('_jit', f, unique=True)
return function(*f.arguments)
def jit_cuda(self, verbose=False):
f = FunctionSerializableCuda.build(self.expression, df=self.ds, verbose=verbose, compile=self.ds.is_local())
function = self.ds.add_function('_jit', f, unique=True)
return function(*f.arguments)
def jit_pythran(self, verbose=False):
import logging
logger = logging.getLogger('pythran')
log_level = logger.getEffectiveLevel()
try:
if not verbose:
logger.setLevel(logging.ERROR)
import pythran
import imp
import hashlib
# self._import_all(module)
names = []
funcs = set(expression_namespace.keys())
expression = self.expression
if expression in self.ds.virtual_columns:
expression = self.ds.virtual_columns[self.expression]
all_vars = self.ds.get_column_names(virtual=True, strings=True, hidden=True) + list(self.ds.variables.keys())
vaex.expresso.validate_expression(expression, all_vars, funcs, names)
names = list(set(names))
types = ", ".join(str(self.ds.data_type(name)) + "[]" for name in names)
argstring = ", ".join(names)
code = '''
from numpy import *
#pythran export f({2})
def f({0}):
return {1}'''.format(argstring, expression, types)
if verbose:
print("generated code")
print(code)
m = hashlib.md5()
m.update(code.encode('utf-8'))
module_name = "pythranized_" + m.hexdigest()
# print(m.hexdigest())
module_path = pythran.compile_pythrancode(module_name, code, extra_compile_args=["-DBOOST_SIMD", "-march=native"] + [] if verbose else ["-w"])
module = imp.load_dynamic(module_name, module_path)
function_name = "f_" + m.hexdigest()
function = self.ds.add_function(function_name, module.f, unique=True)
return Expression(self.ds, "{0}({1})".format(function.name, argstring))
finally:
logger.setLevel(log_level)
def _rename(self, old, new, inplace=False):
expression = self if inplace else self.copy()
if old in expression.ast_names:
for node in expression.ast_names[old]:
node.id = new
expression._ast_names[new] = expression._ast_names.pop(old)
slices = expression._ast_slices
if old in slices:
for node in slices[old]:
if node.value.id == 'df' and isinstance(node.slice.value, ast.Str):
node.slice.value.s = new
else: # py39
node.slice.value = new
expression._expression = None # resets the cached string representation
return expression
def astype(self, data_type):
if vaex.array_types.is_string_type(data_type) or data_type == str:
return self.ds.func.astype(self, 'str')
else:
return self.ds.func.astype(self, str(data_type))
def isin(self, values, use_hashmap=True):
"""Lazily tests if each value in the expression is present in values.
:param values: List/array of values to check
:param use_hashmap: use a hashmap or not (especially faster when values contains many elements)
:return: :class:`Expression` with the lazy expression.
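        Example (minimal sketch):
        >>> df = vaex.from_arrays(x=[1, 2, 3, 4])
        >>> df.x.isin([2, 4]).tolist()  # doctest: +SKIP
        [False, True, False, True]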
"""
if self.df.is_category(self) and self.df._future_behaviour:
labels = self.df.category_labels(self.expression)
indices = []
for value in values:
if value not in labels:
pass
else:
indices.append(labels.index(value))
indices = np.array(indices, dtype=self.index_values().dtype.numpy)
return self.index_values().isin(indices, use_hashmap=use_hashmap)
if self.is_string():
values = pa.array(values, type=pa.large_string())
else:
# ensure that values are the same dtype as the expression (otherwise the set downcasts at the C++ level during execution)
values = np.array(values, dtype=self.dtype.numpy)
if use_hashmap:
# easiest way to create a set is using the vaex dataframe
df_values = vaex.from_arrays(x=values)
ordered_set = df_values._set(df_values.x)
var = self.df.add_variable('var_isin_ordered_set', ordered_set, unique=True)
return self.df['isin_set(%s, %s)' % (self, var)]
else:
var = self.df.add_variable('isin_values', values, unique=True)
return self.df['isin(%s, %s)' % (self, var)]
def apply(self, f, vectorize=False, multiprocessing=True):
"""Apply a function along all values of an Expression.
Shorthand for ``df.apply(f, arguments=[expression])``, see :meth:`DataFrame.apply`
Example:
>>> df = vaex.example()
>>> df.x
Expression = x
Length: 330,000 dtype: float64 (column)
---------------------------------------
0 -0.777471
1 3.77427
2 1.37576
3 -7.06738
4 0.243441
>>> def func(x):
... return x**2
>>> df.x.apply(func)
Expression = lambda_function(x)
Length: 330,000 dtype: float64 (expression)
-------------------------------------------
0 0.604461
1 14.2451
2 1.89272
3 49.9478
4 0.0592637
:param f: A function to be applied on the Expression values
        :param vectorize: Call f with arrays instead of scalars (for better performance).
:param bool multiprocessing: Use multiple processes to avoid the GIL (Global interpreter lock).
:returns: A function that is lazily evaluated when called.
"""
return self.ds.apply(f, [self.expression], vectorize=vectorize, multiprocessing=multiprocessing)
def dropmissing(self):
# TODO: df.dropna does not support inplace
# df = self.df if inplace else self.df.copy()
df = self.ds
df = df.dropmissing(column_names=[self.expression])
return df._expr(self.expression)
def dropnan(self):
# TODO: df.dropna does not support inplace
# df = self.df if inplace else self.df.copy()
df = self.ds
df = df.dropnan(column_names=[self.expression])
return df._expr(self.expression)
def dropna(self):
# TODO: df.dropna does not support inplace
# df = self.df if inplace else self.df.copy()
df = self.ds
df = df.dropna(column_names=[self.expression])
return df._expr(self.expression)
def map(self, mapper, nan_value=None, missing_value=None, default_value=None, allow_missing=False, axis=None):
"""Map values of an expression or in memory column according to an input
dictionary or a custom callable function.
Example:
>>> import vaex
>>> df = vaex.from_arrays(color=['red', 'red', 'blue', 'red', 'green'])
>>> mapper = {'red': 1, 'blue': 2, 'green': 3}
>>> df['color_mapped'] = df.color.map(mapper)
>>> df
# color color_mapped
0 red 1
1 red 1
2 blue 2
3 red 1
4 green 3
>>> import numpy as np
>>> df = vaex.from_arrays(type=[0, 1, 2, 2, 2, np.nan])
>>> df['role'] = df['type'].map({0: 'admin', 1: 'maintainer', 2: 'user', np.nan: 'unknown'})
>>> df
# type role
0 0 admin
1 1 maintainer
2 2 user
3 2 user
4 2 user
5 nan unknown
>>> import vaex
>>> import numpy as np
>>> df = vaex.from_arrays(type=[0, 1, 2, 2, 2, 4])
>>> df['role'] = df['type'].map({0: 'admin', 1: 'maintainer', 2: 'user'}, default_value='unknown')
>>> df
# type role
0 0 admin
1 1 maintainer
2 2 user
3 2 user
4 2 user
5 4 unknown
:param mapper: dict like object used to map the values from keys to values
:param nan_value: value to be used when a nan is present (and not in the mapper)
:param missing_value: value to use used when there is a missing value
:param default_value: value to be used when a value is not in the mapper (like dict.get(key, default))
:param allow_missing: used to signal that values in the mapper should map to a masked array with missing values,
assumed True when default_value is not None.
:param bool axis: Axis over which to determine the unique elements (None will flatten arrays or lists)
:return: A vaex expression
:rtype: vaex.expression.Expression
"""
assert isinstance(mapper, collectionsAbc.Mapping), "mapper should be a dict like object"
if axis is not None:
raise ValueError('only axis=None is supported')
df = self.ds
mapper_keys = list(mapper.keys())
mapper_values = list(mapper.values())
try:
mapper_nan_key_mask = np.isnan(mapper_keys)
except TypeError:
# case where we have mixed strings/nan etc
def try_nan(x):
try:
return np.isnan(x)
except:
return False
mapper_nan_key_mask = np.array([try_nan(k) for k in mapper_keys])
mapper_has_nan = mapper_nan_key_mask.sum() > 0
if mapper_nan_key_mask.sum() > 1:
raise ValueError('Insanity, you provided multiple nan values as keys for your dict')
if mapper_has_nan:
for key, value in mapper.items():
if key != key:
nan_value = value
for key, value in mapper.items():
if key is None:
missing_value = value
if axis is not None:
raise ValueError('only axis=None is supported')
        # we map the keys to ordinal values [0, N-1] using the set
key_set = df._set(self.expression, flatten=axis is None)
found_keys = vaex.array_types.tolist(key_set.keys())
# we want all possible values to be converted
# so mapper's key should be a superset of the keys found
use_masked_array = False
if default_value is not None:
allow_missing = True
if allow_missing:
use_masked_array = True
if not set(mapper_keys).issuperset(found_keys):
missing = set(found_keys).difference(mapper_keys)
missing0 = list(missing)[0]
only_has_nan = missing0 != missing0 and len(missing) == 1
if allow_missing:
if default_value is not None:
value0 = list(mapper.values())[0]
assert np.issubdtype(type(default_value), np.array(value0).dtype), "default value has to be of similar type"
else:
if only_has_nan:
pass # we're good, the hash mapper deals with nan
else:
if missing != {None}:
raise ValueError('Missing %i values in mapper: %s' % (len(missing), missing))
# and these are the corresponding choices
# note that here we map 'planned' unknown values to the default values
# and later on in _choose, we map values not even seen in the dataframe
# to the default_value
dtype_item = self.data_type(self.expression, axis=-1)
mapper_keys = dtype_item.create_array(mapper_keys)
fingerprint = key_set.fingerprint + "-mapper"
hash_map_unique = vaex.hash.HashMapUnique.from_keys(mapper_keys, fingerprint=fingerprint, dtype=dtype_item)
indices = hash_map_unique.map(mapper_keys)
mapper_values = [mapper_values[i] for i in indices]
choices = [default_value] + [mapper_values[index] for index in indices]
choices = pa.array(choices)
key_hash_map_unique_name = df.add_variable('map_key_hash_map_unique', hash_map_unique, unique=True)
choices_name = df.add_variable('map_choices', choices, unique=True)
if allow_missing:
expr = '_map({}, {}, {}, use_missing={!r}, axis={!r})'.format(self, key_hash_map_unique_name, choices_name, use_masked_array, axis)
else:
expr = '_map({}, {}, {}, axis={!r})'.format(self, key_hash_map_unique_name, choices_name, axis)
return Expression(df, expr)
@property
def is_masked(self):
return self.ds.is_masked(self.expression)
def is_string(self):
return self.df.is_string(self.expression)
class FunctionSerializable(object):
pass
@vaex.serialize.register
class FunctionSerializablePickle(FunctionSerializable):
def __init__(self, f=None, multiprocessing=False):
self.f = f
self.multiprocessing = multiprocessing
def __eq__(self, rhs):
return self.f == rhs.f
def pickle(self, function):
return pickle.dumps(function)
def unpickle(self, data):
return pickle.loads(data)
def __getstate__(self):
return self.state_get()
def __setstate__(self, state):
self.state_set(state)
def state_get(self):
data = self.pickle(self.f)
if vaex.utils.PY2:
pickled = base64.encodestring(data)
else:
pickled = base64.encodebytes(data).decode('ascii')
return dict(pickled=pickled)
@classmethod
def state_from(cls, state, trusted=True):
obj = cls()
obj.state_set(state, trusted=trusted)
return obj
def state_set(self, state, trusted=True):
data = state['pickled']
if vaex.utils.PY2:
data = base64.decodestring(data)
else:
data = base64.decodebytes(data.encode('ascii'))
if trusted is False:
raise ValueError("Will not unpickle data when source is not trusted")
self.f = self.unpickle(data)
def __call__(self, *args, **kwargs):
'''Forward the call to the real function'''
import vaex.multiprocessing
return vaex.multiprocessing.apply(self._apply, args, kwargs, self.multiprocessing)
def _apply(self, *args, **kwargs):
return self.f(*args, **kwargs)
class FunctionSerializableJit(FunctionSerializable):
def __init__(self, expression, arguments, argument_dtypes, return_dtype, verbose=False, compile=True):
self.expression = expression
self.arguments = arguments
self.argument_dtypes = argument_dtypes
self.return_dtype = return_dtype
self.verbose = verbose
if compile:
self.f = self.compile()
else:
def placeholder(*args, **kwargs):
raise Exception('You chose not to compile this function (locally), but did invoke it')
self.f = placeholder
def state_get(self):
return dict(expression=self.expression,
arguments=self.arguments,
argument_dtypes=list(map(lambda dtype: str(dtype.numpy), self.argument_dtypes)),
return_dtype=str(self.return_dtype),
verbose=self.verbose)
@classmethod
def state_from(cls, state, trusted=True):
return cls(expression=state['expression'],
arguments=state['arguments'],
argument_dtypes=list(map(lambda s: DataType(np.dtype(s)), state['argument_dtypes'])),
return_dtype=DataType(np.dtype(state['return_dtype'])),
verbose=state['verbose'])
@classmethod
def build(cls, expression, df=None, verbose=False, compile=True):
df = df or expression.df
# if it's a virtual column, we probably want to optimize that
# TODO: fully extract the virtual columns, i.e. depending ones?
expression = str(expression)
if expression in df.virtual_columns:
expression = df.virtual_columns[expression]
# function validation, and finding variable names
all_vars = df.get_column_names(hidden=True) + list(df.variables.keys())
funcs = set(list(expression_namespace.keys()) + list(df.functions.keys()))
names = []
vaex.expresso.validate_expression(expression, all_vars, funcs, names)
        # TODO: can we do the above using the Expression API?
arguments = list(set(names))
argument_dtypes = [df.data_type(argument, array_type='numpy') for argument in arguments]
return_dtype = df[expression].dtype
return cls(str(expression), arguments, argument_dtypes, return_dtype, verbose, compile=compile)
def __call__(self, *args, **kwargs):
'''Forward the call to the numba function'''
return self.f(*args, **kwargs)
@vaex.serialize.register
class FunctionSerializableNumba(FunctionSerializableJit):
def compile(self):
import numba
argstring = ", ".join(self.arguments)
code = '''
from numpy import *
def f({0}):
return {1}'''.format(argstring, self.expression)
if self.verbose:
print('Generated code:\n' + code)
scope = {}
exec(code, scope)
f = scope['f']
# numba part
def get_type(name):
if name == "bool":
name = "bool_"
return getattr(numba, name)
argument_dtypes_numba = [get_type(argument_dtype.numpy.name) for argument_dtype in self.argument_dtypes]
return_dtype_numba = get_type(self.return_dtype.numpy.name)
vectorizer = numba.vectorize([return_dtype_numba(*argument_dtypes_numba)])
return vectorizer(f)
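# Minimal standalone sketch (added, not part of the original module) of the exec-then-vectorize
# pattern used in FunctionSerializableNumba.compile above, with the dtypes fixed to float64
# purely for illustration. Assumes numba is installed.
def _example_numba_vectorize():
    import numba
    code = '''
from numpy import *
def f(x, y):
    return sqrt(x**2 + y**2)'''
    scope = {}
    exec(code, scope)
    vectorized = numba.vectorize([numba.float64(numba.float64, numba.float64)])(scope['f'])
    return vectorized(3.0, 4.0)  # -> 5.0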
@vaex.serialize.register
class FunctionSerializableCuda(FunctionSerializableJit):
def compile(self):
import cupy
# code generation
argstring = ", ".join(self.arguments)
code = '''
from cupy import *
import cupy
@fuse()
def f({0}):
return {1}
'''.format(argstring, self.expression)#, ";".join(conversions))
if self.verbose:
print("generated code")
print(code)
scope = dict()#cupy=cupy)
exec(code, scope)
func = scope['f']
def wrapper(*args):
args = [vaex.array_types.to_numpy(k) for k in args]
args = [vaex.utils.to_native_array(arg) if isinstance(arg, np.ndarray) else arg for arg in args]
args = [cupy.asarray(arg) if isinstance(arg, np.ndarray) else arg for arg in args]
return cupy.asnumpy(func(*args))
return wrapper
# TODO: this is not the right abstraction, since this won't allow a
# numba version for the function
@vaex.serialize.register
class FunctionToScalar(FunctionSerializablePickle):
def __call__(self, *args, **kwargs):
import vaex.multiprocessing
return vaex.multiprocessing.apply(self._apply, args, kwargs, self.multiprocessing)
def _apply(self, *args, **kwargs):
length = len(args[0])
result = []
def fix_type(v):
# TODO: only when column is str type?
if isinstance(v, np.str_):
return str(v)
if isinstance(v, np.bytes_):
return v.decode('utf8')
else:
return v
args = [vaex.array_types.tolist(k) for k in args]
for i in range(length):
scalar_result = self.f(*[fix_type(k[i]) for k in args], **{key: value[i] for key, value in kwargs.items()})
result.append(scalar_result)
result = np.array(result)
return result
class Function(object):
def __init__(self, dataset, name, f):
self.dataset = dataset
self.name = name
if not vaex.serialize.can_serialize(f): # if not serializable, assume we can use pickle
f = FunctionSerializablePickle(f)
self.f = f
def __call__(self, *args, **kwargs):
        arg_string = ", ".join([str(k) for k in args] + ['{}={!r}'.format(name, value) for name, value in kwargs.items()])
expression = "{}({})".format(self.name, arg_string)
return Expression(self.dataset, expression)
class FunctionBuiltin(object):
def __init__(self, dataset, name, **kwargs):
self.dataset = dataset
self.name = name
self.kwargs = kwargs
def __call__(self, *args, **kwargs):
kwargs = dict(kwargs, **self.kwargs)
        arg_string = ", ".join([str(k) for k in args] + ['{}={!r}'.format(name, value) for name, value in kwargs.items()])
expression = "{}({})".format(self.name, arg_string)
return Expression(self.dataset, expression)
| 38.725014 | 217 | 0.576805 |
794166feeabd53072d01d246c1a370e85cb50400 | 58 | py | Python | cuticulus/core/datasets/types/__init__.py | ngngardner/cuticulus | 592e799ec9ae09ee12b12565a638ff9e448fbc21 | [
"MIT"
] | null | null | null | cuticulus/core/datasets/types/__init__.py | ngngardner/cuticulus | 592e799ec9ae09ee12b12565a638ff9e448fbc21 | [
"MIT"
] | null | null | null | cuticulus/core/datasets/types/__init__.py | ngngardner/cuticulus | 592e799ec9ae09ee12b12565a638ff9e448fbc21 | [
"MIT"
] | null | null | null | """Store specific dataset types to use in experiments."""
| 29 | 57 | 0.741379 |
7941697ad2ada06b81be3c94db78a47f55b80f85 | 1,890 | py | Python | UPSNet/upsnet/operators/build_mod_deform_conv.py | raviteja-kvns/cycada_release | 74fb4170f85f017796fea98a230dc6ed624211a7 | [
"BSD-2-Clause"
] | null | null | null | UPSNet/upsnet/operators/build_mod_deform_conv.py | raviteja-kvns/cycada_release | 74fb4170f85f017796fea98a230dc6ed624211a7 | [
"BSD-2-Clause"
] | null | null | null | UPSNet/upsnet/operators/build_mod_deform_conv.py | raviteja-kvns/cycada_release | 74fb4170f85f017796fea98a230dc6ed624211a7 | [
"BSD-2-Clause"
] | null | null | null | # ---------------------------------------------------------------------------
# Unified Panoptic Segmentation Network
#
# Copyright (c) 2018-2019 Uber Technologies, Inc.
#
# Licensed under the Uber Non-Commercial License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at the root directory of this project.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Written by Yuwen Xiong
# ---------------------------------------------------------------------------
import os
import torch
from functools import reduce
from itertools import accumulate
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
from subprocess import call
def _create_module_dir(base_path, fullname):
module, _, name = fullname.rpartition('.')
if not module:
target_dir = name
else:
target_dir = reduce(os.path.join, fullname.split('.'))
target_dir = os.path.join(base_path, target_dir)
try:
os.makedirs(target_dir)
except os.error:
pass
for dirname in accumulate(fullname.split('.'), os.path.join):
init_file = os.path.join(base_path, dirname, '__init__.py')
open(init_file, 'a').close() # Create file if it doesn't exist yet
return name, target_dir
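# Illustrative sketch (added, not part of the original build script): what the helper above
# produces for this build's '_ext.deform_conv' package. The temporary directory is an
# assumption used only for the demonstration.
def _example_create_module_dir():
    import tempfile
    tmp = tempfile.mkdtemp()
    name, target_dir = _create_module_dir(tmp, '_ext.deform_conv')
    # name == 'deform_conv', target_dir == os.path.join(tmp, '_ext', 'deform_conv'),
    # and both _ext/__init__.py and _ext/deform_conv/__init__.py now exist.
    return name, target_dir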
base_path = os.path.abspath(os.path.dirname('.'))
_create_module_dir(base_path, '_ext.deform_conv')
setup(
name='mod_deform_conv',
ext_modules=[
CUDAExtension('mod_deform_conv_cuda', [
'src/mod_deform_conv_cuda.cpp',
'src/mod_deform_conv_kernel.cu',
],
include_dirs=[os.path.join(base_path, 'src')]
),
],
cmdclass={
'build_ext': BuildExtension
}
)
call('mv mod_deform_conv_cuda*.so _ext/mod_deform_conv/', shell=True)
| 32.586207 | 78 | 0.639153 |
794169ccb575a21eac75486dff0e6dc33e09ec6e | 18,209 | py | Python | DICOM_RT/DicomPatient.py | mghro/MIRDCalculation | aa2435d0f77a01c81c519a6f828f508cacf55830 | [
"MIT"
] | null | null | null | DICOM_RT/DicomPatient.py | mghro/MIRDCalculation | aa2435d0f77a01c81c519a6f828f508cacf55830 | [
"MIT"
] | null | null | null | DICOM_RT/DicomPatient.py | mghro/MIRDCalculation | aa2435d0f77a01c81c519a6f828f508cacf55830 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 27 16:09:19 2021
@author: alejandrobertolet
"""
from os import listdir
import numpy as np
import pydicom
from rt_utils import RTStructBuilder
import matplotlib.pylab as plt
from datetime import datetime
class DicomPatient:
def __init__(self, dicomDirectory):
self.dicomDirectory = dicomDirectory
filesInDir = listdir(dicomDirectory)
self.dcmFiles = []
self.quantitiesOfInterest = []
for fname in filesInDir:
if fname[-3:] == 'dcm':
self.dcmFiles.append(pydicom.dcmread(dicomDirectory + '/' + fname))
def FilterByModality(self, modality):
modfiles = []
for f in self.dcmFiles:
if hasattr(f, 'Modality') and f.Modality == modality:
modfiles.append(f)
self.dcmFiles = modfiles
def GetImageInfo(self):
# Assumes that all slices have same characteristics
self.pixelSpacing = self.dcmFiles[0].PixelSpacing
self.sliceThickness = self.dcmFiles[0].SliceThickness
self.axAspect = self.pixelSpacing[1] / self.pixelSpacing[0]
self.sagAspect = self.pixelSpacing[1] / self.sliceThickness
self.corAspect = self.sliceThickness / self.pixelSpacing[0]
def GetVoxelDICOMPosition(self, ix, iy, iz):
xpos = self.firstVoxelPosDICOMCoordinates[0] + ix * self.pixelSpacing[1]
ypos = self.firstVoxelPosDICOMCoordinates[1] + iy * self.pixelSpacing[0]
zpos = self.firstVoxelPosDICOMCoordinates[2] + iz * self.sliceThickness
return np.array([xpos, ypos, zpos])
def GetLowerIndexesForDicomPosition(self, position):
xini = self.firstVoxelPosDICOMCoordinates[0]
yini = self.firstVoxelPosDICOMCoordinates[1]
zini = self.firstVoxelPosDICOMCoordinates[2]
dx = self.pixelSpacing[1]
dy = self.pixelSpacing[0]
dz = self.sliceThickness
ix = np.floor((position[0]-xini)/(dx+1e-6))
iy = np.floor((position[1]-yini)/(dy+1e-6))
iz = np.floor((position[2]-zini)/(dz+1e-6))
return np.array([ix, iy, iz])
def Rescale(self):
self.intercept = self.dcmFiles[0].RescaleIntercept
self.slope = self.dcmFiles[0].RescaleSlope
self.img3D = self.img3D * self.slope + self.intercept
def plotAxialSlice(self, sliceNumber, colormap='gray'):
minx = self.firstVoxelPosDICOMCoordinates[0]
miny = self.firstVoxelPosDICOMCoordinates[1]
maxx = minx + (self.img3D.shape[0]-1)*self.pixelSpacing[0]
maxy = miny + (self.img3D.shape[1]-1)*self.pixelSpacing[1]
p = plt.subplot(1,1,1)
p.imshow(self.img3D[:,:,sliceNumber], extent=[minx,maxx,miny,maxy], cmap=colormap)
p.set_aspect(self.axAspect)
def WriteRTDose(self, doseGrid = None, name = None, unit = None):
if doseGrid == None:
try:
for q in self.quantitiesOfInterest:
if q.quantity == 'Dose':
doseGrid = q.array
name = 'RTDose_' + datetime.now().strftime("%m%d%y_%H%M%S") + '.dcm'
unit = q.unit
except:
print("No dose grid was found.")
return
if isinstance(doseGrid, str):
try:
for q in self.quantitiesOfInterest:
if q.quantity == doseGrid:
if name == None:
name = 'RTDose_' + doseGrid + '_' + datetime.now().strftime("%m%d%y_%H%M%S") + '.dcm'
doseGrid = q.array
unit = q.unit
except:
print("No " + doseGrid + " grid was found.")
return
try:
base = self.slices[0].copy()
except:
base = self.dcmFiles[0].copy()
rtdoseSOPClassUID = '1.2.840.10008.5.1.4.1.1.481.2'
rtdosemodality = 'RTDOSE'
base.SOPClassUID = rtdoseSOPClassUID
base.Modality = rtdosemodality
specificRootUID = '1.2.826.0.1.3680043.9.5872.'
base.SOPInstanceUID = pydicom.uid.generate_uid(specificRootUID)
base.SeriesInstanceUID = pydicom.uid.generate_uid(specificRootUID)
base.Manufacturer = 'MIRDCalculator'
base.ManufacturerModelName = 'RT_DICOM v1.2 by [email protected]'
base.SeriesDescription = 'Dose-RT_DICOM v1.2'
# Date and time
now = datetime.now()
        base.StudyDate = now.strftime("%Y%m%d")
        base.SeriesDate = now.strftime("%Y%m%d")
        base.AcquisitionDate = now.strftime("%Y%m%d")
        base.ContentDate = now.strftime("%Y%m%d")
base.StudyTime = now.strftime("%H%M%S")
base.SeriesTime = now.strftime("%H%M%S")
base.AcquisitionTime = now.strftime("%H%M%S")
base.ContentTime = now.strftime("%H%M%S")
# Reshape dose grid
doseGrid = self.reshapeZAxis(doseGrid)
base.PixelRepresentation = 0
base.LargestImagePixelValue = int(np.ceil(np.max(doseGrid)))
base['LargestImagePixelValue'].VR = 'US'
base.SmallestImagePixelValue = int(np.min(doseGrid))
base['SmallestImagePixelValue'].VR = 'US'
base.BitsAllocated = 16
base.BitsStored = 16
base.HighBit = 15
[newGrid, slope] = self.convertInt16(doseGrid)
del base.RescaleSlope
del base.RescaleIntercept
base.DoseGridScaling = slope
base.DoseSummationType = 'PLAN'
base.DoseUnits = unit
base.ImagePositionPatient = self.firstVoxelPosDICOMCoordinates
base.NumberOfFrames = newGrid.shape[0]
base.FrameIncrementPointer = (0x3004, 0x000c)
frame = []
for i in range(0, newGrid.shape[0]):
frame.append(i * self.sliceThickness)
base.GridFrameOffsetVector = frame
base.PixelData = newGrid.tobytes()
base.save_as(name)
def reshapeZAxis(self, grid):
shape = [grid.shape[2]]
shape.append(grid.shape[0])
shape.append(grid.shape[1])
newgrid = np.zeros(shape)
for i in range(0, shape[0]):
img2D = grid[:,:,i]
newgrid[i,:,:] = img2D
return newgrid
def convertInt16(self, grid):
# Determine scaling
maxAbsScoredValue = np.max(grid)
minScoredValue = np.min(grid)
useSigned = minScoredValue < 0
if useSigned:
outputScaleFactor = (maxAbsScoredValue - minScoredValue) / 32767
newGrid = np.zeros(grid.shape, dtype='int16')
else:
outputScaleFactor = (maxAbsScoredValue - minScoredValue) / 65535
newGrid = np.zeros(grid.shape, dtype='uint16')
for i in range(0, grid.shape[0]):
for j in range(0, grid.shape[1]):
for k in range(0, grid.shape[2]):
newGrid[i,j,k] = int(grid[i,j,k] / outputScaleFactor)
return [newGrid, outputScaleFactor]
def convertFloat64(self, grid, slope):
newGrid = np.zeros(grid.shape, dtype='float64')
newGrid = grid * slope
return newGrid
def LoadStructures(self, RTStructPath, ROIsList=None):
'''
Loads structures from DICOM RTStruct file as 3D arrays. Arrays are stored in a dictionary.
Function loads all structures if ROIsList is not specified.
Args:
RTStructPath --> path to input RTStruct file (string)
ROIsList --> list containing structure names (list of strings)
'''
rtstruct = RTStructBuilder.create_from(self.dicomDirectory, RTStructPath)
if ROIsList is None:
ROINames = rtstruct.get_roi_names()
else:
ROINames = ROIsList
structures3DList = []
self.ROINames = []
for s in ROINames:
try:
structures3DList.append(rtstruct.get_roi_mask_by_name(s))
self.ROINames.append(s)
except:
print("Structure " + s + " could not be read.")
self.structures3D = dict(zip(self.ROINames, structures3DList))
print('Structures loaded.')
def LoadRTDose(self, RTDosePath, quantity = 'Dose', unit = None, doseScale=1):
'''
Loads dose from DICOM RTDose file as 3D array.
Args:
RTDosePath --> path to input RTDose file (string)
doseScale --> scale to apply to dose distribution (int / float)
'''
ds = pydicom.read_file(RTDosePath)
dose_arr = ds.pixel_array
dose_arr = np.swapaxes(dose_arr, 0,2)
dose_arr = np.swapaxes(dose_arr, 0,1)
slope = ds.DoseGridScaling
darr = self.convertFloat64(dose_arr, slope)
qoi = QoIDistribution()
dx = ds.PixelSpacing[1]
dy = ds.PixelSpacing[0]
dz = np.abs(ds.GridFrameOffsetVector[1] - ds.GridFrameOffsetVector[0])
initPos = np.array(ds.ImagePositionPatient).copy()
initPos[0] = ds.ImagePositionPatient[1]
initPos[1] = ds.ImagePositionPatient[0]
if darr.shape == self.img3D.shape:
qoi.array = darr
else:
qoi.array = self.DoseInterpolationToCTGrid(darr, dx, dy, dz, initPos)
qoi.quantity = quantity
if unit is not None:
qoi.unit = unit
else:
try:
qoi.unit = ds.DoseUnits
except:
qoi.unit = 'arb. unit'
self.quantitiesOfInterest.append(qoi)
print(quantity + ' array loaded.')
def DoseInterpolationToCTGrid(self, dosegrid, dx, dy, dz, iniPos, threshold = None):
shape = self.img3D.shape
doseCTgrid = np.zeros(shape)
if threshold == None:
threshold = 0.01 * np.max(dosegrid)
minx = int((iniPos[0] - self.firstVoxelPosDICOMCoordinates[0])/(self.pixelSpacing[1]+1e-6))-1
miny = int((iniPos[1] - self.firstVoxelPosDICOMCoordinates[1])/(self.pixelSpacing[0]+1e-6))-1
minz = int((iniPos[2] - self.firstVoxelPosDICOMCoordinates[2])/(self.sliceThickness+1e-6))-1
maxposxCT = self.firstVoxelPosDICOMCoordinates[0] + self.pixelSpacing[1] * shape[0]
maxposyCT = self.firstVoxelPosDICOMCoordinates[1] + self.pixelSpacing[0] * shape[1]
maxposzCT = self.firstVoxelPosDICOMCoordinates[2] + self.sliceThickness * shape[2]
maxposxgrid = iniPos[0] + dx * dosegrid.shape[0]
maxposygrid = iniPos[1] + dy * dosegrid.shape[1]
maxposzgrid = iniPos[2] + dz * dosegrid.shape[2]
maxx = shape[0] - int((maxposxCT - maxposxgrid)/(self.pixelSpacing[1]+1e-6))
maxy = shape[1] - int((maxposyCT - maxposygrid)/(self.pixelSpacing[0]+1e-6))
maxz = shape[2] - int((maxposzCT - maxposzgrid)/(self.sliceThickness+1e-6))
for icx in range(minx, maxx):
porc = (icx-minx)/(maxx-minx)*100
print("Interpolating grid... (" + str(round(porc,1))+"%)")
for icy in range(miny, maxy):
for icz in range(minz, maxz):
position = self.GetVoxelDICOMPosition(icx, icy, icz)
iax = int((position[0]-iniPos[0])/(dx+1e-6))
iay = int((position[1]-iniPos[1])/(dy+1e-6))
iaz = int((position[2]-iniPos[2])/(dz+1e-6))
# 8 closest vertices. Weight inversely proportional to the distance to each vertex
cumWeight = 0
try:
if dosegrid[iax, iay, iaz] > threshold:
x = iniPos[0] + iax * dx
y = iniPos[1] + iay * dy
z = iniPos[2] + iaz * dz
pos000 = np.array([x,y,z])
d000 = self.__distance(position, pos000)
if d000 == 0:
doseCTgrid[icx, icy, icz] = dosegrid[iax, iay, iaz]
break
else:
doseCTgrid[icx,icy,icz] = doseCTgrid[icx,icy,icz] + dosegrid[iax, iay, iaz] / d000
cumWeight = cumWeight + 1/d000
if iaz + 1 < dosegrid.shape[2]:
pos001 = pos000
pos001[2] = pos000[2] + dz
d001 = self.__distance(position, pos001)
doseCTgrid[icx,icy,icz] = doseCTgrid[icx,icy,icz] + dosegrid[iax, iay, iaz+1] / d001
cumWeight = cumWeight + 1/d001
if iay + 1 < dosegrid.shape[1]:
pos010 = pos000
pos010[1] = pos000[1] + dy
d010 = self.__distance(position, pos010)
doseCTgrid[icx,icy,icz] = doseCTgrid[icx,icy,icz] + dosegrid[iax, iay+1, iaz] / d010
cumWeight = cumWeight + 1/d010
if iay + 1 < dosegrid.shape[1] and iaz + 1 < dosegrid.shape[2]:
pos011 = pos001
pos011[1] = pos000[1] + dy
d011 = self.__distance(position, pos011)
doseCTgrid[icx,icy,icz] = doseCTgrid[icx,icy,icz] + dosegrid[iax, iay+1, iaz+1] / d011
cumWeight = cumWeight + 1/d011
if iax + 1 < dosegrid.shape[0]:
pos100 = pos000
pos100[0] = pos000[0] + dx
d100 = self.__distance(position, pos100)
doseCTgrid[icx,icy,icz] = doseCTgrid[icx,icy,icz] + dosegrid[iax+1, iay, iaz] / d100
cumWeight = cumWeight + 1/d100
if iax + 1 < dosegrid.shape[0] and iaz + 1 < dosegrid.shape[2]:
pos101 = pos001
pos101[0] = pos000[0] + dx
d101 = self.__distance(position, pos101)
doseCTgrid[icx,icy,icz] = doseCTgrid[icx,icy,icz] + dosegrid[iax+1, iay, iaz+1] / d101
cumWeight = cumWeight + 1/d101
if iax + 1 < dosegrid.shape[0] and iay + 1 < dosegrid.shape[1]:
pos110 = pos010
pos110[0] = pos000[0] + dx
d110 = self.__distance(position, pos110)
doseCTgrid[icx,icy,icz] = doseCTgrid[icx,icy,icz] + dosegrid[iax+1, iay+1, iaz] / d110
cumWeight = cumWeight + 1/d110
if iax + 1 < dosegrid.shape[0] and iay + 1 < dosegrid.shape[1] and iaz + 1 < dosegrid.shape[2]:
pos111 = pos011
pos111[0] = pos000[0] + dx
d111 = self.__distance(position, pos111)
doseCTgrid[icx,icy,icz] = doseCTgrid[icx,icy,icz] + dosegrid[iax+1, iay+1, iaz+1] / d111
cumWeight = cumWeight + 1/d111
doseCTgrid[icx,icy,icz] = doseCTgrid[icx,icy,icz] / cumWeight
except:
print("Error at: ", iax, iay, iaz)
pass
return doseCTgrid
def __distance(self, pos1, pos2):
pos1 = np.array(pos1)
pos2 = np.array(pos2)
return np.sqrt(np.sum(np.power(pos1-pos2, 2)))
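# Standalone sketch (added, not part of the original class) of the inverse-distance weighting
# used in DoseInterpolationToCTGrid above: each surrounding dose-grid vertex contributes
# value / distance, and the sum is normalised by the accumulated 1 / distance weights.
# The numbers in the trailing comment are made up for illustration.
def _example_inverse_distance_weighting(values, distances):
    values = np.asarray(values, dtype=float)
    distances = np.asarray(distances, dtype=float)
    weights = 1.0 / distances
    return np.sum(values * weights) / np.sum(weights)
# e.g. _example_inverse_distance_weighting([2.0, 4.0], [1.0, 3.0]) -> 2.5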
class PatientCT(DicomPatient):
def __init__(self, dicomDirectory):
DicomPatient.__init__(self, dicomDirectory)
self.FilterByModality('CT')
self.GetSlices()
print('{} CT slices found'.format(len(self.slices)))
self.GetImageInfo()
self.ReadPixelValues()
self.Rescale()
self.GetFrameOfReference()
def GetSlices(self):
self.slices = []
for f in self.dcmFiles:
if hasattr(f, 'ImagePositionPatient'):
self.slices.append(f)
self.slices = sorted(self.slices, key=lambda s: s.ImagePositionPatient[2], reverse=False)
def GetFrameOfReference(self):
self.forUID = self.slices[0].FrameOfReferenceUID
self.firstVoxelPosDICOMCoordinates = self.slices[0].ImagePositionPatient
def ReadPixelValues(self):
imgShape = list(self.slices[0].pixel_array.shape)
imgShape.append(len(self.slices))
self.img3D = np.zeros(imgShape)
# Fill 3D array with the images from the files
for i, s in enumerate(self.slices):
img2D = s.pixel_array
self.img3D[:, :, i] = img2D
class Patient3DActivity(DicomPatient):
def __init__(self, dicomDirectory):
DicomPatient.__init__(self, dicomDirectory)
self.FilterByModality('NM')
print('{} NM slices found'.format(len(self.dcmFiles)))
self.GetImageInfo()
self.VoxelSize = self.sliceThickness
self.ReadPixelValues()
self.GetFrameOfReference()
self.totalCounts = np.sum(self.img3D)
def GetFrameOfReference(self):
self.forUID = self.dcmFiles[0].FrameOfReferenceUID
self.firstVoxelPosDICOMCoordinates = self.dcmFiles[0].DetectorInformationSequence[0].ImagePositionPatient
def ReadPixelValues(self):
imgShape = list(self.dcmFiles[0].pixel_array.shape[1:])
imgShape.append(self.dcmFiles[0].pixel_array.shape[0])
self.img3D = np.zeros(imgShape)
for i in range(0, self.dcmFiles[0].pixel_array.shape[0]):
img2D = self.dcmFiles[0].pixel_array[i,:,:]
self.img3D[:,:,i] = img2D
class QoIDistribution:
def __init__(self, array = None, quantity = None, unit = None):
self.array = array
self.quantity = quantity
self.unit = unit
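# Illustrative end-to-end sketch (added, not part of the original module). The paths and the
# ROI name below are placeholders, not files or structures shipped with this project.
def _example_usage():
    ct = PatientCT('/path/to/ct_dicom_dir')
    ct.LoadStructures('/path/to/rtstruct.dcm', ROIsList=['Liver'])
    ct.LoadRTDose('/path/to/rtdose.dcm', quantity='Dose')
    ct.WriteRTDose('Dose')  # writes the stored 'Dose' grid back out as a DICOM RTDOSE file
    return ct.quantitiesOfInterest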
| 45.866499 | 123 | 0.550277 |
79416b47987d7d57c3e7e0f990eace9492b7a481 | 9,672 | py | Python | scripts/preprocess/clean_tok_mono_corpus.py | liuzh91/gluon-nlp | 189bbdcc56d8e58aa908963949687b99ff9a3cff | [
"Apache-2.0"
] | 1 | 2020-08-19T09:31:04.000Z | 2020-08-19T09:31:04.000Z | scripts/preprocess/clean_tok_mono_corpus.py | liuzh91/gluon-nlp | 189bbdcc56d8e58aa908963949687b99ff9a3cff | [
"Apache-2.0"
] | null | null | null | scripts/preprocess/clean_tok_mono_corpus.py | liuzh91/gluon-nlp | 189bbdcc56d8e58aa908963949687b99ff9a3cff | [
"Apache-2.0"
] | null | null | null | import argparse
import os
import multiprocessing
import time
import numpy as np
import warnings
import re
from gluonnlp.data.filtering import MosesNormalizer
from gluonnlp.data.tokenizers import MosesTokenizer, BaseTokenizer,\
WhitespaceTokenizer, JiebaTokenizer
from typing import List, Union, Optional
re._MAXCACHE = 1024
def get_tokenizer(tokenizer, lang=None):
if isinstance(tokenizer, BaseTokenizer):
return tokenizer
else:
if tokenizer == 'moses':
return MosesTokenizer(lang=lang)
elif tokenizer == 'whitespace':
return WhitespaceTokenizer()
elif tokenizer == 'jieba':
return JiebaTokenizer()
else:
raise NotImplementedError
# TODO(sxjscience) Consider whether to
def check_latin1(sentence: str) -> bool:
"""Check whether the sentence can be encoded in latin1
This is used in
https://github.com/mlperf/training/blob/master/rnn_translator/pytorch/scripts/filter_dataset.py
The idea is to filter the sentences with rare unicode glyphs
Returns
-------
ret
Whether sentences are latin1
"""
try:
sentence.encode('latin1')
except UnicodeEncodeError:
return False
else:
return True
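# Quick illustration (added, not part of the original script): text limited to the latin1
# repertoire passes, text with rarer glyphs is flagged for filtering.
def _example_check_latin1():
    assert check_latin1('Bonjour, ça va?')
    assert not check_latin1('こんにちは')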
def get_line_byte_start(corpus_path: str) -> np.ndarray:
"""Get the start position of each lines in terms of bytes so that we can use seek + read to
load an arbitrary line.
Parameters
----------
corpus_path
The path of the corpus
Returns
-------
line_pos
Shape (#Lens + 1,)
"""
line_pos = [0]
with open(corpus_path, 'rb') as in_f:
pos = 0
for line in in_f:
pos += len(line)
line_pos.append(pos)
return np.array(line_pos, dtype=np.int64)
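# Illustrative sketch (added, not part of the original script): using the byte offsets computed
# above together with seek + read to fetch a single line without scanning the whole file.
def _example_read_line(corpus_path, line_idx):
    line_pos = get_line_byte_start(corpus_path)
    with open(corpus_path, 'rb') as in_f:
        in_f.seek(line_pos[line_idx])
        raw = in_f.read(line_pos[line_idx + 1] - line_pos[line_idx])
    return raw.decode('utf-8')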
class MonoCorpusProcessor:
"""Process sentence of corpus.
This largely recovers the functionality of 'clean-corpus-n.perl' in mosesdecoder.
The difference is that it is customizable with pure python.
By default, we will perform the following pre-processing pipeline.
Each stage could be turned on/off and specialized based on the input arguments.
Also, you may directly revise the code and write your own processing script.
1. Normalize sentence
2. Pre-filter
3. Tokenize the sentence
4. Filter the sentence based on different rules
        4.1 Remove empty (zero-token) sentences
        4.2 Remove sentences where not
            `min_num_words <= len(sentence) <= max_num_words`
"""
def __init__(self, lang: str,
normalize: bool = True,
tokenizer: Union[str, BaseTokenizer] = 'whitespace',
min_num_words: Optional[int] = None,
max_num_words: Optional[int] = None,
discard_non_latin1: bool = False):
self._lang = lang
if normalize:
self._normalizer = MosesNormalizer(lang=lang)
self._tokenizer = get_tokenizer(tokenizer, lang)
self._min_num_words = min_num_words
self._max_num_words = max_num_words
self._discard_non_latin1 = discard_non_latin1
def process_chunk(self, args):
path, chunk_start, chunk_size = args
processed_lines = []
with open(path, 'rb') as in_f:
# Read chunk
in_f.seek(chunk_start)
lines = in_f.read(chunk_size)
lines = lines.splitlines()
unfiltered_line_num = len(lines)
for line in lines:
line = line.decode('utf-8').strip()
# 1. Normalize
line = self._normalizer(line)
# 2. Filter after normalization.
if self._discard_non_latin1:
if not check_latin1(line):
continue
# 3. Tokenize the sentence
tokens = self._tokenizer.encode(line)
# 4. Filter after tokenization. Filter with multiple rules
if len(tokens) == 0:
continue
if self._max_num_words is not None:
if len(tokens) > self._max_num_words:
continue
if self._min_num_words is not None:
if len(tokens) < self._min_num_words:
continue
processed_lines.append(' '.join(tokens))
return processed_lines, unfiltered_line_num
def process_mono_corpus(self,
corpus_paths: List[str],
out_path: str,
chunk_size: int = 1024 * 1024,
num_process: int = 8) -> int:
"""Preprocess the mono corpus
Parameters
----------
corpus_paths
Corpus paths
out_path
Write the results to the output path
chunk_size
Approximately split the corpus files into multiple chunks
num_process
The number of process
Returns
-------
line_count
The number of lines in the final filtered file
"""
start = time.time()
total_line_count = 0
filtered_line_count = 0
def chunk_iterator(step=10):
for path in corpus_paths:
line_pos = get_line_byte_start(path)
line_size = line_pos[1:] - line_pos[:-1]
num_lines = line_pos.shape[0] - 1
budget = chunk_size
chunk_start = 0
cur_chunk_size = 0
for i in range(0, num_lines, step):
line_batch_num = min(num_lines - i, step)
batch_line_size = line_size[i:(i + line_batch_num)].sum()
budget -= batch_line_size
cur_chunk_size += batch_line_size
if budget <= 0 or i + step >= num_lines:
yield path, chunk_start, cur_chunk_size
chunk_start += cur_chunk_size
cur_chunk_size = 0
budget = chunk_size
with open(out_path, 'w', encoding='utf-8', newline='\n') as out_f:
with multiprocessing.Pool(num_process) as pool:
for i, (processed_lines, unfiltered_line_num) in \
enumerate(pool.imap(self.process_chunk, chunk_iterator())):
out_f.write('\n'.join(processed_lines) + '\n')
filtered_line_count += len(processed_lines)
total_line_count += unfiltered_line_num
if (i + 1) % 100 == 0:
print('Chunk {}, #Lines Processed: {}, Filtered: {}, Remain: {}'
.format(i + 1, total_line_count,
total_line_count - filtered_line_count,
filtered_line_count))
end = time.time()
print('Done, #Lines {}/{}, Time spent {}'.format(filtered_line_count,
total_line_count,
end - start))
return filtered_line_count
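# Illustrative usage sketch (added, not part of the original script); the corpus and output
# paths are placeholders. All keyword arguments shown correspond to the API defined above.
def _example_process_corpus():
    processor = MonoCorpusProcessor(lang='en',
                                    tokenizer='whitespace',
                                    min_num_words=1,
                                    max_num_words=250,
                                    discard_non_latin1=False)
    return processor.process_mono_corpus(corpus_paths=['corpus.raw.en'],
                                         out_path='corpus.tok.en',
                                         num_process=4)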
def get_parser():
parser = argparse.ArgumentParser(
description='Clean mono corpus used in machine translation.')
parser.add_argument('--corpus', type=str, nargs='+', required=True)
parser.add_argument('--lang', type=str, required=True)
parser.add_argument('--save-path', type=str, default=None,
help='Path to save the cleaned and tokenized corpus. If not set, '
'the default is "corpus.tok.{lang}"')
parser.add_argument('--tokenizer', type=str, default='moses')
parser.add_argument('--min-num-words', type=int, default=None)
parser.add_argument('--max-num-words', type=int, default=None)
parser.add_argument('--discard-non-latin1', action='store_true',
help='Whether to discard the sentence pair if both sentences cannot be '
'encoded into latin1.')
parser.add_argument('--num-process', type=int, default=8,
help='number of process')
parser.add_argument('--overwrite', action='store_true')
return parser
def main(args):
corpus_processor = MonoCorpusProcessor(lang=args.lang,
tokenizer=args.tokenizer,
min_num_words=args.min_num_words,
max_num_words=args.max_num_words,
discard_non_latin1=args.discard_non_latin1)
print('Clean the mono corpus:')
print(' {}: {}'.format(args.lang, args.corpus))
if args.save_path is None:
save_path = 'corpus.tok.{}'.format(args.lang)
else:
save_path = args.save_path
print('Save to {} -> {} \n'.format(args.lang, save_path))
if os.path.exists(save_path) and not args.overwrite:
        warnings.warn('{} exists, skip. If you need to overwrite this file, '
'rerun the script with --overwrite.'.format(save_path))
else:
corpus_processor.process_mono_corpus(
corpus_paths=args.corpus,
out_path=save_path,
num_process=args.num_process)
def cli_main():
parser = get_parser()
args = parser.parse_args()
main(args)
if __name__ == '__main__':
cli_main()
| 38.229249 | 99 | 0.564413 |
79416b85e409b19b7fe907e18127347c1c7dfbda | 4,635 | py | Python | scripts/map_destination_flow.py | zarajfr/Visualising_Syrian_flow | 8e7eae5e8b44ca0e50b237d98b4a5e0ecaa57efe | [
"MIT"
] | null | null | null | scripts/map_destination_flow.py | zarajfr/Visualising_Syrian_flow | 8e7eae5e8b44ca0e50b237d98b4a5e0ecaa57efe | [
"MIT"
] | null | null | null | scripts/map_destination_flow.py | zarajfr/Visualising_Syrian_flow | 8e7eae5e8b44ca0e50b237d98b4a5e0ecaa57efe | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import random
from random import shuffle
import seaborn as sns
import math
from pprint import pprint
import matplotlib
from matplotlib.cm import cool
import csv
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
import geopandas as gpd
import pandas as pd
import pysal
import libpysal
from libpysal.weights import KNN
from splot.libpysal import plot_spatial_weights
import fiona
import branca
import folium
m = folium.Map(location=[34.8021, 38.9968], zoom_start=7.2) #zoom_start=13
# m = folium.Map(location=[34.8021, 38.9968], zoom_start=7.2, tiles= "https://api.mapbox.com/v4/mapbox.run-bike-hike/{z}/{x}/{y}.png?access_token=sk.eyJ1IjoiemFocmFqYWZhcmkiLCJhIjoiY2t1ZThuNmRoMWVxajJxbXh5MTBsMDhzOCJ9.oX35NvS5itEDhWMgd8ibSQ" )
m
m.save("index.html")
def calc_d_rate():
govlabels = ["Aleppo","Damascus","Dar'a","Deir-ez-Zor","Hama","Al Hasakeh","Homs","Idleb","Lattakia","Quneitra","Ar-Raqqa","Rural Dam.","As-Sweida","Tartus","Lebanon","Turkey","Iraq", "Jordan"]
fc = []
# for fn in ["destination-social-jordan.csv", "destination-social-scenario0.csv" ]:
# for fn in ["social-tu-jo.csv", "destination-social-scenario0.csv" ]:
for fn in ["camp_up_trip_9.csv", "social-leb-tu.csv" ]:
# for fn in ["social-conflict-shift-leb-tu-2.csv", "social-leb-tu.csv" ]:
sim = []
with open(fn) as f2:
myf2 = csv.reader(f2,delimiter=',')
for row in myf2:
x = []
for i in range(len(row)-1):
x.append(200.0*float(row[i]))
sim.append(x)
f2.close()
fc.append(sim)
sums = []
for j in range(len(fc[1][0])):
x = 0.0
for i in range(len(fc[1])):
x += fc[0][i][j]
sums.append(x)
difference_in_range = []
for i in range(18):
each_gov = []
for j in range(9,19):
x = ( fc[0][i][j] - fc[1][i][j] )/(1.0*sums[j])
each_gov.append(x)
difference_in_range.append(each_gov)
return difference_in_range
def fancy_map():
indexes = [6, 1, 11, 13, 2, 3, 4,5, 7, 8, 9, 10, 12, 14 ]
observable0 = calc_d_rate()
observable = []
avgg = []
for i in range(len(observable0)):
s = 0.0
for j in range(len(observable0[0])):
s+= observable0[i][j]
avgg.append(s*10.0)
for i in indexes:
observable.append(avgg[i-1])
o =[]
for i in range(14):
o.append(observable[i])
df = pd.DataFrame({'variation': o})
gdf2 = gpd.read_file("govfile")
gdf2['variation'] = df
gdf3 = gpd.read_file("Jordan_shapefile")
gdf3['variation'] = avgg[17]
gdf3['NAME_1'] = "Jordan"
gdf4 = gpd.read_file("Iraq_shapefile")
gdf4['variation'] = avgg[16]
gdf4['NAME_1'] = "Iraq"
gdf5 = gpd.read_file("Turkey_shapefile")
gdf5['variation'] = avgg[15]
gdf5['NAME_1'] = "Turkey"
gdf6 = gpd.read_file("Lebanon_shapefile")
gdf6['variation'] = avgg[14]
gdf6['NAME_1'] = "Lebanon"
a1 = gdf2.append(gdf6)
a2 = a1.append(gdf5)
a3 = a2.append(gdf4)
a4 = a3.append(gdf3)
divnorm = colors.TwoSlopeNorm(vmin=-0.5, vcenter=0.0, vmax=0.5)
# colors = ['#8e0152','#c51b7d','#de77ae','#f1b6da','#fde0ef','#e6f5d0','#b8e186','#7fbc41','#4d9221','#276419'] ['#9e0142','#d53e4f','#f46d43','#fdae61','#fee08b','#ffffbf','#e6f598','#abdda4','#66c2a5','#3288bd','#5e4fa2']
colormap = branca.colormap.StepColormap(
colors=['#8e0152','#c51b7d','#de77ae','#f1b6da','#fde0ef','#e6f5d0','#b8e186','#7fbc41','#4d9221','#276419'],index=a4['variation'].quantile([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]),vmin=-2.0,vmax=3)
colormap.caption="Average change of influx from 2nd rearrangement of conflict - Leb-Tu closure"
# colormap.caption="Average change of influx - Turkey-Jordan closure"
# colormap.caption="Average discrepancy - Jordan border closure - social influence"
colormap.position="topleft"
# colormap = branca.colormap.StepColormap(
# colors=sns.color_palette("Spectral", 10),index=a4['variation'].quantile([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]),vmin=-1.0,vmax=1.0)
geoJSON_df = gpd.read_file("all_syr.geojson")
stategeo = folium.GeoJson(a4,name='Syria', style_function = lambda x: {'fillColor': colormap(x['properties']['variation']), 'color': 'black','weight':1, 'fillOpacity':0.5}, tooltip=folium.GeoJsonTooltip(fields=['NAME_1', 'variation'], aliases=['NAME_1', 'variation'] , localize=True) ).add_to(m)
colormap.add_to(m)
folium.LayerControl().add_to(m)
m.save("index.html")
fancy_map()
| 38.94958 | 299 | 0.631068 |
79416c1dc80c46b3608beeaf871ddde00b88074d | 1,467 | py | Python | userbot/plugins/dictionary.py | SH4DOWV/X-tra-Telegram | 73634556989ac274c44a0a2cc9ff4322e7a52158 | [
"MIT"
] | null | null | null | userbot/plugins/dictionary.py | SH4DOWV/X-tra-Telegram | 73634556989ac274c44a0a2cc9ff4322e7a52158 | [
"MIT"
] | null | null | null | userbot/plugins/dictionary.py | SH4DOWV/X-tra-Telegram | 73634556989ac274c44a0a2cc9ff4322e7a52158 | [
"MIT"
] | null | null | null | """Dictionary Plugin for @UniBorg
Syntax: .meaning <word>"""
import requests
from telethon import events
from uniborg.util import admin_cmd
@borg.on(admin_cmd("meaning (.*)"))
async def _(event):
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
input_url = "https://bots.shrimadhavuk.me/dictionary/?s={}".format(input_str)
headers = {"USER-AGENT": "UniBorg"}
caption_str = f"Significato di __{input_str}__\n"
try:
response = requests.get(input_url, headers=headers).json()
pronounciation = response.get("p")
meaning_dict = response.get("lwo")
for current_meaning in meaning_dict:
current_meaning_type = current_meaning.get("type")
current_meaning_definition = current_meaning.get("definition")
caption_str += f"**{current_meaning_type}**: {current_meaning_definition}\n\n"
except Exception as e:
caption_str = str(e)
reply_msg_id = event.message.id
if event.reply_to_msg_id:
reply_msg_id = event.reply_to_msg_id
try:
await borg.send_file(
event.chat_id,
pronounciation,
caption=f"Pronouncia di __{input_str}__",
force_document=False,
reply_to=reply_msg_id,
allow_cache=True,
voice_note=True,
silent=True,
supports_streaming=True
)
except:
pass
await event.edit(caption_str)
| 32.6 | 90 | 0.6394 |
79416caa3a50059700bc54b7d17fb4cca1798635 | 809 | py | Python | Lib/corpuscrawler/crawl_nii.py | cash/corpuscrawler | 8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d | [
"Apache-2.0"
] | 95 | 2019-06-13T23:34:21.000Z | 2022-03-12T05:22:49.000Z | Lib/corpuscrawler/crawl_nii.py | sahwar/corpuscrawler | 8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d | [
"Apache-2.0"
] | 31 | 2019-06-02T18:56:53.000Z | 2021-08-10T20:16:02.000Z | Lib/corpuscrawler/crawl_nii.py | sahwar/corpuscrawler | 8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d | [
"Apache-2.0"
] | 35 | 2019-06-18T08:26:24.000Z | 2022-01-11T13:59:40.000Z | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, unicode_literals
from corpuscrawler.util import crawl_bibleis
def crawl(crawler):
out = crawler.get_output(language='nii')
crawl_bibleis(crawler, out, bible='NIIWBT')
| 36.772727 | 74 | 0.770087 |
79416d8c617ab9c2b14196dd07b0c1fe73226aa3 | 39 | py | Python | async_iter/__init__.py | weidwonder/async-iter | de971fa34ca753ee85abe09da08b19d886879a69 | [
"MIT"
] | null | null | null | async_iter/__init__.py | weidwonder/async-iter | de971fa34ca753ee85abe09da08b19d886879a69 | [
"MIT"
] | null | null | null | async_iter/__init__.py | weidwonder/async-iter | de971fa34ca753ee85abe09da08b19d886879a69 | [
"MIT"
] | null | null | null | from async_iter import AsyncIterHandler | 39 | 39 | 0.923077 |
79416dc7798ee8a37d674ee32ce35182c05a8a79 | 5,742 | py | Python | analyzer/codechecker_analyzer/analyzers/clangsa/ctu_autodetection.py | LebedevRI/codechecker | f4548444851e19c8cc7b8fd621f3dcdf987d7140 | [
"Apache-2.0"
] | null | null | null | analyzer/codechecker_analyzer/analyzers/clangsa/ctu_autodetection.py | LebedevRI/codechecker | f4548444851e19c8cc7b8fd621f3dcdf987d7140 | [
"Apache-2.0"
] | null | null | null | analyzer/codechecker_analyzer/analyzers/clangsa/ctu_autodetection.py | LebedevRI/codechecker | f4548444851e19c8cc7b8fd621f3dcdf987d7140 | [
"Apache-2.0"
] | null | null | null | # -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""
Clang Static Analyzer related functions.
"""
import subprocess
from codechecker_common.logger import get_logger
from codechecker_analyzer import host_check
from codechecker_analyzer.analyzers.clangsa import clang_options, version
LOG = get_logger('analyzer.clangsa')
CTU_ON_DEMAND_OPTION_NAME = 'ctu-invocation-list'
def invoke_binary_checked(binary_path, args=None, environ=None):
"""
Invoke the binary with the specified args, and return the output if the
command finished running with zero exit code. Return False otherwise.
    A possible usage is to check the existence of binaries.
:param binary_path: The path to the executable to invoke
:param args: The arguments of the invocation
:type binary_path: str
:type args: list
:rtype str
"""
args = args or []
invocation = [binary_path]
invocation.extend(args)
try:
output = subprocess.check_output(
invocation,
env=environ,
encoding="utf-8",
errors="ignore")
except (subprocess.CalledProcessError, OSError) as e:
LOG.debug(
'Command invocation failed because of non-zero exit code!'
'Details: %s', str(e))
return False
return output
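# Illustrative usage (added, not part of the original module): probing an executable.
# 'clang' is a placeholder here; any binary on PATH would behave the same way.
def _example_probe_clang_version():
    output = invoke_binary_checked('clang', ['--version'])
    return output if output is not False else None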
class CTUAutodetection:
"""
CTUAutodetection is responsible for providing the availability information
    of the CTU feature, the relevant mapping tool path and the mapping file
name.
"""
def __init__(self, analyzer_binary, environ):
self.__analyzer_binary = analyzer_binary
self.environ = environ
self.__analyzer_version_info = None
if self.__analyzer_binary is None:
LOG.debug(
'Trying to detect CTU capability, but analyzer binary is not '
'set!')
return None
analyzer_version = invoke_binary_checked(
self.__analyzer_binary, ['--version'], self.environ)
if analyzer_version is False:
LOG.debug('Failed to invoke command to get Clang version!')
return None
version_parser = version.ClangVersionInfoParser()
version_info = version_parser.parse(analyzer_version)
if not version_info:
LOG.debug('Failed to parse Clang version information!')
return None
self.__analyzer_version_info = version_info
@property
def analyzer_version_info(self):
"""
Returns the relevant parameters of the analyzer by parsing the
output of the analyzer binary when called with version flag.
"""
if not self.__analyzer_version_info:
return False
return self.__analyzer_version_info
@property
def major_version(self):
"""
Returns the major version of the analyzer, which is used for
CTU analysis.
"""
return self.analyzer_version_info.major_version
@property
def installed_dir(self):
"""
Returns the installed directory of the analyzer, which is used for
CTU analysis.
"""
return self.analyzer_version_info.installed_dir
@property
def mapping_tool_path(self):
"""Return the path to the mapping tool."""
tool_path, _ = clang_options.ctu_mapping(self.analyzer_version_info)
if tool_path:
return tool_path
return False
@property
def display_progress(self):
"""
        Return the analyzer arguments if the analyzer is capable of displaying
        CTU progress, and None if it is not.
The ctu display progress arguments depend on
the clang analyzer version.
"""
if not self.analyzer_version_info:
return None
ctu_display_progress_args = ['-Xclang',
'-analyzer-config',
'-Xclang',
'display-ctu-progress=true']
ok = host_check.has_analyzer_config_option(
self.__analyzer_binary, "display-ctu-progress", self.environ)
if not ok:
return None
return ctu_display_progress_args
@property
def mapping_file_name(self):
"""
        Returns the name of the mapping file, which is used for
        CTU analysis.
"""
_, mapping_file_name = \
clang_options.ctu_mapping(self.analyzer_version_info)
if mapping_file_name:
return mapping_file_name
return False
@property
def is_ctu_capable(self):
"""
Detects if the current clang is CTU compatible. Tries to autodetect
the correct one based on clang version.
"""
tool_path = self.mapping_tool_path
if not tool_path:
return False
return invoke_binary_checked(tool_path, ['-version'], self.environ) \
is not False
@property
def is_on_demand_ctu_available(self):
"""
Detects if the current Clang supports on-demand parsing of ASTs for
CTU analysis.
"""
analyzer_options = invoke_binary_checked(
self.__analyzer_binary, ['-cc1', '-analyzer-config-help'],
self.environ)
if analyzer_options is False:
return False
return CTU_ON_DEMAND_OPTION_NAME in analyzer_options
| 30.221053 | 78 | 0.620515 |
79416de3e1548ac303c9d23a648cf0745c5dadc3 | 143 | py | Python | _individual_1_.py | 126alexander/LAB_13 | 404e9f07a26be2204cd682940da29944366aa214 | [
"MIT"
] | null | null | null | _individual_1_.py | 126alexander/LAB_13 | 404e9f07a26be2204cd682940da29944366aa214 | [
"MIT"
] | null | null | null | _individual_1_.py | 126alexander/LAB_13 | 404e9f07a26be2204cd682940da29944366aa214 | [
"MIT"
] | null | null | null | from _individual_1_1 import *
a = [1, 2, 3, 4, 5, 65, 6, ]
max_func = func_1()
min_func = func_1('min')
print(max_func(a))
print(min_func(a)) | 17.875 | 29 | 0.65035 |
79416f0493734d2a6403034a2f591bd478ba5a26 | 13,193 | py | Python | test/functional/rpc_blockchain.py | Phonemetra/TurboCoin | 041f807a3f68bb4b6d9b6c39235e71f1598153d9 | [
"MIT"
] | null | null | null | test/functional/rpc_blockchain.py | Phonemetra/TurboCoin | 041f807a3f68bb4b6d9b6c39235e71f1598153d9 | [
"MIT"
] | null | null | null | test/functional/rpc_blockchain.py | Phonemetra/TurboCoin | 041f807a3f68bb4b6d9b6c39235e71f1598153d9 | [
"MIT"
] | 3 | 2021-04-03T09:11:50.000Z | 2021-04-03T09:12:31.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2019 TurboCoin
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPCs related to blockchainstate.
Test the following RPCs:
- getblockchaininfo
- gettxoutsetinfo
- getdifficulty
- getbestblockhash
- getblockhash
- getblockheader
- getchaintxstats
- getnetworkhashps
- verifychain
Tests correspond to code in rpc/blockchain.cpp.
"""
from decimal import Decimal
import http.client
import subprocess
from test_framework.test_framework import TurbocoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises,
assert_raises_rpc_error,
assert_is_hex_string,
assert_is_hash_string,
)
from test_framework.blocktools import (
create_block,
create_coinbase,
TIME_GENESIS_BLOCK,
)
from test_framework.messages import (
msg_block,
)
from test_framework.mininode import (
P2PInterface,
)
class BlockchainTest(TurbocoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
self.mine_chain()
self.restart_node(0, extra_args=['-stopatheight=207', '-prune=1']) # Set extra args with pruning after rescan is complete
self._test_getblockchaininfo()
self._test_getchaintxstats()
self._test_gettxoutsetinfo()
self._test_getblockheader()
self._test_getdifficulty()
self._test_getnetworkhashps()
self._test_stopatheight()
self._test_waitforblockheight()
assert self.nodes[0].verifychain(4, 0)
def mine_chain(self):
self.log.info('Create some old blocks')
address = self.nodes[0].get_deterministic_priv_key().address
for t in range(TIME_GENESIS_BLOCK, TIME_GENESIS_BLOCK + 200 * 600, 600):
# ten-minute steps from genesis block time
self.nodes[0].setmocktime(t)
self.nodes[0].generatetoaddress(1, address)
assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 200)
def _test_getblockchaininfo(self):
self.log.info("Test getblockchaininfo")
keys = [
'bestblockhash',
'bip9_softforks',
'blocks',
'chain',
'chainwork',
'difficulty',
'headers',
'initialblockdownload',
'mediantime',
'pruned',
'size_on_disk',
'softforks',
'verificationprogress',
'warnings',
]
res = self.nodes[0].getblockchaininfo()
# result should have these additional pruning keys if manual pruning is enabled
assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning'] + keys))
# size_on_disk should be > 0
assert_greater_than(res['size_on_disk'], 0)
# pruneheight should be greater or equal to 0
assert_greater_than_or_equal(res['pruneheight'], 0)
# check other pruning fields given that prune=1
assert res['pruned']
assert not res['automatic_pruning']
self.restart_node(0, ['-stopatheight=207'])
res = self.nodes[0].getblockchaininfo()
# should have exact keys
assert_equal(sorted(res.keys()), keys)
self.restart_node(0, ['-stopatheight=207', '-prune=550'])
res = self.nodes[0].getblockchaininfo()
# result should have these additional pruning keys if prune=550
assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning', 'prune_target_size'] + keys))
# check related fields
assert res['pruned']
assert_equal(res['pruneheight'], 0)
assert res['automatic_pruning']
assert_equal(res['prune_target_size'], 576716800)
assert_greater_than(res['size_on_disk'], 0)
def _test_getchaintxstats(self):
self.log.info("Test getchaintxstats")
# Test `getchaintxstats` invalid extra parameters
assert_raises_rpc_error(-1, 'getchaintxstats', self.nodes[0].getchaintxstats, 0, '', 0)
# Test `getchaintxstats` invalid `nblocks`
assert_raises_rpc_error(-1, "JSON value is not an integer as expected", self.nodes[0].getchaintxstats, '')
assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, -1)
assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, self.nodes[0].getblockcount())
# Test `getchaintxstats` invalid `blockhash`
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].getchaintxstats, blockhash=0)
assert_raises_rpc_error(-8, "blockhash must be of length 64 (not 1, for '0')", self.nodes[0].getchaintxstats, blockhash='0')
assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].getchaintxstats, blockhash='ZZZ0000000000000000000000000000000000000000000000000000000000000')
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getchaintxstats, blockhash='0000000000000000000000000000000000000000000000000000000000000000')
blockhash = self.nodes[0].getblockhash(200)
self.nodes[0].invalidateblock(blockhash)
assert_raises_rpc_error(-8, "Block is not in main chain", self.nodes[0].getchaintxstats, blockhash=blockhash)
self.nodes[0].reconsiderblock(blockhash)
chaintxstats = self.nodes[0].getchaintxstats(nblocks=1)
# 200 txs plus genesis tx
assert_equal(chaintxstats['txcount'], 201)
# tx rate should be 1 per 10 minutes, or 1/600
# we have to round because of binary math
assert_equal(round(chaintxstats['txrate'] * 600, 10), Decimal(1))
b1_hash = self.nodes[0].getblockhash(1)
b1 = self.nodes[0].getblock(b1_hash)
b200_hash = self.nodes[0].getblockhash(200)
b200 = self.nodes[0].getblock(b200_hash)
time_diff = b200['mediantime'] - b1['mediantime']
chaintxstats = self.nodes[0].getchaintxstats()
assert_equal(chaintxstats['time'], b200['time'])
assert_equal(chaintxstats['txcount'], 201)
assert_equal(chaintxstats['window_final_block_hash'], b200_hash)
assert_equal(chaintxstats['window_block_count'], 199)
assert_equal(chaintxstats['window_tx_count'], 199)
assert_equal(chaintxstats['window_interval'], time_diff)
assert_equal(round(chaintxstats['txrate'] * time_diff, 10), Decimal(199))
chaintxstats = self.nodes[0].getchaintxstats(blockhash=b1_hash)
assert_equal(chaintxstats['time'], b1['time'])
assert_equal(chaintxstats['txcount'], 2)
assert_equal(chaintxstats['window_final_block_hash'], b1_hash)
assert_equal(chaintxstats['window_block_count'], 0)
assert 'window_tx_count' not in chaintxstats
assert 'window_interval' not in chaintxstats
assert 'txrate' not in chaintxstats
def _test_gettxoutsetinfo(self):
node = self.nodes[0]
res = node.gettxoutsetinfo()
assert_equal(res['total_amount'], Decimal('8725.00000000'))
assert_equal(res['transactions'], 200)
assert_equal(res['height'], 200)
assert_equal(res['txouts'], 200)
assert_equal(res['bogosize'], 15000),
assert_equal(res['bestblock'], node.getblockhash(200))
size = res['disk_size']
assert size > 6400
assert size < 64000
assert_equal(len(res['bestblock']), 64)
assert_equal(len(res['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() works for blockchain with just the genesis block")
b1hash = node.getblockhash(1)
node.invalidateblock(b1hash)
res2 = node.gettxoutsetinfo()
assert_equal(res2['transactions'], 0)
assert_equal(res2['total_amount'], Decimal('0'))
assert_equal(res2['height'], 0)
assert_equal(res2['txouts'], 0)
assert_equal(res2['bogosize'], 0),
assert_equal(res2['bestblock'], node.getblockhash(0))
assert_equal(len(res2['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block")
node.reconsiderblock(b1hash)
res3 = node.gettxoutsetinfo()
# The field 'disk_size' is non-deterministic and can thus not be
# compared between res and res3. Everything else should be the same.
del res['disk_size'], res3['disk_size']
assert_equal(res, res3)
def _test_getblockheader(self):
node = self.nodes[0]
assert_raises_rpc_error(-8, "hash must be of length 64 (not 8, for 'nonsense')", node.getblockheader, "nonsense")
assert_raises_rpc_error(-8, "hash must be hexadecimal string (not 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844')", node.getblockheader, "ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844")
assert_raises_rpc_error(-5, "Block not found", node.getblockheader, "0cf7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844")
besthash = node.getbestblockhash()
secondbesthash = node.getblockhash(199)
header = node.getblockheader(blockhash=besthash)
assert_equal(header['hash'], besthash)
assert_equal(header['height'], 200)
assert_equal(header['confirmations'], 1)
assert_equal(header['previousblockhash'], secondbesthash)
assert_is_hex_string(header['chainwork'])
assert_equal(header['nTx'], 1)
assert_is_hash_string(header['hash'])
assert_is_hash_string(header['previousblockhash'])
assert_is_hash_string(header['merkleroot'])
assert_is_hash_string(header['bits'], length=None)
assert isinstance(header['time'], int)
assert isinstance(header['mediantime'], int)
assert isinstance(header['nonce'], int)
assert isinstance(header['version'], int)
assert isinstance(int(header['versionHex'], 16), int)
assert isinstance(header['difficulty'], Decimal)
def _test_getdifficulty(self):
difficulty = self.nodes[0].getdifficulty()
# 1 hash in 2 should be valid, so difficulty should be 1/2**31
# binary => decimal => binary math is why we do this check
assert abs(difficulty * 2**31 - 1) < 0.0001
def _test_getnetworkhashps(self):
hashes_per_second = self.nodes[0].getnetworkhashps()
# This should be 2 hashes every 10 minutes or 1/300
assert abs(hashes_per_second * 300 - 1) < 0.0001
def _test_stopatheight(self):
assert_equal(self.nodes[0].getblockcount(), 200)
self.nodes[0].generatetoaddress(6, self.nodes[0].get_deterministic_priv_key().address)
assert_equal(self.nodes[0].getblockcount(), 206)
self.log.debug('Node should not stop at this height')
assert_raises(subprocess.TimeoutExpired, lambda: self.nodes[0].process.wait(timeout=3))
try:
self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)
except (ConnectionError, http.client.BadStatusLine):
pass # The node already shut down before response
self.log.debug('Node should stop at this height...')
self.nodes[0].wait_until_stopped()
self.start_node(0)
assert_equal(self.nodes[0].getblockcount(), 207)
def _test_waitforblockheight(self):
self.log.info("Test waitforblockheight")
node = self.nodes[0]
node.add_p2p_connection(P2PInterface())
current_height = node.getblock(node.getbestblockhash())['height']
# Create a fork somewhere below our current height, invalidate the tip
# of that fork, and then ensure that waitforblockheight still
# works as expected.
#
# (Previously this was broken based on setting
# `rpc/blockchain.cpp:latestblock` incorrectly.)
#
b20hash = node.getblockhash(20)
b20 = node.getblock(b20hash)
def solve_and_send_block(prevhash, height, time):
b = create_block(prevhash, create_coinbase(height), time)
b.solve()
node.p2p.send_message(msg_block(b))
node.p2p.sync_with_ping()
return b
b21f = solve_and_send_block(int(b20hash, 16), 21, b20['time'] + 1)
b22f = solve_and_send_block(b21f.sha256, 22, b21f.nTime + 1)
node.invalidateblock(b22f.hash)
def assert_waitforheight(height, timeout=2):
assert_equal(
node.waitforblockheight(height=height, timeout=timeout)['height'],
current_height)
assert_waitforheight(0)
assert_waitforheight(current_height - 1)
assert_waitforheight(current_height)
assert_waitforheight(current_height + 1)
if __name__ == '__main__':
BlockchainTest().main()
| 42.15016 | 257 | 0.673691 |
79416fef605b37d504e6604efb47df014e319cb7 | 2,350 | py | Python | solutions/bs_gc.py | ikumen/rosalind | 1a265f44eef201dc82d73e9d4f97064302079cd9 | [
"MIT"
] | null | null | null | solutions/bs_gc.py | ikumen/rosalind | 1a265f44eef201dc82d73e9d4f97064302079cd9 | [
"MIT"
] | null | null | null | solutions/bs_gc.py | ikumen/rosalind | 1a265f44eef201dc82d73e9d4f97064302079cd9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
'''
bs_gc.py: Computing GC Content (http://rosalind.info/problems/gc/)
Given: At most 10 DNA strings in FASTA format (of length at most 1 kbp each).
Return: The ID of the string having the highest GC-content, followed by the
GC-content of that string. Rosalind allows for a default error of 0.001 in all
decimal answers unless otherwise stated; please see the note on absolute error below.
Sample Dataset:
>Rosalind_6404
CCTGCGGAAGATCGGCACTAGAATAGCCAGAACCGTTTCTCTGAGGCTTCCGGCCTTCCC
TCCCACTAATAATTCTGAGG
>Rosalind_5959
CCATCGGTAGCGCATCCTTAGTCCAATTAAGTCCCTATCCAGGCGCTCCGCCGAAGGTCT
ATATCCATTTGTCAGCAGACACGC
>Rosalind_0808
CCACCCTCGTGGTATGGCTAGGCATTCAGGAACCGGAGAACGCTTCAGACCAGCCCGGAC
TGGGAACCTGCGGGCAGTAGGTGGAAT
Sample Output:
Rosalind_0808
60.919540
'''
import sys
import os
import pytest
from collections import Counter
from helpers import parse_fasta, output_path
def compute_gc_content(dna):
'''Computes the gc content of a given dna string.'''
cntr = Counter(dna)
gc = sum([cntr[base] for base in ['G','C']])
return (gc / len(dna)) * 100
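def test_compute_gc_content_example():
    '''Added illustrative check (not from the original problem data):
    'AGCTATAG' has 3 G/C bases out of 8, i.e. 37.5% GC content.'''
    assert compute_gc_content('AGCTATAG') == 37.5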
def highest_gc(dnas):
'''Computes the GC for each dna item from dnas list, and returns the
highest GC and id of dna item it came from.'''
hi_gc = None
for dna in dnas:
gc = compute_gc_content(dna['dna'])
if not hi_gc or hi_gc['gc'] < gc:
hi_gc = {
'id': dna['id'],
'gc': gc
}
return hi_gc
def test_highest_gc():
'''Only tests the highest_gc and compute_gc_content methods,
does not test loading the fasta file.
'''
dnas = [
{'id':'Rosalind_6404','dna':'CCTGCGGAAGATCGGCACTAGAATAGCCAGAACCGTTTCTCTGAGGCTTCCGGCCTTCCCTCCCACTAATAATTCTGAGG'},
{'id':'Rosalind_5959','dna':'CCATCGGTAGCGCATCCTTAGTCCAATTAAGTCCCTATCCAGGCGCTCCGCCGAAGGTCTATATCCATTTGTCAGCAGACACGC'},
{'id':'Rosalind_0808','dna':'CCACCCTCGTGGTATGGCTAGGCATTCAGGAACCGGAGAACGCTTCAGACCAGCCCGGACTGGGAACCTGCGGGCAGTAGGTGGAAT'}
]
hi_gc = highest_gc(dnas)
assert 'Rosalind_0808' == hi_gc['id']
assert 60.91954022988506 == hi_gc['gc']
def main():
'''Main runner, to read data, compute and saves output.'''
sequences = parse_fasta(os.path.join(os.path.dirname(__file__), 'data/rosalind_gc.txt'))
hi_gc = highest_gc(sequences)
with open(output_path(__file__), 'w') as output:
output.write(hi_gc['id'] + '\n')
output.write(str(hi_gc['gc']))
if __name__ == '__main__':
main()
| 28.313253 | 120 | 0.758298 |
7941701d6be9f1438b480304259410b52a2fef4f | 21,029 | py | Python | pymodbus/server/sync.py | khanhngd24/modbus | a5ca5d578a823a431b95fdde76b074407b38e867 | [
"W3C"
] | null | null | null | pymodbus/server/sync.py | khanhngd24/modbus | a5ca5d578a823a431b95fdde76b074407b38e867 | [
"W3C"
] | null | null | null | pymodbus/server/sync.py | khanhngd24/modbus | a5ca5d578a823a431b95fdde76b074407b38e867 | [
"W3C"
] | null | null | null | '''
Implementation of a Threaded Modbus Server
------------------------------------------
'''
from binascii import b2a_hex
import serial
import socket
import traceback
from pymodbus.constants import Defaults
from pymodbus.factory import ServerDecoder
from pymodbus.datastore import ModbusServerContext
from pymodbus.device import ModbusControlBlock
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.transaction import *
from pymodbus.exceptions import NotImplementedException, NoSuchSlaveException
from pymodbus.pdu import ModbusExceptions as merror
from pymodbus.compat import socketserver, byte2int
#---------------------------------------------------------------------------#
# Logging
#---------------------------------------------------------------------------#
import logging
_logger = logging.getLogger(__name__)
#---------------------------------------------------------------------------#
# Protocol Handlers
#---------------------------------------------------------------------------#
class ModbusBaseRequestHandler(socketserver.BaseRequestHandler):
''' Implements the modbus server protocol
This uses the socketserver.BaseRequestHandler to implement
the client handler.
'''
def setup(self):
''' Callback for when a client connects
'''
_logger.debug("Client Connected [%s:%s]" % self.client_address)
self.running = True
self.framer = self.server.framer(self.server.decoder)
self.server.threads.append(self)
def finish(self):
''' Callback for when a client disconnects
'''
_logger.debug("Client Disconnected [%s:%s]" % self.client_address)
self.server.threads.remove(self)
def execute(self, request):
''' The callback to call with the resulting message
:param request: The decoded request message
'''
try:
context = self.server.context[request.unit_id]
response = request.execute(context)
except NoSuchSlaveException as ex:
_logger.debug("requested slave does not exist: %s" % request.unit_id )
if self.server.ignore_missing_slaves:
return # the client will simply timeout waiting for a response
response = request.doException(merror.GatewayNoResponse)
except Exception as ex:
_logger.debug("Datastore unable to fulfill request: %s; %s", ex, traceback.format_exc() )
response = request.doException(merror.SlaveFailure)
response.transaction_id = request.transaction_id
response.unit_id = request.unit_id
self.send(response)
#---------------------------------------------------------------------------#
# Base class implementations
#---------------------------------------------------------------------------#
def handle(self):
''' Callback when we receive any data
'''
raise NotImplementedException("Method not implemented by derived class")
def send(self, message):
        ''' Send a modbus response message over the network
:param message: The unencoded modbus response
'''
raise NotImplementedException("Method not implemented by derived class")
class ModbusSingleRequestHandler(ModbusBaseRequestHandler):
''' Implements the modbus server protocol
This uses the socketserver.BaseRequestHandler to implement
the client handler for a single client(serial clients)
'''
def handle(self):
''' Callback when we receive any data
'''
while self.running:
try:
data = self.request.recv(1024)
if data:
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug("recv: " + " ".join([hex(byte2int(x)) for x in data]))
if isinstance(self.framer, ModbusAsciiFramer):
unit_address = int(data[1:3], 16)
elif isinstance(self.framer, ModbusBinaryFramer):
unit_address = byte2int(data[1])
else:
unit_address = byte2int(data[0])
if unit_address in self.server.context:
self.framer.processIncomingPacket(data, self.execute)
except Exception as msg:
# since we only have a single socket, we cannot exit
# Clear frame buffer
self.framer.resetFrame()
_logger.error("Socket error occurred %s" % msg)
def send(self, message):
        ''' Send a modbus response message over the network
:param message: The unencoded modbus response
'''
if message.should_respond:
#self.server.control.Counter.BusMessage += 1
pdu = self.framer.buildPacket(message)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug('send: %s' % b2a_hex(pdu))
return self.request.send(pdu)
class CustomSingleRequestHandler(ModbusSingleRequestHandler):
def __init__(self, request, client_address, server):
self.request = request
self.client_address = client_address
self.server = server
self.running = True
self.setup()
class ModbusConnectedRequestHandler(ModbusBaseRequestHandler):
''' Implements the modbus server protocol
This uses the socketserver.BaseRequestHandler to implement
the client handler for a connected protocol (TCP).
'''
def handle(self):
'''Callback when we receive any data, until self.running becomes not True. Blocks indefinitely
awaiting data. If shutdown is required, then the global socket.settimeout(<seconds>) may be
used, to allow timely checking of self.running. However, since this also affects socket
connects, if there are outgoing socket connections used in the same program, then these will
        be prevented if the specified timeout is too short. Hence, this is unreliable.
To respond to Modbus...Server.server_close() (which clears each handler's self.running),
derive from this class to provide an alternative handler that awakens from time to time when
no input is available and checks self.running. Use Modbus...Server( handler=... ) keyword
to supply the alternative request handler class.
'''
reset_frame = False
while self.running:
try:
data = self.request.recv(1024)
if not data: self.running = False
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(' '.join([hex(byte2int(x)) for x in data]))
# if not self.server.control.ListenOnly:
self.framer.processIncomingPacket(data, self.execute)
except socket.timeout as msg:
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug("Socket timeout occurred %s", msg)
reset_frame = True
except socket.error as msg:
_logger.error("Socket error occurred %s" % msg)
self.running = False
except:
_logger.error("Socket exception occurred %s" % traceback.format_exc() )
self.running = False
reset_frame = True
finally:
if reset_frame:
self.framer.resetFrame()
reset_frame = False
def send(self, message):
        ''' Send a modbus response message over the network
:param message: The unencoded modbus response
'''
if message.should_respond:
#self.server.control.Counter.BusMessage += 1
pdu = self.framer.buildPacket(message)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug('send: %s' % b2a_hex(pdu))
return self.request.send(pdu)
class ModbusDisconnectedRequestHandler(ModbusBaseRequestHandler):
''' Implements the modbus server protocol
This uses the socketserver.BaseRequestHandler to implement
the client handler for a disconnected protocol (UDP). The
only difference is that we have to specify who to send the
resulting packet data to.
'''
socket = None
def handle(self):
''' Callback when we receive any data
'''
reset_frame = False
while self.running:
try:
data, self.socket = self.request
if not data:
self.running = False
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(' '.join([hex(byte2int(x)) for x in data]))
# if not self.server.control.ListenOnly:
self.framer.processIncomingPacket(data, self.execute)
except socket.timeout: pass
except socket.error as msg:
_logger.error("Socket error occurred %s" % msg)
self.running = False
reset_frame = True
except Exception as msg:
_logger.error(msg)
self.running = False
reset_frame = True
finally:
if reset_frame:
self.framer.resetFrame()
reset_frame = False
def send(self, message):
        ''' Send a modbus response message over the network
:param message: The unencoded modbus response
'''
if message.should_respond:
#self.server.control.Counter.BusMessage += 1
pdu = self.framer.buildPacket(message)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug('send: %s' % b2a_hex(pdu))
return self.socket.sendto(pdu, self.client_address)
#---------------------------------------------------------------------------#
# Server Implementations
#---------------------------------------------------------------------------#
class ModbusTcpServer(socketserver.ThreadingTCPServer):
'''
A modbus threaded tcp socket server
We inherit and overload the socket server so that we
can control the client threads as well as have a single
server context instance.
'''
def __init__(self, context, framer=None, identity=None, address=None, handler=None, **kwargs):
''' Overloaded initializer for the socket server
        If the identity structure is not passed in, the ModbusControlBlock
uses its own empty structure.
:param context: The ModbusServerContext datastore
:param framer: The framer strategy to use
        :param identity: An optional identity structure
:param address: An optional (interface, port) to bind to.
:param handler: A handler for each client session; default is ModbusConnectedRequestHandler
:param ignore_missing_slaves: True to not send errors on a request to a missing slave
'''
self.threads = []
self.decoder = ServerDecoder()
self.framer = framer or ModbusSocketFramer
self.context = context or ModbusServerContext()
self.control = ModbusControlBlock()
self.address = address or ("", Defaults.Port)
self.handler = handler or ModbusConnectedRequestHandler
self.ignore_missing_slaves = kwargs.get('ignore_missing_slaves', Defaults.IgnoreMissingSlaves)
if isinstance(identity, ModbusDeviceIdentification):
self.control.Identity.update(identity)
socketserver.ThreadingTCPServer.__init__(self,
self.address, self.handler)
def process_request(self, request, client):
''' Callback for connecting a new client thread
:param request: The request to handle
:param client: The address of the client
'''
_logger.debug("Started thread to serve client at " + str(client))
socketserver.ThreadingTCPServer.process_request(self, request, client)
def shutdown(self):
''' Stops the serve_forever loop.
Overridden to signal handlers to stop.
'''
for thread in self.threads:
thread.running = False
socketserver.ThreadingTCPServer.shutdown(self)
def server_close(self):
''' Callback for stopping the running server
'''
_logger.debug("Modbus server stopped")
self.socket.close()
for thread in self.threads:
thread.running = False
class ModbusUdpServer(socketserver.ThreadingUDPServer):
'''
A modbus threaded udp socket server
We inherit and overload the socket server so that we
can control the client threads as well as have a single
server context instance.
'''
def __init__(self, context, framer=None, identity=None, address=None, handler=None, **kwargs):
''' Overloaded initializer for the socket server
        If the identity structure is not passed in, the ModbusControlBlock
uses its own empty structure.
:param context: The ModbusServerContext datastore
:param framer: The framer strategy to use
        :param identity: An optional identity structure
:param address: An optional (interface, port) to bind to.
        :param handler: A handler for each client session; default is ModbusDisconnectedRequestHandler
:param ignore_missing_slaves: True to not send errors on a request to a missing slave
'''
self.threads = []
self.decoder = ServerDecoder()
self.framer = framer or ModbusSocketFramer
self.context = context or ModbusServerContext()
self.control = ModbusControlBlock()
self.address = address or ("", Defaults.Port)
self.handler = handler or ModbusDisconnectedRequestHandler
self.ignore_missing_slaves = kwargs.get('ignore_missing_slaves', Defaults.IgnoreMissingSlaves)
if isinstance(identity, ModbusDeviceIdentification):
self.control.Identity.update(identity)
socketserver.ThreadingUDPServer.__init__(self,
self.address, self.handler)
def process_request(self, request, client):
''' Callback for connecting a new client thread
:param request: The request to handle
:param client: The address of the client
'''
packet, socket = request # TODO I might have to rewrite
_logger.debug("Started thread to serve client at " + str(client))
socketserver.ThreadingUDPServer.process_request(self, request, client)
def server_close(self):
''' Callback for stopping the running server
'''
_logger.debug("Modbus server stopped")
self.socket.close()
for thread in self.threads:
thread.running = False
class ModbusSerialServer(object):
'''
A modbus threaded serial socket server
We inherit and overload the socket server so that we
can control the client threads as well as have a single
server context instance.
'''
handler = None
def __init__(self, context, framer=None, identity=None, **kwargs):
''' Overloaded initializer for the socket server
        If the identity structure is not passed in, the ModbusControlBlock
uses its own empty structure.
:param context: The ModbusServerContext datastore
:param framer: The framer strategy to use
        :param identity: An optional identity structure
:param port: The serial port to attach to
:param stopbits: The number of stop bits to use
:param bytesize: The bytesize of the serial messages
:param parity: Which kind of parity to use
:param baudrate: The baud rate to use for the serial device
:param timeout: The timeout to use for the serial device
:param ignore_missing_slaves: True to not send errors on a request to a missing slave
'''
self.threads = []
self.decoder = ServerDecoder()
self.framer = framer or ModbusAsciiFramer
self.context = context or ModbusServerContext()
self.control = ModbusControlBlock()
if isinstance(identity, ModbusDeviceIdentification):
self.control.Identity.update(identity)
self.device = kwargs.get('port', 0)
self.stopbits = kwargs.get('stopbits', Defaults.Stopbits)
self.bytesize = kwargs.get('bytesize', Defaults.Bytesize)
self.parity = kwargs.get('parity', Defaults.Parity)
self.baudrate = kwargs.get('baudrate', Defaults.Baudrate)
self.timeout = kwargs.get('timeout', Defaults.Timeout)
self.ignore_missing_slaves = kwargs.get('ignore_missing_slaves', Defaults.IgnoreMissingSlaves)
self.socket = None
if self._connect():
self.is_running = True
self._build_handler()
def _connect(self):
''' Connect to the serial server
:returns: True if connection succeeded, False otherwise
'''
if self.socket: return True
try:
self.socket = serial.Serial(port=self.device, timeout=self.timeout,
bytesize=self.bytesize, stopbits=self.stopbits,
baudrate=self.baudrate, parity=self.parity)
except serial.SerialException as msg:
_logger.error(msg)
        return self.socket is not None
def _build_handler(self):
''' A helper method to create and monkeypatch
a serial handler.
:returns: A patched handler
'''
request = self.socket
request.send = request.write
request.recv = request.read
self.handler = CustomSingleRequestHandler(request,
(self.device, self.device),
self)
def serve_forever(self):
''' Callback for connecting a new client thread
:param request: The request to handle
:param client: The address of the client
'''
if self._connect():
_logger.debug("Started thread to serve client")
if not self.handler:
self._build_handler()
while self.is_running:
self.handler.handle()
else:
_logger.error("Error opening serial port , Unable to start server!!")
def server_close(self):
''' Callback for stopping the running server
'''
_logger.debug("Modbus server stopped")
self.is_running = False
self.handler.finish()
self.handler.running = False
self.handler = None
self.socket.close()
#---------------------------------------------------------------------------#
# Creation Factories
#---------------------------------------------------------------------------#
def StartTcpServer(context=None, identity=None, address=None, **kwargs):
''' A factory to start and run a tcp modbus server
:param context: The ModbusServerContext datastore
:param identity: An optional identify structure
:param address: An optional (interface, port) to bind to.
:param ignore_missing_slaves: True to not send errors on a request to a missing slave
'''
framer = ModbusSocketFramer
server = ModbusTcpServer(context, framer, identity, address, **kwargs)
server.serve_forever()
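# Illustrative call (sketch; the datastore setup below is an assumption and is not
# defined in this module):
#   from pymodbus.datastore import ModbusSlaveContext, ModbusSequentialDataBlock
#   store = ModbusSlaveContext(hr=ModbusSequentialDataBlock(0, [0] * 100))
#   context = ModbusServerContext(slaves=store, single=True)
#   StartTcpServer(context, address=("0.0.0.0", 5020))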
def StartUdpServer(context=None, identity=None, address=None, **kwargs):
''' A factory to start and run a udp modbus server
:param context: The ModbusServerContext datastore
:param identity: An optional identify structure
:param address: An optional (interface, port) to bind to.
:param framer: The framer to operate with (default ModbusSocketFramer)
:param ignore_missing_slaves: True to not send errors on a request to a missing slave
'''
framer = kwargs.pop('framer', ModbusSocketFramer)
server = ModbusUdpServer(context, framer, identity, address, **kwargs)
server.serve_forever()
def StartSerialServer(context=None, identity=None, **kwargs):
''' A factory to start and run a serial modbus server
:param context: The ModbusServerContext datastore
:param identity: An optional identify structure
:param framer: The framer to operate with (default ModbusAsciiFramer)
:param port: The serial port to attach to
:param stopbits: The number of stop bits to use
:param bytesize: The bytesize of the serial messages
:param parity: Which kind of parity to use
:param baudrate: The baud rate to use for the serial device
:param timeout: The timeout to use for the serial device
:param ignore_missing_slaves: True to not send errors on a request to a missing slave
'''
framer = kwargs.pop('framer', ModbusAsciiFramer)
server = ModbusSerialServer(context, framer, identity, **kwargs)
server.serve_forever()
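# Illustrative call (sketch; the port name, framer and serial settings are
# assumptions for a typical RTU setup):
#   StartSerialServer(context, framer=ModbusRtuFramer,
#                     port='/dev/ttyUSB0', baudrate=9600, timeout=1)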
#---------------------------------------------------------------------------#
# Exported symbols
#---------------------------------------------------------------------------#
__all__ = [
"StartTcpServer", "StartUdpServer", "StartSerialServer"
]
| 39.602637 | 103 | 0.616339 |
7941704fbd630e404c5fc72fc723e423a6120bd4 | 300 | py | Python | src/web/modules/post/controllers/workflow/control.py | unkyulee/elastic-cms | 3ccf4476c3523d4fefc0d8d9dee0196815b81489 | [
"MIT"
] | 2 | 2017-04-30T07:29:23.000Z | 2017-04-30T07:36:27.000Z | src/web/modules/post/controllers/workflow/control.py | unkyulee/elastic-cms | 3ccf4476c3523d4fefc0d8d9dee0196815b81489 | [
"MIT"
] | null | null | null | src/web/modules/post/controllers/workflow/control.py | unkyulee/elastic-cms | 3ccf4476c3523d4fefc0d8d9dee0196815b81489 | [
"MIT"
] | null | null | null | import importlib
def get(p):
p['mode'] = 'default'
if len(p['nav']) > 3: p['mode'] = p['nav'][3]
if not p['operation']: p['operation'] = 'post'
path = "web.modules.post.controllers.workflow.{}".format(p['mode'])
control = importlib.import_module(path)
return control.get(p)
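# Illustrative dispatch (sketch): with len(p['nav']) > 3 and p['nav'][3] == 'edit',
# this imports web.modules.post.controllers.workflow.edit and returns its get(p).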
| 27.272727 | 71 | 0.603333 |
7941710cc1370550f23ccea5fb150c1c9656fa87 | 1,364 | py | Python | route.py | Develkone/AdvancedEAST-with-Baidu-OCR | 0a22d34448cbb0f3e8d924ce3321649c29ea1d3a | [
"MIT"
] | null | null | null | route.py | Develkone/AdvancedEAST-with-Baidu-OCR | 0a22d34448cbb0f3e8d924ce3321649c29ea1d3a | [
"MIT"
] | null | null | null | route.py | Develkone/AdvancedEAST-with-Baidu-OCR | 0a22d34448cbb0f3e8d924ce3321649c29ea1d3a | [
"MIT"
] | null | null | null | from flask import request, jsonify, Flask
from flask_cors import CORS
import os
import time
import predict
import common
from baidu_api import textGO
import shutil
app = Flask(__name__)
CORS(app, resources=r'/*')
@app.route("/", methods=["GET"])
def html():
return app.send_static_file("index.html")
@app.route("/recognition", methods=["POST"])
def recognition():
if request.form["original_img"]:
t = time.time()
path = "./temp/%d/"%int(round(t * 1000))
os.mkdir(path)
ori_img_path = path + "%d.jpg"%int(round(t * 1000))
common.base64_to_image(request.form["original_img"], ori_img_path)
pre_img_path, sub_imgs = predict.predict(east_detect, ori_img_path, pixel_threshold=0.9)
pre_img = common.image_to_base64(pre_img_path)
# print(pre_img)
result_text = ""
if sub_imgs != []:
for sub_img in sub_imgs:
if sub_img != "":
# print(sub_img)
args = textGO.get_args()
text_GO = textGO.TextGO(args.config)
words = text_GO.ocr(sub_img)
result_text += words
# print(result_text)
shutil.rmtree(path)
return jsonify(img=pre_img, text=result_text)
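# Illustrative client call (sketch; host/port assume a default local flask run and
# base64_jpeg_string is a hypothetical base64-encoded image):
#   import requests
#   requests.post("http://127.0.0.1:5000/recognition",
#                 data={"original_img": base64_jpeg_string})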
if __name__ == "__main__":
east_detect = predict.east_detect()
app.run() | 31.72093 | 96 | 0.605572 |
79417120aa4ffd7640d2379de2b37f7be354a6cf | 409 | py | Python | backend/black_disk_31503/wsgi.py | crowdbotics-apps/black-disk-31503 | 3f6febbf243f3d0cb508b776eda4633d37a23121 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/black_disk_31503/wsgi.py | crowdbotics-apps/black-disk-31503 | 3f6febbf243f3d0cb508b776eda4633d37a23121 | [
"FTL",
"AML",
"RSA-MD"
] | 6 | 2021-10-18T00:26:48.000Z | 2021-10-18T00:26:52.000Z | backend/black_disk_31503/wsgi.py | crowdbotics-apps/black-disk-31503 | 3f6febbf243f3d0cb508b776eda4633d37a23121 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """
WSGI config for black_disk_31503 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'black_disk_31503.settings')
application = get_wsgi_application()
| 24.058824 | 78 | 0.794621 |
7941713c0b01c4da7197f8635bff26f1a569422c | 414 | py | Python | students/k3340/practical_works/Dobryakov_David/simple_django_app/django_project_dobryakov/simple_django_app/urls.py | TonikX/ITMO_ICT_-WebProgramming_2020 | ba566c1b3ab04585665c69860b713741906935a0 | [
"MIT"
] | 10 | 2020-03-20T09:06:12.000Z | 2021-07-27T13:06:02.000Z | students/k3340/practical_works/Dobryakov_David/simple_django_app/django_project_dobryakov/simple_django_app/urls.py | TonikX/ITMO_ICT_-WebProgramming_2020 | ba566c1b3ab04585665c69860b713741906935a0 | [
"MIT"
] | 134 | 2020-03-23T09:47:48.000Z | 2022-03-12T01:05:19.000Z | students/k3340/practical_works/Dobryakov_David/simple_django_app/django_project_dobryakov/simple_django_app/urls.py | TonikX/ITMO_ICT_-WebProgramming_2020 | ba566c1b3ab04585665c69860b713741906935a0 | [
"MIT"
] | 71 | 2020-03-20T12:45:56.000Z | 2021-10-31T19:22:25.000Z | from django.contrib import admin
from django.urls import path, include
from . import views
urlpatterns = [
path('auto', views.AutoView.as_view()),
path('owner/<int:owner_id>', views.show_owner),
path('owners', views.show_owners),
path('add_auto', views.AddAuto.as_view(success_url='thanks')),
path('thanks', views.show_thanks),
path('add_owner', views.add_owner)
]
| 31.846154 | 70 | 0.664251 |
7941713f6a896dff775a8e2d8b895fcb4dd6d5db | 2,247 | py | Python | src/engine/public_components/scene.py | gabdube/panic-panda | ecbda506eeafe3dbdf932cdb20b938646502f892 | [
"MIT"
] | 67 | 2019-01-06T13:01:46.000Z | 2022-01-04T17:50:58.000Z | src/engine/public_components/scene.py | gabdube/panic-panda | ecbda506eeafe3dbdf932cdb20b938646502f892 | [
"MIT"
] | 2 | 2019-01-07T18:25:00.000Z | 2021-05-10T09:32:17.000Z | src/engine/public_components/scene.py | gabdube/panic-panda | ecbda506eeafe3dbdf932cdb20b938646502f892 | [
"MIT"
] | 6 | 2019-07-31T08:16:26.000Z | 2020-12-26T04:34:52.000Z | from . import Shader, Mesh, GameObject, Image, Sampler, Compute
from ..base_types import Id
class Scene(object):
def __init__(self):
self._id = Id()
self.shaders = ComponentArray(Shader)
self.computes = ComponentArray(Compute)
self.meshes = ComponentArray(Mesh)
self.objects = ComponentArray(GameObject)
self.samplers = ComponentArray(Sampler)
self.images = ComponentArray(Image)
self.update_obj_set = set()
self.update_shader_set = set()
empty = lambda: None
empty_w_events = lambda x, y: None
self.on_initialized = empty
self.on_window_resized = empty_w_events
self.on_mouse_move = empty_w_events
self.on_mouse_click = empty_w_events
self.on_key_pressed = empty_w_events
self.on_mouse_scroll = empty_w_events
@classmethod
def empty(cls):
scene = super().__new__(cls)
scene.__init__()
return scene
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id.value = value
@property
def loaded(self):
return self._id.value is not None
def update_objects(self, *objects):
self.update_obj_set.update(set(obj for obj in objects if isinstance(obj, GameObject)))
def update_shaders(self, *shaders):
self.update_shader_set.update(set(shader for shader in shaders if isinstance(shader, (Shader, Compute))))
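# Illustrative usage sketch (the mesh/object variables are hypothetical placeholders):
#   scene = Scene.empty()
#   scene.meshes.append(cube_mesh)          # cube_mesh: a Mesh instance
#   scene.update_objects(player_object)     # flag a GameObject for update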
class ComponentArray(list):
def __init__(self, component_type):
self.component_type = component_type
def append(self, i):
if not isinstance(i, self.component_type):
raise TypeError(f"Item type must be {self.component_type.__qualname__}, got {type(i)}")
i.id = len(self)
super().append(i)
def extend(self, *items):
offset = 0
for i in items:
if not isinstance(i, self.component_type):
raise TypeError(f"Item type must be {self.component_type.__qualname__}, got {type(i)}")
i.id = len(self) + offset
offset += 1
super().extend(items)
def __repr__(self):
return f"{self.component_type.__qualname__}({super().__repr__()})"
| 29.181818 | 113 | 0.635514 |
794172a9872bbd43c522338005c6577a55326279 | 667 | py | Python | ovs/lib/__init__.py | mflu/openvstorage_centos | 280a98d3e5d212d58297e0ffcecd325dfecef0f8 | [
"Apache-2.0"
] | 1 | 2015-08-29T16:36:40.000Z | 2015-08-29T16:36:40.000Z | ovs/lib/__init__.py | rootfs-analytics/openvstorage | 6184822340faea1d2927643330a7aaa781d92d36 | [
"Apache-2.0"
] | null | null | null | ovs/lib/__init__.py | rootfs-analytics/openvstorage | 6184822340faea1d2927643330a7aaa781d92d36 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This package contains the BLL
"""
import sys
sys.path.append('/opt/OpenvStorage')
| 33.35 | 74 | 0.758621 |
79417356ac96c7610c1d9c42917c0da7b6f65472 | 2,820 | py | Python | selfdrive/manager/process_config.py | sunnyc-op/jc01rho_BoltEV_EON | 48067cd344e01346037600f768912c3cd97ac5bf | [
"MIT"
] | null | null | null | selfdrive/manager/process_config.py | sunnyc-op/jc01rho_BoltEV_EON | 48067cd344e01346037600f768912c3cd97ac5bf | [
"MIT"
] | null | null | null | selfdrive/manager/process_config.py | sunnyc-op/jc01rho_BoltEV_EON | 48067cd344e01346037600f768912c3cd97ac5bf | [
"MIT"
] | null | null | null | import os
from selfdrive.manager.process import PythonProcess, NativeProcess, DaemonProcess
from selfdrive.hardware import EON, TICI, PC
WEBCAM = os.getenv("USE_WEBCAM") is not None
procs = [
# DaemonProcess("manage_athenad", "selfdrive.athena.manage_athenad", "AthenadPid"),
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
NativeProcess("camerad", "selfdrive/camerad", ["./camerad"], unkillable=True, driverview=True),
NativeProcess("clocksd", "selfdrive/clocksd", ["./clocksd"]),
NativeProcess("dmonitoringmodeld", "selfdrive/modeld", ["./dmonitoringmodeld"], enabled=(not PC or WEBCAM), driverview=True),
# NativeProcess("logcatd", "selfdrive/logcatd", ["./logcatd"]),
# NativeProcess("loggerd", "selfdrive/loggerd", ["./loggerd"]),
NativeProcess("modeld", "selfdrive/modeld", ["./modeld"]),
NativeProcess("navd", "selfdrive/ui/navd", ["./navd"], enabled=(PC or TICI), persistent=True),
NativeProcess("proclogd", "selfdrive/proclogd", ["./proclogd"]),
NativeProcess("sensord", "selfdrive/sensord", ["./sensord"], enabled=not PC, persistent=EON, sigkill=EON),
NativeProcess("ubloxd", "selfdrive/locationd", ["./ubloxd"], enabled=(not PC or WEBCAM)),
NativeProcess("ui", "selfdrive/ui", ["./ui"], persistent=True, watchdog_max_dt=(5 if TICI else None)),
NativeProcess("soundd", "selfdrive/ui/soundd", ["./soundd"], persistent=True),
NativeProcess("locationd", "selfdrive/locationd", ["./locationd"]),
NativeProcess("boardd", "selfdrive/boardd", ["./boardd"], enabled=False),
PythonProcess("calibrationd", "selfdrive.locationd.calibrationd"),
PythonProcess("controlsd", "selfdrive.controls.controlsd"),
#PythonProcess("deleter", "selfdrive.loggerd.deleter", persistent=True),
PythonProcess("dmonitoringd", "selfdrive.monitoring.dmonitoringd", enabled=(not PC or WEBCAM), driverview=True),
# PythonProcess("logmessaged", "selfdrive.logmessaged", persistent=True),
PythonProcess("pandad", "selfdrive.pandad", persistent=True),
PythonProcess("paramsd", "selfdrive.locationd.paramsd"),
PythonProcess("plannerd", "selfdrive.controls.plannerd"),
PythonProcess("radard", "selfdrive.controls.radard"),
PythonProcess("thermald", "selfdrive.thermald.thermald", persistent=True),
PythonProcess("timezoned", "selfdrive.timezoned", enabled=TICI, persistent=True),
#PythonProcess("tombstoned", "selfdrive.tombstoned", enabled=not PC, persistent=True),
#PythonProcess("updated", "selfdrive.updated", enabled=not PC, persistent=True),
# PythonProcess("uploader", "selfdrive.loggerd.uploader", persistent=True),
# EON only
PythonProcess("rtshield", "selfdrive.rtshield", enabled=EON),
PythonProcess("androidd", "selfdrive.hardware.eon.androidd", enabled=EON, persistent=True),
]
managed_processes = {p.name: p for p in procs}
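# Illustrative lookup (sketch): managed_processes["camerad"] returns the
# NativeProcess entry declared above for camerad.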
| 61.304348 | 127 | 0.73617 |
794173600fbbbdbbcc3b051a208ac6f59f4d9338 | 8,856 | py | Python | pyatv/mrp/srp.py | ckeehan/pyatv | a3c0d572c60cf6fa562939a232165ed839cdc592 | [
"MIT"
] | null | null | null | pyatv/mrp/srp.py | ckeehan/pyatv | a3c0d572c60cf6fa562939a232165ed839cdc592 | [
"MIT"
] | null | null | null | pyatv/mrp/srp.py | ckeehan/pyatv | a3c0d572c60cf6fa562939a232165ed839cdc592 | [
"MIT"
] | null | null | null | """Prototype code for MRP."""
import os
import uuid
import binascii
import hashlib
import logging
from srptools import SRPContext, SRPClientSession, constants
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.ed25519 import (
Ed25519PrivateKey,
Ed25519PublicKey,
)
from cryptography.hazmat.primitives.asymmetric.x25519 import (
X25519PrivateKey,
X25519PublicKey,
)
from pyatv import exceptions
from pyatv.support import log_binary
from pyatv.mrp import tlv8, chacha20
_LOGGER = logging.getLogger(__name__)
# pylint: disable=too-few-public-methods
class Credentials:
"""Identifiers and encryption keys used by MRP."""
def __init__(self, ltpk, ltsk, atv_id, client_id):
"""Initialize a new Credentials."""
self.ltpk = ltpk
self.ltsk = ltsk
self.atv_id = atv_id
self.client_id = client_id
@classmethod
def parse(cls, detail_string):
"""Parse a string represention of Credentials."""
split = detail_string.split(":")
if len(split) != 4:
raise exceptions.InvalidCredentialsError(
"invalid credentials: " + detail_string
)
ltpk = binascii.unhexlify(split[0])
ltsk = binascii.unhexlify(split[1])
atv_id = binascii.unhexlify(split[2])
client_id = binascii.unhexlify(split[3])
return Credentials(ltpk, ltsk, atv_id, client_id)
def __str__(self):
"""Return a string representation of credentials."""
return "{0}:{1}:{2}:{3}".format(
binascii.hexlify(self.ltpk).decode("utf-8"),
binascii.hexlify(self.ltsk).decode("utf-8"),
binascii.hexlify(self.atv_id).decode("utf-8"),
binascii.hexlify(self.client_id).decode("utf-8"),
)
def hkdf_expand(salt, info, shared_secret):
"""Derive encryption keys from shared secret."""
hkdf = HKDF(
algorithm=hashes.SHA512(),
length=32,
salt=salt.encode(),
info=info.encode(),
backend=default_backend(),
)
return hkdf.derive(shared_secret)
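# Illustrative derivation (sketch; mirrors the salt/info pairs used in verify2 below):
#   output_key = hkdf_expand("MediaRemote-Salt",
#                            "MediaRemote-Write-Encryption-Key", shared_secret)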
# pylint: disable=too-many-instance-attributes
class SRPAuthHandler:
"""Handle SRP crypto routines for auth and key derivation."""
def __init__(self):
"""Initialize a new SRPAuthHandler."""
self.pairing_id = str(uuid.uuid4()).encode()
self._signing_key = None
self._auth_private = None
self._auth_public = None
self._verify_private = None
self._verify_public = None
self._public_bytes = None
self._session = None
self._shared = None
self._session_key = None
def initialize(self):
"""Initialize operation by generating new keys."""
self._signing_key = Ed25519PrivateKey.from_private_bytes(os.urandom(32))
self._auth_private = self._signing_key.private_bytes(
encoding=serialization.Encoding.Raw,
format=serialization.PrivateFormat.Raw,
encryption_algorithm=serialization.NoEncryption(),
)
self._auth_public = self._signing_key.public_key().public_bytes(
encoding=serialization.Encoding.Raw, format=serialization.PublicFormat.Raw
)
self._verify_private = X25519PrivateKey.from_private_bytes(os.urandom(32))
self._verify_public = self._verify_private.public_key()
self._public_bytes = self._verify_public.public_bytes(
encoding=serialization.Encoding.Raw, format=serialization.PublicFormat.Raw
)
return self._auth_public, self._public_bytes
def verify1(self, credentials, session_pub_key, encrypted):
"""First verification step."""
self._shared = self._verify_private.exchange(
X25519PublicKey.from_public_bytes(session_pub_key)
)
session_key = hkdf_expand(
"Pair-Verify-Encrypt-Salt", "Pair-Verify-Encrypt-Info", self._shared
)
chacha = chacha20.Chacha20Cipher(session_key, session_key)
decrypted_tlv = tlv8.read_tlv(
chacha.decrypt(encrypted, nounce="PV-Msg02".encode())
)
identifier = decrypted_tlv[tlv8.TLV_IDENTIFIER]
signature = decrypted_tlv[tlv8.TLV_SIGNATURE]
if identifier != credentials.atv_id:
raise exceptions.AuthenticationError("incorrect device response")
info = session_pub_key + bytes(identifier) + self._public_bytes
ltpk = Ed25519PublicKey.from_public_bytes(bytes(credentials.ltpk))
try:
ltpk.verify(bytes(signature), bytes(info))
except InvalidSignature as ex:
raise exceptions.AuthenticationError("signature error") from ex
device_info = self._public_bytes + credentials.client_id + session_pub_key
device_signature = Ed25519PrivateKey.from_private_bytes(credentials.ltsk).sign(
device_info
)
tlv = tlv8.write_tlv(
{
tlv8.TLV_IDENTIFIER: credentials.client_id,
tlv8.TLV_SIGNATURE: device_signature,
}
)
return chacha.encrypt(tlv, nounce="PV-Msg03".encode())
def verify2(self):
"""Last verification step.
The derived keys (output, input) are returned here.
"""
output_key = hkdf_expand(
"MediaRemote-Salt", "MediaRemote-Write-Encryption-Key", self._shared
)
input_key = hkdf_expand(
"MediaRemote-Salt", "MediaRemote-Read-Encryption-Key", self._shared
)
log_binary(_LOGGER, "Keys", Output=output_key, Input=input_key)
return output_key, input_key
def step1(self, pin):
"""First pairing step."""
context = SRPContext(
"Pair-Setup",
str(pin),
prime=constants.PRIME_3072,
generator=constants.PRIME_3072_GEN,
hash_func=hashlib.sha512,
)
self._session = SRPClientSession(
context, binascii.hexlify(self._auth_private).decode()
)
def step2(self, atv_pub_key, atv_salt):
"""Second pairing step."""
pk_str = binascii.hexlify(atv_pub_key).decode()
salt = binascii.hexlify(atv_salt).decode()
self._session.process(pk_str, salt)
if not self._session.verify_proof(self._session.key_proof_hash):
raise exceptions.AuthenticationError("proofs do not match")
pub_key = binascii.unhexlify(self._session.public)
proof = binascii.unhexlify(self._session.key_proof)
log_binary(_LOGGER, "Client", Public=pub_key, Proof=proof)
return pub_key, proof
def step3(self):
"""Third pairing step."""
ios_device_x = hkdf_expand(
"Pair-Setup-Controller-Sign-Salt",
"Pair-Setup-Controller-Sign-Info",
binascii.unhexlify(self._session.key),
)
self._session_key = hkdf_expand(
"Pair-Setup-Encrypt-Salt",
"Pair-Setup-Encrypt-Info",
binascii.unhexlify(self._session.key),
)
device_info = ios_device_x + self.pairing_id + self._auth_public
device_signature = self._signing_key.sign(device_info)
tlv = tlv8.write_tlv(
{
tlv8.TLV_IDENTIFIER: self.pairing_id,
tlv8.TLV_PUBLIC_KEY: self._auth_public,
tlv8.TLV_SIGNATURE: device_signature,
}
)
chacha = chacha20.Chacha20Cipher(self._session_key, self._session_key)
encrypted_data = chacha.encrypt(tlv, nounce="PS-Msg05".encode())
log_binary(_LOGGER, "Data", Encrypted=encrypted_data)
return encrypted_data
def step4(self, encrypted_data):
"""Last pairing step."""
chacha = chacha20.Chacha20Cipher(self._session_key, self._session_key)
decrypted_tlv_bytes = chacha.decrypt(encrypted_data, nounce="PS-Msg06".encode())
if not decrypted_tlv_bytes:
raise exceptions.AuthenticationError("data decrypt failed")
decrypted_tlv = tlv8.read_tlv(decrypted_tlv_bytes)
_LOGGER.debug("PS-Msg06: %s", decrypted_tlv)
atv_identifier = decrypted_tlv[tlv8.TLV_IDENTIFIER]
atv_signature = decrypted_tlv[tlv8.TLV_SIGNATURE]
atv_pub_key = decrypted_tlv[tlv8.TLV_PUBLIC_KEY]
log_binary(
_LOGGER,
"Device",
Identifier=atv_identifier,
Signature=atv_signature,
Public=atv_pub_key,
)
# TODO: verify signature here
return Credentials(
atv_pub_key, self._auth_private, atv_identifier, self.pairing_id
)
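# Illustrative pairing flow (sketch; exchanging the intermediate payloads with the
# Apple TV over MRP is assumed to happen elsewhere):
#   srp = SRPAuthHandler()
#   auth_pub, verify_pub = srp.initialize()
#   srp.step1(pin)
#   client_pub, client_proof = srp.step2(atv_pub_key, atv_salt)
#   encrypted = srp.step3()
#   credentials = srp.step4(encrypted_response_from_atv)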
| 34.325581 | 88 | 0.649729 |
7941740f130012b8fa85bb99db85cde619b8a773 | 732 | py | Python | src/train_toynetv1.py | JamzumSum/yNet | 78506738e64321cfd26f0af70a62dd2119948e39 | [
"MIT"
] | 5 | 2021-06-09T02:11:19.000Z | 2021-10-04T09:00:31.000Z | src/train_toynetv1.py | JamzumSum/yNet | 78506738e64321cfd26f0af70a62dd2119948e39 | [
"MIT"
] | null | null | null | src/train_toynetv1.py | JamzumSum/yNet | 78506738e64321cfd26f0af70a62dd2119948e39 | [
"MIT"
] | null | null | null | import os
from common.trainer import getTrainComponents
from spectrainer import ToyNetTrainer
from toynet.toynetv1 import ToyNetV1
def post_script(post):
if post and os.path.exists(post):
with open(post) as f:
# use exec here since
# 1. `import` will excute the script at once
# 2. you can modify the script when training
            exec(compile(f.read(), post, 'exec'))
def main():
    # for compatibility when using the 'spawn' start method
trainer, net, data = getTrainComponents(
ToyNetTrainer, ToyNetV1, "./config/toynetv1.yml"
)
trainer.fit(net, datamodule=data)
post = trainer.paths.get("post_training", "")
post_script(post)
if __name__ == "__main__":
main()
| 24.4 | 56 | 0.654372 |
79417477f3cf95c7879250f354aa67b01cd55b43 | 3,679 | py | Python | sysdescrparser/linux.py | rlaneyjr/sysdescrparser | 4ebe35a41f26da6ff3b9aaa69ba5a99cbbb4d33f | [
"MIT"
] | null | null | null | sysdescrparser/linux.py | rlaneyjr/sysdescrparser | 4ebe35a41f26da6ff3b9aaa69ba5a99cbbb4d33f | [
"MIT"
] | null | null | null | sysdescrparser/linux.py | rlaneyjr/sysdescrparser | 4ebe35a41f26da6ff3b9aaa69ba5a99cbbb4d33f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""sysdescrparser.linux."""
import re
from sysdescrparser.sysdescr import SysDescr
from sysdescrparser.utils import os_version_names_regex, extract_version_number
# pylint: disable=no-name-in-module
# pylint: disable=no-member
class Linux(SysDescr):
"""Class Linux.
This class is only for vendor definition.
"""
versions = {
"ubuntu_linux": {
"lucid": "10.04",
"precise": "12.04",
"trusty": "14.04",
"xenial": "16.04",
"bionic": "18.04",
},
"centos": {
"centos6": "6.0",
"el6": "6.0",
"centos7": "7.0",
"el7": "7.0",
"centos": "",
}
}
def __init__(self, raw):
"""Constructor."""
super(Linux, self).__init__(raw)
self.vendor = 'LINUX'
self.model = self.UNKNOWN
self.os = 'LINUX_KERNEL'
self.version = self.UNKNOWN
def parse(self):
"""Parsing for sysDescr value."""
version_name = ""
kernel_version = ""
regex = (r'^Linux\s+')
pat = re.compile(regex)
res = pat.search(self.raw)
if res:
regex = (r'\s+')
pat = re.compile(regex)
res = pat.split(self.raw)
if len(res) > 2:
kernel_version = res[2]
ubuntu_version_names_regex = os_version_names_regex(self.__class__.versions["ubuntu_linux"])
ubuntu_lts_match = re.search(
ubuntu_version_names_regex,
self.raw,
flags=re.IGNORECASE
)
ubuntu_main_version_name_match = re.search(
"ubuntu",
self.raw,
flags=re.IGNORECASE
)
centos_version_names_regex = os_version_names_regex(self.__class__.versions["centos"])
centos_match = re.search(
centos_version_names_regex,
self.raw,
flags=re.IGNORECASE
)
if ubuntu_lts_match or ubuntu_main_version_name_match:
self.vendor = "CANONICAL"
self.os = "UBUNTU_LINUX"
if not ubuntu_lts_match:
version_name = self.UNKNOWN
else:
version_name = ubuntu_lts_match.group()
if centos_match:
self.vendor = "CENTOS"
self.os = "CENTOS"
version_name = centos_match.group()
if version_name and version_name != 'UNKNOWN':
            # lowercase it because our versions dict keys are lowercase
os_lookup_name = self.os.lower()
os_versions = self.__class__.versions[os_lookup_name]
version_number = extract_version_number(
os_versions,
version_name,
)
if version_number:
self.version = version_number
else:
self.version = self.UNKNOWN
self.model = kernel_version if kernel_version else self.UNKNOWN
elif version_name == 'UNKNOWN':
self.version = version_name
self.model = kernel_version if kernel_version else self.UNKNOWN
else:
self.version = kernel_version
return self
return False
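# Illustrative usage (sketch; the sysDescr string below is a made-up example):
#   descr = Linux("Linux host1 4.15.0-112-generic #113-Ubuntu SMP x86_64")
#   parsed = descr.parse()
#   if parsed:
#       print(parsed.os, parsed.version, parsed.model)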
| 30.658333 | 104 | 0.480837 |
7941755da32f42a1ca6f81054b51931c683a0784 | 4,510 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/operations/_service_tags_operations.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 2 | 2019-05-17T21:24:53.000Z | 2020-02-12T11:13:42.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/operations/_service_tags_operations.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 15 | 2019-07-12T18:18:04.000Z | 2019-07-25T20:55:51.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/operations/_service_tags_operations.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServiceTagsOperations(object):
"""ServiceTagsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ServiceTagsListResult"
"""Gets a list of service tag information resources.
:param location: The location that will be used as a reference for version (not as a filter
based on location, you will get the list of service tags with prefix details across all regions
but limited to the cloud that your subscription belongs to).
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceTagsListResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.ServiceTagsListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ServiceTagsListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceTagsListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/serviceTags'} # type: ignore
| 44.653465 | 139 | 0.68071 |
7941758530fc6ab5e11883f90e945e9f3109c57e | 23,808 | py | Python | tests/integration/states/test_pip_state.py | menglong81/salt | aeeb047f18bd41c31e6f184fbf2f4620f565aae7 | [
"Apache-2.0"
] | 1 | 2019-01-07T08:32:12.000Z | 2019-01-07T08:32:12.000Z | tests/integration/states/test_pip_state.py | menglong81/salt | aeeb047f18bd41c31e6f184fbf2f4620f565aae7 | [
"Apache-2.0"
] | null | null | null | tests/integration/states/test_pip_state.py | menglong81/salt | aeeb047f18bd41c31e6f184fbf2f4620f565aae7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
:codeauthor: Pedro Algarvio ([email protected])
tests.integration.states.pip_state
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import errno
import os
import glob
import shutil
import sys
try:
import pwd
HAS_PWD = True
except ImportError:
HAS_PWD = False
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.helpers import (
destructiveTest,
requires_system_grains,
with_system_user,
skip_if_not_root,
with_tempdir
)
from tests.support.mixins import SaltReturnAssertsMixin
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.versions
import salt.utils.win_dacl
import salt.utils.win_functions
import salt.utils.win_runas
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
def can_runas():
'''
    Detect if we are running in a limited shell (winrm) and are unable to use
the runas utility method.
'''
if salt.utils.platform.is_windows():
try:
salt.utils.win_runas.runas(
'cmd.exe /c echo 1', 'noexistuser', 'n0existp4ss',
)
except WindowsError as exc: # pylint: disable=undefined-variable
if exc.winerror == 5:
# Access Denied
return False
return True
CAN_RUNAS = can_runas()
class VirtualEnv(object):
def __init__(self, test, venv_dir):
self.venv_dir = venv_dir
self.test = test
def __enter__(self):
ret = self.test.run_function('virtualenv.create', [self.venv_dir])
self.test.assertEqual(ret['retcode'], 0)
def __exit__(self, exc_type, exc_value, traceback):
if os.path.isdir(self.venv_dir):
shutil.rmtree(self.venv_dir, ignore_errors=True)
@skipIf(salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed')
class PipStateTest(ModuleCase, SaltReturnAssertsMixin):
@skip_if_not_root
def test_pip_installed_removed(self):
'''
Tests installed and removed states
'''
name = 'pudb'
if name in self.run_function('pip.list'):
self.skipTest('{0} is already installed, uninstall to run this test'.format(name))
ret = self.run_state('pip.installed', name=name)
self.assertSaltTrueReturn(ret)
ret = self.run_state('pip.removed', name=name)
self.assertSaltTrueReturn(ret)
def test_pip_installed_removed_venv(self):
venv_dir = os.path.join(
RUNTIME_VARS.TMP, 'pip_installed_removed'
)
with VirtualEnv(self, venv_dir):
name = 'pudb'
ret = self.run_state('pip.installed', name=name, bin_env=venv_dir)
self.assertSaltTrueReturn(ret)
ret = self.run_state('pip.removed', name=name, bin_env=venv_dir)
self.assertSaltTrueReturn(ret)
def test_pip_installed_errors(self):
venv_dir = os.path.join(
RUNTIME_VARS.TMP, 'pip-installed-errors'
)
orig_shell = os.environ.get('SHELL')
try:
# Since we don't have the virtualenv created, pip.installed will
# throw an error.
# Example error strings:
# * "Error installing 'pep8': /tmp/pip-installed-errors: not found"
# * "Error installing 'pep8': /bin/sh: 1: /tmp/pip-installed-errors: not found"
# * "Error installing 'pep8': /bin/bash: /tmp/pip-installed-errors: No such file or directory"
os.environ['SHELL'] = '/bin/sh'
ret = self.run_function('state.sls', mods='pip-installed-errors')
self.assertSaltFalseReturn(ret)
self.assertSaltCommentRegexpMatches(
ret,
'Error installing \'pep8\':'
)
# We now create the missing virtualenv
ret = self.run_function('virtualenv.create', [venv_dir])
self.assertEqual(ret['retcode'], 0)
# The state should not have any issues running now
ret = self.run_function('state.sls', mods='pip-installed-errors')
self.assertSaltTrueReturn(ret)
finally:
if orig_shell is None:
# Didn't exist before, don't leave it there. This should never
# happen, but if it does, we don't want this test to affect
# others elsewhere in the suite.
os.environ.pop('SHELL')
else:
os.environ['SHELL'] = orig_shell
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir, ignore_errors=True)
@skipIf(six.PY3, 'Issue is specific to carbon module, which is PY2-only')
@skipIf(salt.utils.platform.is_windows(), "Carbon does not install in Windows")
@requires_system_grains
def test_pip_installed_weird_install(self, grains=None):
# First, check to see if this is running on CentOS 5 or MacOS.
# If so, skip this test.
if grains['os'] in ('CentOS',) and grains['osrelease_info'][0] in (5,):
self.skipTest('This test does not run reliably on CentOS 5')
if grains['os'] in ('MacOS',):
self.skipTest('This test does not run reliably on MacOS')
ographite = '/opt/graphite'
if os.path.isdir(ographite):
self.skipTest(
'You already have \'{0}\'. This test would overwrite this '
'directory'.format(ographite)
)
try:
os.makedirs(ographite)
except OSError as err:
if err.errno == errno.EACCES:
# Permission denied
self.skipTest(
'You don\'t have the required permissions to run this test'
)
finally:
if os.path.isdir(ographite):
shutil.rmtree(ographite, ignore_errors=True)
venv_dir = os.path.join(RUNTIME_VARS.TMP, 'pip-installed-weird-install')
try:
# We may be able to remove this, I had to add it because the custom
# modules from the test suite weren't available in the jinja
# context when running the call to state.sls that comes after.
self.run_function('saltutil.sync_modules')
# Since we don't have the virtualenv created, pip.installed will
# throw an error.
ret = self.run_function(
'state.sls', mods='pip-installed-weird-install'
)
self.assertSaltTrueReturn(ret)
# We cannot use assertInSaltComment here because we need to skip
# some of the state return parts
for key in six.iterkeys(ret):
self.assertTrue(ret[key]['result'])
if ret[key]['name'] != 'carbon < 1.1':
continue
self.assertEqual(
ret[key]['comment'],
'There was no error installing package \'carbon < 1.1\' '
'although it does not show when calling \'pip.freeze\'.'
)
break
else:
raise Exception('Expected state did not run')
finally:
if os.path.isdir(ographite):
shutil.rmtree(ographite, ignore_errors=True)
def test_issue_2028_pip_installed_state(self):
ret = self.run_function('state.sls', mods='issue-2028-pip-installed')
venv_dir = os.path.join(
RUNTIME_VARS.TMP, 'issue-2028-pip-installed'
)
pep8_bin = os.path.join(venv_dir, 'bin', 'pep8')
if salt.utils.platform.is_windows():
pep8_bin = os.path.join(venv_dir, 'Scripts', 'pep8.exe')
try:
self.assertSaltTrueReturn(ret)
self.assertTrue(
os.path.isfile(pep8_bin)
)
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir, ignore_errors=True)
def test_issue_2087_missing_pip(self):
venv_dir = os.path.join(
RUNTIME_VARS.TMP, 'issue-2087-missing-pip'
)
try:
# Let's create the testing virtualenv
ret = self.run_function('virtualenv.create', [venv_dir])
self.assertEqual(ret['retcode'], 0)
# Let's remove the pip binary
pip_bin = os.path.join(venv_dir, 'bin', 'pip')
site_dir = self.run_function('virtualenv.get_distribution_path', [venv_dir, 'pip'])
if salt.utils.platform.is_windows():
pip_bin = os.path.join(venv_dir, 'Scripts', 'pip.exe')
site_dir = os.path.join(venv_dir, 'lib', 'site-packages')
if not os.path.isfile(pip_bin):
self.skipTest(
'Failed to find the pip binary to the test virtualenv'
)
os.remove(pip_bin)
# Also remove the pip dir from site-packages
# This is needed now that we're using python -m pip instead of the
# pip binary directly. python -m pip will still work even if the
# pip binary is missing
shutil.rmtree(os.path.join(site_dir, 'pip'))
# Let's run the state which should fail because pip is missing
ret = self.run_function('state.sls', mods='issue-2087-missing-pip')
self.assertSaltFalseReturn(ret)
self.assertInSaltComment(
'Error installing \'pep8\': Could not find a `pip` binary',
ret
)
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir, ignore_errors=True)
def test_issue_5940_multiple_pip_mirrors(self):
'''
Test multiple pip mirrors. This test only works with pip < 7.0.0
'''
ret = self.run_function(
'state.sls', mods='issue-5940-multiple-pip-mirrors'
)
venv_dir = os.path.join(
RUNTIME_VARS.TMP, '5940-multiple-pip-mirrors'
)
try:
self.assertSaltTrueReturn(ret)
self.assertTrue(
os.path.isfile(os.path.join(venv_dir, 'bin', 'pep8'))
)
except (AssertionError, CommandExecutionError):
pip_version = self.run_function('pip.version', [venv_dir])
if salt.utils.versions.compare(ver1=pip_version, oper='>=', ver2='7.0.0'):
self.skipTest('the --mirrors arg has been deprecated and removed in pip==7.0.0')
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir, ignore_errors=True)
@destructiveTest
@skip_if_not_root
@skipIf(not CAN_RUNAS, 'Runas support required')
@with_system_user('issue-6912', on_existing='delete', delete=True,
password='PassWord1!')
@with_tempdir()
def test_issue_6912_wrong_owner(self, temp_dir, username):
# Setup virtual environment directory to be used throughout the test
venv_dir = os.path.join(temp_dir, '6912-wrong-owner')
# The virtual environment needs to be in a location that is accessible
# by both the user running the test and the runas user
if salt.utils.platform.is_windows():
salt.utils.win_dacl.set_permissions(temp_dir, username, 'full_control')
else:
uid = self.run_function('file.user_to_uid', [username])
os.chown(temp_dir, uid, -1)
# Create the virtual environment
venv_create = self.run_function(
'virtualenv.create', [venv_dir], user=username,
password='PassWord1!')
if venv_create['retcode'] > 0:
self.skipTest('Failed to create testcase virtual environment: {0}'
''.format(venv_create))
# pip install passing the package name in `name`
ret = self.run_state(
'pip.installed', name='pep8', user=username, bin_env=venv_dir,
password='PassWord1!')
self.assertSaltTrueReturn(ret)
if HAS_PWD:
uid = pwd.getpwnam(username).pw_uid
for globmatch in (os.path.join(venv_dir, '**', 'pep8*'),
os.path.join(venv_dir, '*', '**', 'pep8*'),
os.path.join(venv_dir, '*', '*', '**', 'pep8*')):
for path in glob.glob(globmatch):
if HAS_PWD:
self.assertEqual(uid, os.stat(path).st_uid)
elif salt.utils.platform.is_windows():
self.assertEqual(
salt.utils.win_dacl.get_owner(path), username)
@destructiveTest
@skip_if_not_root
@skipIf(salt.utils.platform.is_darwin(), 'Test is flaky on macosx')
@skipIf(not CAN_RUNAS, 'Runas support required')
@with_system_user('issue-6912', on_existing='delete', delete=True,
password='PassWord1!')
@with_tempdir()
def test_issue_6912_wrong_owner_requirements_file(self, temp_dir, username):
# Setup virtual environment directory to be used throughout the test
venv_dir = os.path.join(temp_dir, '6912-wrong-owner')
# The virtual environment needs to be in a location that is accessible
# by both the user running the test and the runas user
if salt.utils.platform.is_windows():
salt.utils.win_dacl.set_permissions(temp_dir, username, 'full_control')
else:
uid = self.run_function('file.user_to_uid', [username])
os.chown(temp_dir, uid, -1)
# Create the virtual environment again as it should have been removed
venv_create = self.run_function(
'virtualenv.create', [venv_dir], user=username,
password='PassWord1!')
if venv_create['retcode'] > 0:
self.skipTest('failed to create testcase virtual environment: {0}'
''.format(venv_create))
# pip install using a requirements file
req_filename = os.path.join(
RUNTIME_VARS.TMP_STATE_TREE, 'issue-6912-requirements.txt'
)
with salt.utils.files.fopen(req_filename, 'wb') as reqf:
reqf.write(b'pep8\n')
ret = self.run_state(
'pip.installed', name='', user=username, bin_env=venv_dir,
requirements='salt://issue-6912-requirements.txt',
password='PassWord1!')
self.assertSaltTrueReturn(ret)
if HAS_PWD:
uid = pwd.getpwnam(username).pw_uid
for globmatch in (os.path.join(venv_dir, '**', 'pep8*'),
os.path.join(venv_dir, '*', '**', 'pep8*'),
os.path.join(venv_dir, '*', '*', '**', 'pep8*')):
for path in glob.glob(globmatch):
if HAS_PWD:
self.assertEqual(uid, os.stat(path).st_uid)
elif salt.utils.platform.is_windows():
self.assertEqual(
salt.utils.win_dacl.get_owner(path), username)
def test_issue_6833_pip_upgrade_pip(self):
# Create the testing virtualenv
venv_dir = os.path.join(
RUNTIME_VARS.TMP, '6833-pip-upgrade-pip'
)
ret = self.run_function('virtualenv.create', [venv_dir])
try:
try:
self.assertEqual(ret['retcode'], 0)
self.assertIn(
'New python executable',
ret['stdout']
)
except AssertionError:
import pprint
pprint.pprint(ret)
raise
# Let's install a fixed version pip over whatever pip was
# previously installed
ret = self.run_function(
'pip.install', ['pip==8.0'], upgrade=True,
bin_env=venv_dir
)
try:
self.assertEqual(ret['retcode'], 0)
self.assertIn(
'Successfully installed pip',
ret['stdout']
)
except AssertionError:
import pprint
pprint.pprint(ret)
raise
# Let's make sure we have pip 8.0 installed
self.assertEqual(
self.run_function('pip.list', ['pip'], bin_env=venv_dir),
{'pip': '8.0.0'}
)
# Now the actual pip upgrade pip test
ret = self.run_state(
'pip.installed', name='pip==8.0.1', upgrade=True,
bin_env=venv_dir
)
try:
self.assertSaltTrueReturn(ret)
self.assertSaltStateChangesEqual(
ret, {'pip==8.0.1': 'Installed'})
except AssertionError:
import pprint
pprint.pprint(ret)
raise
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir, ignore_errors=True)
def test_pip_installed_specific_env(self):
# Create the testing virtualenv
venv_dir = os.path.join(
RUNTIME_VARS.TMP, 'pip-installed-specific-env'
)
# Let's write a requirements file
requirements_file = os.path.join(
RUNTIME_VARS.TMP_PRODENV_STATE_TREE, 'prod-env-requirements.txt'
)
with salt.utils.files.fopen(requirements_file, 'wb') as reqf:
reqf.write(b'pep8\n')
try:
self.run_function('virtualenv.create', [venv_dir])
            # The requirements file should not be found in the base environment
ret = self.run_state(
'pip.installed', name='', bin_env=venv_dir,
requirements='salt://prod-env-requirements.txt'
)
self.assertSaltFalseReturn(ret)
self.assertInSaltComment(
"'salt://prod-env-requirements.txt' not found", ret
)
# The requirements file must be found in the prod environment
ret = self.run_state(
'pip.installed', name='', bin_env=venv_dir, saltenv='prod',
requirements='salt://prod-env-requirements.txt'
)
self.assertSaltTrueReturn(ret)
self.assertInSaltComment(
'Successfully processed requirements file '
'salt://prod-env-requirements.txt', ret
)
# We're using the base environment but we're passing the prod
            # environment as a URL argument to salt://
ret = self.run_state(
'pip.installed', name='', bin_env=venv_dir,
requirements='salt://prod-env-requirements.txt?saltenv=prod'
)
self.assertSaltTrueReturn(ret)
self.assertInSaltComment(
'Requirements were already installed.',
ret
)
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir, ignore_errors=True)
if os.path.isfile(requirements_file):
os.unlink(requirements_file)
def test_22359_pip_installed_unless_does_not_trigger_warnings(self):
# This test case should be moved to a format_call unit test specific to
# the state internal keywords
venv_dir = os.path.join(RUNTIME_VARS.TMP, 'pip-installed-unless')
venv_create = self.run_function('virtualenv.create', [venv_dir])
if venv_create['retcode'] > 0:
self.skipTest(
'Failed to create testcase virtual environment: {0}'.format(
venv_create
)
)
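        # Use a command that always fails so the ``unless`` requisite does not
        # short-circuit the state; the point is to verify that running the
        # state this way produces no warnings in the return data.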
false_cmd = '/bin/false'
if salt.utils.platform.is_windows():
false_cmd = 'exit 1 >nul'
try:
ret = self.run_state(
'pip.installed', name='pep8', bin_env=venv_dir, unless=false_cmd
)
self.assertSaltTrueReturn(ret)
self.assertNotIn('warnings', next(six.itervalues(ret)))
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir, ignore_errors=True)
@skipIf(sys.version_info[:2] >= (3, 6), 'Old version of virtualenv too old for python3.6')
@skipIf(salt.utils.platform.is_windows(), "Carbon does not install in Windows")
def test_46127_pip_env_vars(self):
'''
Test that checks if env_vars passed to pip.installed are also passed
to pip.freeze while checking for existing installations
'''
# This issue is most easily checked while installing carbon
# Much of the code here comes from the test_weird_install function above
ographite = '/opt/graphite'
if os.path.isdir(ographite):
self.skipTest(
'You already have \'{0}\'. This test would overwrite this '
'directory'.format(ographite)
)
try:
os.makedirs(ographite)
except OSError as err:
if err.errno == errno.EACCES:
# Permission denied
self.skipTest(
'You don\'t have the required permissions to run this test'
)
finally:
if os.path.isdir(ographite):
shutil.rmtree(ographite, ignore_errors=True)
venv_dir = os.path.join(RUNTIME_VARS.TMP, 'issue-46127-pip-env-vars')
try:
# We may be able to remove this, I had to add it because the custom
# modules from the test suite weren't available in the jinja
# context when running the call to state.sls that comes after.
self.run_function('saltutil.sync_modules')
# Since we don't have the virtualenv created, pip.installed will
# throw an error.
ret = self.run_function(
'state.sls', mods='issue-46127-pip-env-vars'
)
self.assertSaltTrueReturn(ret)
for key in six.iterkeys(ret):
self.assertTrue(ret[key]['result'])
if ret[key]['name'] != 'carbon < 1.3':
continue
self.assertEqual(
ret[key]['comment'],
'All packages were successfully installed'
)
break
else:
raise Exception('Expected state did not run')
# Run the state again. Now the already installed message should
# appear
ret = self.run_function(
'state.sls', mods='issue-46127-pip-env-vars'
)
self.assertSaltTrueReturn(ret)
# We cannot use assertInSaltComment here because we need to skip
# some of the state return parts
for key in six.iterkeys(ret):
self.assertTrue(ret[key]['result'])
# As we are re-running the formula, some states will not be run
# and "name" may or may not be present, so we use .get() pattern
if ret[key].get('name', '') != 'carbon < 1.3':
continue
self.assertEqual(
ret[key]['comment'],
('All packages were successfully installed'))
break
else:
raise Exception('Expected state did not run')
finally:
if os.path.isdir(ographite):
shutil.rmtree(ographite, ignore_errors=True)
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir)
| 39.417219 | 107 | 0.573589 |
7941775e22304f77986be7519e8dcfc38f796526 | 1,072 | py | Python | mg/main/learn.py | matomatical/memograph | 0560e246cc9485c0d1792836d4a52db65a3354ad | ["MIT"] | 13 | 2020-11-26T07:23:16.000Z | 2022-02-20T05:46:34.000Z | mg/main/learn.py | matomatical/memograph | 0560e246cc9485c0d1792836d4a52db65a3354ad | ["MIT"] | null | null | null | mg/main/learn.py | matomatical/memograph | 0560e246cc9485c0d1792836d4a52db65a3354ad | ["MIT"] | 1 | 2021-02-19T08:49:24.000Z | 2021-02-19T08:49:24.000Z |
import random
from mg.io import print, input
def run_learn(graph, options):
# decide which links to introduce
print("introduce some new links...")
hand = graph.query(
number=options.num_cards,
topics=options.topics,
new=True
)
n = len(hand)
if n == 0:
print("no new links! try drilling some old ones.")
return
random.shuffle(hand)
# introduce the links
for i, link in enumerate(hand, 1):
print(f"<bold>**<reset> learn {i}/{n} <bold>**<reset>")
face, back = link.u, link.v
if link.t: print("topics:", link.t)
print("prompt:", face.label())
face.media()
input("return:")
print("answer:", back.label())
back.media()
instructions = "easy (g+↵) | medium (↵) | hard (h+↵)"
rating = input("rating:", r=instructions)
if rating == "g":
link.m.init([1, 1, 2*24*60*60])
elif rating == "h":
link.m.init([1, 1, 1*60])
else:
link.m.init([1, 1, 1*60*60])
| 28.210526 | 63 | 0.521455 |
7941779784a4d23501bab7749349c2ede4e3febd | 89,939 | py | Python | tests/pipeline/test_blaze.py | leonarduschen/zipline | 5e6c9fce7e0f812bd181024ad192ca2976d49667 | ["Apache-2.0"] | 14,525 | 2015-01-01T02:57:52.000Z | 2022-03-31T18:16:35.000Z | tests/pipeline/test_blaze.py | leonarduschen/zipline | 5e6c9fce7e0f812bd181024ad192ca2976d49667 | ["Apache-2.0"] | 2,146 | 2015-01-01T13:03:44.000Z | 2022-02-22T03:25:28.000Z | tests/pipeline/test_blaze.py | leonarduschen/zipline | 5e6c9fce7e0f812bd181024ad192ca2976d49667 | ["Apache-2.0"] | 4,517 | 2015-01-01T14:26:47.000Z | 2022-03-31T14:38:05.000Z |
"""
Tests for the blaze interface to the pipeline api.
"""
from __future__ import division
from collections import OrderedDict
from datetime import timedelta, time
from functools import partial
from itertools import product, chain
from unittest import skipIf
import warnings
import blaze as bz
from datashape import dshape, var, Record
from nose_parameterized import parameterized
import numpy as np
from numpy.testing.utils import assert_array_almost_equal
from odo import odo
import pandas as pd
import pytz
from toolz import keymap, valmap, concatv
from toolz.curried import operator as op
from zipline.assets.synthetic import make_simple_equity_info
from zipline.errors import UnsupportedPipelineOutput
from zipline.pipeline import Pipeline, CustomFactor
from zipline.pipeline.data import DataSet, BoundColumn, Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.engine import SimplePipelineEngine
from zipline.pipeline.loaders.blaze import (
from_blaze,
BlazeLoader,
NoMetaDataWarning,
)
from zipline.pipeline.loaders.blaze.core import (
ExprData,
NonPipelineField,
)
from zipline.testing import (
ZiplineTestCase,
parameter_space,
tmp_asset_finder,
)
from zipline.testing.fixtures import WithAssetFinder
from zipline.testing.predicates import (
assert_equal,
assert_frame_equal,
assert_isidentical,
)
from zipline.utils.numpy_utils import float64_dtype, int64_dtype
from zipline.utils.pandas_utils import empty_dataframe, new_pandas, \
skip_pipeline_new_pandas
nameof = op.attrgetter('name')
dtypeof = op.attrgetter('dtype')
asset_infos = (
(make_simple_equity_info(
tuple(map(ord, 'ABC')),
pd.Timestamp(0),
pd.Timestamp('2015'),
),),
(make_simple_equity_info(
tuple(map(ord, 'ABCD')),
pd.Timestamp(0),
pd.Timestamp('2015'),
),),
)
simple_asset_info = asset_infos[0][0]
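# Decorators that parameterize a test over the asset universes above; the
# "ignore sid" variant additionally toggles whether rows for a sid unknown to
# the asset finder are injected into the data.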
def with_extra_sid():
return parameterized.expand(asset_infos)
def with_ignore_sid():
return parameterized.expand(
product(chain.from_iterable(asset_infos), [True, False])
)
class BlazeToPipelineTestCase(WithAssetFinder, ZiplineTestCase):
START_DATE = pd.Timestamp(0)
END_DATE = pd.Timestamp('2015')
@classmethod
def init_class_fixtures(cls):
super(BlazeToPipelineTestCase, cls).init_class_fixtures()
cls.dates = dates = pd.date_range('2014-01-01', '2014-01-03')
cls.asof_dates = asof_dates = dates - pd.Timedelta(days=1)
cls.timestamps = timestamps = dates - pd.Timedelta(hours=1)
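        # Each row's asof_date is the day before its session and its timestamp
        # is an hour before the session start, so every row is already knowable
        # at that session's midnight-UTC data query time.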
cls.df = df = pd.DataFrame({
'sid': cls.ASSET_FINDER_EQUITY_SIDS * 3,
'value': (0., 1., 2., 1., 2., 3., 2., 3., 4.),
'int_value': (0, 1, 2, 1, 2, 3, 2, 3, 4),
'asof_date': asof_dates.repeat(3),
'timestamp': timestamps.repeat(3),
})
cls.dshape = dshape("""
var * {
sid: ?int64,
value: ?float64,
int_value: ?int64,
asof_date: datetime,
timestamp: datetime
}
""")
cls.macro_df = df[df.sid == 65].drop('sid', axis=1)
dshape_ = OrderedDict(cls.dshape.measure.fields)
del dshape_['sid']
cls.macro_dshape = var * Record(dshape_)
cls.garbage_loader = BlazeLoader()
cls.missing_values = {'int_value': 0}
cls.value_dshape = dshape("""var * {
sid: ?int64,
value: float64,
asof_date: datetime,
timestamp: datetime,
}""")
def create_domain(self,
sessions,
data_query_time=time(0, 0, tzinfo=pytz.utc),
data_query_date_offset=0):
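        """Build an EquitySessionDomain over ``sessions`` for the test country,
        defaulting to a midnight-UTC data query time.
        """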
if sessions.tz is None:
sessions = sessions.tz_localize('UTC')
return EquitySessionDomain(
sessions,
country_code=self.ASSET_FINDER_COUNTRY_CODE,
data_query_time=data_query_time,
data_query_date_offset=data_query_date_offset,
)
def test_tabular(self):
name = 'expr'
expr = bz.data(self.df, name=name, dshape=self.dshape)
ds = from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
)
self.assertEqual(ds.__name__, name)
self.assertTrue(issubclass(ds, DataSet))
self.assertIs(ds.value.dtype, float64_dtype)
self.assertIs(ds.int_value.dtype, int64_dtype)
self.assertTrue(np.isnan(ds.value.missing_value))
self.assertEqual(ds.int_value.missing_value, 0)
# test memoization
self.assertIs(
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
),
ds,
)
def test_column(self):
exprname = 'expr'
expr = bz.data(self.df, name=exprname, dshape=self.dshape)
value = from_blaze(
expr.value,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
)
self.assertEqual(value.name, 'value')
self.assertIsInstance(value, BoundColumn)
self.assertIs(value.dtype, float64_dtype)
# test memoization
self.assertIs(
from_blaze(
expr.value,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
),
value,
)
self.assertIs(
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
).value,
value,
)
# test the walk back up the tree
self.assertIs(
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
),
value.dataset,
)
self.assertEqual(value.dataset.__name__, exprname)
def test_missing_asof(self):
expr = bz.data(
self.df.loc[:, ['sid', 'value', 'timestamp']],
name='expr',
dshape="""var * {
sid: int64,
value: float64,
timestamp: datetime,
}""",
)
with self.assertRaises(TypeError) as e:
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
)
self.assertIn("'asof_date'", str(e.exception))
self.assertIn(repr(str(expr.dshape.measure)), str(e.exception))
def test_missing_timestamp(self):
expr = bz.data(
self.df.loc[:, ['sid', 'value', 'asof_date']],
name='expr',
dshape="""var * {
sid: int64,
value: float64,
asof_date: datetime,
}""",
)
loader = BlazeLoader()
ds = from_blaze(
expr,
loader=loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
)
self.assertEqual(len(loader), 2) # added the two columns
for column in ds.columns:
exprdata = loader[column]
assert_isidentical(
exprdata.expr,
bz.transform(expr, timestamp=expr.asof_date),
)
def test_from_blaze_no_resources_dataset_expr(self):
expr = bz.symbol('expr', self.dshape)
with self.assertRaises(ValueError) as e:
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
)
assert_equal(
str(e.exception),
'no resources provided to compute expr',
)
@parameter_space(metadata={'deltas', 'checkpoints'})
def test_from_blaze_no_resources_metadata_expr(self, metadata):
expr = bz.data(self.df, name='expr', dshape=self.dshape)
metadata_expr = bz.symbol('metadata', self.dshape)
with self.assertRaises(ValueError) as e:
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
**{metadata: metadata_expr}
)
assert_equal(
str(e.exception),
'no resources provided to compute %s' % metadata,
)
def test_from_blaze_mixed_resources_dataset_expr(self):
expr = bz.data(self.df, name='expr', dshape=self.dshape)
with self.assertRaises(ValueError) as e:
from_blaze(
expr,
resources={expr: self.df},
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
)
assert_equal(
str(e.exception),
'explicit and implicit resources provided to compute expr',
)
@parameter_space(metadata={'deltas', 'checkpoints'})
def test_from_blaze_mixed_resources_metadata_expr(self, metadata):
expr = bz.symbol('expr', self.dshape)
metadata_expr = bz.data(self.df, name=metadata, dshape=self.dshape)
with self.assertRaises(ValueError) as e:
from_blaze(
expr,
resources={metadata_expr: self.df},
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
**{metadata: metadata_expr}
)
assert_equal(
str(e.exception),
'explicit and implicit resources provided to compute %s' %
metadata,
)
@parameter_space(deltas={True, False}, checkpoints={True, False})
def test_auto_metadata(self, deltas, checkpoints):
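        # Map each boolean flag onto the corresponding rule: when the metadata
        # table is provided we can use 'raise', otherwise fall back to 'ignore'.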
select_level = op.getitem(('ignore', 'raise'))
m = {'ds': self.df}
if deltas:
m['ds_deltas'] = pd.DataFrame(columns=self.df.columns),
if checkpoints:
m['ds_checkpoints'] = pd.DataFrame(columns=self.df.columns),
expr = bz.data(
m,
dshape=var * Record((k, self.dshape.measure) for k in m),
)
loader = BlazeLoader()
ds = from_blaze(
expr.ds,
loader=loader,
missing_values=self.missing_values,
no_deltas_rule=select_level(deltas),
no_checkpoints_rule=select_level(checkpoints),
)
self.assertEqual(len(loader), 3) # added the three columns
for column in ds.columns:
exprdata = loader[column]
self.assertTrue(exprdata.expr.isidentical(expr.ds))
if deltas:
self.assertTrue(exprdata.deltas.isidentical(expr.ds_deltas))
else:
self.assertIsNone(exprdata.deltas)
if checkpoints:
self.assertTrue(
exprdata.checkpoints.isidentical(expr.ds_checkpoints),
)
else:
self.assertIsNone(exprdata.checkpoints)
@parameter_space(deltas={True, False}, checkpoints={True, False})
def test_auto_metadata_fail_warn(self, deltas, checkpoints):
select_level = op.getitem(('ignore', 'warn'))
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter('always')
loader = BlazeLoader()
expr = bz.data(self.df, dshape=self.dshape)
from_blaze(
expr,
loader=loader,
no_deltas_rule=select_level(deltas),
no_checkpoints_rule=select_level(checkpoints),
missing_values=self.missing_values,
)
self.assertEqual(len(ws), deltas + checkpoints)
for w in ws:
w = w.message
self.assertIsInstance(w, NoMetaDataWarning)
self.assertIn(str(expr), str(w))
@parameter_space(deltas={True, False}, checkpoints={True, False})
def test_auto_metadata_fail_raise(self, deltas, checkpoints):
if not (deltas or checkpoints):
# not a real case
return
select_level = op.getitem(('ignore', 'raise'))
loader = BlazeLoader()
expr = bz.data(self.df, dshape=self.dshape)
with self.assertRaises(ValueError) as e:
from_blaze(
expr,
loader=loader,
no_deltas_rule=select_level(deltas),
no_checkpoints_rule=select_level(checkpoints),
)
self.assertIn(str(expr), str(e.exception))
def test_non_pipeline_field(self):
expr = bz.data(
[],
dshape="""
var * {
a: complex,
asof_date: datetime,
timestamp: datetime,
}""",
)
ds = from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
)
with self.assertRaises(AttributeError):
ds.a
self.assertIsInstance(
object.__getattribute__(ds, 'a'),
NonPipelineField,
)
@skipIf(new_pandas, skip_pipeline_new_pandas)
def test_cols_with_all_missing_vals(self):
"""
Tests that when there is no known data, we get output where the
columns have the right dtypes and the right missing values filled in.
input (self.df):
Empty DataFrame
Columns: [sid, float_value, str_value, int_value, bool_value, dt_value,
asof_date, timestamp]
Index: []
output (expected)
str_value float_value int_value
2014-01-01 Equity(65 [A]) None NaN 0
Equity(66 [B]) None NaN 0
Equity(67 [C]) None NaN 0
2014-01-02 Equity(65 [A]) None NaN 0
Equity(66 [B]) None NaN 0
Equity(67 [C]) None NaN 0
2014-01-03 Equity(65 [A]) None NaN 0
Equity(66 [B]) None NaN 0
Equity(67 [C]) None NaN 0
dt_value bool_value
2014-01-01 Equity(65 [A]) NaT False
Equity(66 [B]) NaT False
Equity(67 [C]) NaT False
2014-01-02 Equity(65 [A]) NaT False
Equity(66 [B]) NaT False
Equity(67 [C]) NaT False
2014-01-03 Equity(65 [A]) NaT False
Equity(66 [B]) NaT False
Equity(67 [C]) NaT False
"""
df = empty_dataframe(
('sid', 'int64'),
('float_value', 'float64'),
('str_value', 'object'),
('int_value', 'int64'),
('bool_value', 'bool'),
('dt_value', 'datetime64[ns]'),
('asof_date', 'datetime64[ns]'),
('timestamp', 'datetime64[ns]'),
)
expr = bz.data(
df,
dshape="""
var * {
sid: int64,
float_value: float64,
str_value: string,
int_value: int64,
bool_value: bool,
dt_value: datetime,
asof_date: datetime,
timestamp: datetime,
}""",
)
fields = OrderedDict(expr.dshape.measure.fields)
expected = pd.DataFrame({
"str_value": np.array([None,
None,
None,
None,
None,
None,
None,
None,
None],
dtype='object'),
"float_value": np.array([np.NaN,
np.NaN,
np.NaN,
np.NaN,
np.NaN,
np.NaN,
np.NaN,
np.NaN,
np.NaN],
dtype='float64'),
"int_value": np.array([0,
0,
0,
0,
0,
0,
0,
0,
0],
dtype='int64'),
"bool_value": np.array([False,
False,
False,
False,
False,
False,
False,
False,
False],
dtype='bool'),
"dt_value": [pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT],
},
columns=['str_value', 'float_value', 'int_value', 'bool_value',
'dt_value'],
index=pd.MultiIndex.from_product(
(self.dates.tz_localize('UTC'), self.asset_finder.retrieve_all(
self.ASSET_FINDER_EQUITY_SIDS
))
)
)
self._test_id(
df,
var * Record(fields),
expected,
self.asset_finder,
('float_value', 'str_value', 'int_value', 'bool_value',
'dt_value'),
)
@skipIf(new_pandas, skip_pipeline_new_pandas)
def test_cols_with_some_missing_vals(self):
"""
Tests the following:
1) Forward filling replaces missing values correctly for the data
types supported in pipeline.
2) We don't forward fill when the missing value is the actual value
we got for a date in the case of int/bool columns.
3) We get the correct type of missing value in the output.
input (self.df):
asof_date bool_value dt_value float_value int_value sid
0 2014-01-01 True 2011-01-01 0 1 65
1 2014-01-03 True 2011-01-02 1 2 66
2 2014-01-01 True 2011-01-03 2 3 67
3 2014-01-02 False NaT NaN 0 67
str_value timestamp
0 a 2014-01-01
1 b 2014-01-03
2 c 2014-01-01
3 None 2014-01-02
output (expected)
str_value float_value int_value bool_value
2014-01-01 Equity(65 [A]) a 0 1 True
Equity(66 [B]) None NaN 0 False
Equity(67 [C]) c 2 3 True
2014-01-02 Equity(65 [A]) a 0 1 True
Equity(66 [B]) None NaN 0 False
Equity(67 [C]) c 2 0 False
2014-01-03 Equity(65 [A]) a 0 1 True
Equity(66 [B]) b 1 2 True
Equity(67 [C]) c 2 0 False
dt_value
2014-01-01 Equity(65 [A]) 2011-01-01
Equity(66 [B]) NaT
Equity(67 [C]) 2011-01-03
2014-01-02 Equity(65 [A]) 2011-01-01
Equity(66 [B]) NaT
Equity(67 [C]) 2011-01-03
2014-01-03 Equity(65 [A]) 2011-01-01
Equity(66 [B]) 2011-01-02
Equity(67 [C]) 2011-01-03
"""
dates = pd.Index([
self.dates[0],
self.dates[-1],
self.dates[0],
self.dates[1],
])
df = pd.DataFrame({
'sid': self.ASSET_FINDER_EQUITY_SIDS[:-1] +
(self.ASSET_FINDER_EQUITY_SIDS[-1],) * 2,
'float_value': (0., 1., 2., np.NaN),
'str_value': ('a', 'b', 'c', None),
'cat_value': pd.Categorical(
values=['a', 'b', 'c', None],
categories=['a', 'b', 'c', None],
),
'int_value': (1, 2, 3, 0),
'bool_value': (True, True, True, False),
'dt_value': (pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.NaT),
'asof_date': dates - pd.Timedelta(days=2),
'timestamp': dates - pd.Timedelta(days=1),
})
expr = bz.data(
df,
dshape="""
var * {
sid: int64,
float_value: float64,
str_value: string,
cat_value: string,
int_value: int64,
bool_value: bool,
dt_value: datetime,
asof_date: datetime,
timestamp: datetime,
}""",
)
fields = OrderedDict(expr.dshape.measure.fields)
expected = pd.DataFrame(
{
'str_value': np.array(
['a', None, 'c', 'a', None, 'c', 'a', 'b', 'c'],
dtype='object',
),
'cat_value': np.array(
['a', None, 'c', 'a', None, 'c', 'a', 'b', 'c'],
dtype='object',
),
'float_value': np.array(
[0, np.NaN, 2, 0, np.NaN, 2, 0, 1, 2],
dtype='float64',
),
'int_value': np.array(
[1, 0, 3, 1, 0, 3, 1, 2, 3],
dtype='int64',
),
'bool_value': np.array(
[True, False, True, True, False, False, True, True, False],
dtype='bool',
),
'dt_value': [
pd.Timestamp('2011-01-01'),
pd.NaT,
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-01'),
pd.NaT,
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
],
},
columns=[
'str_value',
'cat_value',
'float_value',
'int_value',
'bool_value',
'dt_value',
],
index=pd.MultiIndex.from_product((
self.dates.tz_localize('UTC'),
self.asset_finder.retrieve_all(self.ASSET_FINDER_EQUITY_SIDS),
)),
)
self._test_id(
df,
var * Record(fields),
expected,
self.asset_finder,
expected.columns,
)
def test_complex_expr(self):
expr = bz.data(self.df, dshape=self.dshape, name='expr')
# put an Add in the table
expr_with_add = bz.transform(expr, value=expr.value + 1)
# test that we can have complex expressions with no metadata
from_blaze(
expr_with_add,
deltas=None,
checkpoints=None,
loader=self.garbage_loader,
missing_values=self.missing_values,
no_checkpoints_rule='ignore',
)
with self.assertRaises(TypeError) as e:
# test that we cannot create a single column from a non field
from_blaze(
expr.value + 1, # put an Add in the column
deltas=None,
checkpoints=None,
loader=self.garbage_loader,
missing_values=self.missing_values,
no_checkpoints_rule='ignore',
)
assert_equal(
str(e.exception),
"expression 'expr.value + 1' was array-like but not a simple field"
" of some larger table",
)
deltas = bz.data(
pd.DataFrame(columns=self.df.columns),
dshape=self.dshape,
name='deltas',
)
checkpoints = bz.data(
pd.DataFrame(columns=self.df.columns),
dshape=self.dshape,
name='checkpoints',
)
# test that we can have complex expressions with explicit metadata
from_blaze(
expr_with_add,
deltas=deltas,
checkpoints=checkpoints,
loader=self.garbage_loader,
missing_values=self.missing_values,
)
with self.assertRaises(TypeError) as e:
# test that we cannot create a single column from a non field
# even with explicit metadata
from_blaze(
expr.value + 1,
deltas=deltas,
checkpoints=checkpoints,
loader=self.garbage_loader,
missing_values=self.missing_values,
)
assert_equal(
str(e.exception),
"expression 'expr.value + 1' was array-like but not a simple field"
" of some larger table",
)
def _test_id(self, df, dshape, expected, finder, add):
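        """Build a blaze dataset from ``df``, add the ``.latest`` of each
        column named in ``add`` to a pipeline, run it over ``self.dates`` and
        compare the resulting frame against ``expected``.
        """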
expr = bz.data(df, name='expr', dshape=dshape)
loader = BlazeLoader()
domain = self.create_domain(self.dates)
ds = from_blaze(
expr,
loader=loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
domain=domain
)
p = Pipeline(domain=domain)
for a in add:
p.add(getattr(ds, a).latest, a)
dates = self.dates
result = SimplePipelineEngine(
loader, finder,
).run_pipeline(p, dates[0], dates[-1])
assert_frame_equal(
result.sort_index(axis=1),
expected.sort_index(axis=1),
check_dtype=False,
)
def _test_id_macro(self, df, dshape, expected, finder, add, dates=None):
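        """Like ``_test_id`` but for macro (sid-less) data: the columns named
        in ``add`` are fed to a custom factor that asserts each macro input is
        a single column whose values match ``expected`` row by row.
        """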
if dates is None:
dates = self.dates
expr = bz.data(df, name='expr', dshape=dshape)
loader = BlazeLoader()
domain = self.create_domain(dates)
ds = from_blaze(
expr,
loader=loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
domain=domain,
)
p = Pipeline(domain=domain)
macro_inputs = []
for column_name in add:
column = getattr(ds, column_name)
macro_inputs.append(column)
with self.assertRaises(UnsupportedPipelineOutput):
# Single column output terms cannot be added to a pipeline.
p.add(column.latest, column_name)
class UsesMacroInputs(CustomFactor):
inputs = macro_inputs
window_length = 1
def compute(self, today, assets, out, *inputs):
e = expected.loc[today]
for i, input_ in enumerate(inputs):
# Each macro input should only have one column.
assert_equal(input_.shape, (self.window_length, 1))
assert_equal(input_[0, 0], e[i])
# Run the pipeline with our custom factor. Assertions about the
# expected macro data are made in the `compute` function of our custom
# factor above.
p.add(UsesMacroInputs(), 'uses_macro_inputs')
engine = SimplePipelineEngine(loader, finder)
engine.run_pipeline(p, dates[0], dates[-1])
def test_custom_query_time_tz(self):
"""
input (df):
asof_date int_value sid timestamp value
0 2013-12-31 0 65 2014-01-01 13:44:00 0.0
1 2013-12-31 1 66 2014-01-01 13:44:00 1.0
2 2013-12-31 2 67 2014-01-01 13:44:00 2.0
3 2013-12-31 1 65 2014-01-01 13:45:00 1.0
4 2013-12-31 2 66 2014-01-01 13:45:00 2.0
5 2013-12-31 3 67 2014-01-01 13:45:00 3.0
6 2014-01-02 2 65 2014-01-03 13:44:00 2.0
7 2014-01-02 3 66 2014-01-03 13:44:00 3.0
8 2014-01-02 4 67 2014-01-03 13:44:00 4.0
output (expected):
int_value value
2014-01-01 00:00:00+00:00 Equity(65 [A]) 0 0.0
Equity(66 [B]) 1 1.0
Equity(67 [C]) 2 2.0
2014-01-02 00:00:00+00:00 Equity(65 [A]) 1 1.0
Equity(66 [B]) 2 2.0
Equity(67 [C]) 3 3.0
2014-01-03 00:00:00+00:00 Equity(65 [A]) 2 2.0
Equity(66 [B]) 3 3.0
Equity(67 [C]) 4 4.0
"""
df = self.df.copy()
df['timestamp'] = (
pd.DatetimeIndex(df['timestamp'], tz='EST') +
timedelta(hours=8, minutes=44)
).tz_convert('utc').tz_localize(None)
df.ix[3:5, 'timestamp'] = pd.Timestamp('2014-01-01 13:45')
expr = bz.data(df, name='expr', dshape=self.dshape)
loader = BlazeLoader()
ds = from_blaze(
expr,
loader=loader,
no_deltas_rule='ignore',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
domain=self.create_domain(
self.dates,
data_query_time=time(8, 45, tzinfo=pytz.timezone('EST')),
),
)
p = Pipeline()
p.add(ds.value.latest, 'value')
p.add(ds.int_value.latest, 'int_value')
result = SimplePipelineEngine(
loader, self.asset_finder,
).run_pipeline(p, self.dates[0], self.dates[-1])
expected = df.drop('asof_date', axis=1)
expected['timestamp'] = expected['timestamp'].dt.normalize().astype(
'datetime64[ns]',
).dt.tz_localize('utc')
expected.ix[3:5, 'timestamp'] += timedelta(days=1)
expected.set_index(['timestamp', 'sid'], inplace=True)
expected.index = pd.MultiIndex.from_product((
expected.index.levels[0],
self.asset_finder.retrieve_all(expected.index.levels[1]),
))
assert_frame_equal(result, expected, check_dtype=False)
def test_id(self):
"""
input (self.df):
asof_date sid timestamp int_value value
0 2014-01-01 65 2014-01-01 0 0
1 2014-01-01 66 2014-01-01 1 1
2 2014-01-01 67 2014-01-01 2 2
3 2014-01-02 65 2014-01-02 1 1
4 2014-01-02 66 2014-01-02 2 2
5 2014-01-02 67 2014-01-02 3 3
6 2014-01-03 65 2014-01-03 2 2
7 2014-01-03 66 2014-01-03 3 3
8 2014-01-03 67 2014-01-03 4 4
output (expected)
int_value value
2014-01-01 Equity(65 [A]) 0 0
Equity(66 [B]) 1 1
Equity(67 [C]) 2 2
2014-01-02 Equity(65 [A]) 1 1
Equity(66 [B]) 2 2
Equity(67 [C]) 3 3
2014-01-03 Equity(65 [A]) 2 2
Equity(66 [B]) 3 3
Equity(67 [C]) 4 4
"""
expected = self.df.drop(['timestamp', 'asof_date', 'sid'], axis=1)
expected.index = pd.MultiIndex.from_product((
self.dates.tz_localize('UTC'),
self.asset_finder.retrieve_all(self.asset_finder.sids),
))
self._test_id(
self.df,
self.dshape,
expected,
self.asset_finder,
('int_value', 'value',)
)
def test_id_with_asof_date(self):
"""
input (self.df):
asof_date sid timestamp int_value value
0 2014-01-01 65 2014-01-01 0 0
1 2014-01-01 66 2014-01-01 1 1
2 2014-01-01 67 2014-01-01 2 2
3 2014-01-02 65 2014-01-02 1 1
4 2014-01-02 66 2014-01-02 2 2
5 2014-01-02 67 2014-01-02 3 3
6 2014-01-03 65 2014-01-03 2 2
7 2014-01-03 66 2014-01-03 3 3
8 2014-01-03 67 2014-01-03 4 4
output (expected)
asof_date
2014-01-01 Equity(65 [A]) 2014-01-01
Equity(66 [B]) 2014-01-01
Equity(67 [C]) 2014-01-01
2014-01-02 Equity(65 [A]) 2014-01-02
Equity(66 [B]) 2014-01-02
Equity(67 [C]) 2014-01-02
2014-01-03 Equity(65 [A]) 2014-01-03
Equity(66 [B]) 2014-01-03
Equity(67 [C]) 2014-01-03
"""
expected = self.df.drop(
['timestamp', 'sid', 'value', 'int_value'],
axis=1,
)
expected.index = pd.MultiIndex.from_product((
self.dates.tz_localize('UTC'),
self.asset_finder.retrieve_all(self.asset_finder.sids),
))
self._test_id(
self.df,
self.dshape,
expected,
self.asset_finder,
('asof_date',)
)
def test_id_ffill_out_of_window(self):
"""
input (df):
asof_date timestamp sid other value
0 2013-12-22 2013-12-22 65 0 0
1 2013-12-22 2013-12-22 66 NaN 1
2 2013-12-22 2013-12-22 67 2 NaN
3 2013-12-23 2013-12-23 65 NaN 1
4 2013-12-23 2013-12-23 66 2 NaN
5 2013-12-23 2013-12-23 67 3 3
6 2013-12-24 2013-12-24 65 2 NaN
7 2013-12-24 2013-12-24 66 3 3
8 2013-12-24 2013-12-24 67 NaN 4
output (expected):
other value
2014-01-01 Equity(65 [A]) 2 1
Equity(66 [B]) 3 3
Equity(67 [C]) 3 4
2014-01-02 Equity(65 [A]) 2 1
Equity(66 [B]) 3 3
Equity(67 [C]) 3 4
2014-01-03 Equity(65 [A]) 2 1
Equity(66 [B]) 3 3
Equity(67 [C]) 3 4
"""
dates = self.dates.repeat(3) - timedelta(days=10)
df = pd.DataFrame({
'sid': self.ASSET_FINDER_EQUITY_SIDS * 3,
'value': (0, 1, np.nan, 1, np.nan, 3, np.nan, 3, 4),
'other': (0, np.nan, 2, np.nan, 2, 3, 2, 3, np.nan),
'asof_date': dates,
'timestamp': dates,
})
fields = OrderedDict(self.dshape.measure.fields)
fields['other'] = fields['value']
expected = pd.DataFrame(
np.array([[2, 1],
[3, 3],
[3, 4],
[2, 1],
[3, 3],
[3, 4],
[2, 1],
[3, 3],
[3, 4]]),
columns=['other', 'value'],
index=pd.MultiIndex.from_product(
(self.dates.tz_localize('UTC'), self.asset_finder.retrieve_all(
self.ASSET_FINDER_EQUITY_SIDS
)),
),
)
self._test_id(
df,
var * Record(fields),
expected,
self.asset_finder,
('value', 'other'),
)
def test_id_multiple_columns(self):
"""
input (df):
asof_date sid timestamp value other
0 2014-01-01 65 2014-01-01 0 1
1 2014-01-01 66 2014-01-01 1 2
2 2014-01-01 67 2014-01-01 2 3
3 2014-01-02 65 2014-01-02 1 2
4 2014-01-02 66 2014-01-02 2 3
5 2014-01-02 67 2014-01-02 3 4
6 2014-01-03 65 2014-01-03 2 3
7 2014-01-03 66 2014-01-03 3 4
8 2014-01-03 67 2014-01-03 4 5
output (expected):
value other
2014-01-01 Equity(65 [A]) 0 1
Equity(66 [B]) 1 2
Equity(67 [C]) 2 3
2014-01-02 Equity(65 [A]) 1 2
Equity(66 [B]) 2 3
Equity(67 [C]) 3 4
2014-01-03 Equity(65 [A]) 2 3
Equity(66 [B]) 3 4
Equity(67 [C]) 4 5
"""
df = self.df.copy()
df['other'] = df.value + 1
fields = OrderedDict(self.dshape.measure.fields)
fields['other'] = fields['value']
expected = df.drop(['timestamp', 'asof_date', 'sid'], axis=1)
expected.index = pd.MultiIndex.from_product((
self.dates.tz_localize('UTC'),
self.asset_finder.retrieve_all(self.asset_finder.sids),
))
self._test_id(
df,
var * Record(fields),
expected,
self.asset_finder,
('value', 'int_value', 'other'),
)
def test_id_macro_dataset(self):
"""
input (self.macro_df)
asof_date timestamp value
0 2014-01-01 2014-01-01 0
3 2014-01-02 2014-01-02 1
6 2014-01-03 2014-01-03 2
output (expected):
value
2014-01-01 0
2014-01-02 1
2014-01-03 2
"""
expected = pd.DataFrame(
data=[[0],
[1],
[2]],
columns=['value'],
index=self.dates,
)
self._test_id_macro(
self.macro_df,
self.macro_dshape,
expected,
self.asset_finder,
('value',),
)
def test_id_ffill_out_of_window_macro_dataset(self):
"""
input (df):
asof_date timestamp other value
0 2013-12-22 2013-12-22 NaN 0
1 2013-12-23 2013-12-23 1 NaN
2 2013-12-24 2013-12-24 NaN NaN
output (expected):
other value
2014-01-01 1 0
2014-01-02 1 0
2014-01-03 1 0
"""
dates = self.dates - timedelta(days=10)
df = pd.DataFrame({
'value': (0, np.nan, np.nan),
'other': (np.nan, 1, np.nan),
'asof_date': dates,
'timestamp': dates,
})
fields = OrderedDict(self.macro_dshape.measure.fields)
fields['other'] = fields['value']
expected = pd.DataFrame(
data=[[0, 1],
[0, 1],
[0, 1]],
columns=['other', 'value'],
index=self.dates.tz_localize('UTC'),
)
self._test_id_macro(
df,
var * Record(fields),
expected,
self.asset_finder,
('value', 'other'),
)
def test_id_macro_dataset_multiple_columns(self):
"""
input (df):
asof_date timestamp other value
0 2014-01-01 2014-01-01 1 0
3 2014-01-02 2014-01-02 2 1
6 2014-01-03 2014-01-03 3 2
output (expected):
other value
2014-01-01 1 0
2014-01-02 2 1
2014-01-03 3 2
"""
df = self.macro_df.copy()
df['other'] = df.value + 1
fields = OrderedDict(self.macro_dshape.measure.fields)
fields['other'] = fields['value']
with tmp_asset_finder(equities=simple_asset_info) as finder:
expected = pd.DataFrame(
data=[[0, 1],
[1, 2],
[2, 3]],
columns=['value', 'other'],
index=self.dates,
dtype=np.float64,
)
self._test_id_macro(
df,
var * Record(fields),
expected,
finder,
('value', 'other'),
)
def test_id_take_last_in_group(self):
T = pd.Timestamp
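        # Two observations arrive per (asof_date, sid) pair at different
        # intraday timestamps; for each column the most recently learned
        # non-null value is the one that should be surfaced.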
df = pd.DataFrame(
columns=['asof_date', 'timestamp', 'sid', 'other', 'value'],
data=[
[T('2013-12-31'), T('2013-12-31 22'), 65, 0, 0],
[T('2013-12-31'), T('2013-12-31 23'), 65, 1, np.nan],
[T('2013-12-31'), T('2013-12-31 22'), 66, np.nan, np.nan],
[T('2013-12-31'), T('2013-12-31 23'), 66, np.nan, 1],
[T('2013-12-31'), T('2013-12-31 22'), 67, 2, np.nan],
[T('2013-12-31'), T('2013-12-31 23'), 67, np.nan, np.nan],
[T('2014-01-01'), T('2014-01-01 22'), 65, np.nan, np.nan],
[T('2014-01-01'), T('2014-01-01 23'), 65, np.nan, 1],
[T('2014-01-01'), T('2014-01-01 22'), 66, np.nan, np.nan],
[T('2014-01-01'), T('2014-01-01 23'), 66, 2, np.nan],
[T('2014-01-01'), T('2014-01-01 22'), 67, 3, 3],
[T('2014-01-01'), T('2014-01-01 23'), 67, 3, 3],
[T('2014-01-02'), T('2014-01-02 22'), 65, 2, np.nan],
[T('2014-01-02'), T('2014-01-02 23'), 65, 2, np.nan],
[T('2014-01-02'), T('2014-01-02 22'), 66, 3, 3],
[T('2014-01-02'), T('2014-01-02 23'), 66, np.nan, np.nan],
[T('2014-01-02'), T('2014-01-02 22'), 67, np.nan, np.nan],
[T('2014-01-02'), T('2014-01-02 23'), 67, np.nan, 4],
],
)
fields = OrderedDict(self.dshape.measure.fields)
fields['other'] = fields['value']
expected = pd.DataFrame(
columns=['other', 'value'],
data=[
[1, 0], # 2014-01-01 Equity(65 [A])
[np.nan, 1], # Equity(66 [B])
[2, np.nan], # Equity(67 [C])
[1, 1], # 2014-01-02 Equity(65 [A])
[2, 1], # Equity(66 [B])
[3, 3], # Equity(67 [C])
[2, 1], # 2014-01-03 Equity(65 [A])
[3, 3], # Equity(66 [B])
[3, 4], # Equity(67 [C])
],
index=pd.MultiIndex.from_product(
(self.dates.tz_localize('UTC'), self.asset_finder.retrieve_all(
self.ASSET_FINDER_EQUITY_SIDS
)),
),
)
self._test_id(
df,
var * Record(fields),
expected,
self.asset_finder,
('value', 'other'),
)
def test_id_take_last_in_group_macro(self):
"""
output (expected):
other value
2014-01-01 NaN 2
2014-01-02 1 3
2014-01-03 2 3
"""
T = pd.Timestamp
df = pd.DataFrame(
columns=['asof_date', 'timestamp', 'other', 'value'],
data=[
[T('2013-12-31'), T('2013-12-31 01'), np.nan, 1],
[T('2013-12-31'), T('2013-12-31 02'), np.nan, 2],
[T('2014-01-01'), T('2014-01-01 01'), 1, np.nan],
[T('2014-01-01'), T('2014-01-01 02'), np.nan, 3],
[T('2014-01-02'), T('2014-01-02 01'), 2, np.nan],
[T('2014-01-02'), T('2014-01-02 02'), np.nan, np.nan],
],
)
fields = OrderedDict(self.macro_dshape.measure.fields)
fields['other'] = fields['value']
expected = pd.DataFrame(
data=[[np.nan, 2], # 2014-01-01
[1, 3], # 2014-01-02
[2, 3]], # 2014-01-03
columns=['other', 'value'],
index=self.dates,
)
self._test_id_macro(
df,
var * Record(fields),
expected,
self.asset_finder,
('other', 'value'),
)
def _run_pipeline(self,
expr,
deltas,
checkpoints,
expected_views,
expected_output,
finder,
calendar,
start,
end,
window_length,
compute_fn=None):
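        """Shared driver: build a dataset from ``expr`` (plus optional deltas
        and checkpoints), run a single custom factor over ``ds.value`` with the
        given window length, assert that each raw window matches
        ``expected_views`` and, if provided, that the pipeline output matches
        ``expected_output``.
        """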
loader = BlazeLoader()
ds = from_blaze(
expr,
deltas,
checkpoints,
loader=loader,
no_deltas_rule='raise',
no_checkpoints_rule='ignore',
missing_values=self.missing_values,
domain=self.create_domain(calendar),
)
p = Pipeline()
# prevent unbound locals issue in the inner class
window_length_ = window_length
if compute_fn is None:
self.assertIsNone(
expected_output,
'expected_output must be None if compute_fn is None',
)
def compute_fn(data):
return data[0]
class TestFactor(CustomFactor):
inputs = ds.value,
window_length = window_length_
def compute(self, today, assets, out, data):
assert_array_almost_equal(
data,
expected_views[today],
err_msg=str(today),
)
out[:] = compute_fn(data)
p.add(TestFactor(), 'value')
result = SimplePipelineEngine(
loader, finder,
).run_pipeline(p, start, end)
if expected_output is not None:
assert_frame_equal(
result,
expected_output,
check_dtype=False,
)
@with_ignore_sid()
def test_deltas(self, asset_info, add_extra_sid):
df = self.df.copy()
if add_extra_sid:
extra_sid_df = pd.DataFrame({
'asof_date': self.asof_dates,
'timestamp': self.timestamps,
'sid': (ord('E'),) * 3,
'value': (3., 4., 5.,),
'int_value': (3, 4, 5),
})
df = df.append(extra_sid_df, ignore_index=True)
expr = bz.data(df, name='expr', dshape=self.dshape)
deltas = bz.data(df, dshape=self.dshape)
deltas = bz.data(
odo(
bz.transform(
deltas,
value=deltas.value + 10,
timestamp=deltas.timestamp + timedelta(days=1),
),
pd.DataFrame,
),
name='delta',
dshape=self.dshape,
)
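        # window_length is 2, so each view is [day t-1, day t]. The t-1 row
        # reflects the +10 delta that became known overnight, while day t still
        # shows the baseline value whose delta has not arrived yet; on
        # 2014-01-04 there is no new baseline row, so the adjusted 01-03 values
        # are forward-filled into both rows.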
expected_views = keymap(partial(pd.Timestamp, tz='UTC'), {
'2014-01-02': np.array([[10.0, 11.0, 12.0],
[1.0, 2.0, 3.0]]),
'2014-01-03': np.array([[11.0, 12.0, 13.0],
[2.0, 3.0, 4.0]]),
'2014-01-04': np.array([[12.0, 13.0, 14.0],
[12.0, 13.0, 14.0]]),
})
nassets = len(asset_info)
if nassets == 4:
expected_views = valmap(
lambda view: np.c_[view, [np.nan, np.nan]],
expected_views,
)
with tmp_asset_finder(equities=asset_info) as finder:
expected_output = pd.DataFrame(
list(concatv([12] * nassets, [13] * nassets, [14] * nassets)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(asset_info.index),
)),
columns=('value',),
)
dates = self.dates
dates = dates.insert(len(dates), dates[-1] + timedelta(days=1))
self._run_pipeline(
expr,
deltas,
None,
expected_views,
expected_output,
finder,
calendar=dates,
start=dates[1],
end=dates[-1],
window_length=2,
compute_fn=np.nanmax,
)
@with_ignore_sid()
def test_deltas_before_index_0(self, asset_info, add_extra_sid):
df = empty_dataframe(
('sid', 'int64'),
('value', 'float64'),
('asof_date', 'datetime64[ns]'),
('timestamp', 'datetime64[ns]'),
)
expr = bz.data(df, name='expr', dshape=self.dshape)
T = pd.Timestamp
# These data are interesting because we have four rows with an asof
# date prior to the start of the query window. The first, second, and
# fourth rows should become the best-known value on their timestamp.
# The third row's asof date is less than the second row's asof date so,
# due to forward filling rules, it is *not* the most recent value on
# its timestamp. The value for row three should never be shown to the
# user.
deltas_df_single_sid = pd.DataFrame({
'value': [0.0, 1.0, 2.0, 3.0],
'asof_date': [
T('2013-12-01'),
T('2013-12-15'),
T('2013-12-02'), # not more recent than the previous day
T('2013-12-16'),
],
'timestamp': [
T('2014-01-01 23:00'),
T('2014-01-02 23:00'),
T('2014-01-03 23:00'),
T('2014-01-04 23:00'),
],
})
sids = asset_info.index
if add_extra_sid:
# add a sid to the dataset that the asset finder doesn't know about
sids = sids.insert(0, ord('Z'))
deltas_df = pd.concat([
deltas_df_single_sid.assign(
sid=sid,
value=deltas_df_single_sid.value + (100 * n),
)
for n, sid in enumerate(asset_info.index)
])
deltas = bz.data(deltas_df, name='deltas', dshape=self.dshape)
expected_views_single_sid = keymap(partial(pd.Timestamp, tz='UTC'), {
'2014-01-02': np.array([[0.0],
[0.0]]),
'2014-01-03': np.array([[1.0],
[1.0]]),
# The third row's value of 2.0 is *not* the best known value
# because its asof date of 2013-12-02 is earlier than the previous
# row's asof date of 2013-12-15. We continue to surface the second
# row's value on this day.
'2014-01-04': np.array([[1.0],
[1.0]]),
'2014-01-05': np.array([[3.0],
[3.0]]),
})
column_constant = np.arange(len(asset_info)) * 100
expected_views = {
k: v + column_constant
for k, v in expected_views_single_sid.items()
}
with tmp_asset_finder(equities=asset_info) as finder:
dates = pd.date_range('2014-01-01', '2014-01-05')
self._run_pipeline(
expr,
deltas,
None,
expected_views,
None,
finder,
calendar=dates,
start=dates[1],
end=dates[-1],
window_length=2,
)
@with_ignore_sid()
def test_deltas_on_same_ix_out_of_order(self, asset_info, add_extra_sid):
df = empty_dataframe(
('sid', 'int64'),
('value', 'float64'),
('asof_date', 'datetime64[ns]'),
('timestamp', 'datetime64[ns]'),
)
expr = bz.data(df, name='expr', dshape=self.dshape)
T = pd.Timestamp
# These data are interesting because we have pairs of rows that come on
# the same asof_date in index space. The catch is that the asof dates
# are sometimes out of order relative to their timestamps. This is used
# to test cases where we get novel rows for dates between trading days
# (weekends and holidays) although we learn about them out of order.
#
# The first two rows both map to index 0 in the output. The first row
# has an earlier timestamp but later asof date so it should be
# selected.
#
# The third and fourth rows both map to index 1 in the output. The
# fourth row (second in the group) has both a later timestamp and asof
# date so it should be selected.
#
# The fifth and sixth rows both map to index 2 in the output. The fifth
# row (first in the group) has an earlier timestamp but later asof date
# so it should be selected.
deltas_df_single_sid = pd.DataFrame({
'value': [
0.0, # selected
1.0, # ignored
2.0, # ignored
3.0, # selected
4.0, # selected
5.0, # ignored
],
'asof_date': [
# swapped order: second row is before the first
T('2014-01-02'),
T('2014-01-01'),
# chronological order: second row is after the first
T('2014-01-03'),
T('2014-01-04'),
# swapped order: second row is before the first
T('2014-01-06'),
T('2014-01-05'),
],
'timestamp': [
# we learn about all rows in monotonically increasing order
T('2013-01-02 22:00'),
T('2014-01-02 23:00'),
T('2014-01-04 22:00'),
T('2014-01-04 23:00'),
T('2014-01-06 22:00'),
T('2014-01-06 23:00'),
],
})
sids = asset_info.index
if add_extra_sid:
# add a sid to the dataset that the asset finder doesn't know about
sids = sids.insert(0, ord('Z'))
deltas_df = pd.concat([
deltas_df_single_sid.assign(
sid=sid,
value=deltas_df_single_sid.value + (100 * n),
)
for n, sid in enumerate(asset_info.index)
])
deltas = bz.data(deltas_df, name='deltas', dshape=self.dshape)
expected_views_single_sid = keymap(partial(pd.Timestamp, tz='UTC'), {
'2014-01-05': np.array([[0.0],
[3.0]]),
'2014-01-07': np.array([[3.0],
[4.0]]),
})
column_constant = np.arange(len(asset_info)) * 100
expected_views = {
k: v + column_constant
for k, v in expected_views_single_sid.items()
}
with tmp_asset_finder(equities=asset_info) as finder:
# The dates queried are non-contiguous. We have two day groups to
# capture the two day pairs in the input data.
dates = pd.to_datetime(['2014-01-03', '2014-01-05', '2014-01-07'])
self._run_pipeline(
expr=expr,
deltas=deltas,
checkpoints=None,
expected_views=expected_views,
expected_output=None,
finder=finder,
calendar=dates,
start=dates[1],
end=dates[-1],
window_length=2,
)
@with_extra_sid()
def test_deltas_only_one_delta_in_universe(self, asset_info):
expr = bz.data(self.df, name='expr', dshape=self.dshape)
deltas = pd.DataFrame({
'sid': [65, 66],
'asof_date': [self.asof_dates[1], self.asof_dates[0]],
'timestamp': [self.timestamps[2], self.timestamps[1]],
'value': [10, 11],
})
deltas = bz.data(deltas, name='deltas', dshape=self.dshape)
expected_views = keymap(partial(pd.Timestamp, tz='UTC'), {
'2014-01-02': np.array([[0.0, 11.0, 2.0],
[1.0, 2.0, 3.0]]),
'2014-01-03': np.array([[10.0, 2.0, 3.0],
[2.0, 3.0, 4.0]]),
'2014-01-04': np.array([[2.0, 3.0, 4.0],
[2.0, 3.0, 4.0]]),
})
nassets = len(asset_info)
if nassets == 4:
expected_views = valmap(
lambda view: np.c_[view, [np.nan, np.nan]],
expected_views,
)
with tmp_asset_finder(equities=asset_info) as finder:
expected_output = pd.DataFrame(
columns=[
'value',
],
data=np.array([11, 10, 4]).repeat(len(asset_info.index)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(asset_info.index),
)),
)
dates = self.dates
dates = dates.insert(len(dates), dates[-1] + timedelta(days=1))
self._run_pipeline(
expr,
deltas,
None,
expected_views,
expected_output,
finder,
calendar=dates,
start=dates[1],
end=dates[-1],
window_length=2,
compute_fn=np.nanmax,
)
def test_deltas_macro(self):
expr = bz.data(self.macro_df, name='expr', dshape=self.macro_dshape)
deltas = bz.data(
self.macro_df.iloc[:-1],
name='deltas',
dshape=self.macro_dshape,
)
deltas = bz.transform(
deltas,
value=deltas.value + 10,
timestamp=deltas.timestamp + timedelta(days=1),
)
nassets = len(simple_asset_info)
expected_views = keymap(partial(pd.Timestamp, tz='UTC'), {
'2014-01-02': np.array([[10.0],
[1.0]]),
'2014-01-03': np.array([[11.0],
[2.0]]),
})
with tmp_asset_finder(equities=simple_asset_info) as finder:
expected_output = pd.DataFrame(
list(concatv([10] * nassets, [11] * nassets)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(simple_asset_info.index),
)),
columns=('value',),
)
dates = self.dates
self._run_pipeline(
expr,
deltas,
None,
expected_views,
expected_output,
finder,
calendar=dates,
start=dates[1],
end=dates[-1],
window_length=2,
compute_fn=np.nanmax,
)
def test_deltas_before_index_0_macro(self):
df = empty_dataframe(
('value', 'float64'),
('asof_date', 'datetime64[ns]'),
('timestamp', 'datetime64[ns]'),
)
expr = bz.data(df, name='expr', dshape=self.macro_dshape)
T = pd.Timestamp
# These data are interesting because we have four rows with an asof
# date prior to the start of the query window. The first, second, and
# fourth rows should become the best-known value on their timestamp.
# The third row's asof date is less than the second row's asof date so,
# due to forward filling rules, it is *not* the most recent value on
# its timestamp. The value for row three should never be shown to the
# user.
deltas_df = pd.DataFrame({
'value': [0.0, 1.0, 2.0, 3.0],
'asof_date': [
T('2013-12-01'),
T('2013-12-15'),
T('2013-12-02'), # not more recent than the previous day
T('2013-12-16'),
],
'timestamp': [
T('2014-01-01 23:00'),
T('2014-01-02 23:00'),
T('2014-01-03 23:00'),
T('2014-01-04 23:00'),
],
})
deltas = bz.data(deltas_df, name='deltas', dshape=self.macro_dshape)
expected_views = keymap(partial(pd.Timestamp, tz='UTC'), {
'2014-01-02': np.array([[0.0],
[0.0]]),
'2014-01-03': np.array([[1.0],
[1.0]]),
# The third row's value of 2.0 is *not* the best known value
# because its asof date of 2013-12-02 is earlier than the previous
# row's asof date of 2013-12-15. We continue to surface the second
# row's value on this day.
'2014-01-04': np.array([[1.0],
[1.0]]),
'2014-01-05': np.array([[3.0],
[3.0]]),
})
with tmp_asset_finder(equities=simple_asset_info) as finder:
dates = pd.date_range('2014-01-01', '2014-01-05')
self._run_pipeline(
expr,
deltas,
None,
expected_views,
None,
finder,
calendar=dates,
start=dates[1],
end=dates[-1],
window_length=2,
)
def test_deltas_on_same_ix_out_of_order_macro(self):
df = empty_dataframe(
('value', 'float64'),
('asof_date', 'datetime64[ns]'),
('timestamp', 'datetime64[ns]'),
)
expr = bz.data(df, name='expr', dshape=self.macro_dshape)
T = pd.Timestamp
# These data are interesting because we have pairs of rows that come on
# the same asof_date in index space. The catch is that the asof dates
# are sometimes out of order relative to their timestamps. This is used
# to test cases where we get novel rows for dates between trading days
# (weekends and holidays) although we learn about them out of order.
#
# The first two rows both map to index 0 in the output. The first row
# has an earlier timestamp but later asof date so it should be
# selected.
#
# The third and fourth rows both map to index 1 in the output. The
# fourth row (second in the group) has both a later timestamp and asof
# date so it should be selected.
#
# The fifth and sixth rows both map to index 2 in the output. The fifth
# row (first in the group) has an earlier timestamp but later asof date
# so it should be selected.
deltas_df = pd.DataFrame({
'value': [
0.0, # selected
1.0, # ignored
2.0, # ignored
3.0, # selected
4.0, # selected
5.0, # ignored
],
'asof_date': [
# swapped order: second row is before the first
T('2014-01-02'),
T('2014-01-01'),
# chronological order: second row is after the first
T('2014-01-03'),
T('2014-01-04'),
# swapped order: second row is before the first
T('2014-01-06'),
T('2014-01-05'),
],
'timestamp': [
# we learn about all rows in monotonically increasing order
T('2013-01-02 22:00'),
T('2014-01-02 23:00'),
T('2014-01-04 22:00'),
T('2014-01-04 23:00'),
T('2014-01-06 22:00'),
T('2014-01-06 23:00'),
],
})
deltas = bz.data(deltas_df, name='deltas', dshape=self.macro_dshape)
expected_views = keymap(partial(pd.Timestamp, tz='UTC'), {
'2014-01-05': np.array([[0.0],
[3.0]]),
'2014-01-07': np.array([[3.0],
[4.0]]),
})
with tmp_asset_finder(equities=simple_asset_info) as finder:
# The dates queried are non-contiguous. We have two day groups to
# capture the two day pairs in the input data.
dates = pd.to_datetime(['2014-01-03', '2014-01-05', '2014-01-07'])
self._run_pipeline(
expr=expr,
deltas=deltas,
checkpoints=None,
expected_views=expected_views,
expected_output=None,
finder=finder,
calendar=dates,
start=dates[1],
end=dates[-1],
window_length=2,
)
def test_stacked_deltas_macro(self):
df = empty_dataframe(
('value', 'float64'),
('asof_date', 'datetime64[ns]'),
('timestamp', 'datetime64[ns]'),
)
expr = bz.data(df, name='expr', dshape=self.macro_dshape)
T = pd.Timestamp
# These data are interesting because they exercise the tie breaking of
# adjustments. Here we have 4 rows which we learn about within a single
# calendar index. The first row provides the most recently known value
# for some day in the window. All of the following rows are adjustments
# to the same (earlier) historical value. We expect that the first
# row's value is the most recently know value, and the lookback window
# will be filled with the *last* row's value. This is because each
# adjustment gets applied in timestamp order, and the last row was
# learned most recently.
deltas_df = pd.DataFrame({
'value': [
0.0, # selected
1.0, # ignored
2.0, # ignored
3.0, # ignored
4.0, # selected
],
'asof_date': [
# the first row is for current data
T('2014-01-02'),
# all other rows are restating the same historical value
T('2013-12-01'),
T('2013-12-01'),
T('2013-12-01'),
T('2013-12-01'),
],
'timestamp': [
# we learn about all rows within a single calendar index
T('2014-01-02 23:00'),
T('2014-01-02 23:01'),
T('2014-01-02 23:02'),
T('2014-01-02 23:03'),
T('2014-01-02 23:04'),
],
})
deltas = bz.data(deltas_df, name='deltas', dshape=self.macro_dshape)
expected_views = keymap(partial(pd.Timestamp, tz='UTC'), {
'2014-01-03': np.array([[4.0],
[4.0],
[0.0]]),
})
with tmp_asset_finder(equities=simple_asset_info) as finder:
# The dates queried are non-contiguous. We have two day groups to
# capture the two day pairs in the input data.
dates = pd.date_range('2014-01-01', '2014-01-03')
self._run_pipeline(
expr=expr,
deltas=deltas,
checkpoints=None,
expected_views=expected_views,
expected_output=None,
finder=finder,
calendar=dates,
start=dates[-1],
end=dates[-1],
window_length=3,
)
@with_extra_sid()
def test_novel_deltas(self, asset_info):
base_dates = pd.DatetimeIndex([
pd.Timestamp('2013-12-31'),
pd.Timestamp('2014-01-03')
])
repeated_dates = base_dates.repeat(3)
baseline = pd.DataFrame({
'sid': self.ASSET_FINDER_EQUITY_SIDS * 2,
'value': (0., 1., 2., 1., 2., 3.),
'int_value': (0, 1, 2, 1, 2, 3),
'asof_date': repeated_dates,
'timestamp': repeated_dates + pd.Timedelta(hours=23),
})
expr = bz.data(baseline, name='expr', dshape=self.dshape)
deltas = bz.data(
odo(
bz.transform(
expr,
value=expr.value + 10,
timestamp=expr.timestamp + timedelta(days=1),
),
pd.DataFrame,
),
name='delta',
dshape=self.dshape,
)
expected_views = keymap(partial(pd.Timestamp, tz='UTC'), {
'2014-01-03': np.array([[10.0, 11.0, 12.0],
[10.0, 11.0, 12.0],
[10.0, 11.0, 12.0]]),
'2014-01-06': np.array([[10.0, 11.0, 12.0],
[10.0, 11.0, 12.0],
[11.0, 12.0, 13.0]]),
})
if len(asset_info) == 4:
def get_fourth_asset_view(expected_views, window_length):
return valmap(
lambda view: np.c_[view, [np.nan] * window_length],
expected_views,
)
expected_views = get_fourth_asset_view(
expected_views,
window_length=3,
)
expected_output_buffer = [
10,
11,
12,
np.nan,
11,
12,
13,
np.nan,
]
else:
expected_output_buffer = [10, 11, 12, 11, 12, 13]
cal = pd.DatetimeIndex([
pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-02'),
pd.Timestamp('2014-01-03'),
# omitting the 4th and 5th to simulate a weekend
pd.Timestamp('2014-01-06'),
])
with tmp_asset_finder(equities=asset_info) as finder:
expected_output = pd.DataFrame(
expected_output_buffer,
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(asset_info.index),
)),
columns=('value',),
)
self._run_pipeline(
expr,
deltas,
None,
expected_views,
expected_output,
finder,
calendar=cal,
start=cal[2],
end=cal[-1],
window_length=3,
compute_fn=op.itemgetter(-1),
)
def test_novel_deltas_macro(self):
base_dates = pd.DatetimeIndex([
pd.Timestamp('2013-12-31'),
pd.Timestamp('2014-01-03')
])
baseline = pd.DataFrame({
'value': (0., 1.),
'asof_date': base_dates,
'timestamp': base_dates + pd.Timedelta(days=1),
})
expr = bz.data(baseline, name='expr', dshape=self.macro_dshape)
deltas = bz.data(baseline, name='deltas', dshape=self.macro_dshape)
deltas = bz.transform(
deltas,
value=deltas.value + 10,
timestamp=deltas.timestamp + timedelta(days=1),
)
nassets = len(simple_asset_info)
expected_views = keymap(partial(pd.Timestamp, tz='UTC'), {
'2014-01-03': np.array([[10.0],
[10.0],
[10.0]]),
'2014-01-06': np.array([[10.0],
[10.0],
[11.0]]),
})
cal = pd.DatetimeIndex([
pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-02'),
pd.Timestamp('2014-01-03'),
# omitting the 4th and 5th to simulate a weekend
pd.Timestamp('2014-01-06'),
])
def get_expected_output(expected_views, values, asset_info):
return pd.DataFrame(
list(concatv(*([value] * nassets for value in values))),
index=pd.MultiIndex.from_product(
(sorted(expected_views.keys()),
finder.retrieve_all(asset_info.index),)
), columns=('value',),
)
with tmp_asset_finder(equities=simple_asset_info) as finder:
expected_output = get_expected_output(
expected_views,
[10, 11],
simple_asset_info,
)
self._run_pipeline(
expr,
deltas,
None,
expected_views,
expected_output,
finder,
calendar=cal,
start=cal[2],
end=cal[-1],
window_length=3,
compute_fn=op.itemgetter(-1),
)
test_checkpoints_dates = pd.date_range('2013-12-31', '2014-01-04')
test_checkpoints_expected_view_date = pd.Timestamp('2014-01-03')
def _test_checkpoints_macro(self, checkpoints, ffilled_value=-1.0):
"""Simple checkpoints test that accepts a checkpoints dataframe and
the expected value for 2014-01-03 for macro datasets.
The underlying data has value -1.0 on 2014-01-01 and 1.0 on 2014-01-04.
Parameters
----------
checkpoints : pd.DataFrame
The checkpoints data.
        ffilled_value : float, optional
            The value expected to be read on the third. If not provided, it
            defaults to the value in the base data that is naturally
            forward-filled there.
"""
dates = self.test_checkpoints_dates[[1, -1]]
asof_dates = dates - pd.Timedelta(days=1)
timestamps = asof_dates + pd.Timedelta(hours=23)
baseline = pd.DataFrame({
'value': [-1.0, 1.0],
'asof_date': asof_dates,
'timestamp': timestamps,
})
nassets = len(simple_asset_info)
expected_views = keymap(lambda t: t.tz_localize('UTC'), {
self.test_checkpoints_expected_view_date: (
np.array([[ffilled_value]])
),
self.test_checkpoints_dates[-1]: np.array([[1.0]]),
})
with tmp_asset_finder(equities=simple_asset_info) as finder:
expected_output = pd.DataFrame(
list(concatv([ffilled_value] * nassets, [1.0] * nassets)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(simple_asset_info.index),
)),
columns=('value',),
)
self._run_pipeline(
bz.data(baseline, name='expr', dshape=self.macro_dshape),
None,
bz.data(
checkpoints,
name='expr_checkpoints',
dshape=self.macro_dshape,
),
expected_views,
expected_output,
finder,
calendar=pd.date_range('2014-01-01', '2014-01-04'),
start=pd.Timestamp('2014-01-03'),
end=dates[-1],
window_length=1,
compute_fn=op.itemgetter(-1),
)
@parameter_space(checkpoints_ts_fuzz_minutes=range(-5, 5))
def test_checkpoints_macro(self, checkpoints_ts_fuzz_minutes):
ffilled_value = 0.0
checkpoints_ts = (
self.test_checkpoints_expected_view_date -
pd.Timedelta(days=1)
)
checkpoints = pd.DataFrame({
'value': [ffilled_value],
'asof_date': checkpoints_ts,
'timestamp': (
checkpoints_ts +
# Fuzz the checkpoints timestamp a little so that it doesn't
# align with the data query time. This should not affect the
# correctness of the output.
pd.Timedelta(minutes=checkpoints_ts_fuzz_minutes)
),
})
self._test_checkpoints_macro(checkpoints, ffilled_value)
def test_empty_checkpoints_macro(self):
empty_checkpoints = pd.DataFrame({
'value': [],
'asof_date': [],
'timestamp': [],
})
self._test_checkpoints_macro(empty_checkpoints)
def test_checkpoints_out_of_bounds_macro(self):
# provide two checkpoints, one before the data in the base table
# and one after, these should not affect the value on the third
asof_dates = self.test_checkpoints_dates[[0, -1]]
out_of_bounds = pd.DataFrame({
'value': [-2, 2],
'asof_date': asof_dates,
'timestamp': asof_dates + pd.Timedelta(hours=23),
})
# Add a single checkpoint on the query day with a timestamp of exactly
# the data query time. This should not get pulled to overwrite the
# expected data on the 3rd.
exact_query_time = pd.DataFrame({
'value': [1],
'asof_date': [
self.test_checkpoints_expected_view_date -
pd.Timedelta(days=1)
],
'timestamp': [self.test_checkpoints_expected_view_date],
})
self._test_checkpoints_macro(
pd.concat([out_of_bounds, exact_query_time]),
)
def _test_checkpoints(self, checkpoints, ffilled_values=None):
"""Simple checkpoints test that accepts a checkpoints dataframe and
the expected value for 2014-01-03.
The underlying data has value -(sid + 1) on 2014-01-01 and sid + 1 on
2014-01-04.
Parameters
----------
checkpoints : pd.DataFrame
The checkpoints data.
        ffilled_values : iterable[float], optional
            The values expected to be read on the third, one per sid. If not
            provided, they default to the values in the base data that are
            naturally forward-filled there.
"""
nassets = len(simple_asset_info)
dates = self.test_checkpoints_dates[[1, -1]]
asof_dates = dates - pd.Timedelta(days=1)
asof_dates_repeated = np.tile(asof_dates, nassets)
timestamps = asof_dates + pd.Timedelta(hours=23)
timestamps_repeated = np.tile(timestamps, nassets)
values = simple_asset_info.index.values + 1
values = np.hstack((values[::-1], values))
baseline = pd.DataFrame({
'sid': np.tile(simple_asset_info.index, 2),
'value': values,
'asof_date': asof_dates_repeated,
'timestamp': timestamps_repeated,
})
if ffilled_values is None:
ffilled_values = baseline.value.iloc[:nassets]
updated_values = baseline.value.iloc[nassets:]
expected_views = keymap(partial(pd.Timestamp, tz='UTC'), {
self.test_checkpoints_expected_view_date: [ffilled_values],
self.test_checkpoints_dates[-1]: [updated_values],
})
with tmp_asset_finder(equities=simple_asset_info) as finder:
expected_output = pd.DataFrame(
list(concatv(ffilled_values, updated_values)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(simple_asset_info.index),
)),
columns=('value',),
)
self._run_pipeline(
bz.data(baseline, name='expr', dshape=self.value_dshape),
None,
bz.data(
checkpoints,
name='expr_checkpoints',
dshape=self.value_dshape,
),
expected_views,
expected_output,
finder,
calendar=pd.date_range('2014-01-01', '2014-01-04'),
start=pd.Timestamp('2014-01-03'),
end=dates[-1],
window_length=1,
compute_fn=op.itemgetter(-1),
)
@parameter_space(checkpoints_ts_fuzz_minutes=range(-5, 5))
def test_checkpoints(self, checkpoints_ts_fuzz_minutes):
nassets = len(simple_asset_info)
ffilled_values = (np.arange(nassets, dtype=np.float64) + 1) * 10
dates = pd.Index([pd.Timestamp('2014-01-01')] * nassets)
checkpoints = pd.DataFrame({
'sid': simple_asset_info.index,
'value': ffilled_values,
'asof_date': dates,
'timestamp': (
dates +
# Fuzz the checkpoints timestamp a little so that it doesn't
# align with the data query time. This should not affect the
# correctness of the output.
pd.Timedelta(days=1, minutes=checkpoints_ts_fuzz_minutes)
),
})
self._test_checkpoints(checkpoints, ffilled_values)
def test_empty_checkpoints(self):
checkpoints = pd.DataFrame({
'sid': [],
'value': [],
'asof_date': [],
'timestamp': [],
})
self._test_checkpoints(checkpoints)
def test_checkpoints_out_of_bounds(self):
nassets = len(simple_asset_info)
# provide two sets of checkpoints, one before the data in the base
# table and one after, these should not affect the value on the third
asof_dates = self.test_checkpoints_dates[[0, -1]]
asof_dates_repeated = np.tile(asof_dates, nassets)
ffilled_values = (np.arange(nassets) + 2) * 10
ffilled_values = np.hstack((ffilled_values[::-1], ffilled_values))
out_of_bounds = pd.DataFrame({
'sid': np.tile(simple_asset_info.index, 2),
'value': ffilled_values,
'asof_date': asof_dates_repeated,
'timestamp': asof_dates_repeated + pd.Timedelta(hours=23),
})
# Add a single checkpoint on the query day with a timestamp of exactly
# the data query time. This should not get pulled to overwrite the
# expected data on the 3rd.
exact_query_time = pd.DataFrame({
'sid': simple_asset_info.index,
'value': simple_asset_info.index + 1,
'asof_date': (
self.test_checkpoints_expected_view_date -
pd.Timedelta(days=1)
),
'timestamp': self.test_checkpoints_expected_view_date,
})
self._test_checkpoints(pd.concat([out_of_bounds, exact_query_time]))
def test_id_take_last_in_group_sorted(self):
"""
input
asof_date timestamp other value
2014-01-03 2014-01-04 00 3 3
2014-01-02 2014-01-04 00 2 2
output (expected):
other value
2014-01-02 NaN NaN
2014-01-03 NaN NaN
2014-01-06 3 3
"""
dates = pd.DatetimeIndex([
pd.Timestamp('2014-01-02'),
pd.Timestamp('2014-01-03'),
pd.Timestamp('2014-01-06'),
]).tz_localize('UTC')
T = pd.Timestamp
df = pd.DataFrame(
columns=['asof_date', 'timestamp', 'other', 'value'],
data=[
# asof-dates are flipped in terms of order so that if we
# don't sort on asof-date before getting the last in group,
# we will get the wrong result.
[T('2014-01-03'), T('2014-01-04 00'), 3, 3],
[T('2014-01-02'), T('2014-01-04 00'), 2, 2],
],
)
fields = OrderedDict(self.macro_dshape.measure.fields)
fields['other'] = fields['value']
expected = pd.DataFrame(
data=[[np.nan, np.nan], # 2014-01-02
[np.nan, np.nan], # 2014-01-03
[3, 3]], # 2014-01-06
columns=['other', 'value'],
index=dates,
)
self._test_id_macro(
df,
var * Record(fields),
expected,
self.asset_finder,
('other', 'value'),
dates=dates,
)
class MiscTestCase(ZiplineTestCase):
def test_exprdata_repr(self):
strd = set()
class BadRepr(object):
"""A class which cannot be repr'd.
"""
def __init__(self, name):
self._name = name
def __repr__(self): # pragma: no cover
raise AssertionError('ayy')
def __str__(self):
strd.add(self)
return self._name
assert_equal(
repr(ExprData(
expr=BadRepr('expr'),
deltas=BadRepr('deltas'),
checkpoints=BadRepr('checkpoints'),
odo_kwargs={'a': 'b'},
)),
"ExprData(expr=expr, deltas=deltas,"
" checkpoints=checkpoints, odo_kwargs={'a': 'b'})",
)
def test_exprdata_eq(self):
dshape = 'var * {sid: int64, asof_date: datetime, value: float64}'
base_expr = bz.symbol('base', dshape)
checkpoints_expr = bz.symbol('checkpoints', dshape)
# use a nested dict to emulate real call sites
odo_kwargs = {'a': {'c': 1, 'd': 2}, 'b': {'e': 3, 'f': 4}}
actual = ExprData(
expr=base_expr,
deltas=None,
checkpoints=checkpoints_expr,
odo_kwargs=odo_kwargs,
)
same = ExprData(
expr=base_expr,
deltas=None,
checkpoints=checkpoints_expr,
odo_kwargs=odo_kwargs,
)
self.assertEqual(actual, same)
self.assertEqual(hash(actual), hash(same))
different_obs = [
actual.replace(expr=bz.symbol('not base', dshape)),
            actual.replace(deltas=bz.symbol('not deltas', dshape)),
actual.replace(checkpoints=bz.symbol('not checkpoints', dshape)),
actual.replace(checkpoints=None),
actual.replace(odo_kwargs={
# invert the leaf values
ok: {ik: ~iv for ik, iv in ov.items()}
for ok, ov in odo_kwargs.items()
}),
]
for different in different_obs:
self.assertNotEqual(actual, different)
actual_with_none_odo_kwargs = actual.replace(odo_kwargs=None)
same_with_none_odo_kwargs = same.replace(odo_kwargs=None)
self.assertEqual(
actual_with_none_odo_kwargs,
same_with_none_odo_kwargs,
)
self.assertEqual(
hash(actual_with_none_odo_kwargs),
hash(same_with_none_odo_kwargs),
)
def test_blaze_loader_lookup_failure(self):
class D(DataSet):
c = Column(dtype='float64')
with self.assertRaises(KeyError) as e:
BlazeLoader()(D.c)
assert_equal(str(e.exception), 'D.c::float64')
| 36.33899 | 79 | 0.478402 |
7941784a19471bc45c490a8e6fa0789a2132837c | 1,904 | py | Python | geometry/point_group.py | Zylphrex/csc420-project | 71fbacb9dbf4331946d50176d97d65edabc208dc | [
"MIT"
] | null | null | null | geometry/point_group.py | Zylphrex/csc420-project | 71fbacb9dbf4331946d50176d97d65edabc208dc | [
"MIT"
] | 6 | 2020-03-31T09:35:50.000Z | 2022-03-12T00:04:13.000Z | geometry/point_group.py | Zylphrex/csc420-project | 71fbacb9dbf4331946d50176d97d65edabc208dc | [
"MIT"
] | null | null | null | import numpy as np
import geometry
class PointGroup(object):
    def __init__(self, threshold=geometry.deg_to_rad(15)):
        # Store the threshold so that derived groups (see __mul__) can reuse it.
        self.threshold = threshold
        self.points = []
self.cache = {}
self.top = None
self.bottom = None
self.left = None
self.right = None
def add(self, point):
self.points.append(point)
self.cache = {}
self.update_bounds(point)
@property
def xs(self):
if 'xs' not in self.cache:
self.cache['xs'] = [p.x for p in self.points]
return self.cache['xs']
@property
def ys(self):
if 'ys' not in self.cache:
self.cache['ys'] = [p.y for p in self.points]
return self.cache['ys']
def update_bounds(self, point):
if self.top is None or point.y < self.top.y:
self.top = point
if self.bottom is None or point.y > self.bottom.y:
self.bottom = point
if self.left is None or point.x < self.left.x:
self.left = point
if self.right is None or point.x > self.right.x:
self.right = point
@property
def start(self):
dx = self.right.x - self.left.x
dy = self.bottom.y - self.top.y
if dx > dy:
return self.left
else:
return self.top
@property
def stop(self):
dx = self.right.x - self.left.x
dy = self.bottom.y - self.top.y
if dx > dy:
return self.right
else:
return self.bottom
def __mul__(self, n):
group = PointGroup(threshold=self.threshold)
for p in self.points:
            group.add(p * n)
return group
def __rmul__(self, n):
return self.__mul__(n)
def __iter__(self):
return iter(self.points)
def __len__(self):
return len(self.points)
def __repr__(self):
return repr(self.points)
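# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example of how PointGroup might be exercised. It
# assumes only that points expose `x`/`y` attributes, so a namedtuple stands
# in for the package's real point type.
if __name__ == "__main__":
    from collections import namedtuple
    _Point = namedtuple("_Point", "x y")  # stand-in point type (assumption)
    group = PointGroup()
    for pt in [_Point(0, 0), _Point(3, 1), _Point(6, 2)]:
        group.add(pt)
    # start/stop report the extremes along the wider axis (x here).
    print(group.start, group.stop, len(group))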
| 23.506173 | 58 | 0.543067 |
794179859fd8aa89761fe80b82af930c5d8595e7 | 17,057 | py | Python | image/docker/schema2/list.py | jakedt/quay | 424c1a19d744be444ed27aa1718fd74af311d863 | [
"Apache-2.0"
] | 1 | 2020-10-16T19:30:41.000Z | 2020-10-16T19:30:41.000Z | image/docker/schema2/list.py | jakedt/quay | 424c1a19d744be444ed27aa1718fd74af311d863 | [
"Apache-2.0"
] | 15 | 2020-06-18T15:32:06.000Z | 2022-03-03T23:06:24.000Z | image/docker/schema2/list.py | jakedt/quay | 424c1a19d744be444ed27aa1718fd74af311d863 | [
"Apache-2.0"
] | null | null | null | import logging
import json
from cachetools.func import lru_cache
from jsonschema import validate as validate_schema, ValidationError
from digest import digest_tools
from image.docker import ManifestException
from image.docker.interfaces import ManifestInterface
from image.docker.schema1 import DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE
from image.docker.schema1 import DockerSchema1Manifest
from image.docker.schema2 import (
DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE,
DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE,
)
from image.docker.schema2.manifest import DockerSchema2Manifest
from util.bytes import Bytes
logger = logging.getLogger(__name__)
# Keys.
DOCKER_SCHEMA2_MANIFESTLIST_VERSION_KEY = "schemaVersion"
DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY = "mediaType"
DOCKER_SCHEMA2_MANIFESTLIST_SIZE_KEY = "size"
DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY = "digest"
DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY = "manifests"
DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY = "platform"
DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY = "architecture"
DOCKER_SCHEMA2_MANIFESTLIST_OS_KEY = "os"
DOCKER_SCHEMA2_MANIFESTLIST_OS_VERSION_KEY = "os.version"
DOCKER_SCHEMA2_MANIFESTLIST_OS_FEATURES_KEY = "os.features"
DOCKER_SCHEMA2_MANIFESTLIST_FEATURES_KEY = "features"
DOCKER_SCHEMA2_MANIFESTLIST_VARIANT_KEY = "variant"
class MalformedSchema2ManifestList(ManifestException):
"""
Raised when a manifest list fails an assertion that should be true according to the Docker
Manifest v2.2 Specification.
"""
pass
class MismatchManifestException(MalformedSchema2ManifestList):
"""
Raised when a manifest list contains a schema 1 manifest with a differing architecture from that
specified in the manifest list for the manifest.
"""
pass
class LazyManifestLoader(object):
def __init__(self, manifest_data, content_retriever):
self._manifest_data = manifest_data
self._content_retriever = content_retriever
self._loaded_manifest = None
@property
def manifest_obj(self):
if self._loaded_manifest is not None:
return self._loaded_manifest
self._loaded_manifest = self._load_manifest()
return self._loaded_manifest
def _load_manifest(self):
digest = self._manifest_data[DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY]
size = self._manifest_data[DOCKER_SCHEMA2_MANIFESTLIST_SIZE_KEY]
manifest_bytes = self._content_retriever.get_manifest_bytes_with_digest(digest)
if manifest_bytes is None:
raise MalformedSchema2ManifestList(
"Could not find child manifest with digest `%s`" % digest
)
if len(manifest_bytes) != size:
            raise MalformedSchema2ManifestList(
                "Size of manifest does not match that retrieved: %s vs %s"
                % (len(manifest_bytes), size)
            )
content_type = self._manifest_data[DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY]
if content_type == DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE:
return DockerSchema2Manifest(Bytes.for_string_or_unicode(manifest_bytes))
if content_type == DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE:
return DockerSchema1Manifest(
Bytes.for_string_or_unicode(manifest_bytes), validate=False
)
raise MalformedSchema2ManifestList("Unknown manifest content type")
class DockerSchema2ManifestList(ManifestInterface):
METASCHEMA = {
"type": "object",
"properties": {
DOCKER_SCHEMA2_MANIFESTLIST_VERSION_KEY: {
"type": "number",
"description": "The version of the manifest list. Must always be `2`.",
"minimum": 2,
"maximum": 2,
},
DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY: {
"type": "string",
"description": "The media type of the manifest list.",
"enum": [DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE],
},
DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY: {
"type": "array",
"description": "The manifests field contains a list of manifests for specific platforms",
"items": {
"type": "object",
"properties": {
DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY: {
"type": "string",
"description": "The MIME type of the referenced object. This will generally be "
+ "application/vnd.docker.distribution.manifest.v2+json, but it "
+ "could also be application/vnd.docker.distribution.manifest.v1+json "
+ "if the manifest list references a legacy schema-1 manifest.",
"enum": [
DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE,
DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE,
],
},
DOCKER_SCHEMA2_MANIFESTLIST_SIZE_KEY: {
"type": "number",
"description": "The size in bytes of the object. This field exists so that a "
+ "client will have an expected size for the content before "
+ "validating. If the length of the retrieved content does not "
+ "match the specified length, the content should not be trusted.",
},
DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY: {
"type": "string",
"description": "The content addressable digest of the manifest in the blob store",
},
DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY: {
"type": "object",
"description": "The platform object describes the platform which the image in "
+ "the manifest runs on",
"properties": {
DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY: {
"type": "string",
"description": "Specifies the CPU architecture, for example amd64 or ppc64le.",
},
DOCKER_SCHEMA2_MANIFESTLIST_OS_KEY: {
"type": "string",
"description": "Specifies the operating system, for example linux or windows",
},
DOCKER_SCHEMA2_MANIFESTLIST_OS_VERSION_KEY: {
"type": "string",
"description": "Specifies the operating system version, for example 10.0.10586",
},
DOCKER_SCHEMA2_MANIFESTLIST_OS_FEATURES_KEY: {
"type": "array",
"description": "specifies an array of strings, each listing a required OS "
+ "feature (for example on Windows win32k)",
"items": {"type": "string",},
},
DOCKER_SCHEMA2_MANIFESTLIST_VARIANT_KEY: {
"type": "string",
"description": "Specifies a variant of the CPU, for example armv6l to specify "
+ "a particular CPU variant of the ARM CPU",
},
DOCKER_SCHEMA2_MANIFESTLIST_FEATURES_KEY: {
"type": "array",
"description": "specifies an array of strings, each listing a required CPU "
+ "feature (for example sse4 or aes).",
"items": {"type": "string",},
},
},
"required": [
DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY,
DOCKER_SCHEMA2_MANIFESTLIST_OS_KEY,
],
},
},
"required": [
DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY,
DOCKER_SCHEMA2_MANIFESTLIST_SIZE_KEY,
DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY,
DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY,
],
},
},
},
"required": [
DOCKER_SCHEMA2_MANIFESTLIST_VERSION_KEY,
DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY,
DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY,
],
}
def __init__(self, manifest_bytes):
assert isinstance(manifest_bytes, Bytes)
self._layers = None
self._manifest_bytes = manifest_bytes
try:
self._parsed = json.loads(manifest_bytes.as_unicode())
except ValueError as ve:
raise MalformedSchema2ManifestList("malformed manifest data: %s" % ve)
try:
validate_schema(self._parsed, DockerSchema2ManifestList.METASCHEMA)
except ValidationError as ve:
raise MalformedSchema2ManifestList("manifest data does not match schema: %s" % ve)
@property
def is_manifest_list(self):
"""
Returns whether this manifest is a list.
"""
return True
@property
def schema_version(self):
return 2
@property
def digest(self):
"""
The digest of the manifest, including type prefix.
"""
return digest_tools.sha256_digest(self._manifest_bytes.as_encoded_str())
@property
def media_type(self):
"""
The media type of the schema.
"""
return self._parsed[DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY]
@property
def manifest_dict(self):
"""
Returns the manifest as a dictionary ready to be serialized to JSON.
"""
return self._parsed
@property
def bytes(self):
return self._manifest_bytes
def get_layers(self, content_retriever):
"""
Returns the layers of this manifest, from base to leaf or None if this kind of manifest does
not support layers.
"""
return None
@property
def blob_digests(self):
# Manifest lists have no blob digests, since everything is stored as a manifest.
return []
@property
def local_blob_digests(self):
return self.blob_digests
def get_blob_digests_for_translation(self):
return self.blob_digests
@property
def layers_compressed_size(self):
return None
@lru_cache(maxsize=1)
def manifests(self, content_retriever):
"""
Returns the manifests in the list.
"""
manifests = self._parsed[DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY]
return [LazyManifestLoader(m, content_retriever) for m in manifests]
def validate(self, content_retriever):
"""
Performs validation of required assertions about the manifest.
Raises a ManifestException on failure.
"""
for index, m in enumerate(self._parsed[DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY]):
if m[DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY] == DOCKER_SCHEMA1_MANIFEST_CONTENT_TYPE:
platform = m[DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY]
# Validate the architecture against the schema 1 architecture defined.
parsed = self.manifests(content_retriever)[index].manifest_obj
assert isinstance(parsed, DockerSchema1Manifest)
if (
parsed.architecture
and parsed.architecture
!= platform[DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY]
):
raise MismatchManifestException(
"Mismatch in arch for manifest `%s`" % parsed.digest
)
def child_manifests(self, content_retriever):
return self.manifests(content_retriever)
def child_manifest_digests(self):
return [
m[DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY]
for m in self._parsed[DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY]
]
def get_manifest_labels(self, content_retriever):
return None
def get_leaf_layer_v1_image_id(self, content_retriever):
return None
def get_legacy_image_ids(self, content_retriever):
return None
@property
def has_legacy_image(self):
return False
def get_requires_empty_layer_blob(self, content_retriever):
return False
def get_schema1_manifest(self, namespace_name, repo_name, tag_name, content_retriever):
"""
Returns the manifest that is compatible with V1, by virtue of being `amd64` and `linux`.
If none, returns None.
"""
legacy_manifest = self._get_legacy_manifest(content_retriever)
if legacy_manifest is None:
return None
return legacy_manifest.get_schema1_manifest(
namespace_name, repo_name, tag_name, content_retriever
)
def convert_manifest(
self, allowed_mediatypes, namespace_name, repo_name, tag_name, content_retriever
):
if self.media_type in allowed_mediatypes:
return self
legacy_manifest = self._get_legacy_manifest(content_retriever)
if legacy_manifest is None:
return None
return legacy_manifest.convert_manifest(
allowed_mediatypes, namespace_name, repo_name, tag_name, content_retriever
)
def _get_legacy_manifest(self, content_retriever):
"""
Returns the manifest under this list with architecture amd64 and os linux, if any, or None
if none or error.
"""
for manifest_ref in self.manifests(content_retriever):
platform = manifest_ref._manifest_data[DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY]
architecture = platform[DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY]
os = platform[DOCKER_SCHEMA2_MANIFESTLIST_OS_KEY]
if architecture != "amd64" or os != "linux":
continue
try:
return manifest_ref.manifest_obj
except (ManifestException, IOError):
logger.exception("Could not load child manifest")
return None
return None
def unsigned(self):
return self
def generate_legacy_layers(self, images_map, content_retriever):
return None
class DockerSchema2ManifestListBuilder(object):
"""
A convenient abstraction around creating new DockerSchema2ManifestList's.
"""
def __init__(self):
self.manifests = []
def add_manifest(self, manifest, architecture, os):
"""
Adds a manifest to the list.
"""
manifest = manifest.unsigned() # Make sure we add the unsigned version to the list.
self.add_manifest_digest(
manifest.digest,
len(manifest.bytes.as_encoded_str()),
manifest.media_type,
architecture,
os,
)
def add_manifest_digest(self, manifest_digest, manifest_size, media_type, architecture, os):
"""
Adds a manifest to the list.
"""
self.manifests.append(
(
manifest_digest,
manifest_size,
media_type,
{
DOCKER_SCHEMA2_MANIFESTLIST_ARCHITECTURE_KEY: architecture,
DOCKER_SCHEMA2_MANIFESTLIST_OS_KEY: os,
},
)
)
def build(self):
"""
Builds and returns the DockerSchema2ManifestList.
"""
assert self.manifests
manifest_list_dict = {
DOCKER_SCHEMA2_MANIFESTLIST_VERSION_KEY: 2,
DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY: DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE,
DOCKER_SCHEMA2_MANIFESTLIST_MANIFESTS_KEY: [
{
DOCKER_SCHEMA2_MANIFESTLIST_MEDIATYPE_KEY: manifest[2],
DOCKER_SCHEMA2_MANIFESTLIST_DIGEST_KEY: manifest[0],
DOCKER_SCHEMA2_MANIFESTLIST_SIZE_KEY: manifest[1],
DOCKER_SCHEMA2_MANIFESTLIST_PLATFORM_KEY: manifest[3],
}
for manifest in self.manifests
],
}
json_str = Bytes.for_string_or_unicode(json.dumps(manifest_list_dict, indent=3))
return DockerSchema2ManifestList(json_str)
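# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical illustration of how the builder assembles a
# schema 2 manifest list; the child digest and size below are made up.
if __name__ == "__main__":
    _builder = DockerSchema2ManifestListBuilder()
    _builder.add_manifest_digest(
        "sha256:" + "0" * 64,                  # hypothetical child manifest digest
        1024,                                  # hypothetical manifest size in bytes
        DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE,
        "amd64",
        "linux",
    )
    _manifest_list = _builder.build()
    print(_manifest_list.media_type, _manifest_list.child_manifest_digests())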
| 38.678005 | 116 | 0.594008 |
79417a06d672ff34d6c118e2301feec472c2c8f7 | 820 | py | Python | analog_tests/models.py | Oksamies/django-analog | 1b977efd6553e043f2f1ad399d810ab8c5b51dcb | [
"MIT"
] | 4 | 2016-06-06T06:11:53.000Z | 2017-09-07T01:50:01.000Z | analog_tests/models.py | Oksamies/django-analog | 1b977efd6553e043f2f1ad399d810ab8c5b51dcb | [
"MIT"
] | 17 | 2016-07-01T10:31:23.000Z | 2021-09-22T19:37:25.000Z | analog_tests/models.py | Oksamies/django-analog | 1b977efd6553e043f2f1ad399d810ab8c5b51dcb | [
"MIT"
] | 4 | 2016-11-04T22:38:04.000Z | 2019-12-10T14:11:14.000Z | from django.db import models
from analog.define import define_log_model
from analog.models import BaseLogEntry
class LoggedModel(models.Model):
pass
class PrivateLogEntry(BaseLogEntry):
private = models.BooleanField(default=False, db_index=True)
class Meta:
abstract = True
LoggedModelLogEntry = define_log_model(LoggedModel, base_class=PrivateLogEntry)
class FreeLogEntry(BaseLogEntry):
pass
class SecondLoggedModel(models.Model):
pass
SecondLoggedModelLogEntry = define_log_model(
SecondLoggedModel, allow_null_target=True)
class ExtraLogEntry(BaseLogEntry):
extra = models.CharField(null=True, blank=True, max_length=64)
class ThirdLoggedModel(models.Model):
pass
ThirdLoggedModelLogEntry = define_log_model(
ThirdLoggedModel, base_class=ExtraLogEntry)
| 19.069767 | 79 | 0.784146 |
79417a07df748ac4d3bc86f1fc4566c65c437d0d | 1,180 | py | Python | bancodedados_teste/exercicio-teste.py | r-luis/Py4e-Projects | 9f3398b231934e1a3982f23167535295402ebca1 | [
"MIT"
] | null | null | null | bancodedados_teste/exercicio-teste.py | r-luis/Py4e-Projects | 9f3398b231934e1a3982f23167535295402ebca1 | [
"MIT"
] | null | null | null | bancodedados_teste/exercicio-teste.py | r-luis/Py4e-Projects | 9f3398b231934e1a3982f23167535295402ebca1 | [
"MIT"
] | null | null | null | import sqlite3
conn = sqlite3.connect('emaildb.sqlite')
cur = conn.cursor()
cur.execute('''
DROP TABLE IF EXISTS Counts''')
cur.execute('''
CREATE TABLE Counts (email TEXT, count INTEGER)''')
fname = raw_input('Enter file name: ')
if len(fname) < 1 : fname = 'mbox-short.txt'
fh = open(fname)
for line in fh:
if not line.startswith('From: ') : continue
pieces = line.split()
email = pieces[1]
print email
cur.execute('SELECT count FROM Counts WHERE email = ? ', (email, ))
row = cur.fetchone()
if row is None:
cur.execute('''INSERT INTO Counts (email, count)
VALUES ( ?, 1 )''', ( email, ) )
else :
cur.execute('UPDATE Counts SET count=count+1 WHERE email = ?',
(email, ))
# This statement commits outstanding changes to disk each
# time through the loop - the program can be made faster
# by moving the commit so it runs only after the loop completes
conn.commit()
# https://www.sqlite.org/lang_select.html
sqlstr = 'SELECT email, count FROM Counts ORDER BY count DESC LIMIT 10'
print
print "Counts:"
for row in cur.execute(sqlstr) :
print str(row[0]), row[1]
cur.close()
| 28.095238 | 71 | 0.64322 |
79417c20a94ecf8dff1bebefb9dec9a056f27a81 | 1,078 | py | Python | graph4nlp/pytorch/modules/graph_construction/utils.py | cminusQAQ/graph4nlp | d980e897131f1b9d3766750c06316d94749904fa | [
"Apache-2.0"
] | 1,269 | 2021-06-06T03:27:41.000Z | 2022-03-30T06:33:53.000Z | graph4nlp/pytorch/modules/graph_construction/utils.py | cminusQAQ/graph4nlp | d980e897131f1b9d3766750c06316d94749904fa | [
"Apache-2.0"
] | 106 | 2021-06-07T05:24:01.000Z | 2022-03-31T19:18:48.000Z | graph4nlp/pytorch/modules/graph_construction/utils.py | cminusQAQ/graph4nlp | d980e897131f1b9d3766750c06316d94749904fa | [
"Apache-2.0"
] | 160 | 2021-06-06T15:09:17.000Z | 2022-03-23T02:06:33.000Z | import torch
CORENLP_TIMEOUT_SIGNATURE = "CoreNLP request timed out. Your document may be too long."
def convert_adj_to_graph(graph, adj, reverse_adj, mask_off_val):
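    """Rebuild the edges of ``graph`` from dense batched adjacency matrices.
    Entries of ``adj`` equal to ``mask_off_val`` are treated as "no edge"; every
    other (batch, src, tgt) entry becomes an edge, with node indices offset by
    the cumulative node counts of the preceding graphs in the batch. The kept
    values of ``adj`` and ``reverse_adj`` become the ``edge_weight`` and
    ``reverse_edge_weight`` edge features of the returned graph.
    """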
slides = (adj != mask_off_val).nonzero(as_tuple=False)
batch_nodes_tensor = torch.Tensor([0] + graph._batch_num_nodes).to(slides.device)
batch_prefix = batch_nodes_tensor.view(-1, 1).expand(-1, batch_nodes_tensor.shape[0])
batch_prefix = batch_prefix.triu().long().sum(0)
src = slides[:, 1] + batch_prefix.index_select(dim=0, index=slides[:, 0])
tgt = slides[:, 2] + batch_prefix.index_select(dim=0, index=slides[:, 0])
graph_data = graph
graph_data.remove_all_edges() # remove all existing edges
graph_data.add_edges(src.detach().cpu().numpy().tolist(), tgt.detach().cpu().numpy().tolist())
value = adj[slides[:, 0], slides[:, 1], slides[:, 2]]
reverse_value = reverse_adj[slides[:, 0], slides[:, 1], slides[:, 2]]
graph_data.edge_features["edge_weight"] = value
graph_data.edge_features["reverse_edge_weight"] = reverse_value
return graph_data
| 39.925926 | 98 | 0.702226 |
79417c5cdfe89ed2e5068d11c63139c802f63bff | 1,140 | py | Python | venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_trace_api.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 7 | 2022-01-16T12:28:16.000Z | 2022-03-04T15:31:45.000Z | venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_trace_api.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 8 | 2021-09-22T12:47:32.000Z | 2022-01-14T21:30:38.000Z | venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_trace_api.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 1 | 2022-03-28T09:19:34.000Z | 2022-03-28T09:19:34.000Z | def add_line_breakpoint(plugin, pydb, type, canonical_normalized_filename, line, condition, expression, func_name):
return None
def add_exception_breakpoint(plugin, pydb, type, exception):
return False
def remove_exception_breakpoint(plugin, pydb, type, exception):
return False
def remove_all_exception_breakpoints(plugin, pydb):
return False
def get_breakpoints(plugin, pydb):
return None
def can_skip(plugin, pydb, frame):
return True
def has_exception_breaks(plugin):
return False
def has_line_breaks(plugin):
return False
def cmd_step_into(plugin, pydb, frame, event, args, stop_info, stop):
return False
def cmd_step_over(plugin, pydb, frame, event, args, stop_info, stop):
return False
def stop(plugin, pydb, frame, event, args, stop_info, arg, step_cmd):
return False
def get_breakpoint(plugin, pydb, pydb_frame, frame, event, args):
return None
def suspend(plugin, pydb, thread, frame):
return None
def exception_break(plugin, pydb, pydb_frame, frame, args, arg):
return None
def change_variable(plugin, frame, attr, expression):
return False
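# The functions above appear to act as no-op defaults for pydevd's plugin
# tracing API; concrete plugin integrations are expected to supply real
# implementations with the same signatures.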
| 19.322034 | 115 | 0.739474 |
79417c8e025f5eb6893a3dfa21343aab012173ab | 360 | py | Python | helloworld/greet/urls.py | jimmcslim/example-django | 0c05f9e7d4968e79f912d44cdc0b5a1ada29eeb3 | [
"Apache-2.0"
] | 8 | 2021-08-01T10:29:21.000Z | 2022-02-21T18:28:48.000Z | helloworld/greet/urls.py | g-cassie/example-django | b5957fa847b5ba29e9becf601e65db02ec6f2712 | [
"Apache-2.0"
] | 13 | 2021-05-13T14:54:04.000Z | 2022-03-29T17:45:44.000Z | helloworld/greet/urls.py | g-cassie/example-django | b5957fa847b5ba29e9becf601e65db02ec6f2712 | [
"Apache-2.0"
] | 6 | 2021-05-13T06:53:41.000Z | 2022-03-29T07:15:03.000Z | # Copyright 2021 Pants project contributors.
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from django.urls import path, re_path
from helloworld.greet import views
urlpatterns = [
re_path(
r"^tod/(?P<time_of_day>\d\d:\d\d:\d\d)/$", views.for_time_of_day, name="tod"
),
path("<str:slug>/", views.index, name="index"),
]
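# For illustration (hypothetical requests): "tod/13:45:00/" resolves to
# views.for_time_of_day with time_of_day="13:45:00", while any other single
# path segment such as "world/" resolves to views.index with slug="world".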
| 25.714286 | 84 | 0.675 |
79417d1d3fd157c6d19255116d42f8480df87fdb | 2,485 | py | Python | sdk/python/pulumi_azure_native/devtestlab/list_lab_vhds.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/devtestlab/list_lab_vhds.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/devtestlab/list_lab_vhds.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'ListLabVhdsResult',
'AwaitableListLabVhdsResult',
'list_lab_vhds',
]
@pulumi.output_type
class ListLabVhdsResult:
"""
The response of a list operation.
"""
def __init__(__self__, next_link=None, value=None):
if next_link and not isinstance(next_link, str):
raise TypeError("Expected argument 'next_link' to be a str")
pulumi.set(__self__, "next_link", next_link)
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="nextLink")
def next_link(self) -> Optional[str]:
"""
Link for next set of results.
"""
return pulumi.get(self, "next_link")
@property
@pulumi.getter
def value(self) -> Optional[Sequence['outputs.LabVhdResponseResult']]:
"""
Results of the list operation.
"""
return pulumi.get(self, "value")
class AwaitableListLabVhdsResult(ListLabVhdsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListLabVhdsResult(
next_link=self.next_link,
value=self.value)
def list_lab_vhds(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListLabVhdsResult:
"""
The response of a list operation.
API Version: 2018-09-15.
:param str name: The name of the lab.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:devtestlab:listLabVhds', __args__, opts=opts, typ=ListLabVhdsResult).value
return AwaitableListLabVhdsResult(
next_link=__ret__.next_link,
value=__ret__.value)
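# --- Usage sketch (not part of the generated module) ---
# A hypothetical invocation; the lab and resource group names are made up and
# a configured Pulumi program/engine is assumed:
#
#   result = list_lab_vhds(name="my-lab", resource_group_name="my-rg")
#   for vhd in (result.value or []):
#       print(vhd)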
| 30.679012 | 124 | 0.657143 |
79417d683324217f3340465a0c627c473a011ce3 | 1,502 | py | Python | src/BearSki/case/TestSuitSet.py | Sirius1942/BearSki | bdc75d6f06946896e2128f1c095b9baf9863b124 | [
"MIT"
] | 13 | 2019-12-10T09:07:45.000Z | 2021-09-08T01:24:22.000Z | src/BearSki/case/TestSuitSet.py | Sirius1942/BearSki | bdc75d6f06946896e2128f1c095b9baf9863b124 | [
"MIT"
] | 1 | 2020-05-06T01:43:50.000Z | 2020-05-06T01:44:46.000Z | build/lib/BearSki/case/TestSuitSet.py | Sirius1942/BearSki | bdc75d6f06946896e2128f1c095b9baf9863b124 | [
"MIT"
] | 6 | 2020-01-07T07:07:42.000Z | 2021-06-04T03:38:19.000Z | from abc import ABCMeta, abstractmethod
import threading
import random
class TestSuit(metaclass=ABCMeta):
def __init__(self,caselist):
self.testcaselist = []
self.createSuit(caselist)
    @abstractmethod
    def createSuit(self, caselist):
pass
def getCaselist(self):
return self.testcaselist
def addCaselist(self, caselist):
self.testcaselist=caselist
@abstractmethod
def run(self):
pass
class SequentialTestRunner(TestSuit):
def createSuit(self,caselist):
self.addCaselist(caselist)
def run(self):
for testcase in self.getCaselist():
print(testcase.run().getResult())
class ParallelTestRunner(TestSuit):
def createSuit(self, caselist):
self.addCaselist(caselist)
def run(self):
for testcase in self.getCaselist():
self.TestSuitThread(testcase).start()
class TestSuitThread(threading.Thread):
def __init__(self, TestCase):
threading.Thread.__init__(self)
self.testcase = TestCase
def run(self):
result=self.testcase.run().getResult()
print("开启用例执行线程,结果: {0}".format(result))
return result
class RandomTestRunner(TestSuit):
def createSuit(self, caselist):
self.addCaselist(caselist)
def run(self):
        number = random.randint(0, len(self.getCaselist()) - 1)
print("radom number is :{0}".format(number))
print(self.getCaselist()[number].run().getResult()) | 30.04 | 59 | 0.649134 |
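# --- Usage sketch (not part of the original module) ---
# Assuming test case objects expose run() returning an object with
# getResult(), as the runners above expect:
#
#   runner = SequentialTestRunner([case_a, case_b])
#   runner.run()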
79417d94309fe519862c5903d0d2c58159ff95e0 | 168 | py | Python | comportamentais/strategy/impostos.py | jgabriellima/design-patterns-python | e955d570265c154863fbfc65564dd4781f549042 | [
"Apache-2.0"
] | 363 | 2018-07-30T18:52:55.000Z | 2022-03-29T23:04:26.000Z | comportamentais/strategy/impostos.py | sou-rafael/design-patterns-python | e955d570265c154863fbfc65564dd4781f549042 | [
"Apache-2.0"
] | 7 | 2018-07-14T20:19:23.000Z | 2020-04-17T00:24:30.000Z | comportamentais/strategy/impostos.py | sou-rafael/design-patterns-python | e955d570265c154863fbfc65564dd4781f549042 | [
"Apache-2.0"
] | 99 | 2018-09-06T18:11:43.000Z | 2022-03-27T13:32:45.000Z | class ISS:
def calcula(self, orcamento):
return orcamento.valor * 0.1
class ICMS:
def calcula(self, orcamento):
return orcamento.valor * 0.06
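# --- Usage sketch (not part of the original module) ---
# ISS and ICMS are interchangeable tax strategies: anything exposing a `valor`
# attribute can stand in for the project's Orcamento (budget) class, which is
# assumed here for illustration.
if __name__ == "__main__":
    class _Orcamento:  # hypothetical stand-in for the real budget class
        def __init__(self, valor):
            self.valor = valor
    orcamento = _Orcamento(500.0)
    for imposto in (ISS(), ICMS()):
        print(type(imposto).__name__, imposto.calcula(orcamento))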
| 18.666667 | 37 | 0.642857 |
7941804888d5be9dd1b2d110c75a0c5a7d1d86e4 | 2,975 | py | Python | redash/query_runner/sqlite.py | xu-bin-bin/redash | 551007d82a5c8a7eaa27ca4f4cf55cef1b140886 | [
"BSD-2-Clause"
] | 2 | 2019-01-24T09:17:57.000Z | 2019-06-18T11:10:51.000Z | redash/query_runner/sqlite.py | xu-bin-bin/redash | 551007d82a5c8a7eaa27ca4f4cf55cef1b140886 | [
"BSD-2-Clause"
] | 1 | 2018-09-28T06:12:48.000Z | 2018-09-28T06:12:48.000Z | redash/query_runner/sqlite.py | xu-bin-bin/redash | 551007d82a5c8a7eaa27ca4f4cf55cef1b140886 | [
"BSD-2-Clause"
] | 1 | 2018-10-29T01:11:50.000Z | 2018-10-29T01:11:50.000Z | import logging
import sqlite3
import sys
from six import reraise
from redash.query_runner import BaseSQLQueryRunner, register
from redash.utils import json_dumps, json_loads
from redash.query_runner.i18n_dataSource import zh
logger = logging.getLogger(__name__)
class Sqlite(BaseSQLQueryRunner):
noop_query = "pragma quick_check"
@classmethod
def configuration_schema(cls):
return {
"type": "object",
"properties": {
"dbpath": {
"type": "string",
"title": zh.get("Database Path", "Database Path")
}
},
"required": ["dbpath"],
}
@classmethod
def type(cls):
return "sqlite"
def __init__(self, configuration):
super(Sqlite, self).__init__(configuration)
self._dbpath = self.configuration['dbpath']
def _get_tables(self, schema):
query_table = "select tbl_name from sqlite_master where type='table'"
query_columns = "PRAGMA table_info(%s)"
results, error = self.run_query(query_table, None)
if error is not None:
raise Exception("Failed getting schema.")
results = json_loads(results)
for row in results['rows']:
table_name = row['tbl_name']
schema[table_name] = {'name': table_name, 'columns': []}
results_table, error = self.run_query(query_columns % (table_name,), None)
if error is not None:
raise Exception("Failed getting schema.")
results_table = json_loads(results_table)
for row_column in results_table['rows']:
schema[table_name]['columns'].append(row_column['name'])
return schema.values()
def run_query(self, query, user):
connection = sqlite3.connect(self._dbpath)
cursor = connection.cursor()
try:
cursor.execute(query)
if cursor.description is not None:
columns = self.fetch_columns([(i[0], None) for i in cursor.description])
rows = [dict(zip((c['name'] for c in columns), row)) for row in cursor]
data = {'columns': columns, 'rows': rows}
error = None
json_data = json_dumps(data)
else:
error = 'Query completed but it returned no data.'
json_data = None
except KeyboardInterrupt:
            connection.interrupt()  # sqlite3 connections expose interrupt(), not cancel()
error = "Query cancelled by user."
json_data = None
except Exception as e:
# handle unicode error message
err_class = sys.exc_info()[1].__class__
err_args = [arg.decode('utf-8') for arg in sys.exc_info()[1].args]
unicode_err = err_class(*err_args)
reraise(unicode_err, None, sys.exc_info()[2])
finally:
connection.close()
return json_data, error
register(Sqlite)
| 30.989583 | 88 | 0.581849 |
794180a2f24594ccaa627757d7e5d833364c0053 | 775 | py | Python | project/parsers/products_parser.py | Anton7177/product_optimizer | 1b1ceb32ac4881adda5f0902748b20159090fe0e | [
"MIT"
] | null | null | null | project/parsers/products_parser.py | Anton7177/product_optimizer | 1b1ceb32ac4881adda5f0902748b20159090fe0e | [
"MIT"
] | null | null | null | project/parsers/products_parser.py | Anton7177/product_optimizer | 1b1ceb32ac4881adda5f0902748b20159090fe0e | [
"MIT"
] | null | null | null | import logging
import httpx
from bs4 import BeautifulSoup
from project.parsers.link_list import get_links
from project.parsers.price import get_price
def run_parser():
"""Entrypoint."""
logging.info('Start parser')
link_list = get_links()
    # Open an HTTP session to request the pages
with httpx.Client(timeout=10.0) as client:
for url in link_list:
try:
res = client.get(url)
res.raise_for_status()
result = res.text
            except (httpx.HTTPError, ValueError) as e:
                print(f"Network error: {e} url: {url}")
result = False
if result is not False:
soup = BeautifulSoup(result, 'html.parser')
get_price(soup)
| 27.678571 | 59 | 0.593548 |
794180a4fc0c645d62290699668db35fd95365d4 | 800 | py | Python | pycnal_toolbox/pycnal_toolbox/plot_coast_line_from_mask.py | ESMG/PyCNAL_legacy | a4f6547bce872068a5bb5751231017bc3e4a4503 | [
"BSD-3-Clause"
] | null | null | null | pycnal_toolbox/pycnal_toolbox/plot_coast_line_from_mask.py | ESMG/PyCNAL_legacy | a4f6547bce872068a5bb5751231017bc3e4a4503 | [
"BSD-3-Clause"
] | 3 | 2018-01-23T23:23:24.000Z | 2018-02-07T22:37:28.000Z | pycnal_toolbox/pycnal_toolbox/plot_coast_line_from_mask.py | ESMG/PyCNAL_legacy | a4f6547bce872068a5bb5751231017bc3e4a4503 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.collections as collections
from pycnal_toolbox import get_coast_line_from_mask
def plot_coast_line_from_mask(msk, lon, lat, proj=None):
'''
    plot_coast_line_from_mask(msk, lon, lat, {proj})
plot the coastline from msk.
proj=map (optional) is a Basemap object for
projection.
'''
a = plt.gca()
coast = get_coast_line_from_mask(msk, lon, lat)
c = np.array(coast)
if proj is None:
col = collections.LineCollection(c)
else:
cp = np.zeros(c.shape)
for i in range(c.shape[0]):
cp[i,:,0], cp[i,:,1] = proj(c[i,:,0], c[i,:,1])
col = collections.LineCollection(cp)
a.add_collection(col, autolim=True)
col.set_color('k')
#a.autoscale_view()
| 22.857143 | 59 | 0.64375 |
794180c67f0da318b2e8541d23c18dedc8efe5b4 | 3,810 | py | Python | bokeh/server/views/autoload_js_handler.py | Suicoleiro/bokeh | a212acdf091a7a4df639fa9d443be6ade0018039 | [
"BSD-3-Clause"
] | 15,193 | 2015-01-01T05:11:45.000Z | 2022-03-31T19:30:20.000Z | bokeh/server/views/autoload_js_handler.py | Suicoleiro/bokeh | a212acdf091a7a4df639fa9d443be6ade0018039 | [
"BSD-3-Clause"
] | 9,554 | 2015-01-01T03:16:54.000Z | 2022-03-31T22:59:39.000Z | bokeh/server/views/autoload_js_handler.py | Suicoleiro/bokeh | a212acdf091a7a4df639fa9d443be6ade0018039 | [
"BSD-3-Clause"
] | 4,829 | 2015-01-02T03:35:32.000Z | 2022-03-30T16:40:26.000Z | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a request handler that returns a page displaying a document.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from urllib.parse import urlparse
# Bokeh imports
from bokeh.core.templates import AUTOLOAD_JS
from bokeh.embed.bundle import Script, bundle_for_objs_and_resources
from bokeh.embed.elements import script_for_render_items
from bokeh.embed.util import RenderItem
# Bokeh imports
from .session_handler import SessionHandler
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'AutoloadJsHandler',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class AutoloadJsHandler(SessionHandler):
''' Implements a custom Tornado handler for the autoload JS chunk
'''
def set_default_headers(self):
self.set_header("Access-Control-Allow-Headers", "*")
self.set_header('Access-Control-Allow-Methods', 'PUT, GET, OPTIONS')
self.set_header("Access-Control-Allow-Origin", "*")
async def get(self, *args, **kwargs):
session = await self.get_session()
element_id = self.get_argument("bokeh-autoload-element", default=None)
if not element_id:
self.send_error(status_code=400, reason='No bokeh-autoload-element query parameter')
return
app_path = self.get_argument("bokeh-app-path", default="/")
absolute_url = self.get_argument("bokeh-absolute-url", default=None)
if absolute_url:
server_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(absolute_url))
else:
server_url = None
resources_param = self.get_argument("resources", "default")
resources = self.application.resources(server_url) if resources_param != "none" else None
bundle = bundle_for_objs_and_resources(None, resources)
render_items = [RenderItem(token=session.token, elementid=element_id, use_for_title=False)]
bundle.add(Script(script_for_render_items({}, render_items, app_path=app_path, absolute_url=absolute_url)))
js = AUTOLOAD_JS.render(bundle=bundle, elementid=element_id)
self.set_header("Content-Type", 'application/javascript')
self.write(js)
async def options(self, *args, **kwargs):
'''Browsers make OPTIONS requests under the hood before a GET request'''
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 38.484848 | 115 | 0.475066 |
79418125cc1b144ebfa7854cdc9f46f90f1acb76 | 614 | py | Python | la/__init__.py | fhal/la | 5f537345ec7fd627fa4bf16cab3a3f4d5a72800c | [
"BSD-2-Clause"
] | 1 | 2015-01-30T19:49:19.000Z | 2015-01-30T19:49:19.000Z | la/__init__.py | fhal/la | 5f537345ec7fd627fa4bf16cab3a3f4d5a72800c | [
"BSD-2-Clause"
] | null | null | null | la/__init__.py | fhal/la | 5f537345ec7fd627fa4bf16cab3a3f4d5a72800c | [
"BSD-2-Clause"
] | null | null | null | "la init"
# Classes
from la.deflarry import larry
try:
from la.io import IO
from la.io import *
except:
# Cannot import h5py; no archiving available.
pass
from numpy import nan, inf
from la.flarry import *
from la.util.report import info
from la.version import __version__
from la.util import testing
try:
from numpy.testing import Tester
test = Tester().test
del Tester
except (ImportError, ValueError):
print "No la unit testing available."
try:
# Namespace cleaning
del deflarry, flabel, func, io, missing, testing, util, version
except:
pass
| 19.1875 | 67 | 0.688925 |
79418143b47bdfc5e09720589bd70a80bbc19bec | 738 | py | Python | Source/Services/RPSLS.PythonPlayer.Api/app/pick/strategies.py | ivan-b-ivanov/RockPaperScissorsLizardSpock | 9167bcbe5ad2937e834408475c2ec66cf92fef84 | [
"MIT"
] | null | null | null | Source/Services/RPSLS.PythonPlayer.Api/app/pick/strategies.py | ivan-b-ivanov/RockPaperScissorsLizardSpock | 9167bcbe5ad2937e834408475c2ec66cf92fef84 | [
"MIT"
] | null | null | null | Source/Services/RPSLS.PythonPlayer.Api/app/pick/strategies.py | ivan-b-ivanov/RockPaperScissorsLizardSpock | 9167bcbe5ad2937e834408475c2ec66cf92fef84 | [
"MIT"
] | null | null | null | import random
from flask import jsonify
from .rpsls import RPSLS
# Fixed pick Game Strategy
def fixed_strategy(pick_value):
pick_RPSLS=pick_value
def pick():
return pick_RPSLS
return pick
# Random pick Game Strategy
def random_strategy():
def pick():
pick_RPSLS = random.choice(list(RPSLS))
return pick_RPSLS
return pick
# Iterative pick Game Strategy
def iterative_generator(value):
while True:
yield value
value += 1
value = value % len(RPSLS)
def iterative_strategy():
pick_generator = iterative_generator(0)
def pick():
pick_RPSLS = RPSLS(next(pick_generator))
return pick_RPSLS
return pick
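# --- Usage sketch (not part of the original module) ---
# Each factory above returns a zero-argument pick() callable, e.g.:
#
#   pick = random_strategy()
#   choice = pick()  # one of the RPSLS members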
| 21.705882 | 49 | 0.643631 |
794183283b831395d2457adf4f1490a740b5a308 | 1,035 | py | Python | src/cache/commands/static_fetch.py | alvarosg/covid-19-open-data | b252b0c78ac8712618fa53785441c7428230e759 | [
"Apache-2.0"
] | null | null | null | src/cache/commands/static_fetch.py | alvarosg/covid-19-open-data | b252b0c78ac8712618fa53785441c7428230e759 | [
"Apache-2.0"
] | 1 | 2022-03-02T14:54:27.000Z | 2022-03-02T14:54:27.000Z | src/cache/commands/static_fetch.py | alvarosg/covid-19-open-data | b252b0c78ac8712618fa53785441c7428230e759 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script downloads a resource into the specified output file.
"""
from argparse import ArgumentParser
import requests
# Parse arguments
parser = ArgumentParser()
parser.add_argument("--url", type=str, required=True)
parser.add_argument("--output", type=str, required=True)
args = parser.parse_args()
# Download the resource into the output file
with open(args.output, "wb") as fd:
fd.write(requests.get(args.url).content)
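# Example invocation (hypothetical URL and output path):
#   python static_fetch.py --url https://example.com/data.csv --output /tmp/data.csv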
| 31.363636 | 74 | 0.755556 |
7941832950c5d1798bd8e43eb9d8fdcb007e56b7 | 2,146 | py | Python | schedule/main/department/department_table.py | DSD-ESDC-EDSC/dynamic-org-chart-scripts | a2247ac85ddadc5b33256a501be16bf787b395b6 | [
"MIT"
] | null | null | null | schedule/main/department/department_table.py | DSD-ESDC-EDSC/dynamic-org-chart-scripts | a2247ac85ddadc5b33256a501be16bf787b395b6 | [
"MIT"
] | null | null | null | schedule/main/department/department_table.py | DSD-ESDC-EDSC/dynamic-org-chart-scripts | a2247ac85ddadc5b33256a501be16bf787b395b6 | [
"MIT"
] | null | null | null | import json
from sqlalchemy import create_engine
from sqlalchemy.types import Integer, Text
from schedule.config import SQLAlchemyConfig
from schedule.main.utils.db_utils import assemble_sqlalchemy_url
def create_department_table(df, org_chart_en, org_chart_fr):
'''
Creates the department table in the database.
'''
# Create database connection
engine = create_engine(assemble_sqlalchemy_url(SQLAlchemyConfig))
# Keep unique departments as rows
dept_df = df[["dept_id", "department_en", "department_fr"]].drop_duplicates()
# Create column to hold serialized org chart
dept_df["org_chart_en"] = dept_df["department_en"].apply(
lambda x: get_department_org_chart(x, org_chart_en))
dept_df["org_chart_fr"] = dept_df["department_fr"].apply(
lambda x: get_department_org_chart(x, org_chart_fr))
# Now write department's dataframe to another table in our database. In the
# current use case, we only need to access each department's org chart from
# the root.
dept_df[["dept_id", "department_en", "department_fr", "org_chart_en", "org_chart_fr"]].to_sql(
"departments",
engine,
if_exists="replace",
index=False,
dtype={
"dept_id": Integer,
"department_en": Text,
"department_fr": Text,
"org_chart_en": Text,
"org_chart_fr": Text,
})
return dept_df
def get_department_org_chart(department, org_chart):
'''
    Gets the org chart associated with a department name.
Args:
department: a string containing the department name.
org_chart: a python dict containing the org chart.
Returns:
dept_org_chart: the org chart for the department being searched.
'''
# Find the department matching the search; from context we know department will be
# unique and at the first level of the tree.
dept = [dept for dept in org_chart
if dept.get("name") == department]
if dept:
dept = dept[0]
# Return serialized json
return json.dumps(dept)
return json.dumps(org_chart)
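# --- Usage sketch (not part of the original module) ---
# With a hypothetical org chart, the helper returns the matching department's
# entry serialized as JSON, or the whole chart when nothing matches:
#
#   chart = [{"name": "Dept A"}, {"name": "Dept B"}]
#   get_department_org_chart("Dept A", chart)  # -> '{"name": "Dept A"}'
#   get_department_org_chart("Dept X", chart)  # -> json.dumps(chart)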
| 37 | 98 | 0.67521 |
7941836c1c549aa4f0b4f833094066dafa7249f6 | 445 | py | Python | pyxel/examples/01_hello_pyxel.py | jamad/pyxel | c14534f5fcdcb53d583b00dacc0d1531b2c022e2 | [
"MIT"
] | null | null | null | pyxel/examples/01_hello_pyxel.py | jamad/pyxel | c14534f5fcdcb53d583b00dacc0d1531b2c022e2 | [
"MIT"
] | null | null | null | pyxel/examples/01_hello_pyxel.py | jamad/pyxel | c14534f5fcdcb53d583b00dacc0d1531b2c022e2 | [
"MIT"
] | null | null | null | import pyxel as P
from pyxel import init,image,run,cls,text,blt,btnp
class App:
def __init__(self):
init(160, 120, caption="Hello Pyxel",scale=1)
image(0).load(0, 0, "assets/pyxel_logo_38x16.png")
run(self.update, self.draw)
def update(self):
if btnp(P.KEY_Q):quit()
def draw(self):
cls(0)
text(55, 41, "Hello, Pyxel!", P.frame_count % 16)
blt(61, 66, 0, 0, 0, 38, 16)
App()
| 26.176471 | 58 | 0.588764 |
7941839a9da80a7ef98caa0456465c3a9b8e91b8 | 485 | py | Python | clipper-parm/util/get_volume_for_instance_id.py | mukkachaitanya/parity-models | 9f336a67798934d29592aca471dff6ad047473f6 | [
"Apache-2.0"
] | 32 | 2019-09-11T16:49:58.000Z | 2022-01-26T15:40:40.000Z | clipper-parm/util/get_volume_for_instance_id.py | mukkachaitanya/parity-models | 9f336a67798934d29592aca471dff6ad047473f6 | [
"Apache-2.0"
] | 5 | 2019-11-10T16:13:40.000Z | 2022-01-13T01:31:51.000Z | clipper-parm/util/get_volume_for_instance_id.py | mukkachaitanya/parity-models | 9f336a67798934d29592aca471dff6ad047473f6 | [
"Apache-2.0"
] | 9 | 2019-09-03T14:05:26.000Z | 2021-12-22T07:17:27.000Z | import argparse
import boto3
import pdb
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("instance_id", type=str, help="ID of instance to query")
args = parser.parse_args()
client = boto3.client('ec2')
volumes = client.describe_volumes(Filters=[{
"Name": "attachment.instance-id",
"Values": [args.instance_id]
}])
volume_ids = [v["VolumeId"] for v in volumes["Volumes"]]
print(volume_ids)
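# Example invocation (hypothetical instance ID; AWS credentials and a default
# region must be configured for boto3):
#   python get_volume_for_instance_id.py i-0123456789abcdef0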
| 26.944444 | 80 | 0.647423 |
794183efe89dd288ceed0279046dd9325a5cdd4f | 686 | py | Python | oelint_adv/rule_base/rule_var_insaneskip.py | gstroz/oelint-adv | 089b43492df0b2ca78e17df26c215e5e19ed90cc | [
"BSD-2-Clause"
] | null | null | null | oelint_adv/rule_base/rule_var_insaneskip.py | gstroz/oelint-adv | 089b43492df0b2ca78e17df26c215e5e19ed90cc | [
"BSD-2-Clause"
] | null | null | null | oelint_adv/rule_base/rule_var_insaneskip.py | gstroz/oelint-adv | 089b43492df0b2ca78e17df26c215e5e19ed90cc | [
"BSD-2-Clause"
] | null | null | null | from oelint_adv.cls_item import Variable
from oelint_adv.cls_rule import Rule
class VarInsaneSkip(Rule):
def __init__(self):
super().__init__(id="oelint.vars.insaneskip",
severity="error",
message="INSANE_SKIP should be avoided at any cost")
def check(self, _file, stash):
res = []
items = stash.GetItemsFor(filename=_file, classifier=Variable.CLASSIFIER,
attribute=Variable.ATTR_VAR)
for i in [x for x in items if x.VarName.startswith("INSANE_SKIP_") or x.VarName == "INSANE_SKIP"]:
res += self.finding(i.Origin, i.InFileLine)
return res
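# Illustrative trigger for this rule (hedged: a made-up recipe fragment, not
# part of oelint-adv itself). A line such as
#
#   INSANE_SKIP_${PN} += "already-stripped"
#
# in a parsed recipe yields a variable whose name starts with "INSANE_SKIP_",
# so check() above reports it as a finding with severity "error".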
| 38.111111 | 106 | 0.610787 |
7941848d0646dbdfb0a010e2071ce148fad381dd | 6,514 | py | Python | tests/cfngin/hooks/test_utils.py | animaxcg/runway | dfe38107126b61a7f297874f235f68a045a5ca09 | ["Apache-2.0"] | null | null | null | tests/cfngin/hooks/test_utils.py | animaxcg/runway | dfe38107126b61a7f297874f235f68a045a5ca09 | ["Apache-2.0"] | null | null | null | tests/cfngin/hooks/test_utils.py | animaxcg/runway | dfe38107126b61a7f297874f235f68a045a5ca09 | ["Apache-2.0"] | null | null | null |
"""Tests for runway.cfngin.hooks.utils."""
# pylint: disable=unused-argument
import queue
import unittest
from runway.cfngin.config import Hook
from runway.cfngin.hooks.utils import handle_hooks
from ..factories import mock_context, mock_provider
HOOK_QUEUE = queue.Queue()
class TestHooks(unittest.TestCase):
"""Tests for runway.cfngin.hooks.utils."""
def setUp(self):
"""Run before tests."""
self.context = mock_context(namespace="namespace")
self.provider = mock_provider(region="us-east-1")
def test_empty_hook_stage(self):
"""Test empty hook stage."""
hooks = []
handle_hooks("fake", hooks, self.provider, self.context)
self.assertTrue(HOOK_QUEUE.empty())
def test_missing_required_hook(self):
"""Test missing required hook."""
hooks = [Hook({"path": "not.a.real.path", "required": True})]
with self.assertRaises(ImportError):
handle_hooks("missing", hooks, self.provider, self.context)
def test_missing_required_hook_method(self):
"""Test missing required hook method."""
hooks = [{"path": "runway.cfngin.hooks.blah", "required": True}]
with self.assertRaises(AttributeError):
handle_hooks("missing", hooks, self.provider, self.context)
def test_missing_non_required_hook_method(self):
"""Test missing non required hook method."""
hooks = [Hook({"path": "runway.cfngin.hooks.blah", "required": False})]
handle_hooks("missing", hooks, self.provider, self.context)
self.assertTrue(HOOK_QUEUE.empty())
def test_default_required_hook(self):
"""Test default required hook."""
hooks = [Hook({"path": "runway.cfngin.hooks.blah"})]
with self.assertRaises(AttributeError):
handle_hooks("missing", hooks, self.provider, self.context)
def test_valid_hook(self):
"""Test valid hook."""
hooks = [
Hook({"path": "tests.cfngin.hooks.test_utils.mock_hook",
"required": True})]
handle_hooks("missing", hooks, self.provider, self.context)
good = HOOK_QUEUE.get_nowait()
self.assertEqual(good["provider"].region, "us-east-1")
with self.assertRaises(queue.Empty):
HOOK_QUEUE.get_nowait()
def test_valid_enabled_hook(self):
"""Test valid enabled hook."""
hooks = [
Hook({"path": "tests.cfngin.hooks.test_utils.mock_hook",
"required": True, "enabled": True})]
handle_hooks("missing", hooks, self.provider, self.context)
good = HOOK_QUEUE.get_nowait()
self.assertEqual(good["provider"].region, "us-east-1")
with self.assertRaises(queue.Empty):
HOOK_QUEUE.get_nowait()
def test_valid_enabled_false_hook(self):
"""Test valid enabled false hook."""
hooks = [
Hook({"path": "tests.cfngin.hooks.test_utils.mock_hook",
"required": True, "enabled": False})]
handle_hooks("missing", hooks, self.provider, self.context)
self.assertTrue(HOOK_QUEUE.empty())
def test_context_provided_to_hook(self):
"""Test context provided to hook."""
hooks = [
Hook({"path": "tests.cfngin.hooks.test_utils.context_hook",
"required": True})]
handle_hooks("missing", hooks, "us-east-1", self.context)
def test_hook_failure(self):
"""Test hook failure."""
hooks = [
Hook({"path": "tests.cfngin.hooks.test_utils.fail_hook",
"required": True})]
with self.assertRaises(SystemExit):
handle_hooks("fail", hooks, self.provider, self.context)
hooks = [{"path": "tests.cfngin.hooks.test_utils.exception_hook",
"required": True}]
with self.assertRaises(Exception):
handle_hooks("fail", hooks, self.provider, self.context)
hooks = [
Hook({"path": "tests.cfngin.hooks.test_utils.exception_hook",
"required": False})]
# Should pass
handle_hooks("ignore_exception", hooks, self.provider, self.context)
def test_return_data_hook(self):
"""Test return data hook."""
hooks = [
Hook({
"path": "tests.cfngin.hooks.test_utils.result_hook",
"data_key": "my_hook_results"
}),
# Shouldn't return data
Hook({
"path": "tests.cfngin.hooks.test_utils.context_hook"
})
]
handle_hooks("result", hooks, "us-east-1", self.context)
self.assertEqual(
self.context.hook_data["my_hook_results"]["foo"],
"bar"
)
# Verify only the first hook resulted in stored data
self.assertEqual(
list(self.context.hook_data.keys()), ["my_hook_results"]
)
def test_return_data_hook_duplicate_key(self):
"""Test return data hook duplicate key."""
hooks = [
Hook({
"path": "tests.cfngin.hooks.test_utils.result_hook",
"data_key": "my_hook_results"
}),
Hook({
"path": "tests.cfngin.hooks.test_utils.result_hook",
"data_key": "my_hook_results"
})
]
with self.assertRaises(KeyError):
handle_hooks("result", hooks, "us-east-1", self.context)
def test_resolve_lookups_in_args(self):
"""Test the resolution of lookups in hook args."""
hooks = [Hook({
"path": "tests.cfngin.hooks.test_utils.kwargs_hook",
"data_key": "my_hook_results",
"args": {
"default_lookup": "${default env_var::default_value}"
}
})]
handle_hooks("lookups", hooks, "us-east-1", self.context)
self.assertEqual(
self.context.hook_data["my_hook_results"]["default_lookup"],
"default_value"
)
def mock_hook(*args, **kwargs):
"""Mock hook."""
HOOK_QUEUE.put(kwargs)
return True
def fail_hook(*args, **kwargs):
"""Fail hook."""
return None
def exception_hook(*args, **kwargs):
"""Exception hook."""
raise Exception
def context_hook(*args, **kwargs):
"""Context hook."""
return "context" in kwargs
def result_hook(*args, **kwargs):
"""Results hook."""
return {"foo": "bar"}
def kwargs_hook(*args, **kwargs):
"""Kwargs hook."""
return kwargs
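# A minimal hook sketch distilled from the tests above (hedged: illustrative
# only and not referenced by any test; real hooks are addressed via "path").
def example_hook(*args, **kwargs):
    """Return a dict so handle_hooks stores it under the hook's data_key."""
    provider = kwargs.get("provider")
    return {"region": getattr(provider, "region", None)}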
| 34.104712 | 79 | 0.593951 |
7941855687e4130840a00aee4b150f6a4014d59d | 8,378 | py | Python | idcmanager_sdk/model/next_builder/storyboard_node_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | ["Apache-2.0"] | 5 | 2019-07-31T04:11:05.000Z | 2021-01-07T03:23:20.000Z | idcmanager_sdk/model/next_builder/storyboard_node_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | ["Apache-2.0"] | null | null | null | idcmanager_sdk/model/next_builder/storyboard_node_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: storyboard_node.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from idcmanager_sdk.model.next_builder import storyboard_brick_pb2 as idcmanager__sdk_dot_model_dot_next__builder_dot_storyboard__brick__pb2
from idcmanager_sdk.model.next_builder import storyboard_route_pb2 as idcmanager__sdk_dot_model_dot_next__builder_dot_storyboard__route__pb2
from idcmanager_sdk.model.next_builder import micro_app_project_pb2 as idcmanager__sdk_dot_model_dot_next__builder_dot_micro__app__project__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='storyboard_node.proto',
package='next_builder',
syntax='proto3',
serialized_options=_b('ZFgo.easyops.local/contracts/protorepo-models/easyops/model/next_builder'),
serialized_pb=_b('\n\x15storyboard_node.proto\x12\x0cnext_builder\x1a\x38idcmanager_sdk/model/next_builder/storyboard_brick.proto\x1a\x38idcmanager_sdk/model/next_builder/storyboard_route.proto\x1a\x39idcmanager_sdk/model/next_builder/micro_app_project.proto\"\xe8\x02\n\x0eStoryboardNode\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\r\n\x05\x61lias\x18\x02 \x01(\t\x12\r\n\x05\x61ppId\x18\x03 \x01(\t\x12\n\n\x02id\x18\x04 \x01(\t\x12\x12\n\nmountPoint\x18\x05 \x01(\t\x12\x0c\n\x04sort\x18\x06 \x01(\x05\x12\x0c\n\x04type\x18\x07 \x01(\t\x12,\n\x05\x62rick\x18\x08 \x01(\x0b\x32\x1d.next_builder.StoryboardBrick\x12,\n\x05route\x18\t \x01(\x0b\x32\x1d.next_builder.StoryboardRoute\x12.\n\x07project\x18\n \x01(\x0b\x32\x1d.next_builder.MicroAppProject\x12,\n\x06parent\x18\x0b \x01(\x0b\x32\x1c.next_builder.StoryboardNode\x12.\n\x08\x63hildren\x18\x0c \x03(\x0b\x32\x1c.next_builder.StoryboardNodeBHZFgo.easyops.local/contracts/protorepo-models/easyops/model/next_builderb\x06proto3')
,
dependencies=[idcmanager__sdk_dot_model_dot_next__builder_dot_storyboard__brick__pb2.DESCRIPTOR,idcmanager__sdk_dot_model_dot_next__builder_dot_storyboard__route__pb2.DESCRIPTOR,idcmanager__sdk_dot_model_dot_next__builder_dot_micro__app__project__pb2.DESCRIPTOR,])
_STORYBOARDNODE = _descriptor.Descriptor(
name='StoryboardNode',
full_name='next_builder.StoryboardNode',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='next_builder.StoryboardNode.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='alias', full_name='next_builder.StoryboardNode.alias', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='appId', full_name='next_builder.StoryboardNode.appId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='next_builder.StoryboardNode.id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mountPoint', full_name='next_builder.StoryboardNode.mountPoint', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sort', full_name='next_builder.StoryboardNode.sort', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='next_builder.StoryboardNode.type', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='brick', full_name='next_builder.StoryboardNode.brick', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='route', full_name='next_builder.StoryboardNode.route', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='project', full_name='next_builder.StoryboardNode.project', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parent', full_name='next_builder.StoryboardNode.parent', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='children', full_name='next_builder.StoryboardNode.children', index=11,
number=12, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=215,
serialized_end=575,
)
_STORYBOARDNODE.fields_by_name['brick'].message_type = idcmanager__sdk_dot_model_dot_next__builder_dot_storyboard__brick__pb2._STORYBOARDBRICK
_STORYBOARDNODE.fields_by_name['route'].message_type = idcmanager__sdk_dot_model_dot_next__builder_dot_storyboard__route__pb2._STORYBOARDROUTE
_STORYBOARDNODE.fields_by_name['project'].message_type = idcmanager__sdk_dot_model_dot_next__builder_dot_micro__app__project__pb2._MICROAPPPROJECT
_STORYBOARDNODE.fields_by_name['parent'].message_type = _STORYBOARDNODE
_STORYBOARDNODE.fields_by_name['children'].message_type = _STORYBOARDNODE
DESCRIPTOR.message_types_by_name['StoryboardNode'] = _STORYBOARDNODE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
StoryboardNode = _reflection.GeneratedProtocolMessageType('StoryboardNode', (_message.Message,), {
'DESCRIPTOR' : _STORYBOARDNODE,
'__module__' : 'storyboard_node_pb2'
# @@protoc_insertion_point(class_scope:next_builder.StoryboardNode)
})
_sym_db.RegisterMessage(StoryboardNode)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
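# A small usage sketch for the generated StoryboardNode message (hedged: the
# field values are placeholders; field names and types come from the
# descriptor above):
#   node = StoryboardNode(appId="demo-app", id="home", type="brick", sort=1)
#   node.children.add(id="home-child", type="route")
#   payload = node.SerializeToString()
#   restored = StoryboardNode.FromString(payload)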
| 53.025316 | 986 | 0.775364 |